diff --git a/.gitattributes b/.gitattributes index 2b4f3d472632..b223c8ac5fb8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -4,7 +4,8 @@ *.cpp rust *.h rust *.rs rust +*.fixed linguist-language=Rust src/etc/installer/gfx/* binary *.woff binary src/vendor/** -text -Cargo.lock -merge +Cargo.lock -merge linguist-generated=false diff --git a/.gitignore b/.gitignore index 57407a2399a2..9ffaa82e1c8b 100644 --- a/.gitignore +++ b/.gitignore @@ -74,26 +74,30 @@ __pycache__/ /obj/ /rt/ /rustllvm/ -/src/libstd_unicode/DerivedCoreProperties.txt -/src/libstd_unicode/DerivedNormalizationProps.txt -/src/libstd_unicode/PropList.txt -/src/libstd_unicode/ReadMe.txt -/src/libstd_unicode/Scripts.txt -/src/libstd_unicode/SpecialCasing.txt -/src/libstd_unicode/UnicodeData.txt +/src/libcore/unicode/DerivedCoreProperties.txt +/src/libcore/unicode/DerivedNormalizationProps.txt +/src/libcore/unicode/PropList.txt +/src/libcore/unicode/ReadMe.txt +/src/libcore/unicode/Scripts.txt +/src/libcore/unicode/SpecialCasing.txt +/src/libcore/unicode/UnicodeData.txt /stage[0-9]+/ /target +target/ /test/ /tmp/ +tags +tags.* TAGS -TAGS.emacs -TAGS.vi +TAGS.* \#* \#*\# config.mk config.stamp keywords.md lexer.ml +mir_dump +Session.vim src/etc/dl tmp.*.rs version.md diff --git a/.gitmodules b/.gitmodules index ffa7b321ba6b..1631daac76c2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -2,10 +2,6 @@ path = src/llvm url = https://github.com/rust-lang/llvm.git branch = master -[submodule "src/rt/hoedown"] - path = src/rt/hoedown - url = https://github.com/rust-lang/hoedown.git - branch = rust-2015-09-21-do-not-delete [submodule "src/jemalloc"] path = src/jemalloc url = https://github.com/rust-lang/jemalloc.git @@ -45,9 +41,26 @@ [submodule "src/dlmalloc"] path = src/dlmalloc url = https://github.com/alexcrichton/dlmalloc-rs.git -[submodule "src/binaryen"] - path = src/binaryen - url = https://github.com/alexcrichton/binaryen.git [submodule "src/doc/rust-by-example"] path = src/doc/rust-by-example url = 
https://github.com/rust-lang/rust-by-example +[submodule "src/llvm-emscripten"] + path = src/llvm-emscripten + url = https://github.com/rust-lang/llvm +[submodule "src/stdsimd"] + path = src/stdsimd + url = https://github.com/rust-lang-nursery/stdsimd +[submodule "src/tools/lld"] + path = src/tools/lld + url = https://github.com/rust-lang/lld.git +[submodule "src/libbacktrace"] + path = src/libbacktrace + url = https://github.com/rust-lang-nursery/libbacktrace +[submodule "src/tools/lldb"] + path = src/tools/lldb + url = https://github.com/rust-lang-nursery/lldb/ + branch = rust-release-70 +[submodule "src/tools/clang"] + path = src/tools/clang + url = https://github.com/rust-lang-nursery/clang/ + branch = release_70 diff --git a/.mailmap b/.mailmap index c2d3b28602bc..8f4287a43858 100644 --- a/.mailmap +++ b/.mailmap @@ -41,15 +41,17 @@ Boris Egorov Brandon Sanderson Brandon Sanderson Brett Cannon Brett Cannon Brian Anderson +Brian Anderson Brian Dawn Brian Leibig Brian Leibig Carl-Anton Ingmarsson -Carol (Nichols || Goulding) -Carol (Nichols || Goulding) +Carol (Nichols || Goulding) +Carol (Nichols || Goulding) Carol Nichols Carol Willing Chris C Cerami Chris C Cerami Chris Pressey Chris Thorn Chris Thorn +Chris Vittal Christopher Vittal Clark Gaebel Clinton Ryan Corey Richardson Elaine "See More" Nemo @@ -94,9 +96,9 @@ Herman J. Radtke III Herman J. Radtke III Ivan Ivaschenko J. J. Weber -Jakub Bukaj -Jakub Bukaj -Jakub Bukaj Jakub Bukaj +Jakub Adam Wieczorek +Jakub Adam Wieczorek +Jakub Adam Wieczorek James Deng James Miller James Perry diff --git a/.travis.yml b/.travis.yml index 6e242b74894c..b1701e4a6545 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,6 +3,10 @@ sudo: required dist: trusty services: - docker +addons: + apt: + packages: + - gdb git: depth: 2 @@ -12,7 +16,7 @@ matrix: fast_finish: true include: # Images used in testing PR and try-build should be run first. 
- - env: IMAGE=x86_64-gnu-llvm-3.9 RUST_BACKTRACE=1 + - env: IMAGE=x86_64-gnu-llvm-5.0 RUST_BACKTRACE=1 if: type = pull_request OR branch = auto - env: IMAGE=dist-x86_64-linux DEPLOY=1 @@ -21,27 +25,29 @@ matrix: # "alternate" deployments, these are "nightlies" but have LLVM assertions # turned on, they're deployed to a different location primarily for # additional testing. - - env: IMAGE=dist-x86_64-linux DEPLOY_ALT=1 + - env: IMAGE=dist-x86_64-linux DEPLOY_ALT=1 CI_JOB_NAME=dist-x86_64-linux-alt if: branch = try OR branch = auto - env: > RUST_CHECK_TARGET=dist - RUST_CONFIGURE_ARGS="--enable-extended --enable-profiler" + RUST_CONFIGURE_ARGS="--enable-extended --enable-profiler --enable-lldb" SRC=. DEPLOY_ALT=1 RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - SCCACHE_ERROR_LOG=/tmp/sccache.log MACOSX_DEPLOYMENT_TARGET=10.7 NO_LLVM_ASSERTIONS=1 NO_DEBUG_ASSERTIONS=1 + CI_JOB_NAME=dist-x86_64-apple-alt os: osx - osx_image: xcode7.3 + osx_image: xcode9.3-moar if: branch = auto # macOS builders. These are placed near the beginning because they are very # slow to run. # OSX builders running tests, these run the full test suite. + # NO_DEBUG_ASSERTIONS=1 to make them go faster, but also do have some + # runners that run `//ignore-debug` tests. # # Note that the compiler is compiled to target 10.8 here because the Xcode # version that we're using, 8.2, cannot compile LLVM for OSX 10.7. @@ -50,13 +56,13 @@ matrix: RUST_CONFIGURE_ARGS="--build=x86_64-apple-darwin --enable-sanitizers --enable-profiler" SRC=. RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - SCCACHE_ERROR_LOG=/tmp/sccache.log MACOSX_DEPLOYMENT_TARGET=10.8 MACOSX_STD_DEPLOYMENT_TARGET=10.7 NO_LLVM_ASSERTIONS=1 NO_DEBUG_ASSERTIONS=1 + CI_JOB_NAME=x86_64-apple os: osx - osx_image: xcode8.3 + osx_image: xcode9.3-moar if: branch = auto - env: > @@ -64,13 +70,13 @@ matrix: RUST_CONFIGURE_ARGS=--build=i686-apple-darwin SRC=. 
RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - SCCACHE_ERROR_LOG=/tmp/sccache.log MACOSX_DEPLOYMENT_TARGET=10.8 MACOSX_STD_DEPLOYMENT_TARGET=10.7 NO_LLVM_ASSERTIONS=1 NO_DEBUG_ASSERTIONS=1 + CI_JOB_NAME=i686-apple os: osx - osx_image: xcode8.3 + osx_image: xcode9.3-moar if: branch = auto # OSX builders producing releases. These do not run the full test suite and @@ -81,30 +87,30 @@ matrix: # OSX 10.7 and `xcode7` is the latest Xcode able to compile LLVM for 10.7. - env: > RUST_CHECK_TARGET=dist - RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-extended --enable-profiler" + RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-full-tools --enable-profiler --enable-lldb" SRC=. DEPLOY=1 RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - SCCACHE_ERROR_LOG=/tmp/sccache.log MACOSX_DEPLOYMENT_TARGET=10.7 NO_LLVM_ASSERTIONS=1 NO_DEBUG_ASSERTIONS=1 + CI_JOB_NAME=dist-i686-apple os: osx - osx_image: xcode7.3 + osx_image: xcode9.3-moar if: branch = auto - env: > RUST_CHECK_TARGET=dist - RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-extended --enable-sanitizers --enable-profiler" + RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-full-tools --enable-sanitizers --enable-profiler --enable-lldb" SRC=. 
DEPLOY=1 RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - SCCACHE_ERROR_LOG=/tmp/sccache.log MACOSX_DEPLOYMENT_TARGET=10.7 NO_LLVM_ASSERTIONS=1 NO_DEBUG_ASSERTIONS=1 + CI_JOB_NAME=dist-x86_64-apple os: osx - osx_image: xcode7.3 + osx_image: xcode9.3-moar if: branch = auto # Linux builders, remaining docker images @@ -169,26 +175,25 @@ matrix: - env: IMAGE=x86_64-gnu-aux if: branch = auto - env: IMAGE=x86_64-gnu-tools - if: branch = auto + if: branch = auto OR (type = pull_request AND commit_message =~ /(?i:^update.*\b(rls|rustfmt|clippy|miri)\b)/) - env: IMAGE=x86_64-gnu-debug if: branch = auto - env: IMAGE=x86_64-gnu-nopt if: branch = auto - env: IMAGE=x86_64-gnu-distcheck if: branch = auto - - env: IMAGE=x86_64-gnu-incremental - if: branch = auto + - env: IMAGE=mingw-check + if: type = pull_request OR branch = auto - stage: publish toolstate if: branch = master AND type = push before_install: [] install: [] - cache: false sudo: false script: MESSAGE_FILE=$(mktemp -t msg.XXXXXX); . src/ci/docker/x86_64-gnu-tools/repo.sh; - commit_toolstate_change "$MESSAGE_FILE" "$TRAVIS_BUILD_DIR/src/tools/publish_toolstate.py" "$(git rev-parse HEAD)" "$(git log --format=%s -n1 HEAD)" "$MESSAGE_FILE" + commit_toolstate_change "$MESSAGE_FILE" "$TRAVIS_BUILD_DIR/src/tools/publish_toolstate.py" "$(git rev-parse HEAD)" "$(git log --format=%s -n1 HEAD)" "$MESSAGE_FILE" "$TOOLSTATE_REPO_ACCESS_TOKEN"; env: global: @@ -198,10 +203,15 @@ env: # AWS_SECRET_ACCESS_KEY=... - secure: "j96XxTVOSUf4s4r4htIxn/fvIa5DWbMgLqWl7r8z2QfgUwscmkMXAwXuFNc7s7bGTpV/+CgDiMFFM6BAFLGKutytIF6oA02s9b+usQYnM0th7YQ2AIgm9GtMTJCJp4AoyfFmh8F2faUICBZlfVLUJ34udHEe35vOklix+0k4WDo=" # TOOLSTATE_REPO_ACCESS_TOKEN=... 
- - secure: "cFh8thThqEJLC98XKI5pfqflUzOlxsYPRW20AWRaYOOgYHPTiGWypTXiPbGSKaeAXTZoOA+DpQtEmefc0U6lt9dHc7a/MIaK6isFurjlnKYiLOeTruzyu1z7PWCeZ/jKXsU2RK/88DBtlNwfMdaMIeuKj14IVfpepPPL71ETbuk=" + - secure: "ESfcXqv4N2VMhqi2iIyw6da9VrsA78I4iR1asouCaq4hzTTrkB4WNRrfURy6xg72gQ4nMhtRJbB0/2jmc9Cu1+g2CzXtyiL223aJ5CKrXdcvbitopQSDfp07dMWm+UED+hNFEanpErKAeU/6FM3A+J+60PMk8MCF1h9tqNRISJw=" before_install: - - zcat $HOME/docker/rust-ci.tar.gz | docker load || true + # We'll use the AWS cli to download/upload cached docker layers, so install + # that here. + - if [ "$TRAVIS_OS_NAME" = linux ]; then + pip install --user awscli; + export PATH=$PATH:$HOME/.local/bin; + fi - mkdir -p $HOME/rustsrc # FIXME(#46924): these two commands are required to enable IPv6, # they shouldn't exist, please revert once more official solutions appeared. @@ -221,12 +231,17 @@ install: osx) if [[ "$RUST_CHECK_TARGET" == dist ]]; then travis_retry brew update && - travis_retry brew install xz; + travis_retry brew install xz && + travis_retry brew install swig; fi && - travis_retry curl -fo /usr/local/bin/sccache https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-12-sccache-x86_64-apple-darwin && + travis_retry curl -fo /usr/local/bin/sccache https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2018-04-02-sccache-x86_64-apple-darwin && chmod +x /usr/local/bin/sccache && travis_retry curl -fo /usr/local/bin/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-apple-darwin && - chmod +x /usr/local/bin/stamp + chmod +x /usr/local/bin/stamp && + travis_retry curl -f http://releases.llvm.org/6.0.0/clang+llvm-6.0.0-x86_64-apple-darwin.tar.xz | tar xJf - && + export CC=`pwd`/clang+llvm-6.0.0-x86_64-apple-darwin/bin/clang && + export CXX=`pwd`/clang+llvm-6.0.0-x86_64-apple-darwin/bin/clang++ && + export AR=ar ;; esac @@ -241,6 +256,8 @@ before_script: export RUN_SCRIPT="$RUN_SCRIPT && src/ci/run.sh"; else export RUN_SCRIPT="$RUN_SCRIPT && 
src/ci/docker/run.sh $IMAGE"; + # Enable core dump on Linux. + sudo sh -c 'echo "/checkout/obj/cores/core.%p.%E" > /proc/sys/kernel/core_pattern'; fi # Log time information from this machine and an external machine for insight into possible @@ -264,44 +281,46 @@ after_failure: df -h; du . | sort -nr | head -n100 - # One of these is the linux sccache log, one is the OSX sccache log. Instead - # of worrying about what system we are just cat both. One of these commands - # will fail but that's ok, they'll both get executed. - - cat obj/tmp/sccache.log - - cat /tmp/sccache.log - # Random attempt at debugging currently. Just poking around in here to see if # anything shows up. + + # Dump backtrace for macOS - ls -lat $HOME/Library/Logs/DiagnosticReports/ - find $HOME/Library/Logs/DiagnosticReports -type f + -name '*.crash' -not -name '*.stage2-*.crash' -not -name 'com.apple.CoreSimulator.CoreSimulatorService-*.crash' -exec printf travis_fold":start:crashlog\n\033[31;1m%s\033[0m\n" {} \; -exec head -750 {} \; - -exec echo travis_fold":"end:crashlog \; + -exec echo travis_fold":"end:crashlog \; || true + + # Dump backtrace for Linux + - ln -s . checkout && + for CORE in obj/cores/core.*; do + EXE=$(echo $CORE | sed 's|obj/cores/core\.[0-9]*\.!checkout!\(.*\)|\1|;y|!|/|'); + if [ -f "$EXE" ]; then + printf travis_fold":start:crashlog\n\033[31;1m%s\033[0m\n" "$CORE"; + gdb -q -c "$CORE" "$EXE" + -iex 'set auto-load off' + -iex 'dir src/' + -iex 'set sysroot .' 
+ -ex bt + -ex q; + echo travis_fold":"end:crashlog; + fi; + done || true + + # see #50887 + - cat ./obj/build/x86_64-unknown-linux-gnu/native/asan/build/lib/asan/clang_rt.asan-dynamic-i386.vers || true # attempt to debug anything killed by the oom killer on linux, just to see if # it happened - dmesg | grep -i kill -# Save tagged docker images we created and load them if they're available -# Travis saves caches whether the build failed or not, nuke rustsrc if -# the failure was while updating it (as it may be in a bad state) -# https://github.com/travis-ci/travis-ci/issues/4472 -before_cache: - - docker history -q rust-ci | - grep -v missing | - xargs docker save | - gzip > $HOME/docker/rust-ci.tar.gz - notifications: email: false -cache: - directories: - - $HOME/docker - before_deploy: - mkdir -p deploy/$TRAVIS_COMMIT - > @@ -312,7 +331,6 @@ before_deploy: rm -rf obj/build/dist/doc && cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT; fi - - travis_retry gem update --system - ls -la deploy/$TRAVIS_COMMIT deploy: diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index e9b39717c700..d70b2b52aca1 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -11,9 +11,9 @@ A version of this document [can be found online](https://www.rust-lang.org/condu * Please be kind and courteous. There's no need to be mean or rude. * Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. -* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term "harassment" as including the definition in the Citizen Code of Conduct; if you have any lack of clarity about what might be included in that concept, please read their definition. 
In particular, we don't tolerate behavior that excludes people in socially marginalized groups. +* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the Citizen Code of Conduct; if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups. * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Rust moderation team][mod_team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. -* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome. +* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome. ## Moderation diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 54c507304f9f..ea9f2c194300 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,10 +26,10 @@ As a reminder, all contributors are expected to follow our [Code of Conduct][coc ## Feature Requests [feature-requests]: #feature-requests -To request a change to the way that the Rust language works, please open an -issue in the [RFCs repository](https://github.com/rust-lang/rfcs/issues/new) -rather than this one. New features and other significant language changes -must go through the RFC process. +To request a change to the way the Rust language works, please head over +to the [RFCs repository](https://github.com/rust-lang/rfcs) and view the +[README](https://github.com/rust-lang/rfcs/blob/master/README.md) +for instructions. 
## Bug Reports [bug-reports]: #bug-reports @@ -47,6 +47,12 @@ as it's possible that someone else has already reported your error. This doesn't always work, and sometimes it's hard to know what to search for, so consider this extra credit. We won't mind if you accidentally file a duplicate report. +Similarly, to help others who encountered the bug find your issue, +consider filing an issue with a descriptive title, which contains information that might be unique to it. +This can be the language or compiler feature used, the conditions that trigger the bug, +or part of the error message if there is any. +An example could be: **"impossible case reached" on lifetime inference for impl Trait in return position**. + Opening an issue is as easy as following [this link](https://github.com/rust-lang/rust/issues/new) and filling out the fields. Here's a template that you can use to file a bug, though it's not necessary to @@ -121,6 +127,7 @@ configuration used in the build process. Some options to note: #### `[rust]`: - `debuginfo = true` - Build a compiler with debuginfo. Makes building rustc slower, but then you can use a debugger to debug `rustc`. - `debuginfo-lines = true` - An alternative to `debuginfo = true` that doesn't let you use a debugger, but doesn't make building rustc slower and still gives you line numbers in backtraces. +- `debuginfo-tools = true` - Build the extended tools with debuginfo. - `debug-assertions = true` - Makes the log output of `debug!` work. - `optimize = false` - Disable optimizations to speed up compilation of stage1 rust, but makes the stage1 compiler x100 slower. @@ -135,6 +142,8 @@ file. If you still have a `config.mk` file in your directory - from ### Building [building]: #building +A default configuration requires around 3.5 GB of disk space, whereas building a debug configuration may require more than 30 GB. 
+ Dependencies - [build dependencies](README.md#building-from-source) - `gdb` 6.2.0 minimum, 7.1 or later recommended for test builds @@ -301,12 +310,12 @@ It's absolutely fine to have multiple build directories with different [pull-requests]: #pull-requests Pull requests are the primary mechanism we use to change Rust. GitHub itself -has some [great documentation][pull-requests] on using the Pull Request feature. +has some [great documentation][about-pull-requests] on using the Pull Request feature. We use the "fork and pull" model [described here][development-models], where contributors push changes to their personal fork and create pull requests to bring those changes into the source repository. -[pull-requests]: https://help.github.com/articles/about-pull-requests/ +[about-pull-requests]: https://help.github.com/articles/about-pull-requests/ [development-models]: https://help.github.com/articles/about-collaborative-development-models/ Please make pull requests against the `master` branch. @@ -594,7 +603,7 @@ If you're looking for somewhere to start, check out the [E-easy][eeasy] tag. 
[inom]: https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AI-nominated [eeasy]: https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-easy [lru]: https://github.com/rust-lang/rust/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-asc -[rfcbot]: https://github.com/dikaiosune/rust-dashboard/blob/master/RFCBOT.md +[rfcbot]: https://github.com/anp/rfcbot-rs/ ## Out-of-tree Contributions [out-of-tree-contributions]: #out-of-tree-contributions @@ -623,6 +632,7 @@ For people new to Rust, and just starting to contribute, or even for more seasoned developers, some useful places to look for information are: +* The [rustc guide] contains information about how various parts of the compiler work * [Rust Forge][rustforge] contains additional documentation, including write-ups of how to achieve common tasks * The [Rust Internals forum][rif], a place to ask questions and discuss Rust's internals @@ -635,6 +645,7 @@ are: * **Google!** ([search only in Rust Documentation][gsearchdocs] to find types, traits, etc. quickly) * Don't be afraid to ask! The Rust community is friendly and helpful. +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/about-this-guide.html [gdfrustc]: http://manishearth.github.io/rust-internals-docs/rustc/ [gsearchdocs]: https://www.google.com/search?q=site:doc.rust-lang.org+your+query+here [rif]: http://internals.rust-lang.org diff --git a/COPYRIGHT b/COPYRIGHT index f8b637d204ac..e2d0ed77224e 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -10,8 +10,8 @@ Copyrights in the Rust project are retained by their contributors. No copyright assignment is required to contribute to the Rust project. Some files include explicit copyright notices and/or license notices. -For full authorship information, see AUTHORS.txt and the version control -history. 
+For full authorship information, see the version control history or +https://thanks.rust-lang.org Except as otherwise noted (below and/or in individual files), Rust is licensed under the Apache License, Version 2.0 or @@ -192,28 +192,6 @@ their own copyright notices and license terms: USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* Hoedown, the markdown parser, under src/rt/hoedown, is - licensed as follows. - - Copyright (c) 2008, Natacha Porté - Copyright (c) 2011, Vicent Martí - Copyright (c) 2013, Devin Torres and the Hoedown authors - - Permission to use, copy, modify, and distribute this - software for any purpose with or without fee is hereby - granted, provided that the above copyright notice and - this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR - DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE - INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND - FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR - ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA - OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * libbacktrace, under src/libbacktrace: Copyright (C) 2012-2014 Free Software Foundation, Inc. diff --git a/README.md b/README.md index 589aa1afe35e..a2acfe8b478e 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ Read ["Installation"] from [The Book]. 3. Build and install: ```sh + $ git submodule update --init --recursive --progress $ ./x.py build && sudo ./x.py install ``` @@ -119,7 +120,7 @@ shell with: > python x.py build ``` -Currently building Rust only works with some known versions of Visual Studio. If +Currently, building Rust only works with some known versions of Visual Studio. 
If you have a more recent version installed the build system doesn't understand then you may need to force rustbuild to use an older version. This can be done by manually calling the appropriate vcvars file before running the bootstrap. @@ -129,14 +130,11 @@ CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\vcvars64. python x.py build ``` -If you are seeing build failure when compiling `rustc_binaryen`, make sure the path -length of the rust folder is not longer than 22 characters. - #### Specifying an ABI [specifying-an-abi]: #specifying-an-abi Each specific ABI can also be used from either environment (for example, using -the GNU ABI in powershell) by using an explicit build triple. The available +the GNU ABI in PowerShell) by using an explicit build triple. The available Windows build triples are: - GNU ABI (using GCC) - `i686-pc-windows-gnu` @@ -182,7 +180,7 @@ the ABI used. I.e., if the ABI was `x86_64-pc-windows-msvc`, the directory will [notes]: #notes Since the Rust compiler is written in Rust, it must be built by a -precompiled "snapshot" version of itself (made in an earlier state of +precompiled "snapshot" version of itself (made in an earlier stage of development). As such, source builds require a connection to the Internet, to fetch snapshots, and an OS that can execute the available snapshot binaries. @@ -227,9 +225,16 @@ variety of channels on Mozilla's IRC network, irc.mozilla.org. The most popular channel is [#rust], a venue for general discussion about Rust. And a good place to ask for help would be [#rust-beginners]. +The [rustc guide] might be a good place to start if you want to find out how +various parts of the compiler work. + +Also, you may find the [rustdocs for the compiler itself][rustdocs] useful. 
+ [IRC]: https://en.wikipedia.org/wiki/Internet_Relay_Chat [#rust]: irc://irc.mozilla.org/rust [#rust-beginners]: irc://irc.mozilla.org/rust-beginners +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/about-this-guide.html +[rustdocs]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ ## License [license]: #license diff --git a/RELEASES.md b/RELEASES.md index 45c389d72afc..1f7ffb53d3ca 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,3 +1,814 @@ +Version 1.28.0 (2018-08-02) +=========================== + +Language +-------- +- [The `#[repr(transparent)]` attribute is now stable.][51562] This attribute + allows a Rust newtype wrapper (`struct NewType(T);`) to be represented as + the inner type across Foreign Function Interface (FFI) boundaries. +- [The keywords `pure`, `sizeof`, `alignof`, and `offsetof` have been unreserved + and can now be used as identifiers.][51196] +- [The `GlobalAlloc` trait and `#[global_allocator]` attribute are now + stable.][51241] This will allow users to specify a global allocator for + their program. +- [Unit test functions marked with the `#[test]` attribute can now return + `Result<(), E: Debug>` in addition to `()`.][51298] +- [The `lifetime` specifier for `macro_rules!` is now stable.][50385] This + allows macros to easily target lifetimes. + +Compiler +-------- +- [The `s` and `z` optimisation levels are now stable.][50265] These optimisations + prioritise making smaller binary sizes. `z` is the same as `s` with the + exception that it does not vectorise loops, which typically results in an even + smaller binary. +- [The short error format is now stable.][49546] Specified with + `--error-format=short` this option will provide a more compressed output of + rust error messages. +- [Added a lint warning when you have duplicated `macro_export`s.][50143] +- [Reduced the number of allocations in the macro parser.][50855] This can + improve compile times of macro heavy crates on average by 5%. 
+ +Libraries +--------- +- [Implemented `Default` for `&mut str`.][51306] +- [Implemented `From` for all integer and unsigned number types.][50554] +- [Implemented `Extend` for `()`.][50234] +- [The `Debug` implementation of `time::Duration` should now be more easily +  human readable.][50364] Previously a `Duration` of one second would be printed as +  `Duration { secs: 1, nanos: 0 }` and will now be printed as `1s`. +- [Implemented `From<&String>` for `Cow`, `From<&Vec>` for `Cow<[T]>`, +  `From>` for `CString`, `From, From, From<&CString>` +  for `Cow`, `From, From, From<&OsString>` for +  `Cow`, `From<&PathBuf>` for `Cow`, and `From>` +  for `PathBuf`.][50170] +- [Implemented `Shl` and `Shr` for `Wrapping` +  and `Wrapping`.][50465] +- [`DirEntry::metadata` now uses `fstatat` instead of `lstat` when +  possible.][51050] This can provide up to a 40% speed increase. +- [Improved error messages when using `format!`.][50610] + +Stabilized APIs +--------------- +- [`Iterator::step_by`] +- [`Path::ancestors`] +- [`SystemTime::UNIX_EPOCH`] +- [`alloc::GlobalAlloc`] +- [`alloc::Layout`] +- [`alloc::LayoutErr`] +- [`alloc::System`] +- [`alloc::alloc`] +- [`alloc::alloc_zeroed`] +- [`alloc::dealloc`] +- [`alloc::realloc`] +- [`alloc::handle_alloc_error`] +- [`btree_map::Entry::or_default`] +- [`fmt::Alignment`] +- [`hash_map::Entry::or_default`] +- [`iter::repeat_with`] +- [`num::NonZeroUsize`] +- [`num::NonZeroU128`] +- [`num::NonZeroU16`] +- [`num::NonZeroU32`] +- [`num::NonZeroU64`] +- [`num::NonZeroU8`] +- [`ops::RangeBounds`] +- [`slice::SliceIndex`] +- [`slice::from_mut`] +- [`slice::from_ref`] +- [`{Any + Send + Sync}::downcast_mut`] +- [`{Any + Send + Sync}::downcast_ref`] +- [`{Any + Send + Sync}::is`] + +Cargo +----- +- [Cargo will now no longer allow you to publish crates with build scripts that +  modify the `src` directory.][cargo/5584] The `src` directory in a crate should be +  considered to be immutable. 
+ +Misc +---- +- [The `suggestion_applicability` field in `rustc`'s json output is now + stable.][50486] This will allow dev tools to check whether a code suggestion + would apply to them. + +Compatibility Notes +------------------- +- [Rust will consider trait objects with duplicated constraints to be the same + type as without the duplicated constraint.][51276] For example the below code will + now fail to compile. + ```rust + trait Trait {} + + impl Trait + Send { + fn test(&self) { println!("one"); } //~ ERROR duplicate definitions with name `test` + } + + impl Trait + Send + Send { + fn test(&self) { println!("two"); } + } + ``` + +[49546]: https://github.com/rust-lang/rust/pull/49546/ +[50143]: https://github.com/rust-lang/rust/pull/50143/ +[50170]: https://github.com/rust-lang/rust/pull/50170/ +[50234]: https://github.com/rust-lang/rust/pull/50234/ +[50265]: https://github.com/rust-lang/rust/pull/50265/ +[50364]: https://github.com/rust-lang/rust/pull/50364/ +[50385]: https://github.com/rust-lang/rust/pull/50385/ +[50465]: https://github.com/rust-lang/rust/pull/50465/ +[50486]: https://github.com/rust-lang/rust/pull/50486/ +[50554]: https://github.com/rust-lang/rust/pull/50554/ +[50610]: https://github.com/rust-lang/rust/pull/50610/ +[50855]: https://github.com/rust-lang/rust/pull/50855/ +[51050]: https://github.com/rust-lang/rust/pull/51050/ +[51196]: https://github.com/rust-lang/rust/pull/51196/ +[51200]: https://github.com/rust-lang/rust/pull/51200/ +[51241]: https://github.com/rust-lang/rust/pull/51241/ +[51276]: https://github.com/rust-lang/rust/pull/51276/ +[51298]: https://github.com/rust-lang/rust/pull/51298/ +[51306]: https://github.com/rust-lang/rust/pull/51306/ +[51562]: https://github.com/rust-lang/rust/pull/51562/ +[cargo/5584]: https://github.com/rust-lang/cargo/pull/5584/ +[`Iterator::step_by`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.step_by +[`Path::ancestors`]: 
https://doc.rust-lang.org/std/path/struct.Path.html#method.ancestors +[`SystemTime::UNIX_EPOCH`]: https://doc.rust-lang.org/std/time/struct.SystemTime.html#associatedconstant.UNIX_EPOCH +[`alloc::GlobalAlloc`]: https://doc.rust-lang.org/std/alloc/trait.GlobalAlloc.html +[`alloc::Layout`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html +[`alloc::LayoutErr`]: https://doc.rust-lang.org/std/alloc/struct.LayoutErr.html +[`alloc::System`]: https://doc.rust-lang.org/std/alloc/struct.System.html +[`alloc::alloc`]: https://doc.rust-lang.org/std/alloc/fn.alloc.html +[`alloc::alloc_zeroed`]: https://doc.rust-lang.org/std/alloc/fn.alloc_zeroed.html +[`alloc::dealloc`]: https://doc.rust-lang.org/std/alloc/fn.dealloc.html +[`alloc::realloc`]: https://doc.rust-lang.org/std/alloc/fn.realloc.html +[`alloc::handle_alloc_error`]: https://doc.rust-lang.org/std/alloc/fn.handle_alloc_error.html +[`btree_map::Entry::or_default`]: https://doc.rust-lang.org/std/collections/btree_map/enum.Entry.html#method.or_default +[`fmt::Alignment`]: https://doc.rust-lang.org/std/fmt/enum.Alignment.html +[`hash_map::Entry::or_default`]: https://doc.rust-lang.org/std/collections/hash_map/enum.Entry.html#method.or_default +[`iter::repeat_with`]: https://doc.rust-lang.org/std/iter/fn.repeat_with.html +[`num::NonZeroUsize`]: https://doc.rust-lang.org/std/num/struct.NonZeroUsize.html +[`num::NonZeroU128`]: https://doc.rust-lang.org/std/num/struct.NonZeroU128.html +[`num::NonZeroU16`]: https://doc.rust-lang.org/std/num/struct.NonZeroU16.html +[`num::NonZeroU32`]: https://doc.rust-lang.org/std/num/struct.NonZeroU32.html +[`num::NonZeroU64`]: https://doc.rust-lang.org/std/num/struct.NonZeroU64.html +[`num::NonZeroU8`]: https://doc.rust-lang.org/std/num/struct.NonZeroU8.html +[`ops::RangeBounds`]: https://doc.rust-lang.org/std/ops/trait.RangeBounds.html +[`slice::SliceIndex`]: https://doc.rust-lang.org/std/slice/trait.SliceIndex.html +[`slice::from_mut`]: 
https://doc.rust-lang.org/std/slice/fn.from_mut.html +[`slice::from_ref`]: https://doc.rust-lang.org/std/slice/fn.from_ref.html +[`{Any + Send + Sync}::downcast_mut`]: https://doc.rust-lang.org/std/any/trait.Any.html#method.downcast_mut-2 +[`{Any + Send + Sync}::downcast_ref`]: https://doc.rust-lang.org/std/any/trait.Any.html#method.downcast_ref-2 +[`{Any + Send + Sync}::is`]: https://doc.rust-lang.org/std/any/trait.Any.html#method.is-2 + +Version 1.27.2 (2018-07-20) +=========================== + +Compatibility Notes +------------------- + +- The borrow checker was fixed to avoid potential unsoundness when using + match ergonomics: [#52213][52213]. + +[52213]: https://github.com/rust-lang/rust/issues/52213 + +Version 1.27.1 (2018-07-10) +=========================== + +Security Notes +-------------- + +- rustdoc would execute plugins in the /tmp/rustdoc/plugins directory + when running, which enabled executing code as some other user on a + given machine. This release fixes that vulnerability; you can read + more about this on the [blog][rustdoc-sec]. The associated CVE is [CVE-2018-1000622]. + + Thank you to Red Hat for responsibily disclosing this vulnerability to us. + +Compatibility Notes +------------------- + +- The borrow checker was fixed to avoid an additional potential unsoundness when using + match ergonomics: [#51415][51415], [#49534][49534]. + +[51415]: https://github.com/rust-lang/rust/issues/51415 +[49534]: https://github.com/rust-lang/rust/issues/49534 +[rustdoc-sec]: https://blog.rust-lang.org/2018/07/06/security-advisory-for-rustdoc.html +[CVE-2018-1000622]: https://cve.mitre.org/cgi-bin/cvename.cgi?name=%20CVE-2018-1000622 + +Version 1.27.0 (2018-06-21) +========================== + +Language +-------- +- [Removed 'proc' from the reserved keywords list.][49699] This allows `proc` to + be used as an identifier. 
+- [The dyn syntax is now available.][49968] This syntax is equivalent to the + bare `Trait` syntax, and should make it clearer when being used in tandem with + `impl Trait`. Since it is equivalent to the following syntax: + `&Trait == &dyn Trait`, `&mut Trait == &mut dyn Trait`, and + `Box<Trait> == Box<dyn Trait>`. +- [Attributes on generic parameters such as types and lifetimes are + now stable.][48851] e.g. + `fn foo<#[lifetime_attr] 'a, #[type_attr] T: 'a>() {}` +- [The `#[must_use]` attribute can now also be used on functions as well as + types.][48925] It provides a lint that by default warns users when the + value returned by a function has not been used. + +Compiler +-------- +- [Added the `armv5te-unknown-linux-musleabi` target.][50423] + +Libraries +--------- +- [SIMD (Single Instruction Multiple Data) on x86/x86_64 is now stable.][49664] + This includes [`arch::x86`] & [`arch::x86_64`] modules which contain + SIMD intrinsics, a new macro called `is_x86_feature_detected!`, the + `#[target_feature(enable="")]` attribute, and adding `target_feature = ""` to + the `cfg` attribute. +- [A lot of methods for `[u8]`, `f32`, and `f64` previously only available in + std are now available in core.][49896] +- [The generic `Rhs` type parameter on `ops::{Shl, ShlAssign, Shr}` now defaults + to `Self`.][49630] +- [`std::str::replace` now has the `#[must_use]` attribute][50177] to clarify + that the operation isn't done in place. +- [`Clone::clone`, `Iterator::collect`, and `ToOwned::to_owned` now have + the `#[must_use]` attribute][49533] to warn about unused potentially + expensive allocations.
+ +Stabilized APIs +--------------- +- [`DoubleEndedIterator::rfind`] +- [`DoubleEndedIterator::rfold`] +- [`DoubleEndedIterator::try_rfold`] +- [`Duration::from_micros`] +- [`Duration::from_nanos`] +- [`Duration::subsec_micros`] +- [`Duration::subsec_millis`] +- [`HashMap::remove_entry`] +- [`Iterator::try_fold`] +- [`Iterator::try_for_each`] +- [`NonNull::cast`] +- [`Option::filter`] +- [`String::replace_range`] +- [`Take::set_limit`] +- [`hint::unreachable_unchecked`] +- [`os::unix::process::parent_id`] +- [`ptr::swap_nonoverlapping`] +- [`slice::rsplit_mut`] +- [`slice::rsplit`] +- [`slice::swap_with_slice`] + +Cargo +----- +- [`cargo-metadata` now includes `authors`, `categories`, `keywords`, + `readme`, and `repository` fields.][cargo/5386] +- [`cargo-metadata` now includes a package's `metadata` table.][cargo/5360] +- [Added the `--target-dir` optional argument.][cargo/5393] This allows you to specify + a different directory than `target` for placing compilation artifacts. +- [Cargo will be adding automatic target inference for binaries, benchmarks, + examples, and tests in the Rust 2018 edition.][cargo/5335] If your project specifies + specific targets e.g. using `[[bin]]` and have other binaries in locations + where cargo would infer a binary, Cargo will produce a warning. You can + disable this feature ahead of time by setting any of the following `autobins`, + `autobenches`, `autoexamples`, `autotests` to false. +- [Cargo will now cache compiler information.][cargo/5359] This can be disabled by + setting `CARGO_CACHE_RUSTC_INFO=0` in your environment. + +Misc +---- +- [Added “The Rustc book” into the official documentation.][49707] + [“The Rustc book”] documents and teaches how to use the rustc compiler. +- [All books available on `doc.rust-lang.org` are now searchable.][49623] + +Compatibility Notes +------------------- +- [Calling a `CharExt` or `StrExt` method directly on core will no longer + work.][49896] e.g. 
`::core::prelude::v1::StrExt::is_empty("")` will not + compile, `"".is_empty()` will still compile. +- [`Debug` output on `atomic::{AtomicBool, AtomicIsize, AtomicPtr, AtomicUsize}` + will only print the inner type.][48553] e.g. + `print!("{:?}", AtomicBool::new(true))` will print `true` + not `AtomicBool(true)`. +- [The maximum number for `repr(align(N))` is now 2²⁹.][50378] Previously you + could enter higher numbers but they were not supported by LLVM. Up to 512MB + alignment should cover all use cases. + +[48553]: https://github.com/rust-lang/rust/pull/48553/ +[48851]: https://github.com/rust-lang/rust/pull/48851/ +[48925]: https://github.com/rust-lang/rust/pull/48925/ +[49533]: https://github.com/rust-lang/rust/pull/49533/ +[49623]: https://github.com/rust-lang/rust/pull/49623/ +[49630]: https://github.com/rust-lang/rust/pull/49630/ +[49664]: https://github.com/rust-lang/rust/pull/49664/ +[49699]: https://github.com/rust-lang/rust/pull/49699/ +[49707]: https://github.com/rust-lang/rust/pull/49707/ +[49719]: https://github.com/rust-lang/rust/pull/49719/ +[49896]: https://github.com/rust-lang/rust/pull/49896/ +[49968]: https://github.com/rust-lang/rust/pull/49968/ +[50177]: https://github.com/rust-lang/rust/pull/50177/ +[50378]: https://github.com/rust-lang/rust/pull/50378/ +[50398]: https://github.com/rust-lang/rust/pull/50398/ +[50423]: https://github.com/rust-lang/rust/pull/50423/ +[cargo/5203]: https://github.com/rust-lang/cargo/pull/5203/ +[cargo/5335]: https://github.com/rust-lang/cargo/pull/5335/ +[cargo/5359]: https://github.com/rust-lang/cargo/pull/5359/ +[cargo/5360]: https://github.com/rust-lang/cargo/pull/5360/ +[cargo/5386]: https://github.com/rust-lang/cargo/pull/5386/ +[cargo/5393]: https://github.com/rust-lang/cargo/pull/5393/ +[`DoubleEndedIterator::rfind`]: https://doc.rust-lang.org/std/iter/trait.DoubleEndedIterator.html#method.rfind +[`DoubleEndedIterator::rfold`]: https://doc.rust-lang.org/std/iter/trait.DoubleEndedIterator.html#method.rfold 
+[`DoubleEndedIterator::try_rfold`]: https://doc.rust-lang.org/std/iter/trait.DoubleEndedIterator.html#method.try_rfold +[`Duration::from_micros`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.from_micros +[`Duration::from_nanos`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.from_nanos +[`Duration::subsec_micros`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.subsec_micros +[`Duration::subsec_millis`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.subsec_millis +[`HashMap::remove_entry`]: https://doc.rust-lang.org/std/collections/struct.HashMap.html#method.remove_entry +[`Iterator::try_fold`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.try_fold +[`Iterator::try_for_each`]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.try_for_each +[`NonNull::cast`]: https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.cast +[`Option::filter`]: https://doc.rust-lang.org/std/option/enum.Option.html#method.filter +[`String::replace_range`]: https://doc.rust-lang.org/std/string/struct.String.html#method.replace_range +[`Take::set_limit`]: https://doc.rust-lang.org/std/io/struct.Take.html#method.set_limit +[`hint::unreachable_unchecked`]: https://doc.rust-lang.org/std/hint/fn.unreachable_unchecked.html +[`os::unix::process::parent_id`]: https://doc.rust-lang.org/std/os/unix/process/fn.parent_id.html +[`process::id`]: https://doc.rust-lang.org/std/process/fn.id.html +[`ptr::swap_nonoverlapping`]: https://doc.rust-lang.org/std/ptr/fn.swap_nonoverlapping.html +[`slice::rsplit_mut`]: https://doc.rust-lang.org/std/primitive.slice.html#method.rsplit_mut +[`slice::rsplit`]: https://doc.rust-lang.org/std/primitive.slice.html#method.rsplit +[`slice::swap_with_slice`]: https://doc.rust-lang.org/std/primitive.slice.html#method.swap_with_slice +[`arch::x86_64`]: https://doc.rust-lang.org/std/arch/x86_64/index.html +[`arch::x86`]: https://doc.rust-lang.org/std/arch/x86/index.html 
+[“The Rustc book”]: https://doc.rust-lang.org/rustc + + +Version 1.26.2 (2018-06-05) +========================== + +Compatibility Notes +------------------- + +- [The borrow checker was fixed to avoid unsoundness when using match ergonomics][51117] + +[51117]: https://github.com/rust-lang/rust/issues/51117 + + +Version 1.26.1 (2018-05-29) +========================== + +Tools +----- + +- [RLS now works on Windows][50646] +- [Rustfmt stopped badly formatting text in some cases][rustfmt/2695] + + +Compatibility Notes +-------- + +- [`fn main() -> impl Trait` no longer works for non-Termination + trait][50656] + This reverts an accidental stabilization. +- [`NaN > NaN` no longer returns true in const-fn contexts][50812] +- [Prohibit using turbofish for `impl Trait` in method arguments][50950] + +[50646]: https://github.com/rust-lang/rust/issues/50646 +[50656]: https://github.com/rust-lang/rust/pull/50656 +[50812]: https://github.com/rust-lang/rust/pull/50812 +[50950]: https://github.com/rust-lang/rust/issues/50950 +[rustfmt/2695]: https://github.com/rust-lang-nursery/rustfmt/issues/2695 + +Version 1.26.0 (2018-05-10) +========================== + +Language +-------- +- [Closures now implement `Copy` and/or `Clone` if all captured variables + implement either or both traits.][49299] +- [The inclusive range syntax e.g. `for x in 0..=10` is now stable.][47813] +- [The `'_` lifetime is now stable. The underscore lifetime can be used anywhere where a + lifetime can be elided.][49458] +- [`impl Trait` is now stable allowing you to have abstract types in returns + or in function parameters.][49255] e.g. `fn foo() -> impl Iterator<Item=u8>` or + `fn open(path: impl AsRef<Path>)`. +- [Pattern matching will now automatically apply dereferences.][49394] +- [128-bit integers in the form of `u128` and `i128` are now stable.][49101] +- [`main` can now return `Result<(), E: Debug>`][49162] in addition to `()`. +- [A lot of operations are now available in a const context.][46882] E.g.
You + can now index into constant arrays, reference and dereference into constants, + and use Tuple struct constructors. +- [Fixed entry slice patterns are now stable.][48516] e.g. + ```rust + let points = [1, 2, 3, 4]; + match points { + [1, 2, 3, 4] => println!("All points were sequential."), + _ => println!("Not all points were sequential."), + } + ``` + + +Compiler +-------- +- [LLD is now used as the default linker for `wasm32-unknown-unknown`.][48125] +- [Fixed exponential projection complexity on nested types.][48296] + This can provide up to a ~12% reduction in compile times for certain crates. +- [Added the `--remap-path-prefix` option to rustc.][48359] Allowing you + to remap path prefixes outputted by the compiler. +- [Added `powerpc-unknown-netbsd` target.][48281] + +Libraries +--------- +- [Implemented `From<u16> for usize` & `From<{u8, i16}> for isize`.][49305] +- [Added hexadecimal formatting for integers with fmt::Debug][48978] + e.g. `assert!(format!("{:02x?}", b"Foo\0") == "[46, 6f, 6f, 00]")` +- [Implemented `Default, Hash` for `cmp::Reverse`.][48628] +- [Optimized `str::repeat` being 8x faster in large cases.][48657] +- [`ascii::escape_default` is now available in libcore.][48735] +- [Trailing commas are now supported in std and core macros.][48056] +- [Implemented `Copy, Clone` for `cmp::Reverse`][47379] +- [Implemented `Clone` for `char::{ToLowercase, ToUppercase}`.][48629] + +Stabilized APIs +--------------- +- [`*const T::add`] +- [`*const T::copy_to_nonoverlapping`] +- [`*const T::copy_to`] +- [`*const T::read_unaligned`] +- [`*const T::read_volatile`] +- [`*const T::read`] +- [`*const T::sub`] +- [`*const T::wrapping_add`] +- [`*const T::wrapping_sub`] +- [`*mut T::add`] +- [`*mut T::copy_to_nonoverlapping`] +- [`*mut T::copy_to`] +- [`*mut T::read_unaligned`] +- [`*mut T::read_volatile`] +- [`*mut T::read`] +- [`*mut T::replace`] +- [`*mut T::sub`] +- [`*mut T::swap`] +- [`*mut T::wrapping_add`] +- [`*mut T::wrapping_sub`] +- [`*mut
T::write_bytes`] +- [`*mut T::write_unaligned`] +- [`*mut T::write_volatile`] +- [`*mut T::write`] +- [`Box::leak`] +- [`FromUtf8Error::as_bytes`] +- [`LocalKey::try_with`] +- [`Option::cloned`] +- [`btree_map::Entry::and_modify`] +- [`fs::read_to_string`] +- [`fs::read`] +- [`fs::write`] +- [`hash_map::Entry::and_modify`] +- [`iter::FusedIterator`] +- [`ops::RangeInclusive`] +- [`ops::RangeToInclusive`] +- [`process::id`] +- [`slice::rotate_left`] +- [`slice::rotate_right`] +- [`String::retain`] + + +Cargo +----- +- [Cargo will now output path to custom commands when `-v` is + passed with `--list`][cargo/5041] +- [The Cargo binary version is now the same as the Rust version][cargo/5083] + +Misc +---- +- [The second edition of "The Rust Programming Language" book is now recommended + over the first.][48404] + +Compatibility Notes +------------------- + +- [aliasing a `Fn` trait as `dyn` no longer works.][48481] E.g. the following + syntax is now invalid. + ``` + use std::ops::Fn as dyn; + fn g(_: Box<dyn(std::fmt::Debug)>) {} + ``` +- [The result of dereferences are no longer promoted to `'static`.][47408] + e.g.
+ ```rust + fn main() { + const PAIR: &(i32, i32) = &(0, 1); + let _reversed_pair: &'static _ = &(PAIR.1, PAIR.0); // Doesn't work + } + ``` +- [Deprecate `AsciiExt` trait in favor of inherent methods.][49109] +- [`".e0"` will now no longer parse as `0.0` and will instead cause + an error.][48235] +- [Removed hoedown from rustdoc.][48274] +- [Bounds on higher-kinded lifetimes a hard error.][48326] + +[46882]: https://github.com/rust-lang/rust/pull/46882 +[47379]: https://github.com/rust-lang/rust/pull/47379 +[47408]: https://github.com/rust-lang/rust/pull/47408 +[47813]: https://github.com/rust-lang/rust/pull/47813 +[48056]: https://github.com/rust-lang/rust/pull/48056 +[48125]: https://github.com/rust-lang/rust/pull/48125 +[48166]: https://github.com/rust-lang/rust/pull/48166 +[48235]: https://github.com/rust-lang/rust/pull/48235 +[48274]: https://github.com/rust-lang/rust/pull/48274 +[48281]: https://github.com/rust-lang/rust/pull/48281 +[48296]: https://github.com/rust-lang/rust/pull/48296 +[48326]: https://github.com/rust-lang/rust/pull/48326 +[48359]: https://github.com/rust-lang/rust/pull/48359 +[48404]: https://github.com/rust-lang/rust/pull/48404 +[48481]: https://github.com/rust-lang/rust/pull/48481 +[48516]: https://github.com/rust-lang/rust/pull/48516 +[48628]: https://github.com/rust-lang/rust/pull/48628 +[48629]: https://github.com/rust-lang/rust/pull/48629 +[48657]: https://github.com/rust-lang/rust/pull/48657 +[48735]: https://github.com/rust-lang/rust/pull/48735 +[48978]: https://github.com/rust-lang/rust/pull/48978 +[49101]: https://github.com/rust-lang/rust/pull/49101 +[49109]: https://github.com/rust-lang/rust/pull/49109 +[49121]: https://github.com/rust-lang/rust/pull/49121 +[49162]: https://github.com/rust-lang/rust/pull/49162 +[49184]: https://github.com/rust-lang/rust/pull/49184 +[49234]: https://github.com/rust-lang/rust/pull/49234 +[49255]: https://github.com/rust-lang/rust/pull/49255 +[49299]: https://github.com/rust-lang/rust/pull/49299 
+[49305]: https://github.com/rust-lang/rust/pull/49305 +[49394]: https://github.com/rust-lang/rust/pull/49394 +[49458]: https://github.com/rust-lang/rust/pull/49458 +[`*const T::add`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.add +[`*const T::copy_to_nonoverlapping`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to_nonoverlapping +[`*const T::copy_to`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to +[`*const T::read_unaligned`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned +[`*const T::read_volatile`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile +[`*const T::read`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.read +[`*const T::sub`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.sub +[`*const T::wrapping_add`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add +[`*const T::wrapping_sub`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub +[`*mut T::add`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.add-1 +[`*mut T::copy_to_nonoverlapping`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to_nonoverlapping-1 +[`*mut T::copy_to`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.copy_to-1 +[`*mut T::read_unaligned`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.read_unaligned-1 +[`*mut T::read_volatile`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.read_volatile-1 +[`*mut T::read`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.read-1 +[`*mut T::replace`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.replace +[`*mut T::sub`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.sub-1 +[`*mut T::swap`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.swap +[`*mut T::wrapping_add`]: 
https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_add-1 +[`*mut T::wrapping_sub`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.wrapping_sub-1 +[`*mut T::write_bytes`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.write_bytes +[`*mut T::write_unaligned`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.write_unaligned +[`*mut T::write_volatile`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.write_volatile +[`*mut T::write`]: https://doc.rust-lang.org/std/primitive.pointer.html#method.write +[`Box::leak`]: https://doc.rust-lang.org/std/boxed/struct.Box.html#method.leak +[`FromUtf8Error::as_bytes`]: https://doc.rust-lang.org/std/string/struct.FromUtf8Error.html#method.as_bytes +[`LocalKey::try_with`]: https://doc.rust-lang.org/std/thread/struct.LocalKey.html#method.try_with +[`Option::cloned`]: https://doc.rust-lang.org/std/option/enum.Option.html#method.cloned +[`btree_map::Entry::and_modify`]: https://doc.rust-lang.org/std/collections/btree_map/enum.Entry.html#method.and_modify +[`fs::read_to_string`]: https://doc.rust-lang.org/std/fs/fn.read_to_string.html +[`fs::read`]: https://doc.rust-lang.org/std/fs/fn.read.html +[`fs::write`]: https://doc.rust-lang.org/std/fs/fn.write.html +[`hash_map::Entry::and_modify`]: https://doc.rust-lang.org/std/collections/hash_map/enum.Entry.html#method.and_modify +[`iter::FusedIterator`]: https://doc.rust-lang.org/std/iter/trait.FusedIterator.html +[`ops::RangeInclusive`]: https://doc.rust-lang.org/std/ops/struct.RangeInclusive.html +[`ops::RangeToInclusive`]: https://doc.rust-lang.org/std/ops/struct.RangeToInclusive.html +[`process::id`]: https://doc.rust-lang.org/std/process/fn.id.html +[`slice::rotate_left`]: https://doc.rust-lang.org/std/primitive.slice.html#method.rotate_left +[`slice::rotate_right`]: https://doc.rust-lang.org/std/primitive.slice.html#method.rotate_right +[`String::retain`]: 
https://doc.rust-lang.org/std/string/struct.String.html#method.retain +[cargo/5041]: https://github.com/rust-lang/cargo/pull/5041 +[cargo/5083]: https://github.com/rust-lang/cargo/pull/5083 + + +Version 1.25.0 (2018-03-29) +========================== + +Language +-------- +- [The `#[repr(align(x))]` attribute is now stable.][47006] [RFC 1358] +- [You can now use nested groups of imports.][47948] + e.g. `use std::{fs::File, io::Read, path::{Path, PathBuf}};` +- [You can now have `|` at the start of a match arm.][47947] e.g. +```rust +enum Foo { A, B, C } + +fn main() { + let x = Foo::A; + match x { + | Foo::A + | Foo::B => println!("AB"), + | Foo::C => println!("C"), + } +} +``` + +Compiler +-------- +- [Upgraded to LLVM 6.][47828] +- [Added `-C lto=val` option.][47521] +- [Added `i586-unknown-linux-musl` target][47282] + +Libraries +--------- +- [Impl Send for `process::Command` on Unix.][47760] +- [Impl PartialEq and Eq for `ParseCharError`.][47790] +- [`UnsafeCell::into_inner` is now safe.][47204] +- [Implement libstd for CloudABI.][47268] +- [`Float::{from_bits, to_bits}` is now available in libcore.][46931] +- [Implement `AsRef<Path>` for Component][46985] +- [Implemented `Write` for `Cursor<&mut Vec<u8>>`][46830] +- [Moved `Duration` to libcore.][46666] + +Stabilized APIs +--------------- +- [`Location::column`] +- [`ptr::NonNull`] + +The following functions can now be used in a constant expression. +eg.
`static MINUTE: Duration = Duration::from_secs(60);` +- [`Duration::new`][47300] +- [`Duration::from_secs`][47300] +- [`Duration::from_millis`][47300] + +Cargo +----- +- [`cargo new` no longer removes `rust` or `rs` prefixs/suffixs.][cargo/5013] +- [`cargo new` now defaults to creating a binary crate, instead of a + library crate.][cargo/5029] + +Misc +---- +- [Rust by example is now shipped with new releases][46196] + +Compatibility Notes +------------------- +- [Deprecated `net::lookup_host`.][47510] +- [`rustdoc` has switched to pulldown as the default markdown renderer.][47398] +- The borrow checker was sometimes incorrectly permitting overlapping borrows + around indexing operations (see [#47349][47349]). This has been fixed (which also + enabled some correct code that used to cause errors (e.g. [#33903][33903] and [#46095][46095]). +- [Removed deprecated unstable attribute `#[simd]`.][47251] + +[33903]: https://github.com/rust-lang/rust/pull/33903 +[47947]: https://github.com/rust-lang/rust/pull/47947 +[47948]: https://github.com/rust-lang/rust/pull/47948 +[47760]: https://github.com/rust-lang/rust/pull/47760 +[47790]: https://github.com/rust-lang/rust/pull/47790 +[47828]: https://github.com/rust-lang/rust/pull/47828 +[47398]: https://github.com/rust-lang/rust/pull/47398 +[47510]: https://github.com/rust-lang/rust/pull/47510 +[47521]: https://github.com/rust-lang/rust/pull/47521 +[47204]: https://github.com/rust-lang/rust/pull/47204 +[47251]: https://github.com/rust-lang/rust/pull/47251 +[47268]: https://github.com/rust-lang/rust/pull/47268 +[47282]: https://github.com/rust-lang/rust/pull/47282 +[47300]: https://github.com/rust-lang/rust/pull/47300 +[47349]: https://github.com/rust-lang/rust/pull/47349 +[46931]: https://github.com/rust-lang/rust/pull/46931 +[46985]: https://github.com/rust-lang/rust/pull/46985 +[47006]: https://github.com/rust-lang/rust/pull/47006 +[46830]: https://github.com/rust-lang/rust/pull/46830 +[46095]: 
https://github.com/rust-lang/rust/pull/46095 +[46666]: https://github.com/rust-lang/rust/pull/46666 +[46196]: https://github.com/rust-lang/rust/pull/46196 +[cargo/5013]: https://github.com/rust-lang/cargo/pull/5013 +[cargo/5029]: https://github.com/rust-lang/cargo/pull/5029 +[RFC 1358]: https://github.com/rust-lang/rfcs/pull/1358 +[`Location::column`]: https://doc.rust-lang.org/std/panic/struct.Location.html#method.column +[`ptr::NonNull`]: https://doc.rust-lang.org/std/ptr/struct.NonNull.html + + +Version 1.24.1 (2018-03-01) +========================== + + - [Do not abort when unwinding through FFI][48251] + - [Emit UTF-16 files for linker arguments on Windows][48318] + - [Make the error index generator work again][48308] + - [Cargo will warn on Windows 7 if an update is needed][cargo/5069]. + +[48251]: https://github.com/rust-lang/rust/issues/48251 +[48308]: https://github.com/rust-lang/rust/issues/48308 +[48318]: https://github.com/rust-lang/rust/issues/48318 +[cargo/5069]: https://github.com/rust-lang/cargo/pull/5069 + + +Version 1.24.0 (2018-02-15) +========================== + +Language +-------- +- [External `sysv64` ffi is now available.][46528] + eg. `extern "sysv64" fn foo () {}` + +Compiler +-------- +- [rustc now uses 16 codegen units by default for release builds.][46910] + For the fastest builds, utilize `codegen-units=1`. +- [Added `armv4t-unknown-linux-gnueabi` target.][47018] +- [Add `aarch64-unknown-openbsd` support][46760] + +Libraries +--------- +- [`str::find::<char>` now uses memchr.][46735] This should lead to a 10x + improvement in performance in the majority of cases.
+- [`OsStr`'s `Debug` implementation is now lossless and consistent + with Windows.][46798] +- [`time::{SystemTime, Instant}` now implement `Hash`.][46828] +- [impl `From<bool>` for `AtomicBool`][46293] +- [impl `From<{CString, &CStr}>` for `{Arc<CStr>, Rc<CStr>}`][45990] +- [impl `From<{OsString, &OsStr}>` for `{Arc<OsStr>, Rc<OsStr>}`][45990] +- [impl `From<{PathBuf, &Path}>` for `{Arc<Path>, Rc<Path>}`][45990] +- [float::from_bits now just uses transmute.][46012] This provides + some optimisations from LLVM. +- [Copied `AsciiExt` methods onto `char`][46077] +- [Remove `T: Sized` requirement on `ptr::is_null()`][46094] +- [impl `From<RecvError>` for `{TryRecvError, RecvTimeoutError}`][45506] +- [Optimised `f32::{min, max}` to generate more efficient x86 assembly][47080] +- [`[u8]::contains` now uses memchr which provides a 3x speed improvement][46713] + +Stabilized APIs +--------------- +- [`RefCell::replace`] +- [`RefCell::swap`] +- [`atomic::spin_loop_hint`] + +The following functions can now be used in a constant expression. +eg. `let buffer: [u8; size_of::<size_t>()];`, `static COUNTER: AtomicUsize = AtomicUsize::new(1);` + +- [`AtomicBool::new`][46287] +- [`AtomicUsize::new`][46287] +- [`AtomicIsize::new`][46287] +- [`AtomicPtr::new`][46287] +- [`Cell::new`][46287] +- [`{integer}::min_value`][46287] +- [`{integer}::max_value`][46287] +- [`mem::size_of`][46287] +- [`mem::align_of`][46287] +- [`ptr::null`][46287] +- [`ptr::null_mut`][46287] +- [`RefCell::new`][46287] +- [`UnsafeCell::new`][46287] + +Cargo +----- +- [Added a `workspace.default-members` config that + overrides implied `--all` in virtual workspaces.][cargo/4743] +- [Enable incremental by default on development builds.][cargo/4817] Also added + configuration keys to `Cargo.toml` and `.cargo/config` to disable on a + per-project or global basis respectively.
+ +Misc +---- + +Compatibility Notes +------------------- +- [Floating point types `Debug` impl now always prints a decimal point.][46831] +- [`Ipv6Addr` now rejects superfluous `::`'s in IPv6 addresses][46671] This is + in accordance with IETF RFC 4291 §2.2. +- [Unwinding will no longer go past FFI boundaries, and will instead abort.][46833] +- [`Formatter::flags` method is now deprecated.][46284] The `sign_plus`, + `sign_minus`, `alternate`, and `sign_aware_zero_pad` should be used instead. +- [Leading zeros in tuple struct members is now an error][47084] +- [`column!()` macro is one-based instead of zero-based][46977] +- [`fmt::Arguments` can no longer be shared across threads][45198] +- [Access to `#[repr(packed)]` struct fields is now unsafe][44884] +- [Cargo sets a different working directory for the compiler][cargo/4788] + +[44884]: https://github.com/rust-lang/rust/pull/44884 +[45198]: https://github.com/rust-lang/rust/pull/45198 +[45506]: https://github.com/rust-lang/rust/pull/45506 +[45904]: https://github.com/rust-lang/rust/pull/45904 +[45990]: https://github.com/rust-lang/rust/pull/45990 +[46012]: https://github.com/rust-lang/rust/pull/46012 +[46077]: https://github.com/rust-lang/rust/pull/46077 +[46094]: https://github.com/rust-lang/rust/pull/46094 +[46284]: https://github.com/rust-lang/rust/pull/46284 +[46287]: https://github.com/rust-lang/rust/pull/46287 +[46293]: https://github.com/rust-lang/rust/pull/46293 +[46528]: https://github.com/rust-lang/rust/pull/46528 +[46671]: https://github.com/rust-lang/rust/pull/46671 +[46713]: https://github.com/rust-lang/rust/pull/46713 +[46735]: https://github.com/rust-lang/rust/pull/46735 +[46749]: https://github.com/rust-lang/rust/pull/46749 +[46760]: https://github.com/rust-lang/rust/pull/46760 +[46798]: https://github.com/rust-lang/rust/pull/46798 +[46828]: https://github.com/rust-lang/rust/pull/46828 +[46831]: https://github.com/rust-lang/rust/pull/46831 +[46833]: https://github.com/rust-lang/rust/pull/46833 
+[46910]: https://github.com/rust-lang/rust/pull/46910 +[46977]: https://github.com/rust-lang/rust/pull/46977 +[47018]: https://github.com/rust-lang/rust/pull/47018 +[47080]: https://github.com/rust-lang/rust/pull/47080 +[47084]: https://github.com/rust-lang/rust/pull/47084 +[cargo/4743]: https://github.com/rust-lang/cargo/pull/4743 +[cargo/4788]: https://github.com/rust-lang/cargo/pull/4788 +[cargo/4817]: https://github.com/rust-lang/cargo/pull/4817 +[`RefCell::replace`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.replace +[`RefCell::swap`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.swap +[`atomic::spin_loop_hint`]: https://doc.rust-lang.org/std/sync/atomic/fn.spin_loop_hint.html + + Version 1.23.0 (2018-01-04) ========================== @@ -2307,7 +3118,7 @@ Language [RFC 1513](https://github.com/rust-lang/rfcs/blob/master/text/1513-less-unwinding.md). * [Add a new crate type, 'cdylib'](https://github.com/rust-lang/rust/pull/33553). cdylibs are dynamic libraries suitable for loading by non-Rust hosts. - [RFC 1510](https://github.com/rust-lang/rfcs/blob/master/text/1510-rdylib.md). + [RFC 1510](https://github.com/rust-lang/rfcs/blob/master/text/1510-cdylib.md). Note that Cargo does not yet directly support cdylibs. 
Stabilized APIs @@ -2382,7 +3193,7 @@ Stabilized APIs * [`UnixDatagram::shutdown`](http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.shutdown) * RawFd impls for `UnixDatagram` * `{BTree,Hash}Map::values_mut` -* [`<[_]>::binary_search_by_key`](http://doc.rust-lang.org/beta/std/primitive.slice.html#method.binary_search_by_key) +* [`<[_]>::binary_search_by_key`](http://doc.rust-lang.org/std/primitive.slice.html#method.binary_search_by_key) Libraries --------- @@ -3300,7 +4111,7 @@ Compatibility Notes [1.6bh]: https://github.com/rust-lang/rust/pull/29811 [1.6c]: https://github.com/rust-lang/cargo/pull/2192 [1.6cc]: https://github.com/rust-lang/cargo/pull/2131 -[1.6co]: http://doc.rust-lang.org/beta/core/index.html +[1.6co]: http://doc.rust-lang.org/core/index.html [1.6dv]: https://github.com/rust-lang/rust/pull/30000 [1.6f]: https://github.com/rust-lang/rust/pull/29129 [1.6m]: https://github.com/rust-lang/rust/pull/29828 @@ -3995,7 +4806,7 @@ Language -------- * Patterns with `ref mut` now correctly invoke [`DerefMut`] when - matching against dereferencable values. + matching against dereferenceable values. 
Libraries --------- diff --git a/appveyor.yml b/appveyor.yml index 1a186c080ce0..b1e2e1545cf8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,7 +6,7 @@ environment: secure: 7Y+JiquYedOAgnUU26uL0DPzrxmTtR+qIwG6rNKSuWDffqU3vVZxbGXim9QpTO80 SCCACHE_DIGEST: f808afabb4a4eb1d7112bcb3fa6be03b61e93412890c88e177c667eb37f46353d7ec294e559b16f9f4b5e894f2185fe7670a0df15fd064889ecbd80f0c34166c TOOLSTATE_REPO_ACCESS_TOKEN: - secure: PTZiSxJMVUZ0VnMR5i13E4OagbXfglj7pcskDQiKufVrDm13mLoI0vDJAEM35+bY + secure: gKGlVktr7iuqCoYSxHxDE9ltLOKU0nYDEuQxvWbNxUIW7ri5ppn8L06jQzN0GGzN # By default schannel checks revocation of certificates unlike some other SSL # backends, but we've historically had problems on CI where a revocation @@ -18,19 +18,31 @@ environment: - MSYS_BITS: 64 RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --enable-profiler SCRIPT: python x.py test + CI_JOB_NAME: x86_64-msvc - MSYS_BITS: 32 - RUST_CONFIGURE_ARGS: --build=i686-pc-windows-msvc --target=i686-pc-windows-msvc - SCRIPT: python x.py test --host i686-pc-windows-msvc --target i686-pc-windows-msvc + RUST_CONFIGURE_ARGS: --build=i686-pc-windows-msvc + SCRIPT: make appveyor-subset-1 + CI_JOB_NAME: i686-msvc-1 + - MSYS_BITS: 32 + RUST_CONFIGURE_ARGS: --build=i686-pc-windows-msvc + SCRIPT: make appveyor-subset-2 + CI_JOB_NAME: i686-msvc-2 # MSVC aux tests - MSYS_BITS: 64 - RUST_CHECK_TARGET: check-aux + RUST_CHECK_TARGET: check-aux EXCLUDE_CARGO=1 RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc + CI_JOB_NAME: x86_64-msvc-aux + - MSYS_BITS: 64 + SCRIPT: python x.py test src/tools/cargotest src/tools/cargo + RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc + CI_JOB_NAME: x86_64-msvc-cargo # MSVC tools tests - MSYS_BITS: 64 SCRIPT: src/ci/docker/x86_64-gnu-tools/checktools.sh x.py /tmp/toolstates.json windows RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --save-toolstates=/tmp/toolstates.json --enable-test-miri + CI_JOB_NAME: x86_64-msvc-tools # 32/64-bit MinGW builds. 
# @@ -47,51 +59,65 @@ environment: # SourceForge is notoriously flaky, so we mirror it on our own infrastructure. - MSYS_BITS: 32 RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu - SCRIPT: python x.py test + SCRIPT: make appveyor-subset-1 MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z MINGW_DIR: mingw32 + CI_JOB_NAME: i686-mingw-1 + - MSYS_BITS: 32 + RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu + SCRIPT: make appveyor-subset-2 + MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror + MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z + MINGW_DIR: mingw32 + CI_JOB_NAME: i686-mingw-2 - MSYS_BITS: 64 SCRIPT: python x.py test RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z MINGW_DIR: mingw64 + CI_JOB_NAME: x86_64-mingw # 32/64 bit MSVC and GNU deployment - RUST_CONFIGURE_ARGS: > --build=x86_64-pc-windows-msvc - --enable-extended + --enable-full-tools --enable-profiler SCRIPT: python x.py dist DEPLOY: 1 + CI_JOB_NAME: dist-x86_64-msvc - RUST_CONFIGURE_ARGS: > --build=i686-pc-windows-msvc --target=i586-pc-windows-msvc - --enable-extended + --enable-full-tools --enable-profiler SCRIPT: python x.py dist DEPLOY: 1 + CI_JOB_NAME: dist-i686-msvc - MSYS_BITS: 32 - RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-extended + RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-full-tools SCRIPT: python x.py dist MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z MINGW_DIR: mingw32 DEPLOY: 1 + CI_JOB_NAME: dist-i686-mingw - MSYS_BITS: 64 SCRIPT: python x.py dist - RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-extended + RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-full-tools MINGW_URL: 
https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z MINGW_DIR: mingw64 DEPLOY: 1 + CI_JOB_NAME: dist-x86_64-mingw # "alternate" deployment, see .travis.yml for more info - MSYS_BITS: 64 RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --enable-extended --enable-profiler SCRIPT: python x.py dist DEPLOY_ALT: 1 + CI_JOB_NAME: dist-x86_64-msvc-alt matrix: fast_finish: true @@ -112,6 +138,20 @@ install: - if defined MINGW_URL 7z x -y %MINGW_ARCHIVE% > nul - if defined MINGW_URL set PATH=%CD%\%MINGW_DIR%\bin;C:\msys64\usr\bin;%PATH% + # If we're compiling for MSVC then we, like most other distribution builders, + # switch to clang as the compiler. This'll allow us eventually to enable LTO + # amongst LLVM and rustc. Note that we only do this on MSVC as I don't think + # clang has an output mode compatible with MinGW that we need. If it does we + # should switch to clang for MinGW as well! + # + # Note that the LLVM installer is an NSIS installer + # + # Original downloaded here came from + # http://releases.llvm.org/6.0.0/LLVM-6.0.0-win64.exe + - if NOT defined MINGW_URL appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/LLVM-6.0.0-win64.exe + - if NOT defined MINGW_URL .\LLVM-6.0.0-win64.exe /S /NCRC /D=C:\clang-rust + - if NOT defined MINGW_URL set RUST_CONFIGURE_ARGS=%RUST_CONFIGURE_ARGS% --set llvm.clang-cl=C:\clang-rust\bin\clang-cl.exe + # Here we do a pretty heinous thing which is to mangle the MinGW installation # we just had above. Currently, as of this writing, we're using MinGW-w64 # builds of gcc, and that's currently at 6.3.0. 
We use 6.3.0 as it appears to @@ -140,8 +180,8 @@ install: - set PATH=C:\Python27;%PATH% # Download and install sccache - - appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-12-sccache-x86_64-pc-windows-msvc - - mv 2017-05-12-sccache-x86_64-pc-windows-msvc sccache.exe + - appveyor-retry appveyor DownloadFile https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2018-04-26-sccache-x86_64-pc-windows-msvc + - mv 2018-04-26-sccache-x86_64-pc-windows-msvc sccache.exe - set PATH=%PATH%;%CD% # Download and install ninja @@ -164,9 +204,6 @@ install: - set PATH=%PATH%;%CD%\handle - handle.exe -accepteula -help - # Attempt to debug sccache failures - - set SCCACHE_ERROR_LOG=%CD%/sccache.log - test_script: - if not exist C:\cache\rustsrc\NUL mkdir C:\cache\rustsrc - sh src/ci/init_repo.sh . /c/cache/rustsrc @@ -175,7 +212,9 @@ test_script: - sh src/ci/run.sh on_failure: - - cat %CD%\sccache.log || exit 0 + # Dump crash log + - set PATH=%PATH%;"C:\Program Files (x86)\Windows Kits\10\Debuggers\X64" + - if exist %LOCALAPPDATA%\CrashDumps for %%f in (%LOCALAPPDATA%\CrashDumps\*) do cdb -c "k;q" -G -z "%%f" branches: only: diff --git a/config.toml.example b/config.toml.example index 18c1f160c03d..107375ac5cc3 100644 --- a/config.toml.example +++ b/config.toml.example @@ -61,8 +61,8 @@ # the same format as above, but since these targets are experimental, they are # not built by default and the experimental Rust compilation targets that depend # on them will not work unless the user opts in to building them. By default the -# `WebAssembly` target is enabled when compiling LLVM from scratch. -#experimental-targets = "WebAssembly" +# `WebAssembly` and `RISCV` targets are enabled when compiling LLVM from scratch. +#experimental-targets = "WebAssembly;RISCV" # Cap the number of parallel linker invocations when compiling LLVM. 
# This can be useful when building LLVM with debug info, which significantly @@ -76,6 +76,10 @@ # passed to prefer linking to shared libraries. #link-shared = false +# On MSVC you can compile LLVM with clang-cl, but the test suite doesn't pass +# with clang-cl, so this is special in that it only compiles LLVM with clang-cl +#clang-cl = '/path/to/clang-cl.exe' + # ============================================================================= # General build configuration options # ============================================================================= @@ -118,6 +122,10 @@ # Indicate whether submodules are managed and updated automatically. #submodules = true +# Update submodules only when the checked out commit in the submodules differs +# from what is committed in the main rustc repo. +#fast-submodules = true + # The path to (or name of) the GDB executable to use. This is only used for # executing the debuginfo test suite. #gdb = "gdb" @@ -151,6 +159,10 @@ # default. #extended = false +# Installs chosen set of extended tools if enables. By default builds all. +# If chosen tool failed to build the installation fails. +#tools = ["cargo", "rls", "rustfmt", "analysis", "src"] + # Verbosity level: 0 == not verbose, 1 == verbose, 2 == very verbose #verbose = 0 @@ -178,6 +190,10 @@ # essentially skipping stage0 as the local compiler is recompiling itself again. #local-rebuild = false +# Print out how long each rustbuild step took (mostly intended for CI and +# tracking over time) +#print-step-timings = false + # ============================================================================= # General install configuration options # ============================================================================= @@ -250,6 +266,10 @@ # standard library. #debuginfo-only-std = false +# Enable debuginfo for the extended tools: cargo, rls, rustfmt +# Adding debuginfo makes them several times larger. 
+#debuginfo-tools = false + # Whether or not jemalloc is built and enabled #use-jemalloc = true @@ -259,6 +279,12 @@ # Whether or not `panic!`s generate backtraces (RUST_BACKTRACE) #backtrace = true +# Whether to always use incremental compilation when building rustc +#incremental = false + +# Build rustc with experimental parallelization +#experimental-parallel-queries = false + # The default linker that will be hard-coded into the generated compiler for # targets that don't specify linker explicitly in their target specifications. # Note that this is not the linker used to link said compiler. @@ -275,9 +301,9 @@ # desired in distributions, for example. #rpath = true -# Suppresses extraneous output from tests to ensure the output of the test -# harness is relatively clean. -#quiet-tests = false +# Emits extraneous output from tests to ensure that failures of the test +# harness are debuggable just from logfiles. +#verbose-tests = false # Flag indicating whether tests are compiled with optimizations (the -O flag) or # with debuginfo (the -g flag) @@ -290,7 +316,7 @@ # Flag indicating whether git info will be retrieved from .git automatically. # Having the git information can cause a lot of rebuilds during development. -# Note: If this attribute is not explicity set (e.g. if left commented out) it +# Note: If this attribute is not explicitly set (e.g. if left commented out) it # will default to true if channel = "dev", but will default to false otherwise. #ignore-git = true @@ -305,6 +331,42 @@ # result (broken, compiling, testing) into this JSON file. #save-toolstates = "/path/to/toolstates.json" +# This is an array of the codegen backends that will be compiled for the rustc +# that's being compiled. 
The default is to only build the LLVM codegen backend, +# but you can also optionally enable the "emscripten" backend for asm.js or +# make this an empty array (but that probably won't get too far in the +# bootstrap) +#codegen-backends = ["llvm"] + +# This is the name of the directory in which codegen backends will get installed +#codegen-backends-dir = "codegen-backends" + +# Flag indicating whether `libstd` calls an imported function to handle basic IO +# when targeting WebAssembly. Enable this to debug tests for the `wasm32-unknown-unknown` +# target, as without this option the test output will not be captured. +#wasm-syscall = false + +# Indicates whether LLD will be compiled and made available in the sysroot for +# rustc to execute. +#lld = false + +# Indicates whether some LLVM tools, like llvm-objdump, will be made available in the +# sysroot. +#llvm-tools = false + +# Indicates whether LLDB will be made available in the sysroot. +# This is only built if LLVM is also being built. +#lldb = false + +# Whether to deny warnings in crates +#deny-warnings = true + +# Print backtrace on internal compiler errors during bootstrap +#backtrace-on-ice = false + +# Whether to verify generated LLVM IR +#verify-llvm-ir = false + # ============================================================================= # Options for specific targets # @@ -332,7 +394,7 @@ #linker = "cc" # Path to the `llvm-config` binary of the installation of a custom LLVM to link -# against. Note that if this is specifed we don't compile LLVM at all for this +# against. Note that if this is specified we don't compile LLVM at all for this # target. 
#llvm-config = "../path/to/llvm/root/bin/llvm-config" diff --git a/src/Cargo.lock b/src/Cargo.lock index 1e37237ac9c7..4e16e61aa0d3 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -1,23 +1,6 @@ -[[package]] -name = "advapi32-sys" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "aho-corasick" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "aho-corasick" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -27,19 +10,18 @@ dependencies = [ name = "alloc" version = "0.0.0" dependencies = [ + "compiler_builtins 0.0.0", "core 0.0.0", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "std_unicode 0.0.0", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "alloc_jemalloc" version = "0.0.0" dependencies = [ - "alloc 0.0.0", - "alloc_system 0.0.0", "build_helper 0.1.0", - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.0.0", "core 0.0.0", "libc 0.0.0", ] @@ -48,69 +30,92 @@ dependencies = [ name = "alloc_system" version = "0.0.0" dependencies = [ - "alloc 0.0.0", + "compiler_builtins 0.0.0", "core 0.0.0", "dlmalloc 0.0.0", "libc 0.0.0", ] [[package]] -name = "ansi_term" -version = "0.10.2" +name = "ammonia" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "html5ever 0.22.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] -name = "ar" -version = "0.3.1" +name = "ansi_term" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "arena" version = "0.0.0" +dependencies = [ + "rustc_data_structures 0.0.0", +] + +[[package]] +name = "arrayvec" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "assert_cli" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "environment 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "atty" -version = "0.2.3" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "termion 1.5.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "backtrace" -version = "0.3.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace-sys 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "backtrace-sys" -version = "0.1.16" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "bin_lib" -version = "0.1.0" - -[[package]] -name = "bitflags" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "bitflags" version = "0.9.1" @@ -118,7 +123,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -126,23 +131,22 @@ name = "bootstrap" version = "0.0.0" dependencies = [ "build_helper 0.1.0", - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "petgraph 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.40 
(registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "borrow_error" -version = "0.1.0" - [[package]] name = "bufstream" version = "0.1.3" @@ -152,108 +156,140 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "build-manifest" version = "0.1.0" dependencies = [ - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "build_helper" version = "0.1.0" -dependencies = [ - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "byteorder" -version = "1.2.1" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cargo" -version = "0.25.0" -dependencies = [ - "atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "cargotest 0.1.0", - "core-foundation 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "crates-io 0.14.0", - "crossbeam 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crypto-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - 
"flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", - "git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "home 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ignore 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jobserver 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "psapi-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)", - "tempdir 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cargo_metadata" -version = "0.2.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crates-io 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", + "git2-curl 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "home 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "ignore 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cargo" +version = "0.30.0" +dependencies = [ + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "bufstream 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crates-io 0.18.0", + "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fwdansi 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", + "git2-curl 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "home 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ignore 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.11 
(registry+https://github.com/rust-lang/crates.io-index)", + "rustc-workspace-hack 1.0.0", + "rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "cargo_metadata" -version = "0.4.0" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 
(registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "cargotest" -version = "0.1.0" +name = "cargo_metadata" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cargo 0.25.0", - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", - "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -262,82 +298,121 @@ version = "0.1.0" [[package]] name = "cc" -version = "1.0.3" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cfg-if" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "clap" -version = "2.29.0" +name = "chalk-engine" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-macros 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "chalk-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "chrono" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "clap" +version = "2.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", "yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "clippy" -version = "0.0.174" +version = 
"0.0.212" dependencies = [ - "cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy-mini-macro-test 0.1.0", - "clippy_lints 0.0.174", - "compiletest_rs 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "duct 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy-mini-macro-test 0.2.0", + "clippy_lints 0.0.212", + "compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)", + "derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-workspace-hack 1.0.0", + "rustc_version 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "clippy-mini-macro-test" -version = "0.1.0" +version = "0.2.0" [[package]] name = "clippy_lints" -version = "0.0.174" +version = "0.0.212" dependencies = [ - "if_chain 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - 
"pulldown-cmark 0.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)", + "if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "cmake" -version = "0.1.29" +version = 
"0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "coco" -version = "0.1.1" +name = "colored" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -353,14 +428,14 @@ name = "commoncrypto-sys" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "compiler_builtins" version = "0.0.0" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -369,171 +444,236 @@ name = "compiletest" version = "0.0.0" dependencies = [ "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "compiletest_rs" -version = "0.3.3" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "completion" -version = "0.1.0" - [[package]] name = "core" version = "0.0.0" +dependencies = [ + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "core-foundation" -version = "0.4.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "core-foundation-sys 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "core-foundation-sys" -version = "0.4.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation-sys" 
+version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crates-io" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "curl 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crates-io" -version = "0.14.0" +version = "0.18.0" dependencies = [ - "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam" -version = "0.2.10" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "crossbeam" -version = "0.3.0" +name = "crossbeam-channel" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + 
"crossbeam-epoch 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-deque" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-utils" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-utils" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crossbeam-utils" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "crypto-hash" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cssparser" -version = "0.13.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "procedural-masquerade 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cssparser-macros" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "procedural-masquerade 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 
0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.11 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "curl" -version = "0.4.8" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)", - "socket2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.35 (registry+https://github.com/rust-lang/crates.io-index)", + "schannel 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "socket2 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "curl-sys" -version = "0.3.15" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.23 
(registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.35 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "dbghelp-sys" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "debug_unreachable" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "deglob" +name = "datafrog" version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "derive-new" -version = "0.5.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.13.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "derive_more" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + 
"rustc_version 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.13.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -541,92 +681,82 @@ name = "diff" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "difference" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "dlmalloc" version = "0.0.0" dependencies = [ - "alloc 0.0.0", + "compiler_builtins 0.0.0", "core 0.0.0", ] -[[package]] -name = "docopt" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "dtoa" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "duct" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "error-chain 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazycell 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "os_pipe 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "shared_child 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "either" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "endian-type" -version = "0.1.2" +name = "elasticlunr-rs" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "strum 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "strum_macros 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] -name = "enum_primitive" +name = "ena" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "env_logger" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "environment" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "env_logger" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "env_logger" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "error-chain" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "error-chain" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "error-chain" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -638,51 +768,45 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "failure_derive" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", 
] -[[package]] -name = "features" -version = "0.1.0" - [[package]] name = "filetime" -version = "0.1.14" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "find_all_refs_no_cfg_test" -version = "0.1.0" - -[[package]] -name = "find_impls" -version = "0.1.0" +name = "fixedbitset" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "flate2" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -710,69 +834,87 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "fs2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fst" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + 
"byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "fuchsia-zircon" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "fuchsia-zircon-sys" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "futf" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "futures" -version = "0.1.17" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "fwdansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "getopts" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "git2" -version = "0.6.10" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 
(registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.35 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "git2-curl" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -782,175 +924,166 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "globset" -version = "0.2.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.6 
(registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "graphviz" version = "0.0.0" -[[package]] -name = "hamcrest" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "handlebars" -version = "0.29.1" +version = "0.32.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "pest 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "pest 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "pest_derive 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "hex" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "home" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "userenv-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "html-diff" -version = "0.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kuchiki 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "html5ever" -version = "0.20.0" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "markup5ever 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "markup5ever 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.13.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
+[[package]] +name = "humantime" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "idna" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "if_chain" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ignore" -version = "0.2.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", - "globset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "globset 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)", "thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "infer_bin" -version = "0.1.0" - -[[package]] -name = "infer_custom_bin" -version = "0.1.0" - -[[package]] -name = "infer_lib" -version = "0.1.0" - [[package]] name = "installer" version = "0.0.0" dependencies = [ - "clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tar 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "xz2 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "xz2 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "is-match" +version = "0.1.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "isatty" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itertools" -version = "0.6.5" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itoa" -version = "0.3.4" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jobserver" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "json" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "jsonrpc-core" version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futures 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -962,27 +1095,18 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "kuchiki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cssparser 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)", - "html5ever 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "selectors 0.18.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "languageserver-types" -version = "0.16.0" +version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "num-derive 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 
(registry+https://github.com/rust-lang/crates.io-index)", "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -993,51 +1117,58 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lazy_static" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lazycell" -version = "0.5.1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "lazycell" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" version = "0.0.0" dependencies = [ + "compiler_builtins 0.0.0", "core 0.0.0", ] [[package]] name = "libc" -version = "0.2.35" +version = "0.2.43" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libgit2-sys" -version = "0.6.18" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "libssh2-sys 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 
0.9.35 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "libssh2-sys" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.35 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1045,10 +1176,10 @@ name = "libz-sys" version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1060,34 +1191,34 @@ name = "log" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "log 0.4.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "log" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "log_settings" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "lzma-sys" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1095,59 +1226,62 @@ name = "mac" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "macro-utils" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "maplit" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "markup5ever" -version = "0.5.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
dependencies = [ - "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "phf 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_codegen 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "matches" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "mdbook" -version = "0.0.26" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ammonia 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "elasticlunr-rs 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "handlebars 0.29.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "pulldown-cmark 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "memchr" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "memchr" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 
(registry+https://github.com/rust-lang/crates.io-index)", + "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1155,7 +1289,29 @@ name = "memchr" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memmap" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memoffset" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "minifier" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "macro-utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1163,136 +1319,76 @@ name = "miniz-sys" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "net2 0.2.31 
(registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "socket2 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "miri" version = "0.1.0" dependencies = [ - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "compiletest_rs 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "multiple_bins" -version = "0.1.0" - -[[package]] -name = "net2" -version = "0.2.31" +name = "new_debug_unreachable" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "nibble_vec" -version = "0.0.3" +name = "nodrop" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "nix" -version = "0.8.1" +name = "num-derive" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-bigint 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "num-complex 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "num-rational 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-bigint" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-complex" -version = "0.1.41" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-traits 0.1.41 
(registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-integer" -version = "0.1.35" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-iter" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "num-rational" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "num-bigint 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num-traits" -version = "0.1.41" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "num_cpus" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1302,14 +1398,15 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "openssl" -version = "0.9.23" +version = "0.10.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1318,38 +1415,48 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "openssl-sys" -version = "0.9.23" +name = "openssl-src" +version = "110.0.6+1.1.0h" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "os_pipe" -version = "0.5.1" +name = "openssl-sys" +version = "0.9.35" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "nix 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-src 110.0.6+1.1.0h (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ordermap" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ordslice" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "owning_ref" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "panic_abort" version = "0.0.0" dependencies = [ + "compiler_builtins 0.0.0", "core 0.0.0", "libc 0.0.0", ] @@ -1359,6 +1466,7 @@ name = "panic_unwind" version = "0.0.0" dependencies = [ "alloc 0.0.0", + "compiler_builtins 0.0.0", "core 0.0.0", "libc 0.0.0", "unwind 0.0.0", @@ -1366,23 +1474,22 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "parking_lot_core" -version = "0.2.9" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 
(registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1392,38 +1499,57 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "pest" -version = "0.3.3" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "phf" -version = "0.7.21" +name = "pest_derive" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "pest 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "petgraph" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "phf" +version = "0.7.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "phf_codegen" -version = "0.7.21" +version = "0.7.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "phf_generator 0.7.21 
(registry+https://github.com/rust-lang/crates.io-index)", - "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_generator 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "phf_generator" -version = "0.7.21" +version = "0.7.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "phf_shared" -version = "0.7.21" +version = "0.7.22" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1431,66 +1557,80 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "polonius-engine" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "datafrog 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "precomputed-hash" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pretty_assertions" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = 
"proc-macro2" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "proc_macro" version = "0.0.0" dependencies = [ + "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] -[[package]] -name = "procedural-masquerade" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "profiler_builtins" version = "0.0.0" dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.0.0", "core 0.0.0", ] -[[package]] -name = "psapi-sys" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "pulldown-cmark" -version = "0.0.15" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "pulldown-cmark" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "quick-error" -version = 
"1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1504,61 +1644,89 @@ version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "racer" -version = "2.0.12" +name = "quote" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "radix_trie" -version = "0.1.2" +name = "quote" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "endian-type 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "nibble_vec 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "racer" +version = "2.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive_more 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + 
"lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rand" -version = "0.3.19" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "fuchsia-zircon 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rayon" -version = "0.9.0" +name = "rand" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon-core 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rayon" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rayon-core" -version = 
"1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "redox_syscall" -version = "0.1.32" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1566,50 +1734,48 @@ name = "redox_termios" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "reformat" -version = "0.1.0" - -[[package]] -name = "reformat_with_range" -version = "0.1.0" - -[[package]] -name = "regex" -version = "0.1.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name 
= "regex" -version = "0.2.3" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-syntax" -version = "0.3.9" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "regex-syntax" -version = "0.4.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "remote-test-client" @@ -1620,58 +1786,81 @@ name = "remote-test-server" version = "0.1.0" [[package]] -name = "rls" -version = "0.124.0" +name = "remove_dir_all" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cargo 0.25.0", - "env_logger 0.4.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "json 0.11.12 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rls" +version = "0.130.5" +dependencies = [ + "cargo 0.30.0", + "cargo_metadata 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy_lints 0.0.212", + "crossbeam-channel 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "languageserver-types 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "racer 2.0.12 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-analysis 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-rustc 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "languageserver-types 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ordslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "racer 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-analysis 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-blacklist 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-rustc 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustfmt-nightly 0.3.4", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-vfs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-workspace-hack 1.0.0", + "rustfmt-nightly 0.99.2", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-analysis" -version = "0.10.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "derive-new 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "radix_trie 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", + "fst 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rls-blacklist" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rls-data" -version = "0.14.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-rustc" -version = "0.1.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1680,16 +1869,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-vfs" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - 
"racer 2.0.12 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1697,8 +1885,8 @@ dependencies = [ name = "rustbook" version = "0.1.0" dependencies = [ - "clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "mdbook 0.0.26 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "mdbook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1706,35 +1894,165 @@ name = "rustc" version = "0.0.0" dependencies = [ "arena 0.0.0", - "backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-engine 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "fmt_macros 0.0.0", "graphviz 0.0.0", - "jobserver 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc_macro 0.0.0", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", "rustc_apfloat 0.0.0", - "rustc_back 0.0.0", - "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_fs_util 0.0.0", + "rustc_target 0.0.0", + "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-arena" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc-ap-rustc_data_structures 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_cratesio_shim" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_data_structures" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_cratesio_shim 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "stable_deref_trait 1.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_errors" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_target" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_cratesio_shim 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-serialize" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-ap-syntax" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_errors 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_target 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 218.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-syntax_pos" +version = "218.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-arena 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-demangle" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "rustc-hash" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustc-main" version = "0.0.0" dependencies = [ - "rustc_back 0.0.0", "rustc_driver 0.0.0", + "rustc_target 0.0.0", +] + +[[package]] +name = "rustc-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-rayon-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 
0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1742,12 +2060,23 @@ name = "rustc-serialize" version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "rustc-workspace-hack" +version = "1.0.0" +dependencies = [ + "syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustc_allocator" version = "0.0.0" dependencies = [ + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_target 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -1756,7 +2085,7 @@ dependencies = [ name = "rustc_apfloat" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_cratesio_shim 0.0.0", ] @@ -1767,36 +2096,19 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.0.0", "core 0.0.0", ] -[[package]] -name = "rustc_back" -version = "0.0.0" -dependencies = [ - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "serialize 0.0.0", - "syntax 0.0.0", -] - -[[package]] -name = "rustc_binaryen" -version = "0.0.0" -dependencies = [ - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", -] - 
[[package]] name = "rustc_borrowck" version = "0.0.0" dependencies = [ "graphviz 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_mir 0.0.0", "syntax 0.0.0", @@ -1804,61 +2116,69 @@ dependencies = [ ] [[package]] -name = "rustc_const_eval" +name = "rustc_codegen_llvm" version = "0.0.0" dependencies = [ - "arena 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc 0.0.0", - "rustc_const_math 0.0.0", - "rustc_data_structures 0.0.0", - "rustc_errors 0.0.0", - "syntax 0.0.0", - "syntax_pos 0.0.0", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_llvm 0.0.0", ] [[package]] -name = "rustc_const_math" +name = "rustc_codegen_utils" version = "0.0.0" dependencies = [ - "rustc_apfloat 0.0.0", - "serialize 0.0.0", + "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_incremental 0.0.0", + "rustc_metadata_utils 0.0.0", + "rustc_mir 0.0.0", + "rustc_target 0.0.0", "syntax 0.0.0", + "syntax_pos 0.0.0", ] [[package]] name = "rustc_cratesio_shim" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc_data_structures" version = "0.0.0" dependencies = [ - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 
0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_cratesio_shim 0.0.0", "serialize 0.0.0", - "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc_driver" version = "0.0.0" dependencies = [ - "ar 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "arena 0.0.0", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", + "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", - "rustc_back 0.0.0", "rustc_borrowck 0.0.0", - "rustc_const_eval 0.0.0", + "rustc_codegen_utils 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_incremental 0.0.0", @@ -1870,9 +2190,10 @@ dependencies = [ "rustc_privacy 0.0.0", "rustc_resolve 0.0.0", "rustc_save_analysis 0.0.0", - "rustc_trans 0.0.0", - "rustc_trans_utils 0.0.0", + "rustc_target 0.0.0", + "rustc_traits 0.0.0", "rustc_typeck 
0.0.0", + "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "syntax 0.0.0", "syntax_ext 0.0.0", @@ -1883,21 +2204,28 @@ dependencies = [ name = "rustc_errors" version = "0.0.0" dependencies = [ + "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_data_structures 0.0.0", "serialize 0.0.0", "syntax_pos 0.0.0", - "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rustc_fs_util" +version = "0.0.0" + [[package]] name = "rustc_incremental" version = "0.0.0" dependencies = [ "graphviz 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", + "rustc_fs_util 0.0.0", "serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -1907,9 +2235,10 @@ dependencies = [ name = "rustc_lint" version = "0.0.0" dependencies = [ - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc_const_eval 0.0.0", + "rustc_mir 0.0.0", + "rustc_target 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -1918,10 +2247,8 @@ dependencies = [ name = "rustc_llvm" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "build_helper 0.1.0", - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_cratesio_shim 0.0.0", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1931,7 +2258,8 @@ dependencies = [ "alloc 0.0.0", "alloc_system 
0.0.0", "build_helper 0.1.0", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.0.0", "core 0.0.0", ] @@ -1940,35 +2268,45 @@ name = "rustc_metadata" version = "0.0.0" dependencies = [ "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "proc_macro 0.0.0", "rustc 0.0.0", - "rustc_back 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_metadata_utils 0.0.0", + "rustc_target 0.0.0", "serialize 0.0.0", "syntax 0.0.0", "syntax_ext 0.0.0", "syntax_pos 0.0.0", ] +[[package]] +name = "rustc_metadata_utils" +version = "0.0.0" +dependencies = [ + "rustc 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + [[package]] name = "rustc_mir" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "arena 0.0.0", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log_settings 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_apfloat 0.0.0", - "rustc_back 0.0.0", - "rustc_const_eval 0.0.0", - "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", - "rustc_trans_utils 0.0.0", + "rustc_target 0.0.0", 
"serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -1981,7 +2319,8 @@ dependencies = [ "alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.0.0", "core 0.0.0", ] @@ -1989,11 +2328,11 @@ dependencies = [ name = "rustc_passes" version = "0.0.0" dependencies = [ - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc_const_eval 0.0.0", - "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_mir 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -2018,6 +2357,7 @@ name = "rustc_privacy" version = "0.0.0" dependencies = [ "rustc 0.0.0", + "rustc_data_structures 0.0.0", "rustc_typeck 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -2028,10 +2368,11 @@ name = "rustc_resolve" version = "0.0.0" dependencies = [ "arena 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_metadata 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -2040,56 +2381,37 @@ dependencies = [ name = "rustc_save_analysis" version = "0.0.0" dependencies = [ - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_data_structures 0.0.0", + "rustc_target 0.0.0", "rustc_typeck 0.0.0", "syntax 0.0.0", 
"syntax_pos 0.0.0", ] [[package]] -name = "rustc_trans" +name = "rustc_target" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "jobserver 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc 0.0.0", - "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_allocator 0.0.0", - "rustc_apfloat 0.0.0", - "rustc_back 0.0.0", - "rustc_binaryen 0.0.0", - "rustc_const_math 0.0.0", - "rustc_data_structures 0.0.0", - "rustc_errors 0.0.0", - "rustc_incremental 0.0.0", - "rustc_llvm 0.0.0", - "rustc_mir 0.0.0", - "rustc_platform_intrinsics 0.0.0", - "rustc_trans_utils 0.0.0", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_cratesio_shim 0.0.0", "serialize 0.0.0", - "syntax 0.0.0", - "syntax_pos 0.0.0", - "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rustc_trans_utils" +name = "rustc_traits" version = "0.0.0" dependencies = [ - "ar 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-engine 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "graphviz 0.0.0", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc_back 0.0.0", "rustc_data_structures 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", @@ -2102,7 +2424,8 @@ dependencies = [ 
"alloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", - "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.0.0", "core 0.0.0", ] @@ -2111,28 +2434,37 @@ name = "rustc_typeck" version = "0.0.0" dependencies = [ "arena 0.0.0", - "fmt_macros 0.0.0", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc_const_math 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_platform_intrinsics 0.0.0", + "rustc_target 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] +[[package]] +name = "rustc_version" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustdoc" version = "0.0.0" dependencies = [ - "build_helper 0.1.0", - "cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "html-diff 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "pulldown-cmark 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "minifier 0.0.19 (registry+https://github.com/rust-lang/crates.io-index)", + "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rustdoc-themes" +version = "0.1.0" + [[package]] name = "rustdoc-tool" version = "0.0.0" @@ -2141,43 +2473,63 @@ dependencies = [ ] [[package]] -name = "rustfmt-nightly" -version = "0.3.4" +name = "rustfix" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cargo_metadata 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "derive-new 0.5.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustfmt-nightly" +version = "0.99.2" +dependencies = [ + "assert_cli 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 
(registry+https://github.com/rust-lang/crates.io-index)", + "isatty 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_target 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "same-file" -version = "0.1.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "schannel" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "scoped-tls" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "scopeguard" 
version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -2186,36 +2538,13 @@ name = "scopeguard" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "selectors" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cssparser 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)", - "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "semver" -version = "0.6.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2225,26 +2554,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde" -version = "1.0.25" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde_derive" -version = "1.0.25" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 0.3.15 
(registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive_internals 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "serde_derive_internals" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", - "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2252,18 +2572,17 @@ name = "serde_ignored" version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.8" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2271,18 +2590,13 @@ name = "serialize" version = "0.0.0" [[package]] -name = "shared_child" -version = "0.2.1" +name = "shell-escape" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 
0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] -name = "shell-escape" -version = "0.1.3" +name = "shlex" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2292,29 +2606,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "smallvec" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "smallvec" -version = "0.6.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "socket2" -version = "0.2.4" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "stable_deref_trait" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2325,50 +2636,44 @@ dependencies = [ "alloc_jemalloc 0.0.0", "alloc_system 0.0.0", "build_helper 0.1.0", + "cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", 
"compiler_builtins 0.0.0", "core 0.0.0", "libc 0.0.0", "panic_abort 0.0.0", "panic_unwind 0.0.0", "profiler_builtins 0.0.0", - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_asan 0.0.0", "rustc_lsan 0.0.0", "rustc_msan 0.0.0", "rustc_tsan 0.0.0", - "std_unicode 0.0.0", "unwind 0.0.0", ] -[[package]] -name = "std_unicode" -version = "0.0.0" -dependencies = [ - "core 0.0.0", -] - [[package]] name = "string_cache" -version = "0.6.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "string_cache_codegen" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.3.15 
(registry+https://github.com/rust-lang/crates.io-index)", + "phf_generator 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", + "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2379,9 +2684,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "strsim" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "strum" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "strum_macros" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "syn" version = "0.11.11" @@ -2392,6 +2712,26 @@ dependencies = [ "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "syn" +version = "0.13.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "synom" version = "0.11.3" @@ -2402,22 +2742,25 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syntax" version = "0.0.0" dependencies = [ - "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_cratesio_shim 0.0.0", + "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_target 0.0.0", + "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "syntax_pos 0.0.0", ] @@ -2428,7 +2771,9 @@ version = "0.0.0" dependencies = [ "fmt_macros 0.0.0", "proc_macro 0.0.0", + "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", + "rustc_target 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] @@ -2437,64 +2782,35 @@ dependencies = [ name = "syntax_pos" version = "0.0.0" dependencies = [ + "arena 0.0.0", + "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_data_structures 0.0.0", + "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", - "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syntex_errors" 
-version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_pos 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syntex_pos" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syntex_syntax" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_pos 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)", - "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tar" -version = "0.4.14" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.32 
(registry+https://github.com/rust-lang/crates.io-index)", - "xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "tempdir" -version = "0.3.5" +name = "tempfile" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2502,9 +2818,9 @@ name = "tendril" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "futf 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "futf 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "utf-8 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "utf-8 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2513,19 +2829,27 @@ version = "0.0.0" [[package]] name = "term" -version = "0.4.6" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "termcolor" -version = "0.3.3" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "wincolor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "termcolor" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wincolor 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2533,8 +2857,8 @@ name = "termion" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2542,33 +2866,16 @@ dependencies = [ name = "test" version = "0.0.0" dependencies = [ - "getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.0.0", ] [[package]] name = "textwrap" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "thread-id" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] 
-name = "thread_local" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2576,56 +2883,75 @@ name = "thread_local" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tidy" version = "0.1.0" +dependencies = [ + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] -name = "toml" -version = "0.2.1" +name = "time" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "toml" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "toml-query" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "is-match 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ucd-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicode-bidi" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "unicode-normalization" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "unicode-segmentation" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "unicode-width" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-xid" -version = "0.0.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2634,12 +2960,9 @@ version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "unreachable" -version = "0.1.1" +name = "unicode-xid" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "unreachable" @@ -2653,6 +2976,7 @@ dependencies = [ name = "unstable-book-gen" version = "0.1.0" dependencies = [ + "num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "tidy 0.1.0", ] @@ -2660,17 +2984,18 @@ dependencies = [ name = "unwind" version = "0.0.0" dependencies = [ + "compiler_builtins 0.0.0", "core 0.0.0", "libc 0.0.0", ] 
[[package]] name = "url" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2679,8 +3004,8 @@ name = "url_serde" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2694,15 +3019,7 @@ dependencies = [ [[package]] name = "utf-8" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "utf8-ranges" -version = "0.1.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2712,12 +3029,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "vcpkg" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "vec_map" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2727,12 +3044,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "walkdir" -version = "1.0.7" +version = "2.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2740,47 +3056,60 @@ name = "winapi" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "winapi" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "winapi-build" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "wincolor" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "workspace_symbol" -version = "0.1.0" - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" +name = "wincolor" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 
0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "xattr" -version = "0.1.11" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "xz2" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lzma-sys 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "lzma-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2789,222 +3118,255 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" [metadata] -"checksum advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e06588080cb19d0acb6739808aafa5f26bfb2ca015b2b6370028b44cf7cb8a9a" -"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66" -"checksum aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d6531d44de723825aa81398a6415283229725a00fa30713812ab9323faa82fc4" -"checksum ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6b3568b48b7cefa6b8ce125f9bb4989e52fbcc29ebea88df04cc7c5f12f70455" -"checksum ar 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "35c7a5669cb64f085739387e1308b74e6d44022464b7f1b63bbd4ceb6379ec31" -"checksum atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "21e50800ec991574876040fff8ee46b136a53e985286fbe6a3bdfe6421b78860" -"checksum backtrace 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8709cc7ec06f6f0ae6c2c7e12f6ed41540781f72b488d83734978295ceae182e" -"checksum backtrace-sys 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = 
"44585761d6161b0f57afc49482ab6bd067e4edef48c12a152c237eb0203f7661" -"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" +"checksum aho-corasick 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c1c6d463cbe7ed28720b5b489e7c083eeb8f90d08be2a0d6bb9e1ffea9ce1afa" +"checksum ammonia 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fd4c682378117e4186a492b2252b9537990e1617f44aed9788b9a1149de45477" +"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +"checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef" +"checksum assert_cli 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "98589b0e465a6c510d95fceebd365bb79bedece7f6e18a480897f2015f85ec51" +"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652" +"checksum backtrace 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "89a47830402e9981c5c41223151efcced65a0510c13097c769cede7efb34782a" +"checksum backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "c66d56ac8dabd07f6aacdaf633f4b8262f5b3601a810a0dcddffd5c22c69daa0" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" -"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" +"checksum bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c54bb8f454c567f21197eefcdbf5679d0bd99f2ddbe52e84c77061952e6789" "checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32" -"checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23" -"checksum cargo_metadata 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "be1057b8462184f634c3a208ee35b0f935cfd94b694b26deadccd98732088d7b" -"checksum cargo_metadata 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "20d6fb2b5574726329c85cdba0df0347fddfec3cf9c8b588f9931708280f5643" -"checksum cc 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a9b13a57efd6b30ecd6598ebdb302cca617930b5470647570468a65d12ef9719" -"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" -"checksum clap 2.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "110d43e343eb29f4f51c1db31beb879d546db27998577e5715270a54bcf41d3f" -"checksum cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "56d741ea7a69e577f6d06b36b7dff4738f680593dc27a701ffa8506b73ce28bb" -"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd" +"checksum byteorder 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "74c0b906e9446b0a2e4f760cdb3fa4b2c48cdc6db8766a845c54b6ff063fd2e9" +"checksum cargo 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)" = "21dd0ac7737313b8c5c6fbfaf351aa93d4e90f66d4a33a11d1f3fb29584ac631" +"checksum cargo_metadata 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1efca0b863ca03ed4c109fb1c55e0bc4bbeb221d3e103d86251046b06a526bd0" +"checksum cargo_metadata 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2d6809b327f87369e6f3651efd2c5a96c49847a3ed2559477ecba79014751ee1" +"checksum cc 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2119ea4867bd2b8ed3aecab467709720b2d55b1bcfe09f772fd68066eaf15275" +"checksum cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efe5c877e17a9c717a0bf3613b2709f723202c4e4675cc8f12926ded29bcb17e" +"checksum chalk-engine 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "25ce2f28f55ed544a2a3756b7acf41dd7d6f27acffb2086439950925506af7d0" +"checksum chalk-macros 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "295635afd6853aa9f20baeb7f0204862440c0fe994c5a253d5f479dac41d047e" +"checksum chrono 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6962c635d530328acc53ac6a955e83093fedc91c5809dfac1fa60fa470830a37" +"checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e" +"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +"checksum cmake 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "95470235c31c726d72bf2e1f421adc1e65b9d561bf5529612cbe1a72da1467b3" +"checksum colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0aa3473e85a3161b59845d6096b289bb577874cafeaf75ea1b1beaa6572c7fc" "checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007" "checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2" -"checksum compiletest_rs 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "562bafeec9aef1e3e08f1c5b0c542220bb80ff2894e5373a1f9d17c346412c66" -"checksum core-foundation 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "8047f547cd6856d45b1cdd75ef8d2f21f3d0e4bf1dab0a0041b0ae9a5dda9c0e" -"checksum core-foundation-sys 0.4.6 
(registry+https://github.com/rust-lang/crates.io-index)" = "152195421a2e6497a8179195672e9d4ee8e45ed8c465b626f1606d27a08ebcd5" -"checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97" -"checksum crossbeam 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8837ab96533202c5b610ed44bc7f4183e7957c1c8f56e8cc78bb098593c8ba0a" -"checksum crypto-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34903878eec1694faf53cae8473a088df333181de421d4d3d48061d6559fe602" -"checksum cssparser 0.13.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef6124306e5ebc5ab11891d063aeafdd0cdc308079b708c8b566125f3680292b" -"checksum cssparser-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "079adec4af52bb5275eadd004292028c79eb3c5f5b4ee8086a36d4197032f6df" -"checksum curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7034c534a1d7d22f7971d6088aa9d281d219ef724026c3428092500f41ae9c2c" -"checksum curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4bee31aa3a079d5f3ff9579ea4dcfb1b1a17a40886f5f467436d383e78134b55" -"checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850" -"checksum debug_unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9a032eac705ca39214d169f83e3d3da290af06d8d1d344d1baad2fd002dca4b3" -"checksum derive-new 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "415f627ab054041c3eb748c2e1da0ef751989f5f0c386b63a098e545854a98ba" +"checksum compiletest_rs 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)" = "d3064bc712922596dd5ab449fca9261d411893356581fe5297b96aa8f53bb1b8" +"checksum core-foundation 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "286e0b41c3a20da26536c6000a280585d519fd07b3956b43aed8a79e9edce980" 
+"checksum core-foundation 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cc3532ec724375c7cb7ff0a097b714fde180bb1f6ed2ab27cfcd99ffca873cd2" +"checksum core-foundation-sys 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "716c271e8613ace48344f723b60b900a93150271e5be206212d052bbc0883efa" +"checksum core-foundation-sys 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a3fb15cdbdd9cf8b82d97d0296bb5cd3631bba58d6e31650a002a8e7fb5721f9" +"checksum crates-io 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f78703ef5348db1e3244fb6b496e840965fb4754a5319270f2bd77ddb856e1c" +"checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19" +"checksum crossbeam-channel 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "efff2d411e0ac3731b9f6de882b2790fdd2de651577500a806ce78b95b2b9f31" +"checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3" +"checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150" +"checksum crossbeam-epoch 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "285987a59c4d91388e749850e3cb7b3a92299668528caaacd08005b8f238c0ea" +"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9" +"checksum crossbeam-utils 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ea52fab26a99d96cdff39d0ca75c9716125937f5dba2ab83923aaaf5928f684a" +"checksum crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "677d453a17e8bd2b913fa38e8b9cf04bcdbb5be790aa294f2389661d72036015" +"checksum crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"09de9ee0fc255ace04c7fa0763c9395a945c37c8292bb554f8d48361d1dcf1b4" +"checksum curl 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "893713db705eab9847e050268507b0e2a2aad64e90a831874bd4e8e0d67f9523" +"checksum curl-sys 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "de9cf174efdf90b5887c4e2e900769373c89c5e18152e8f3ed75b501a6f1c0fb" +"checksum datafrog 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16d724bf4ffe77cdceeecd461009b5f8d9e23c5d645d68bedb4586bf43e7e142" +"checksum derive-new 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ceed73957c449214f8440eec8ad7fa282b67dc9eacbb24a3085b15d60397a17a" +"checksum derive_more 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46c7f14685a20f5dd08e7f754f2ea8cc064d8f4214ae21116c106a2768ba7b9b" "checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a" -"checksum docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b5b93718f8b3e5544fcc914c43de828ca6c6ace23e0332c6080a2977b49787a" -"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab" -"checksum duct 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e45aa15fe0a8a8f511e6d834626afd55e49b62e5c8802e18328a87e8a8f6065c" -"checksum either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "740178ddf48b1a9e878e6d6509a1442a2d42fd2928aae8e7a6f8a36fb01981b3" -"checksum endian-type 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" -"checksum enum_primitive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" -"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = 
"15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" -"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +"checksum dtoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6d301140eb411af13d3115f9a562c85cc6b541ade9dfa314132244aaee7489dd" +"checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0" +"checksum elasticlunr-rs 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4837d77a1e157489a3933b743fd774ae75074e0e390b2b7f071530048a0d87ee" +"checksum ena 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "88dc8393b3c7352f94092497f6b52019643e493b6b890eb417cdb7c46117e621" +"checksum env_logger 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0e6e40ebb0e66918a37b38c7acab4e10d299e0463fe2af5d29b9cc86710cfd2a" +"checksum environment 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f4b14e20978669064c33b4c1e0fb4083412e40fe56cbea2eae80fd7591503ee" "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" -"checksum error-chain 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6930e04918388a9a2e41d518c25cf679ccafe26733fb4127dbf21993f2575d46" -"checksum failure 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "934799b6c1de475a012a02dab0ace1ace43789ee4b99bcfbf1a2e3e8ced5de82" -"checksum failure_derive 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c7cdda555bb90c9bb67a3b670a0f42de8e73f5981524123ad8578aafec8ddb8b" -"checksum filetime 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = 
"aa75ec8f7927063335a9583e7fa87b0110bb888cf766dc01b54c0ff70d760c8e" +"checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02" +"checksum failure 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7efb22686e4a466b1ec1a15c2898f91fa9cb340452496dca654032de20ff95b9" +"checksum failure_derive 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "946d0e98a50d9831f5d589038d2ca7f8f455b1c21028c0db0e84116a12696426" +"checksum filetime 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "da4b9849e77b13195302c174324b5ba73eec9b236b24c221a61000daefb95c5f" +"checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33" "checksum flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fac2277e84e5e858483756647a9d0aa8d9a2b7cba517fd84325a0aaa69a0909" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" "checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" "checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" -"checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866" -"checksum fuchsia-zircon 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bd510087c325af53ba24f3be8f1c081b0982319adcb8b03cad764512923ccc19" -"checksum fuchsia-zircon-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "08b3a6f13ad6b96572b53ce7af74543132f1a7055ccceb6d073dd36c54481859" -"checksum futf 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" 
= "51f93f3de6ba1794dcd5810b3546d004600a59a98266487c8407bc4b24e398f3" -"checksum futures 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "118b49cac82e04121117cbd3121ede3147e885627d82c4546b87c702debb90c1" -"checksum getopts 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "65922871abd2f101a2eb0eaebadc66668e54a87ad9c3dd82520b5f86ede5eff9" -"checksum git2 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "40a111aecd59985496012976beca164b4f6c930d507a099831e06b07f19d54f1" -"checksum git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "68676bc784bf0bef83278898929bf64a251e87c0340723d0b93fa096c9c5bf8e" +"checksum fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +"checksum fst 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d94485a00b1827b861dd9d1a2cc9764f9044d4c535514c0760a5a2012ef3399f" +"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +"checksum futf 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7c9c1ce3fa9336301af935ab852c437817d14cd33690446569392e65170aac3b" +"checksum futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "1a70b146671de62ec8c8ed572219ca5d594d9b06c0b364d5e67b722fc559b48c" +"checksum fwdansi 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "34dd4c507af68d37ffef962063dfa1944ce0dd4d5b82043dbab1dabe088610c3" +"checksum getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)" = "b900c08c1939860ce8b54dc6a89e26e00c04c380fd0e09796799bd7f12861e05" +"checksum git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)" = 
"591f8be1674b421644b6c030969520bc3fa12114d2eb467471982ed3e9584e71" +"checksum git2-curl 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b502f6b1b467957403d168f0039e0c46fa6a1220efa2adaef25d5b267b5fe024" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" -"checksum globset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "464627f948c3190ae3d04b1bc6d7dca2f785bda0ac01278e6db129ad383dbeb6" -"checksum hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bf088f042a467089e9baa4972f57f9247e42a0cc549ba264c7a04fbb8ecb89d4" -"checksum handlebars 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fb04af2006ea09d985fef82b81e0eb25337e51b691c76403332378a53d521edc" -"checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" -"checksum home 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9f25ae61099d8f3fee8b483df0bd4ecccf4b2731897aad40d50eca1b641fe6db" -"checksum html-diff 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ee4cfdf62a484a3ac0d9b80f562d37f99366db08a63621b917ea3056565345f7" -"checksum html5ever 0.20.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5bfb46978eb757a603b7dfe2dafb1c62cb4dee3428d8ac1de734d83d6b022d06" -"checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" -"checksum if_chain 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "61bb90bdd39e3af69b0172dfc6130f6cd6332bf040fbb9bdd4401d37adbd48b8" -"checksum ignore 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b3fcaf2365eb14b28ec7603c98c06cc531f19de9eb283d89a3dff8417c8c99f5" -"checksum itertools 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d3f2be4da1690a039e9ae5fd575f706a63ad5a2120f161b1d653c9da3930dd21" -"checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" -"checksum jobserver 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "565f6106bd87b394398f813bea4e5ecad6d6b0f6aa077592d088f882a506481d" -"checksum json 0.11.12 (registry+https://github.com/rust-lang/crates.io-index)" = "39ebf0fac977ee3a4a3242b6446004ff64514889e3e2730bbd4f764a67a2e483" +"checksum globset 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "142754da2c9b3722affd909f9e27f2a6700a7a303f362971e0a74c652005a43d" +"checksum handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d89ec99d1594f285d4590fc32bac5f75cdab383f1123d504d27862c644a807dd" +"checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" +"checksum home 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8f94f6fbdc000a6eba0c8cf08632b2091bb59141d36ac321a2a96d6365e5e4dc" +"checksum html5ever 0.22.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b04478cf718862650a0bf66acaf8f2f8c906fbc703f35c916c1f4211b069a364" +"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" +"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4bac95d9aa0624e7b78187d6fb8ab012b41d9f6f54b1bcb61e61c4845f8357ec" +"checksum ignore 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "787a5940ab88e0f2f3b2cad3687060bddcf67520f3b761abc31065c9c495d088" +"checksum is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7e5b386aef33a1c677be65237cb9d32c3f3ef56bd035949710c4bb13083eb053" +"checksum isatty 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6c324313540cd4d7ba008d43dc6606a32a5579f13cc17b2804c13096f0a5c522" +"checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450" +"checksum itoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5adb58558dcd1d786b5f0bd15f3226ee23486e24b7b58304b60f64dc68e62606" +"checksum jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "60af5f849e1981434e4a31d3d782c4774ae9b434ce55b101a96ecfd09147e8be" "checksum jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf83704f4e79979a424d1082dd2c1e52683058056c9280efa19ac5f6bc9033c" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum kuchiki 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e03098e8e719c92b7794515dfd5c1724e2b12f5ce1788e61cfa4663f82eba8d8" -"checksum languageserver-types 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "773e175c945800aeea4c21c04090bcb9db987b1a566ad9c6f569972299950e3e" +"checksum languageserver-types 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9d91d91d1c23db74187096d191967cb49f49bb175ad6d855fa9229d16ef2c982" "checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" -"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d" -"checksum lazycell 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b585b7a6811fb03aa10e74b278a0f00f8dd9b45dc681f148bb29fa5cb61859b" -"checksum libc 0.2.35 
(registry+https://github.com/rust-lang/crates.io-index)" = "96264e9b293e95d25bfcbbf8a88ffd1aedc85b754eba8b7d78012f638ba220eb" -"checksum libgit2-sys 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "82fc20bd8beefe7c9f98aae2d3cff78e57f544cdd83d58fe181ec37a5fbe0c77" -"checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75" +"checksum lazy_static 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fb497c35d362b6a331cfd94956a07fc2c78a4604cdbee844a81170386b996dd3" +"checksum lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef" +"checksum lazycell 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d33a48d0365c96081958cc663eef834975cb1e8d8bea3378513fc72bdbf11e50" +"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d" +"checksum libgit2-sys 0.7.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6ab62b46003ba97701554631fa570d9f7e7947e2480ae3d941e555a54a2c0f05" +"checksum libssh2-sys 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c628b499e8d1a4f4bd09a95d6cb1f8aeb231b46a9d40959bbd0408f14dd63adf" "checksum libz-sys 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)" = "87f737ad6cc6fd6eefe3d9dc5412f1573865bded441300904d2f42269e140f16" "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -"checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2" -"checksum log_settings 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3d382732ea0fbc09790c4899db3255bdea0fc78b54bf234bd18a63bb603915b6" -"checksum lzma-sys 0.1.9 
(registry+https://github.com/rust-lang/crates.io-index)" = "c1b93b78f89e8737dac81837fc8f5521ac162abcba902e1a3db949d55346d1da" +"checksum log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "61bd98ae7f7b754bc53dca7d44b604f733c6bba044ea6f41bc8d89272d8161d2" +"checksum log_settings 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19af41f0565d7c19b2058153ad0b42d4d5ce89ec4dbf06ed6741114a8b63e7cd" +"checksum lzma-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d1eaa027402541975218bb0eec67d6b0412f6233af96e0d096d31dbdfd22e614" "checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" -"checksum markup5ever 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "047150a0e03b57e638fc45af33a0b63a0362305d5b9f92ecef81df472a4cceb0" -"checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376" -"checksum mdbook 0.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "8a1ac668292d1e5c7b1c6fd64f70d3a85105b8069a89558a0d67bdb2ff298ca1" -"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" -"checksum memchr 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" +"checksum macro-utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2c4deaccc2ead6a28c16c0ba82f07d52b6475397415ce40876e559b0b0ea510" +"checksum maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08cbb6b4fef96b6d77bfc40ec491b1690c779e77b05cd9f07f787ed376fd4c43" +"checksum markup5ever 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfedc97d5a503e96816d10fedcd5b42f760b2e525ce2f7ec71f6a41780548475" +"checksum matches 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)" = "835511bab37c34c47da5cb44844bea2cfde0236db0b506f90ea4224482c9774a" +"checksum mdbook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "90b5a8d7e341ceee5db3882a06078d42661ddcfa2b3687319cc5da76ec4e782f" "checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d" +"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff" +"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" +"checksum minifier 0.0.19 (registry+https://github.com/rust-lang/crates.io-index)" = "9908ed7c62f990c21ab41fdca53a864a3ada0da69d8729c4de727b397e27bc11" "checksum miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4" -"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" -"checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09" -"checksum nibble_vec 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "62e678237a4c70c5f2b917cefd7d080dfbf800421f06e8a59d4e28ef5130fd9e" -"checksum nix 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "47e49f6982987135c5e9620ab317623e723bd06738fd85377e8d55f57c8b6487" -"checksum num 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "cc4083e14b542ea3eb9b5f33ff48bd373a92d78687e74f4cc0a30caeb754f0ca" -"checksum num-bigint 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "bdc1494b5912f088f260b775799468d9b9209ac60885d8186a547a0476289e23" -"checksum num-complex 0.1.41 
(registry+https://github.com/rust-lang/crates.io-index)" = "58de7b4bf7cf5dbecb635a5797d489864eadd03b107930cbccf9e0fd7428b47c" -"checksum num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "d1452e8b06e448a07f0e6ebb0bb1d92b8890eea63288c0b627331d53514d0fba" -"checksum num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "7485fcc84f85b4ecd0ea527b14189281cf27d60e583ae65ebc9c088b13dffe01" -"checksum num-rational 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "0c7cb72a95250d8a370105c828f388932373e0e94414919891a0f945222310fe" -"checksum num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "cacfcab5eb48250ee7d0c7896b51a2c5eec99c1feea5f32025635f5ae4b00070" -"checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d" +"checksum miow 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9224c91f82b3c47cf53dcf78dfaa20d6888fbcc5d272d5f2fcdf8a697f3c987d" +"checksum new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0cdc457076c78ab54d5e0d6fa7c47981757f1e34dc39ff92787f217dede586c4" +"checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2" +"checksum num-derive 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0d2c31b75c36a993d30c7a13d70513cb93f02acafdd5b7ba250f9b0e18615de7" +"checksum num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "e83d528d2677f0518c570baf2b7abdcf0cd2d248860b68507bdcb3e91d4c0cea" +"checksum num-traits 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "630de1ef5cc79d0cdd78b7e33b81f083cbfe90de0f4b2b2f07f905867c70e9fe" +"checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30" "checksum open 1.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "c281318d992e4432cfa799969467003d05921582a7489a8325e37f8a450d5113" -"checksum openssl 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)" = "169a4b9160baf9b9b1ab975418c673686638995ba921683a7f1e01470dcb8854" +"checksum openssl 0.10.11 (registry+https://github.com/rust-lang/crates.io-index)" = "6c24d3508b4fb6da175c10baac54c578b33f09c89ae90c6fe9788b3b4768efdc" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -"checksum openssl-sys 0.9.23 (registry+https://github.com/rust-lang/crates.io-index)" = "2200ffec628e3f14c39fc0131a301db214f1a7d584e36507ee8700b0c7fb7a46" -"checksum os_pipe 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "998bfbb3042e715190fe2a41abfa047d7e8cb81374d2977d7f100eacd8619cb1" +"checksum openssl-src 110.0.6+1.1.0h (registry+https://github.com/rust-lang/crates.io-index)" = "2011250f011d9c0f2e982f36721c9cbf451a9b04f425ea43a6a3f1bfa889a3b4" +"checksum openssl-sys 0.9.35 (registry+https://github.com/rust-lang/crates.io-index)" = "912f301a749394e1025d9dcddef6106ddee9252620e6d0a0e5f8d0681de9b129" +"checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" +"checksum ordslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd20eec3dbe4376829cb7d80ae6ac45e0a766831dca50202ff2d40db46a8a024" "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" -"checksum parking_lot 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3e7f7c9857874e54afeb950eebeae662b1e51a2493666d2ea4c0a5d91dcf0412" -"checksum parking_lot_core 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6bf05dc61189828dfd7a59fd6e66d538e88d6b30390da1124a291e09fd3098b3" +"checksum 
parking_lot 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4d05f1349491390b1730afba60bb20d55761bef489a954546b58b4b34e1e2ac" +"checksum parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" -"checksum pest 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0a6dda33d67c26f0aac90d324ab2eb7239c819fc7b2552fe9faa4fe88441edc8" -"checksum phf 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "cb325642290f28ee14d8c6201159949a872f220c62af6e110a56ea914fbe42fc" -"checksum phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d62594c0bb54c464f633175d502038177e90309daf2e0158be42ed5f023ce88f" -"checksum phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "6b07ffcc532ccc85e3afc45865469bf5d9e4ef5bfcf9622e3cfe80c2d275ec03" -"checksum phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "07e24b0ca9643bdecd0632f2b3da6b1b89bbb0030e0b992afc1113b23a7bc2f2" -"checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903" +"checksum pest 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0fce5d8b5cc33983fc74f78ad552b5522ab41442c4ca91606e4236eb4b5ceefc" +"checksum pest_derive 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ab94faafeb93f4c5e3ce81ca0e5a779529a602ad5d09ae6d21996bfb8b6a52bf" +"checksum petgraph 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "8b30dc85588cd02b9b76f5e386535db546d21dc68506cff2abebee0b6445e8e4" +"checksum phf 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "7d37a244c75a9748e049225155f56dbcb98fe71b192fd25fd23cb914b5ad62f2" +"checksum 
phf_codegen 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "4e4048fe7dd7a06b8127ecd6d3803149126e9b33c7558879846da3a63f734f2b" +"checksum phf_generator 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "05a079dd052e7b674d21cb31cbb6c05efd56a2cd2827db7692e2f1a507ebd998" +"checksum phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "c2261d544c2bb6aa3b10022b0be371b9c7c64f762ef28c6f5d4f1ef6d97b5930" +"checksum pkg-config 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "110d5ee3593dbb73f56294327fe5668bcc997897097cbc76b51e7aed3f52452f" +"checksum polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b6b0a7f5f4278b991ffd14abce1d01b013121ad297460237ef0a2f08d43201" "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -"checksum procedural-masquerade 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dc1bcafee1590f81acb329ae45ec627b318123f085153913620316ae9a144b2a" -"checksum psapi-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f71c7e142c25f297077a8ebc21f10847096b5d21ad7619d7bf0c1fcecb40bb0" -"checksum pulldown-cmark 0.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "378e941dbd392c101f2cb88097fa4d7167bc421d4b88de3ff7dbee503bc3233b" -"checksum pulldown-cmark 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a656fdb8b6848f896df5e478a0eb9083681663e37dcb77dd16981ff65329fe8b" -"checksum quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eda5fe9b71976e62bc81b781206aaa076401769b2143379d3eb2118388babac4" +"checksum pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a029430f0d744bc3d15dd474d591bed2402b645d024583082b9f63bb936dac6" +"checksum proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = 
"1b06e2f335f48d24442b35a19df506a835fb3547bc3c06ef27340da9acf5cae7" +"checksum proc-macro2 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c65b1ea15bb859d922cade2d1765b4b88beac339cbfad545ef2d2ef8c8215ee6" +"checksum pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d6fdf85cda6cadfae5428a54661d431330b312bc767ddbc57adbedc24da66e32" +"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" "checksum quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "07589615d719a60c8dd8a4622e7946465dfef20d1a428f969e3443e7386d5f45" "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" -"checksum racer 2.0.12 (registry+https://github.com/rust-lang/crates.io-index)" = "034f1c4528581c40a60e96875467c03315868084e08ff4ceb46a00f7be3b16b4" -"checksum radix_trie 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "211c49b6a9995cac0fd1dd9ca60b42cf3a51e151a12eb954b3a9e75513426ee8" -"checksum rand 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "9e7944d95d25ace8f377da3ac7068ce517e4c646754c43a1b1849177bbf72e59" -"checksum rayon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed02d09394c94ffbdfdc755ad62a132e94c3224a8354e78a1200ced34df12edf" -"checksum rayon-core 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e64b609139d83da75902f88fd6c01820046840a18471e4dfcd5ac7c0f46bea53" -"checksum redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)" = "ab105df655884ede59d45b7070c8a65002d921461ee813a024558ca16030eea0" +"checksum quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9949cfe66888ffe1d53e6ec9d9f3b70714083854be20fd5e271b232a017401e8" +"checksum quote 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e44651a0dc4cdd99f71c83b561e221f714912d11af1a4dff0631f923d53af035" +"checksum racer 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "dcbc5ababaffee8d8f34910f925287c8f716b1ead48561c4278a152d08264f7c" +"checksum rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "eba5f8cb59cc50ed56be8880a5c7b496bfd9bd26394e176bc67884094145c2c5" +"checksum rand 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "12397506224b2f93e6664ffc4f664b29be8208e5157d3d90b44f09b5fae470ea" +"checksum rand_core 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "edecf0f94da5551fc9b492093e30b041a891657db7940ee221f9d2f66e82eef2" +"checksum rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "80e811e76f1dbf68abf87a759083d34600017fc4e10b6bd5ad84a700f9dba4b1" +"checksum rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9d24ad214285a7729b174ed6d3bcfcb80177807f959d95fafd5bfc5c4f201ac8" +"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" "checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" -"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f" -"checksum regex 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ac6ab4e9218ade5b423358bbd2567d1617418403c7a512603630181813316322" -"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" -"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" -"checksum rls-analysis 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"38841e3c5271715a574ac220d9b408b59ed9e2626909c3bc54b5853b4eaadb7b" -"checksum rls-data 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8024f1feaca72d0aa4ae1e2a8d454a31b9a33ed02f8d0e9c8559bf53c267ec3c" -"checksum rls-rustc 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b21ea952e9bf1569929abf1bb920262cde04b7b1b26d8e0260286302807299d2" +"checksum regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" +"checksum regex 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13c93d55961981ba9226a213b385216f83ab43bd6ac53ab16b2eeb47e337cf4e" +"checksum regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" +"checksum regex-syntax 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05b06a75f5217880fc5e905952a42750bf44787e56a6c6d6852ed0992f5e1d54" +"checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5" +"checksum rls-analysis 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)" = "96f84d303dcbe1c1bdd41b10867d3399c38fbdac32c4e3645cdb6dbd7f82db1d" +"checksum rls-blacklist 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e4a9cc2545ccb7e05b355bfe047b8039a6ec12270d5f3c996b766b340a50f7d2" +"checksum rls-data 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3dd20763e1c60ae8945384c8a8fa4ac44f8afa7b0a817511f5e8927e5d24f988" +"checksum rls-rustc 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9dba7390427aefa953608429701e3665192ca810ba8ae09301e001b7c7bed0" "checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" -"checksum rls-vfs 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ffd34691a510938bb67fe0444fb363103c73ffb31c121d1e16bc92d8945ea8ff" -"checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e" +"checksum rls-vfs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ecbc8541b4c341d6271eae10f869dd9d36db871afe184f5b6f9bffbd6ed0373f" +"checksum rustc-ap-arena 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6e905467184ce31ccdbd33ac33b9ba377f8cc7aefb340a733ab7e5efe34cddda" +"checksum rustc-ap-rustc_cratesio_shim 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c2a1a45817e78d0c1e2800fb933c526747ef2c5ee4b2dc0946e0c2d901329b88" +"checksum rustc-ap-rustc_data_structures 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4e9e5588883318e0e58bb7ea7cde2a66eaca55b25e32908f0982365988657" +"checksum rustc-ap-rustc_errors 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d233c0d9beda42a52d329a5df865c8f20c64773d2ab7aa6b4ae4248bacf3188" +"checksum rustc-ap-rustc_target 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eec0bc13feecf9e88e39439b24b4b3ca54db8caf12fb7172d0c430451c8b377c" +"checksum rustc-ap-serialize 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffcfb1102cd7cbf5f25c008a00f7253427af9dfac8989ede48c19bd47f556893" +"checksum rustc-ap-syntax 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3a2ca0ef078a735c81a0d33589e04148dcf41f80ee7ebe30e72904a631b7c669" +"checksum rustc-ap-syntax_pos 218.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1bbd31d1bbc7210983c3bbbcb9ee35bac443c6c899f979b8114e58bb7101c28" +"checksum rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "76d7ba1feafada44f2d38eed812bd2489a03c0f5abb975799251518b68848649" +"checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8" +"checksum rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c6d5a683c6ba4ed37959097e88d71c9e8e26659a3cb5be8b389078e7ad45306" +"checksum rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40f06724db71e18d68b3b946fdf890ca8c921d9edccc1404fdfdb537b0d12649" "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" -"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7" -"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d" -"checksum scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "59a076157c1e2dc561d8de585151ee6965d910dd4dcb5dabb7ae3e83981a6c57" +"checksum rustc_version 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a54aa04a10c68c1c4eacb4337fd883b435997ede17a9385784b990777686b09a" +"checksum rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "756567f00f7d89c9f89a5c401b8b1caaa122e27240b9eaadd0bb52ee0b680b1b" +"checksum same-file 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cfb6eded0b06a0b512c8ddbcf04089138c9b4362c2f696f3c3d76039d68f3637" +"checksum schannel 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "dc1fabf2a7b6483a141426e1afd09ad543520a77ac49bd03c286e7696ccfd77f" +"checksum scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" "checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" -"checksum selectors 0.18.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "e3c89b1c6a3c029c82263f7dd2d44d0005ee7374eb09e254ab59dede4353a8c0" -"checksum semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" -"checksum semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bee2bc909ab2d8d60dab26e8cad85b25d795b14603a0dcb627b78b9d30b6454b" +"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "386122ba68c214599c44587e0c0b411e8d90894503a95425b4f9508e4317901f" -"checksum serde_derive 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "ec0bfa6c5784e7d110514448da0e1dbad41ea5514c3e68be755b23858b83a399" -"checksum serde_derive_internals 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)" = "730fe9f29fe8db69a601837f416e46cba07792031ed6b27557a43e49d62d89ae" +"checksum serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)" = "0c3adf19c07af6d186d91dae8927b83b0553d07ca56cbf7f2f32560455c91920" +"checksum serde_derive 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)" = "3525a779832b08693031b8ecfb0de81cd71cfd3812088fafe9a7496789572124" "checksum serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "190e9765dcedb56be63b6e0993a006c7e3b071a016a304736e4a315dc01fb142" -"checksum serde_json 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7cf5b0b5b4bd22eeecb7e01ac2e1225c7ef5e4272b79ee28a8392a8c8489c839" -"checksum shared_child 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "099b38928dbe4a0a01fcd8c233183072f14a7d126a34bed05880869be66e14cc" -"checksum 
shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5cc96481d54583947bfe88bf30c23d53f883c6cd0145368b69989d97b84ef8" +"checksum serde_json 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c6908c7b925cd6c590358a4034de93dbddb20c45e1d021931459fd419bf0e2" +"checksum shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "170a13e64f2a51b77a45702ba77287f5c6829375b04a69cf2222acd17d0cfab9" +"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" "checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537" -"checksum smallvec 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4f8266519bc1d17d0b5b16f6c21295625d562841c708f6376f49028a43e9c11e" -"checksum smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44db0ecb22921ef790d17ae13a3f6d15784183ff5f2a01aa32098c7498d2b4b9" -"checksum socket2 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "36b4896961171cd3317c7e9603d88f379f8c6e45342212235d356496680c68fd" -"checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b" -"checksum string_cache 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "413fc7852aeeb5472f1986ef755f561ddf0c789d3d796e65f0b6fe293ecd4ef8" -"checksum string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "479cde50c3539481f33906a387f2bd17c8e87cb848c35b6021d41fb81ff9b4d7" +"checksum smallvec 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "26df3bb03ca5eac2e64192b723d51f56c1b1e0860e7c766281f4598f181acdc8" +"checksum socket2 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "962a516af4d3a7c272cb3a1d50a8cc4e5b41802e4ad54cfb7bee8ba61d37d703" 
+"checksum stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffbc596e092fe5f598b12ef46cc03754085ac2f4d8c739ad61c4ae266cc3b3fa" +"checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423" +"checksum string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "35293b05cf1494e8ddd042a7df6756bf18d07f42d234f32e71dce8a7aabb0191" "checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc" -"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" +"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" +"checksum strum 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "099e21b5dd6dd07b5adcf8c4b723a7c0b7efd7a9359bf963d58c0caae8532545" +"checksum strum_macros 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1292d85e688e4696ecb69b2db2648994fb8af266974e89be53cefdf003861a5d" "checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +"checksum syn 0.13.11 (registry+https://github.com/rust-lang/crates.io-index)" = "14f9bf6292f3a61d2c716723fdb789a41bbe104168e6f496dc6497e531ea1b9b" +"checksum syn 0.14.4 (registry+https://github.com/rust-lang/crates.io-index)" = "2beff8ebc3658f07512a413866875adddd20f4fd47b2a4e6c9da65cd281baaea" "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" -"checksum synstructure 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a761d12e6d8dcb4dcf952a7a89b475e3a9d69e4a69307e01a470977642914bd" 
-"checksum syntex_errors 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9e52bffe6202cfb67587784cf23e0ec5bf26d331eef4922a16d5c42e12aa1e9b" -"checksum syntex_pos 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "955ef4b16af4c468e4680d1497f873ff288f557d338180649e18f915af5e15ac" -"checksum syntex_syntax 0.52.0 (registry+https://github.com/rust-lang/crates.io-index)" = "76a302e717e348aa372ff577791c3832395650073b8d8432f8b3cb170b34afde" -"checksum tar 0.4.14 (registry+https://github.com/rust-lang/crates.io-index)" = "1605d3388ceb50252952ffebab4b5dc43017ead7e4481b175961c283bb951195" -"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" +"checksum synstructure 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "85bb9b7550d063ea184027c9b8c20ac167cd36d3e06b3a40bceb9d746dc1a7b7" +"checksum tar 0.4.16 (registry+https://github.com/rust-lang/crates.io-index)" = "e8f41ca4a5689f06998f0247fcb60da6c760f1950cc9df2a10d71575ad0b062a" +"checksum tempfile 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "47776f63b85777d984a50ce49d6b9e58826b6a3766a449fc95bc66cd5663c15b" "checksum tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9de21546595a0873061940d994bbbc5c35f024ae4fd61ec5c5b159115684f508" -"checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" -"checksum termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9065bced9c3e43453aa3d56f1e98590b8455b341d2fa191a1090c0dd0b242c75" +"checksum term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6b677dd1e8214ea1ef4297f85dbcbed8e8cdddb561040cc998ca2551c37561" +"checksum termcolor 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "adc4587ead41bf016f11af03e55a624c06568b5a19db4e90fde573d805074f83" +"checksum termcolor 
1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "722426c4a0539da2c4ffd9b419d90ad540b4cff4a053be9069c908d4d07e2836" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" -"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693" -"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" -"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5" +"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6" "checksum thread_local 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "279ef31c19ededf577bfd12dfae728040a21f635b06a24cd670ff510edd38963" -"checksum toml 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "736b60249cb25337bc196faa43ee12c705e426f3d55c214d73a4e7be06f92cb4" -"checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" +"checksum time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b" +"checksum toml 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a0263c6c02c4db6c8f7681f9fd35e90de799ebd4cfdeab77a38f4ff6b3d8c0d9" +"checksum toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6854664bfc6df0360c695480836ee90e2d0c965f06db291d10be9344792d43e8" +"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" "checksum unicode-bidi 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f" -"checksum unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a8083c594e02b8ae1654ae26f0ade5158b119bd88ad0e8227a5d8fcd72407946" -"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f" -"checksum unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "36dff09cafb4ec7c8cf0023eb0b686cb6ce65499116a12201c9e11840ca01beb" +"checksum unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6a0180bc61fc5a987082bfa111f4cc95c4caff7f9799f3e46df09163a937aa25" +"checksum unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "aa6024fc12ddfd1c6dbc14a80fa2324d4568849869b779f6bd37e5e4c03344d1" +"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" "checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" -"checksum unreachable 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f2ae5ddb18e1c92664717616dd9549dde73f539f01bd7b77c2edb2446bdff91" +"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" -"checksum url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"fa35e768d4daf1d85733418a49fb42e10d7f633e394fccab4ab7aba897053fe2" +"checksum url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2a321979c09843d272956e73700d12c4e7d3d92b2ee112b31548aef0d4efc5a6" "checksum url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74e7d099f1ee52f823d4bdd60c93c3602043c728f5db3b97bdb548467f7bddea" "checksum userenv-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71d28ea36bbd9192d75bd9fa9b39f96ddb986eaee824adae5d53b6e51919b2f3" -"checksum utf-8 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b6f923c601c7ac48ef1d66f7d5b5b2d9a7ba9c51333ab75a3ddf8d0309185a56" -"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" +"checksum utf-8 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1262dfab4c30d5cb7c07026be00ee343a6cf5027fdc0104a9160f354e5db75c" "checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" -"checksum vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9e0a7d8bed3178a8fb112199d466eeca9ed09a14ba8ad67718179b4fd5487d0b" -"checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c" +"checksum vcpkg 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cbe533e138811704c0e3cbde65a818b35d3240409b4346256c5ede403e082474" +"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" -"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = 
"bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff" +"checksum walkdir 2.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "63636bd0eb3d00ccb8b9036381b526efac53caf112b7783b730ab3f8e44da369" "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "773ef9dcc5f24b7d850d0ff101e542ff24c3b090a9768e03ff889fdef41f00fd" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" -"checksum wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a39ee4464208f6430992ff20154216ab2357772ac871d994c51628d60e58b8b0" -"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -"checksum xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "5f04de8a1346489a2f9e9bd8526b73d135ec554227b17568456e86aa35b6f3fc" -"checksum xz2 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "98df591c3504d014dd791d998123ed00a476c7e26dc6b2e873cb55c6ac9e59fa" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum wincolor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "eeb06499a3a4d44302791052df005d5232b927ed1a9658146d842165c4de7767" +"checksum wincolor 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b9dc3aa9dcda98b5a16150c54619c1ead22e3d3a5d458778ae914be760aa981a" +"checksum xattr 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c" +"checksum xz2 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "df8bf41d3030c3577c9458fd6640a05afbf43b150d0b531b16bd77d3f794f27a" "checksum yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992" diff --git a/src/Cargo.toml b/src/Cargo.toml index 15594a54ef62..01663487f92e 100644 --- a/src/Cargo.toml +++ b/src/Cargo.toml @@ -4,6 +4,7 @@ members = [ "rustc", "libstd", "libtest", + "librustc_codegen_llvm", "tools/cargotest", "tools/clippy", "tools/compiletest", @@ -21,26 +22,14 @@ members = [ "tools/rls", "tools/rustfmt", "tools/miri", - # FIXME(https://github.com/rust-lang/cargo/issues/4089): move these to exclude - "tools/rls/test_data/bin_lib", - "tools/rls/test_data/borrow_error", - "tools/rls/test_data/common", - "tools/rls/test_data/deglob", - "tools/rls/test_data/features", - "tools/rls/test_data/find_all_refs_no_cfg_test", - "tools/rls/test_data/find_impls", - "tools/rls/test_data/infer_bin", - "tools/rls/test_data/infer_custom_bin", - "tools/rls/test_data/infer_lib", - "tools/rls/test_data/multiple_bins", - "tools/rls/test_data/reformat", - "tools/rls/test_data/reformat_with_range", - "tools/rls/test_data/workspace_symbol", + "tools/rustdoc-themes", +] +exclude = [ + "tools/rls/test_data", ] -# Curiously, compiletest will segfault if compiled with opt-level=3 on 64-bit -# MSVC when running the compile-fail test suite when a should-fail test panics. -# But hey if this is removed and it gets past the bots, sounds good to me. 
+# Curiously, LLVM 7.0 will segfault if compiled with opt-level=3 +# See issue https://github.com/rust-lang/rust/issues/52378 [profile.release] opt-level = 2 [profile.bench] @@ -55,8 +44,23 @@ debug-assertions = false debug = false debug-assertions = false +# We want the RLS to use the version of Cargo that we've got vendored in this +# repository to ensure that the same exact version of Cargo is used by both the +# RLS and the Cargo binary itself. The RLS depends on Cargo as a git repository +# so we use a `[patch]` here to override the github repository with our local +# vendored copy. [patch."https://github.com/rust-lang/cargo"] cargo = { path = "tools/cargo" } [patch.crates-io] +# Similar to Cargo above we want the RLS to use a vendored version of `rustfmt` +# that we're shipping as well (to ensure that the rustfmt in RLS and the +# `rustfmt` executable are the same exact version). rustfmt-nightly = { path = "tools/rustfmt" } + +# See comments in `tools/rustc-workspace-hack/README.md` for what's going on +# here +rustc-workspace-hack = { path = 'tools/rustc-workspace-hack' } + +[patch."https://github.com/rust-lang-nursery/rust-clippy"] +clippy_lints = { path = "tools/clippy/clippy_lints" } diff --git a/src/README.md b/src/README.md new file mode 100644 index 000000000000..6da4944c392d --- /dev/null +++ b/src/README.md @@ -0,0 +1,15 @@ +This directory contains the source code of the rust project, including: +- `rustc` and its tests +- `libstd` +- Various submodules for tools, like rustdoc, rls, etc. + +For more information on how various parts of the compiler work, see the [rustc guide]. 
+ +There is also useful content in the following READMEs, which are gradually being moved over to the guide: +- https://github.com/rust-lang/rust/tree/master/src/librustc/ty/query +- https://github.com/rust-lang/rust/tree/master/src/librustc/dep_graph +- https://github.com/rust-lang/rust/blob/master/src/librustc/infer/region_constraints +- https://github.com/rust-lang/rust/tree/master/src/librustc/infer/higher_ranked +- https://github.com/rust-lang/rust/tree/master/src/librustc/infer/lexical_region_resolve + +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/about-this-guide.html diff --git a/src/binaryen b/src/binaryen deleted file mode 160000 index 1c9bf65aa0e3..000000000000 --- a/src/binaryen +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1c9bf65aa0e371b84755a8ddd6e79497fac57171 diff --git a/src/bootstrap/Cargo.toml b/src/bootstrap/Cargo.toml index bbbbf0e19155..57a526038041 100644 --- a/src/bootstrap/Cargo.toml +++ b/src/bootstrap/Cargo.toml @@ -28,10 +28,15 @@ name = "sccache-plus-cl" path = "bin/sccache-plus-cl.rs" test = false +[[bin]] +name = "llvm-config-wrapper" +path = "bin/llvm-config-wrapper.rs" +test = false + [dependencies] build_helper = { path = "../build_helper" } cmake = "0.1.23" -filetime = "0.1" +filetime = "0.2" num_cpus = "1.0" getopts = "0.2" cc = "1.0.1" @@ -41,3 +46,8 @@ serde_derive = "1.0.8" serde_json = "1.0.2" toml = "0.4" lazy_static = "0.2" +time = "0.1" +petgraph = "0.4.12" + +[dev-dependencies] +pretty_assertions = "0.5" diff --git a/src/bootstrap/README.md b/src/bootstrap/README.md index 9ff681ac6808..98c353eb6ec8 100644 --- a/src/bootstrap/README.md +++ b/src/bootstrap/README.md @@ -64,6 +64,10 @@ The script accepts commands, flags, and arguments to determine what to do: # execute tests in the standard library in stage0 ./x.py test --stage 0 src/libstd + # execute tests in the core and standard library in stage0, + # without running doc tests (thus avoid depending on building the compiler) + ./x.py test --stage 0
--no-doc src/libcore src/libstd + # execute all doc tests ./x.py test src/doc ``` diff --git a/src/bootstrap/bin/llvm-config-wrapper.rs b/src/bootstrap/bin/llvm-config-wrapper.rs new file mode 100644 index 000000000000..b1703f8c728e --- /dev/null +++ b/src/bootstrap/bin/llvm-config-wrapper.rs @@ -0,0 +1,27 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// The sheer existence of this file is an awful hack. See the comments in +// `src/bootstrap/native.rs` for why this is needed when compiling LLD. + +use std::env; +use std::process::{self, Stdio, Command}; +use std::io::{self, Write}; + +fn main() { + let real_llvm_config = env::var_os("LLVM_CONFIG_REAL").unwrap(); + let mut cmd = Command::new(real_llvm_config); + cmd.args(env::args().skip(1)).stderr(Stdio::piped()); + let output = cmd.output().expect("failed to spawn llvm-config"); + let stdout = String::from_utf8_lossy(&output.stdout); + print!("{}", stdout.replace("\\", "/")); + io::stdout().flush().unwrap(); + process::exit(output.status.code().unwrap_or(1)); +} diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index 37336a56d76c..7192cae8956e 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -31,9 +31,11 @@ extern crate bootstrap; use std::env; use std::ffi::OsString; -use std::str::FromStr; +use std::io; use std::path::PathBuf; -use std::process::{Command, ExitStatus}; +use std::process::Command; +use std::str::FromStr; +use std::time::Instant; fn main() { let mut args = env::args_os().skip(1).collect::>(); @@ -61,6 +63,11 @@ fn main() { args.remove(n); } + if let Some(s) = env::var_os("RUSTC_ERROR_FORMAT") { + args.push("--error-format".into()); + 
args.push(s); + } + // Detect whether or not we're a build script depending on whether --target // is passed (a bit janky...) let target = args.windows(2) @@ -85,12 +92,12 @@ fn main() { }; let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set"); let sysroot = env::var_os("RUSTC_SYSROOT").expect("RUSTC_SYSROOT was not set"); - let mut on_fail = env::var_os("RUSTC_ON_FAIL").map(|of| Command::new(of)); + let on_fail = env::var_os("RUSTC_ON_FAIL").map(|of| Command::new(of)); let rustc = env::var_os(rustc).unwrap_or_else(|| panic!("{:?} was not set", rustc)); let libdir = env::var_os(libdir).unwrap_or_else(|| panic!("{:?} was not set", libdir)); let mut dylib_path = bootstrap::util::dylib_path(); - dylib_path.insert(0, PathBuf::from(libdir)); + dylib_path.insert(0, PathBuf::from(&libdir)); let mut cmd = Command::new(rustc); cmd.args(&args) @@ -98,11 +105,19 @@ fn main() { .arg(format!("stage{}", stage)) .env(bootstrap::util::dylib_path_var(), env::join_paths(&dylib_path).unwrap()); + let mut maybe_crate = None; + + // Print backtrace in case of ICE + if env::var("RUSTC_BACKTRACE_ON_ICE").is_ok() && env::var("RUST_BACKTRACE").is_err() { + cmd.env("RUST_BACKTRACE", "1"); + } + + cmd.env("RUSTC_BREAK_ON_ICE", "1"); if let Some(target) = target { // The stage0 compiler has a special sysroot distinct from what we // actually downloaded, so we just always pass the `--sysroot` option. - cmd.arg("--sysroot").arg(sysroot); + cmd.arg("--sysroot").arg(&sysroot); // When we build Rust dylibs they're all intended for intermediate // usage, so make sure we pass the -Cprefer-dynamic flag instead of @@ -125,15 +140,11 @@ fn main() { cmd.arg(format!("-Clinker={}", target_linker)); } - // Pass down incremental directory, if any. 
- if let Ok(dir) = env::var("RUSTC_INCREMENTAL") { - cmd.arg(format!("-Zincremental={}", dir)); - } - let crate_name = args.windows(2) .find(|a| &*a[0] == "--crate-name") .unwrap(); let crate_name = &*crate_name[1]; + maybe_crate = Some(crate_name); // If we're compiling specifically the `panic_abort` crate then we pass // the `-C panic=abort` option. Note that we do not do this for any @@ -175,9 +186,6 @@ fn main() { if let Ok(s) = env::var("RUSTC_CODEGEN_UNITS") { cmd.arg("-C").arg(format!("codegen-units={}", s)); } - if env::var("RUSTC_THINLTO").is_ok() { - cmd.arg("-Ccodegen-units=16").arg("-Zthinlto"); - } // Emit save-analysis info. if env::var("RUSTC_SAVE_ANALYSIS") == Ok("api".to_string()) { @@ -224,7 +232,7 @@ fn main() { // flesh out rpath support more fully in the future. cmd.arg("-Z").arg("osx-rpath-install-name"); Some("-Wl,-rpath,@loader_path/../lib") - } else if !target.contains("windows") { + } else if !target.contains("windows") && !target.contains("wasm32") { Some("-Wl,-rpath,$ORIGIN/../lib") } else { None @@ -246,9 +254,6 @@ fn main() { // When running miri tests, we need to generate MIR for all libraries if env::var("TEST_MIRI").ok().map_or(false, |val| val == "true") { cmd.arg("-Zalways-encode-mir"); - if stage != "0" { - cmd.arg("-Zmiri"); - } cmd.arg("-Zmir-emit-validate=1"); } @@ -263,12 +268,25 @@ fn main() { if let Ok(host_linker) = env::var("RUSTC_HOST_LINKER") { cmd.arg(format!("-Clinker={}", host_linker)); } + + if let Ok(s) = env::var("RUSTC_HOST_CRT_STATIC") { + if s == "true" { + cmd.arg("-C").arg("target-feature=+crt-static"); + } + if s == "false" { + cmd.arg("-C").arg("target-feature=-crt-static"); + } + } } if env::var_os("RUSTC_PARALLEL_QUERIES").is_some() { cmd.arg("--cfg").arg("parallel_queries"); } + if env::var_os("RUSTC_VERIFY_LLVM_IR").is_some() { + cmd.arg("-Z").arg("verify-llvm-ir"); + } + let color = match env::var("RUSTC_COLOR") { Ok(s) => usize::from_str(&s).expect("RUSTC_COLOR should be an integer"), Err(_) => 0, @@ 
-278,35 +296,69 @@ fn main() { cmd.arg("--color=always"); } - if verbose > 1 { - eprintln!("rustc command: {:?}", cmd); + if env::var_os("RUSTC_DENY_WARNINGS").is_some() && env::var_os("RUSTC_EXTERNAL_TOOL").is_none() + { + cmd.arg("-Dwarnings"); + cmd.arg("-Dbare_trait_objects"); } - // Actually run the compiler! - std::process::exit(if let Some(ref mut on_fail) = on_fail { - match cmd.status() { - Ok(s) if s.success() => 0, - _ => { - println!("\nDid not run successfully:\n{:?}\n-------------", cmd); - exec_cmd(on_fail).expect("could not run the backup command"); - 1 + if verbose > 1 { + eprintln!( + "rustc command: {:?}={:?} {:?}", + bootstrap::util::dylib_path_var(), + env::join_paths(&dylib_path).unwrap(), + cmd, + ); + eprintln!("sysroot: {:?}", sysroot); + eprintln!("libdir: {:?}", libdir); + } + + if let Some(mut on_fail) = on_fail { + let e = match cmd.status() { + Ok(s) if s.success() => std::process::exit(0), + e => e, + }; + println!("\nDid not run successfully: {:?}\n{:?}\n-------------", e, cmd); + exec_cmd(&mut on_fail).expect("could not run the backup command"); + std::process::exit(1); + } + + if env::var_os("RUSTC_PRINT_STEP_TIMINGS").is_some() { + if let Some(krate) = maybe_crate { + let start = Instant::now(); + let status = cmd + .status() + .unwrap_or_else(|_| panic!("\n\n failed to run {:?}", cmd)); + let dur = start.elapsed(); + + let is_test = args.iter().any(|a| a == "--test"); + eprintln!("[RUSTC-TIMING] {} test:{} {}.{:03}", + krate.to_string_lossy(), + is_test, + dur.as_secs(), + dur.subsec_nanos() / 1_000_000); + + match status.code() { + Some(i) => std::process::exit(i), + None => { + eprintln!("rustc exited with {}", status); + std::process::exit(0xfe); + } } } - } else { - std::process::exit(match exec_cmd(&mut cmd) { - Ok(s) => s.code().unwrap_or(0xfe), - Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e), - }) - }) + } + + let code = exec_cmd(&mut cmd).unwrap_or_else(|_| panic!("\n\n failed to run {:?}", cmd)); + 
std::process::exit(code); } #[cfg(unix)] -fn exec_cmd(cmd: &mut Command) -> ::std::io::Result { +fn exec_cmd(cmd: &mut Command) -> io::Result { use std::os::unix::process::CommandExt; Err(cmd.exec()) } #[cfg(not(unix))] -fn exec_cmd(cmd: &mut Command) -> ::std::io::Result { - cmd.status() +fn exec_cmd(cmd: &mut Command) -> io::Result { + cmd.status().map(|status| status.code().unwrap()) } diff --git a/src/bootstrap/bin/rustdoc.rs b/src/bootstrap/bin/rustdoc.rs index 389b504c64cd..a54e58665cce 100644 --- a/src/bootstrap/bin/rustdoc.rs +++ b/src/bootstrap/bin/rustdoc.rs @@ -35,7 +35,7 @@ fn main() { }; let mut dylib_path = bootstrap::util::dylib_path(); - dylib_path.insert(0, PathBuf::from(libdir)); + dylib_path.insert(0, PathBuf::from(libdir.clone())); let mut cmd = Command::new(rustdoc); cmd.args(&args) @@ -62,16 +62,14 @@ fn main() { // it up so we can make rustdoc print this into the docs if let Some(version) = env::var_os("RUSTDOC_CRATE_VERSION") { // This "unstable-options" can be removed when `--crate-version` is stabilized - cmd.arg("-Z").arg("unstable-options") + cmd.arg("-Z") + .arg("unstable-options") .arg("--crate-version").arg(version); - - // While we can assume that `-Z unstable-options` is set, let's also force rustdoc to panic - // if pulldown rendering differences are found - cmd.arg("--deny-render-differences"); } if verbose > 1 { eprintln!("rustdoc command: {:?}", cmd); + eprintln!("libdir: {:?}", libdir); } std::process::exit(match cmd.status() { diff --git a/src/bootstrap/bin/sccache-plus-cl.rs b/src/bootstrap/bin/sccache-plus-cl.rs index 8584014d48d5..0a20ac7e492d 100644 --- a/src/bootstrap/bin/sccache-plus-cl.rs +++ b/src/bootstrap/bin/sccache-plus-cl.rs @@ -16,8 +16,8 @@ use std::process::{self, Command}; fn main() { let target = env::var("SCCACHE_TARGET").unwrap(); // Locate the actual compiler that we're invoking - env::remove_var("CC"); - env::remove_var("CXX"); + env::set_var("CC", env::var_os("SCCACHE_CC").unwrap()); + 
env::set_var("CXX", env::var_os("SCCACHE_CXX").unwrap()); let mut cfg = cc::Build::new(); cfg.cargo_metadata(false) .out_dir("/") @@ -39,6 +39,12 @@ fn main() { cmd.arg(arg); } + if let Ok(s) = env::var("SCCACHE_EXTRA_ARGS") { + for s in s.split_whitespace() { + cmd.arg(s); + } + } + let status = cmd.status().expect("failed to spawn"); process::exit(status.code().unwrap_or(2)) } diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 707aceebb1ed..d9c66ce2d779 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -88,7 +88,10 @@ def _download(path, url, probably_big, verbose, exception): option = "-#" else: option = "-s" - run(["curl", option, "--retry", "3", "-Sf", "-o", path, url], + run(["curl", option, + "-y", "30", "-Y", "10", # timeout if speed is < 10 bytes/sec for > 30 seconds + "--connect-timeout", "30", # timeout if cannot connect within 30 seconds + "--retry", "3", "-Sf", "-o", path, url], verbose=verbose, exception=exception) @@ -294,7 +297,7 @@ def default_build_triple(): raise ValueError('unknown byteorder: {}'.format(sys.byteorder)) # only the n64 ABI is supported, indicate it ostype += 'abi64' - elif cputype == 'sparcv9' or cputype == 'sparc64': + elif cputype == 'sparc' or cputype == 'sparcv9' or cputype == 'sparc64': pass else: err = "unknown cpu type: {}".format(cputype) @@ -303,6 +306,19 @@ def default_build_triple(): return "{}-{}".format(cputype, ostype) +@contextlib.contextmanager +def output(filepath): + tmp = filepath + '.tmp' + with open(tmp, 'w') as f: + yield f + try: + os.remove(filepath) # PermissionError/OSError on Win32 if in use + os.rename(tmp, filepath) + except OSError: + shutil.copy2(tmp, filepath) + os.remove(tmp) + + class RustBuild(object): """Provide all the methods required to build Rust""" def __init__(self): @@ -314,8 +330,7 @@ class RustBuild(object): self.build_dir = os.path.join(os.getcwd(), "build") self.clean = False self.config_toml = '' - self.printed = False - 
self.rust_root = os.path.abspath(os.path.join(__file__, '../../..')) + self.rust_root = '' self.use_locked_deps = '' self.use_vendored_sources = '' self.verbose = False @@ -336,7 +351,6 @@ class RustBuild(object): if self.rustc().startswith(self.bin_root()) and \ (not os.path.exists(self.rustc()) or self.program_out_of_date(self.rustc_stamp())): - self.print_what_bootstrap_means() if os.path.exists(self.bin_root()): shutil.rmtree(self.bin_root()) filename = "rust-std-{}-{}.tar.gz".format( @@ -348,9 +362,12 @@ class RustBuild(object): self._download_stage0_helper(filename, "rustc") self.fix_executable("{}/bin/rustc".format(self.bin_root())) self.fix_executable("{}/bin/rustdoc".format(self.bin_root())) - with open(self.rustc_stamp(), 'w') as rust_stamp: + with output(self.rustc_stamp()) as rust_stamp: rust_stamp.write(self.date) + # This is required so that we don't mix incompatible MinGW + # libraries/binaries that are included in rust-std with + # the system MinGW ones. if "pc-windows-gnu" in self.build: filename = "rust-mingw-{}-{}.tar.gz".format( rustc_channel, self.build) @@ -359,11 +376,10 @@ class RustBuild(object): if self.cargo().startswith(self.bin_root()) and \ (not os.path.exists(self.cargo()) or self.program_out_of_date(self.cargo_stamp())): - self.print_what_bootstrap_means() filename = "cargo-{}-{}.tar.gz".format(cargo_channel, self.build) self._download_stage0_helper(filename, "cargo") self.fix_executable("{}/bin/cargo".format(self.bin_root())) - with open(self.cargo_stamp(), 'w') as cargo_stamp: + with output(self.cargo_stamp()) as cargo_stamp: cargo_stamp.write(self.date) def _download_stage0_helper(self, filename, pattern): @@ -489,7 +505,7 @@ class RustBuild(object): """ return os.path.join(self.build_dir, self.build, "stage0") - def get_toml(self, key): + def get_toml(self, key, section=None): """Returns the value of the given key in config.toml, otherwise returns None >>> rb = RustBuild() @@ -501,12 +517,29 @@ class RustBuild(object): >>> 
rb.get_toml("key3") is None True + + Optionally also matches the section the key appears in + + >>> rb.config_toml = '[a]\\nkey = "value1"\\n[b]\\nkey = "value2"' + >>> rb.get_toml('key', 'a') + 'value1' + >>> rb.get_toml('key', 'b') + 'value2' + >>> rb.get_toml('key', 'c') is None + True """ + + cur_section = None for line in self.config_toml.splitlines(): + section_match = re.match(r'^\s*\[(.*)\]\s*$', line) + if section_match is not None: + cur_section = section_match.group(1) + match = re.match(r'^{}\s*=(.*)$'.format(key), line) if match is not None: value = match.group(1) - return self.get_string(value) or value.strip() + if section is None or section == cur_section: + return self.get_string(value) or value.strip() return None def cargo(self): @@ -560,23 +593,6 @@ class RustBuild(object): return '.exe' return '' - def print_what_bootstrap_means(self): - """Prints more information about the build system""" - if hasattr(self, 'printed'): - return - self.printed = True - if os.path.exists(self.bootstrap_binary()): - return - if '--help' not in sys.argv or len(sys.argv) == 1: - return - - print('info: the build system for Rust is written in Rust, so this') - print(' script is now going to download a stage0 rust compiler') - print(' and then compile the build system itself') - print('') - print('info: in the meantime you can read more about rustbuild at') - print(' src/bootstrap/README.md before the download finishes') - def bootstrap_binary(self): """Return the path of the boostrap binary @@ -590,7 +606,6 @@ class RustBuild(object): def build_bootstrap(self): """Build bootstrap""" - self.print_what_bootstrap_means() build_dir = os.path.join(self.build_dir, "bootstrap") if self.clean and os.path.exists(build_dir): shutil.rmtree(build_dir) @@ -607,6 +622,17 @@ class RustBuild(object): env["LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \ (os.pathsep + env["LIBRARY_PATH"]) \ if "LIBRARY_PATH" in env else "" + env["RUSTFLAGS"] = "-Cdebuginfo=2 " + + 
build_section = "target.{}".format(self.build_triple()) + target_features = [] + if self.get_toml("crt-static", build_section) == "true": + target_features += ["+crt-static"] + elif self.get_toml("crt-static", build_section) == "false": + target_features += ["-crt-static"] + if target_features: + env["RUSTFLAGS"] += "-C target-feature=" + (",".join(target_features)) + " " + env["PATH"] = os.path.join(self.bin_root(), "bin") + \ os.pathsep + env["PATH"] if not os.path.isfile(self.cargo()): @@ -614,10 +640,8 @@ class RustBuild(object): self.cargo())) args = [self.cargo(), "build", "--manifest-path", os.path.join(self.rust_root, "src/bootstrap/Cargo.toml")] - if self.verbose: + for _ in range(1, self.verbose): args.append("--verbose") - if self.verbose > 1: - args.append("--verbose") if self.use_locked_deps: args.append("--locked") if self.use_vendored_sources: @@ -631,53 +655,118 @@ class RustBuild(object): return config return default_build_triple() + def check_submodule(self, module, slow_submodules): + if not slow_submodules: + checked_out = subprocess.Popen(["git", "rev-parse", "HEAD"], + cwd=os.path.join(self.rust_root, module), + stdout=subprocess.PIPE) + return checked_out + else: + return None + + def update_submodule(self, module, checked_out, recorded_submodules): + module_path = os.path.join(self.rust_root, module) + + if checked_out != None: + default_encoding = sys.getdefaultencoding() + checked_out = checked_out.communicate()[0].decode(default_encoding).strip() + if recorded_submodules[module] == checked_out: + return + + print("Updating submodule", module) + + run(["git", "submodule", "-q", "sync", module], + cwd=self.rust_root, verbose=self.verbose) + run(["git", "submodule", "update", + "--init", "--recursive", module], + cwd=self.rust_root, verbose=self.verbose) + run(["git", "reset", "-q", "--hard"], + cwd=module_path, verbose=self.verbose) + run(["git", "clean", "-qdfx"], + cwd=module_path, verbose=self.verbose) + def update_submodules(self): 
"""Update submodules""" if (not os.path.exists(os.path.join(self.rust_root, ".git"))) or \ self.get_toml('submodules') == "false": return - print('Updating submodules') + slow_submodules = self.get_toml('fast-submodules') == "false" + start_time = time() + if slow_submodules: + print('Unconditionally updating all submodules') + else: + print('Updating only changed submodules') default_encoding = sys.getdefaultencoding() - run(["git", "submodule", "-q", "sync"], cwd=self.rust_root, verbose=self.verbose) submodules = [s.split(' ', 1)[1] for s in subprocess.check_output( ["git", "config", "--file", os.path.join(self.rust_root, ".gitmodules"), "--get-regexp", "path"] ).decode(default_encoding).splitlines()] - submodules = [module for module in submodules - if not ((module.endswith("llvm") and - self.get_toml('llvm-config')) or - (module.endswith("jemalloc") and - (self.get_toml('use-jemalloc') == "false" or - self.get_toml('jemalloc'))))] - run(["git", "submodule", "update", - "--init", "--recursive"] + submodules, - cwd=self.rust_root, verbose=self.verbose) - run(["git", "submodule", "-q", "foreach", "git", - "reset", "-q", "--hard"], - cwd=self.rust_root, verbose=self.verbose) - run(["git", "submodule", "-q", "foreach", "git", - "clean", "-qdfx"], - cwd=self.rust_root, verbose=self.verbose) + filtered_submodules = [] + submodules_names = [] + for module in submodules: + if module.endswith("llvm"): + if self.get_toml('llvm-config'): + continue + if module.endswith("llvm-emscripten"): + backends = self.get_toml('codegen-backends') + if backends is None or not 'emscripten' in backends: + continue + if module.endswith("jemalloc"): + if self.get_toml('use-jemalloc') == 'false': + continue + if self.get_toml('jemalloc'): + continue + if module.endswith("lld"): + config = self.get_toml('lld') + if config is None or config == 'false': + continue + if module.endswith("lldb") or module.endswith("clang"): + config = self.get_toml('lldb') + if config is None or config == 
'false': + continue + check = self.check_submodule(module, slow_submodules) + filtered_submodules.append((module, check)) + submodules_names.append(module) + recorded = subprocess.Popen(["git", "ls-tree", "HEAD"] + submodules_names, + cwd=self.rust_root, stdout=subprocess.PIPE) + recorded = recorded.communicate()[0].decode(default_encoding).strip().splitlines() + recorded_submodules = {} + for data in recorded: + data = data.split() + recorded_submodules[data[3]] = data[2] + for module in filtered_submodules: + self.update_submodule(module[0], module[1], recorded_submodules) + print("Submodules updated in %.2f seconds" % (time() - start_time)) def set_dev_environment(self): """Set download URL for development environment""" self._download_url = 'https://dev-static.rust-lang.org' -def bootstrap(): +def bootstrap(help_triggered): """Configure, fetch, build and run the initial bootstrap""" + + # If the user is asking for help, let them know that the whole download-and-build + # process has to happen before anything is printed out. + if help_triggered: + print("info: Downloading and building bootstrap before processing --help") + print(" command. 
See src/bootstrap/README.md for help with common") + print(" commands.") + parser = argparse.ArgumentParser(description='Build rust') parser.add_argument('--config') parser.add_argument('--build') + parser.add_argument('--src') parser.add_argument('--clean', action='store_true') - parser.add_argument('-v', '--verbose', action='store_true') + parser.add_argument('-v', '--verbose', action='count', default=0) args = [a for a in sys.argv if a != '-h' and a != '--help'] args, _ = parser.parse_known_args(args) # Configure initial bootstrap build = RustBuild() + build.rust_root = args.src or os.path.abspath(os.path.join(__file__, '../../..')) build.verbose = args.verbose build.clean = args.clean @@ -687,10 +776,9 @@ def bootstrap(): except (OSError, IOError): pass - if '\nverbose = 2' in build.config_toml: - build.verbose = 2 - elif '\nverbose = 1' in build.config_toml: - build.verbose = 1 + match = re.search(r'\nverbose = (\d+)', build.config_toml) + if match is not None: + build.verbose = max(build.verbose, int(match.group(1))) build.use_vendored_sources = '\nvendor = true' in build.config_toml @@ -703,12 +791,12 @@ def bootstrap(): print(' and so in order to preserve your $HOME this will now') print(' use vendored sources by default. 
Note that if this') print(' does not work you should run a normal build first') - print(' before running a command like `sudo make install`') + print(' before running a command like `sudo ./x.py install`') if build.use_vendored_sources: if not os.path.exists('.cargo'): os.makedirs('.cargo') - with open('.cargo/config', 'w') as cargo_config: + with output('.cargo/config') as cargo_config: cargo_config.write(""" [source.crates-io] replace-with = 'vendored-sources' @@ -746,6 +834,10 @@ def bootstrap(): env["SRC"] = build.rust_root env["BOOTSTRAP_PARENT_ID"] = str(os.getpid()) env["BOOTSTRAP_PYTHON"] = sys.executable + env["BUILD_DIR"] = build.build_dir + env["RUSTC_BOOTSTRAP"] = '1' + env["CARGO"] = build.cargo() + env["RUSTC"] = build.rustc() run(args, env=env, verbose=build.verbose) @@ -755,7 +847,7 @@ def main(): help_triggered = ( '-h' in sys.argv) or ('--help' in sys.argv) or (len(sys.argv) == 1) try: - bootstrap() + bootstrap(help_triggered) if not help_triggered: print("Build completed successfully in {}".format( format_build_time(time() - start_time))) diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 8e35ecc8090c..12c1972c2203 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -9,8 +9,9 @@ // except according to those terms. 
use std::any::Any; -use std::cell::RefCell; +use std::cell::{Cell, RefCell}; use std::collections::BTreeSet; +use std::collections::HashMap; use std::env; use std::fmt::Debug; use std::fs; @@ -18,27 +19,37 @@ use std::hash::Hash; use std::ops::Deref; use std::path::{Path, PathBuf}; use std::process::Command; +use std::time::{Duration, Instant}; -use compile; -use install; -use dist; -use util::{exe, libdir, add_lib_path}; -use {Build, Mode}; -use cache::{INTERNER, Interned, Cache}; +use cache::{Cache, Interned, INTERNER}; use check; -use flags::Subcommand; +use compile; +use dist; use doc; -use tool; +use flags::Subcommand; +use install; use native; +use test; +use tool; +use util::{add_lib_path, exe, libdir}; +use {Build, DocTests, Mode}; pub use Compiler; +use petgraph::graph::NodeIndex; +use petgraph::Graph; + pub struct Builder<'a> { pub build: &'a Build, pub top_stage: u32, pub kind: Kind, cache: Cache, - stack: RefCell>>, + stack: RefCell>>, + time_spent_on_dependencies: Cell, + pub paths: Vec, + graph_nodes: RefCell>, + graph: RefCell>, + parent: Cell>, } impl<'a> Deref for Builder<'a> { @@ -59,12 +70,6 @@ pub trait Step: 'static + Clone + Debug + PartialEq + Eq + Hash { /// Run this rule for all hosts without cross compiling. const ONLY_HOSTS: bool = false; - /// Run this rule for all targets, but only with the native host. - const ONLY_BUILD_TARGETS: bool = false; - - /// Only run this step with the build triple as host and target. - const ONLY_BUILD: bool = false; - /// Primary function to execute this rule. Can call `builder.ensure(...)` /// with other steps to run those. 
fn run(self, builder: &Builder) -> Self::Output; @@ -94,16 +99,51 @@ pub struct RunConfig<'a> { pub builder: &'a Builder<'a>, pub host: Interned, pub target: Interned, - pub path: Option<&'a Path>, + pub path: PathBuf, } struct StepDescription { default: bool, only_hosts: bool, - only_build_targets: bool, - only_build: bool, should_run: fn(ShouldRun) -> ShouldRun, make_run: fn(RunConfig), + name: &'static str, +} + +#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] +pub enum PathSet { + Set(BTreeSet), + Suite(PathBuf), +} + +impl PathSet { + fn empty() -> PathSet { + PathSet::Set(BTreeSet::new()) + } + + fn one>(path: P) -> PathSet { + let mut set = BTreeSet::new(); + set.insert(path.into()); + PathSet::Set(set) + } + + fn has(&self, needle: &Path) -> bool { + match self { + PathSet::Set(set) => set.iter().any(|p| p.ends_with(needle)), + PathSet::Suite(_) => false, + } + } + + fn path(&self, builder: &Builder) -> PathBuf { + match self { + PathSet::Set(set) => set + .iter() + .next() + .unwrap_or(&builder.build.src) + .to_path_buf(), + PathSet::Suite(path) => PathBuf::from(path), + } + } } impl StepDescription { @@ -111,39 +151,40 @@ impl StepDescription { StepDescription { default: S::DEFAULT, only_hosts: S::ONLY_HOSTS, - only_build_targets: S::ONLY_BUILD_TARGETS, - only_build: S::ONLY_BUILD, should_run: S::should_run, make_run: S::make_run, + name: unsafe { ::std::intrinsics::type_name::() }, } } - fn maybe_run(&self, builder: &Builder, path: Option<&Path>) { - let build = builder.build; - let hosts = if self.only_build_targets || self.only_build { - build.build_triple() - } else { - &build.hosts - }; + fn maybe_run(&self, builder: &Builder, pathset: &PathSet) { + if builder.config.exclude.iter().any(|e| pathset.has(e)) { + eprintln!("Skipping {:?} because it is excluded", pathset); + return; + } else if !builder.config.exclude.is_empty() { + eprintln!( + "{:?} not skipped for {:?} -- not in {:?}", + pathset, self.name, builder.config.exclude + ); + } + let 
hosts = &builder.hosts; // Determine the targets participating in this rule. let targets = if self.only_hosts { - if build.config.run_host_only { - &[] - } else if self.only_build { - build.build_triple() + if !builder.config.run_host_only { + return; // don't run anything } else { - &build.hosts + &builder.hosts } } else { - &build.targets + &builder.targets }; for host in hosts { for target in targets { let run = RunConfig { builder, - path, + path: pathset.path(builder), host: *host, target: *target, }; @@ -153,27 +194,49 @@ impl StepDescription { } fn run(v: &[StepDescription], builder: &Builder, paths: &[PathBuf]) { - let should_runs = v.iter().map(|desc| { - (desc.should_run)(ShouldRun::new(builder)) - }).collect::>(); + let should_runs = v + .iter() + .map(|desc| (desc.should_run)(ShouldRun::new(builder))) + .collect::>(); + + // sanity checks on rules + for (desc, should_run) in v.iter().zip(&should_runs) { + assert!( + !should_run.paths.is_empty(), + "{:?} should have at least one pathset", + desc.name + ); + } + if paths.is_empty() { for (desc, should_run) in v.iter().zip(should_runs) { if desc.default && should_run.is_really_default { - desc.maybe_run(builder, None); + for pathset in &should_run.paths { + desc.maybe_run(builder, pathset); + } } } } else { for path in paths { + // strip CurDir prefix if present + let path = match path.strip_prefix(".") { + Ok(p) => p, + Err(_) => path, + }; + let mut attempted_run = false; for (desc, should_run) in v.iter().zip(&should_runs) { - if should_run.run(path) { + if let Some(suite) = should_run.is_suite_path(path) { attempted_run = true; - desc.maybe_run(builder, Some(path)); + desc.maybe_run(builder, suite); + } else if let Some(pathset) = should_run.pathset_for_path(path) { + attempted_run = true; + desc.maybe_run(builder, pathset); } } if !attempted_run { - eprintln!("Warning: no rules matched {}.", path.display()); + panic!("Error: no rules matched {}.", path.display()); } } } @@ -184,10 +247,10 @@ impl 
StepDescription { pub struct ShouldRun<'a> { pub builder: &'a Builder<'a>, // use a BTreeSet to maintain sort order - paths: BTreeSet, + paths: BTreeSet, // If this is a default rule, this is an additional constraint placed on - // it's run. Generally something like compiler docs being enabled. + // its run. Generally something like compiler docs being enabled. is_really_default: bool, } @@ -205,31 +268,64 @@ impl<'a> ShouldRun<'a> { self } + // Unlike `krate` this will create just one pathset. As such, it probably shouldn't actually + // ever be used, but as we transition to having all rules properly handle passing krate(...) by + // actually doing something different for every crate passed. + pub fn all_krates(mut self, name: &str) -> Self { + let mut set = BTreeSet::new(); + for krate in self.builder.in_tree_crates(name) { + set.insert(PathBuf::from(&krate.path)); + } + self.paths.insert(PathSet::Set(set)); + self + } + pub fn krate(mut self, name: &str) -> Self { - for (_, krate_path) in self.builder.crates(name) { - self.paths.insert(PathBuf::from(krate_path)); + for krate in self.builder.in_tree_crates(name) { + self.paths.insert(PathSet::one(&krate.path)); } self } - pub fn path(mut self, path: &str) -> Self { - self.paths.insert(PathBuf::from(path)); + // single, non-aliased path + pub fn path(self, path: &str) -> Self { + self.paths(&[path]) + } + + // multiple aliases for the same job + pub fn paths(mut self, paths: &[&str]) -> Self { + self.paths + .insert(PathSet::Set(paths.iter().map(PathBuf::from).collect())); + self + } + + pub fn is_suite_path(&self, path: &Path) -> Option<&PathSet> { + self.paths.iter().find(|pathset| match pathset { + PathSet::Suite(p) => path.starts_with(p), + PathSet::Set(_) => false, + }) + } + + pub fn suite_path(mut self, suite: &str) -> Self { + self.paths.insert(PathSet::Suite(PathBuf::from(suite))); self } // allows being more explicit about why should_run in Step returns the value passed to it - pub fn never(self) -> 
ShouldRun<'a> { + pub fn never(mut self) -> ShouldRun<'a> { + self.paths.insert(PathSet::empty()); self } - fn run(&self, path: &Path) -> bool { - self.paths.iter().any(|p| path.ends_with(p)) + fn pathset_for_path(&self, path: &Path) -> Option<&PathSet> { + self.paths.iter().find(|pathset| pathset.has(path)) } } #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum Kind { Build, + Check, Test, Bench, Dist, @@ -245,26 +341,141 @@ impl<'a> Builder<'a> { }}; } match kind { - Kind::Build => describe!(compile::Std, compile::Test, compile::Rustc, - compile::StartupObjects, tool::BuildManifest, tool::Rustbook, tool::ErrorIndex, - tool::UnstableBookGen, tool::Tidy, tool::Linkchecker, tool::CargoTest, - tool::Compiletest, tool::RemoteTestServer, tool::RemoteTestClient, - tool::RustInstaller, tool::Cargo, tool::Rls, tool::Rustdoc, tool::Clippy, - native::Llvm, tool::Rustfmt, tool::Miri), - Kind::Test => describe!(check::Tidy, check::Bootstrap, check::DefaultCompiletest, - check::HostCompiletest, check::Crate, check::CrateLibrustc, check::Rustdoc, - check::Linkcheck, check::Cargotest, check::Cargo, check::Rls, check::Docs, - check::ErrorIndex, check::Distcheck, check::Rustfmt, check::Miri, check::Clippy), - Kind::Bench => describe!(check::Crate, check::CrateLibrustc), - Kind::Doc => describe!(doc::UnstableBook, doc::UnstableBookGen, doc::TheBook, - doc::Standalone, doc::Std, doc::Test, doc::Rustc, doc::ErrorIndex, doc::Nomicon, - doc::Reference, doc::Rustdoc, doc::RustByExample, doc::CargoBook), - Kind::Dist => describe!(dist::Docs, dist::Mingw, dist::Rustc, dist::DebuggerScripts, - dist::Std, dist::Analysis, dist::Src, dist::PlainSourceTarball, dist::Cargo, - dist::Rls, dist::Rustfmt, dist::Extended, dist::HashSign, - dist::DontDistWithMiriEnabled), - Kind::Install => describe!(install::Docs, install::Std, install::Cargo, install::Rls, - install::Rustfmt, install::Analysis, install::Src, install::Rustc), + Kind::Build => describe!( + compile::Std, + compile::Test, + 
compile::Rustc, + compile::CodegenBackend, + compile::StartupObjects, + tool::BuildManifest, + tool::Rustbook, + tool::ErrorIndex, + tool::UnstableBookGen, + tool::Tidy, + tool::Linkchecker, + tool::CargoTest, + tool::Compiletest, + tool::RemoteTestServer, + tool::RemoteTestClient, + tool::RustInstaller, + tool::Cargo, + tool::Rls, + tool::Rustdoc, + tool::Clippy, + native::Llvm, + tool::Rustfmt, + tool::Miri, + native::Lld + ), + Kind::Check => describe!( + check::Std, + check::Test, + check::Rustc, + check::CodegenBackend, + check::Rustdoc + ), + Kind::Test => describe!( + test::Tidy, + test::Ui, + test::RunPass, + test::CompileFail, + test::ParseFail, + test::RunFail, + test::RunPassValgrind, + test::MirOpt, + test::Codegen, + test::CodegenUnits, + test::Incremental, + test::Debuginfo, + test::UiFullDeps, + test::RunPassFullDeps, + test::RunFailFullDeps, + test::CompileFailFullDeps, + test::IncrementalFullDeps, + test::Rustdoc, + test::Pretty, + test::RunPassPretty, + test::RunFailPretty, + test::RunPassValgrindPretty, + test::RunPassFullDepsPretty, + test::RunFailFullDepsPretty, + test::Crate, + test::CrateLibrustc, + test::CrateRustdoc, + test::Linkcheck, + test::Cargotest, + test::Cargo, + test::Rls, + test::ErrorIndex, + test::Distcheck, + test::RunMakeFullDeps, + test::Nomicon, + test::Reference, + test::RustdocBook, + test::RustByExample, + test::TheBook, + test::UnstableBook, + test::RustcBook, + test::Rustfmt, + test::Miri, + test::Clippy, + test::RustdocJS, + test::RustdocTheme, + // Run bootstrap close to the end as it's unlikely to fail + test::Bootstrap, + // Run run-make last, since these won't pass without make on Windows + test::RunMake, + test::RustdocUi + ), + Kind::Bench => describe!(test::Crate, test::CrateLibrustc), + Kind::Doc => describe!( + doc::UnstableBook, + doc::UnstableBookGen, + doc::TheBook, + doc::Standalone, + doc::Std, + doc::Test, + doc::WhitelistedRustc, + doc::Rustc, + doc::Rustdoc, + doc::ErrorIndex, + doc::Nomicon, + 
doc::Reference, + doc::RustdocBook, + doc::RustByExample, + doc::RustcBook, + doc::CargoBook + ), + Kind::Dist => describe!( + dist::Docs, + dist::RustcDocs, + dist::Mingw, + dist::Rustc, + dist::DebuggerScripts, + dist::Std, + dist::Analysis, + dist::Src, + dist::PlainSourceTarball, + dist::Cargo, + dist::Rls, + dist::Rustfmt, + dist::Clippy, + dist::LlvmTools, + dist::Lldb, + dist::Extended, + dist::HashSign + ), + Kind::Install => describe!( + install::Docs, + install::Std, + install::Cargo, + install::Rls, + install::Rustfmt, + install::Clippy, + install::Analysis, + install::Src, + install::Rustc + ), } } @@ -285,6 +496,11 @@ impl<'a> Builder<'a> { kind, cache: Cache::new(), stack: RefCell::new(Vec::new()), + time_spent_on_dependencies: Cell::new(Duration::new(0, 0)), + paths: vec![], + graph_nodes: RefCell::new(HashMap::new()), + graph: RefCell::new(Graph::new()), + parent: Cell::new(None), }; let builder = &builder; @@ -293,15 +509,22 @@ impl<'a> Builder<'a> { should_run = (desc.should_run)(should_run); } let mut help = String::from("Available paths:\n"); - for path in should_run.paths { - help.push_str(format!(" ./x.py {} {}\n", subcommand, path.display()).as_str()); + for pathset in should_run.paths { + if let PathSet::Set(set) = pathset { + set.iter().for_each(|path| { + help.push_str( + format!(" ./x.py {} {}\n", subcommand, path.display()).as_str(), + ) + }) + } } Some(help) } - pub fn run(build: &Build) { + pub fn new(build: &Build) -> Builder { let (kind, paths) = match build.config.cmd { Subcommand::Build { ref paths } => (Kind::Build, &paths[..]), + Subcommand::Check { ref paths } => (Kind::Check, &paths[..]), Subcommand::Doc { ref paths } => (Kind::Doc, &paths[..]), Subcommand::Test { ref paths, .. } => (Kind::Test, &paths[..]), Subcommand::Bench { ref paths, .. 
} => (Kind::Bench, &paths[..]), @@ -316,14 +539,37 @@ impl<'a> Builder<'a> { kind, cache: Cache::new(), stack: RefCell::new(Vec::new()), + time_spent_on_dependencies: Cell::new(Duration::new(0, 0)), + paths: paths.to_owned(), + graph_nodes: RefCell::new(HashMap::new()), + graph: RefCell::new(Graph::new()), + parent: Cell::new(None), }; - StepDescription::run(&Builder::get_step_descriptions(builder.kind), &builder, paths); + if kind == Kind::Dist { + assert!( + !builder.config.test_miri, + "Do not distribute with miri enabled.\n\ + The distributed libraries would include all MIR (increasing binary size). + The distributed MIR would include validation statements." + ); + } + + builder + } + + pub fn execute_cli(&self) -> Graph { + self.run_step_descriptions(&Builder::get_step_descriptions(self.kind), &self.paths); + self.graph.borrow().clone() } pub fn default_doc(&self, paths: Option<&[PathBuf]>) { let paths = paths.unwrap_or(&[]); - StepDescription::run(&Builder::get_step_descriptions(Kind::Doc), self, paths); + self.run_step_descriptions(&Builder::get_step_descriptions(Kind::Doc), paths); + } + + fn run_step_descriptions(&self, v: &[StepDescription], paths: &[PathBuf]) { + StepDescription::run(v, self, paths); } /// Obtain a compiler at a given stage and for a given host. Explicitly does @@ -331,7 +577,9 @@ impl<'a> Builder<'a> { /// obtained through this function, since it ensures that they are valid /// (i.e., built and assembled). pub fn compiler(&self, stage: u32, host: Interned) -> Compiler { - self.ensure(compile::Assemble { target_compiler: Compiler { stage, host } }) + self.ensure(compile::Assemble { + target_compiler: Compiler { stage, host }, + }) } pub fn sysroot(&self, compiler: Compiler) -> Interned { @@ -341,7 +589,9 @@ impl<'a> Builder<'a> { /// Returns the libdir where the standard library and other artifacts are /// found for a compiler's sysroot. 
pub fn sysroot_libdir( - &self, compiler: Compiler, target: Interned + &self, + compiler: Compiler, + target: Interned, ) -> Interned { #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] struct Libdir { @@ -357,13 +607,18 @@ impl<'a> Builder<'a> { fn run(self, builder: &Builder) -> Interned { let compiler = self.compiler; - let lib = if compiler.stage >= 1 && builder.build.config.libdir.is_some() { - builder.build.config.libdir.clone().unwrap() + let config = &builder.build.config; + let lib = if compiler.stage >= 1 && config.libdir_relative().is_some() { + builder.build.config.libdir_relative().unwrap() } else { - PathBuf::from("lib") + Path::new("lib") }; - let sysroot = builder.sysroot(self.compiler).join(lib) - .join("rustlib").join(self.target).join("lib"); + let sysroot = builder + .sysroot(self.compiler) + .join(lib) + .join("rustlib") + .join(self.target) + .join("lib"); let _ = fs::remove_dir_all(&sysroot); t!(fs::create_dir_all(&sysroot)); INTERNER.intern_path(sysroot) @@ -372,6 +627,11 @@ impl<'a> Builder<'a> { self.ensure(Libdir { compiler, target }) } + pub fn sysroot_codegen_backends(&self, compiler: Compiler) -> PathBuf { + self.sysroot_libdir(compiler, compiler.host) + .with_file_name(self.config.rust_codegen_backends_dir.clone()) + } + /// Returns the compiler's libdir where it stores the dynamic libraries that /// it itself links against. /// @@ -379,7 +639,7 @@ impl<'a> Builder<'a> { /// Windows. pub fn rustc_libdir(&self, compiler: Compiler) -> PathBuf { if compiler.is_snapshot(self) { - self.build.rustc_snapshot_libdir() + self.rustc_snapshot_libdir() } else { self.sysroot(compiler).join(libdir(&compiler.host)) } @@ -392,7 +652,7 @@ impl<'a> Builder<'a> { // compiler live next to the compiler and the system will find them // automatically. 
if cfg!(windows) { - return + return; } add_lib_path(vec![self.rustc_libdir(compiler)], cmd); @@ -403,7 +663,9 @@ impl<'a> Builder<'a> { if compiler.is_snapshot(self) { self.initial_rustc.clone() } else { - self.sysroot(compiler).join("bin").join(exe("rustc", &compiler.host)) + self.sysroot(compiler) + .join("bin") + .join(exe("rustc", &compiler.host)) } } @@ -415,13 +677,16 @@ impl<'a> Builder<'a> { let mut cmd = Command::new(&self.out.join("bootstrap/debug/rustdoc")); let compiler = self.compiler(self.top_stage, host); cmd.env("RUSTC_STAGE", compiler.stage.to_string()) - .env("RUSTC_SYSROOT", self.sysroot(compiler)) - .env("RUSTDOC_LIBDIR", self.sysroot_libdir(compiler, self.build.build)) - .env("CFG_RELEASE_CHANNEL", &self.build.config.channel) - .env("RUSTDOC_REAL", self.rustdoc(host)) - .env("RUSTDOC_CRATE_VERSION", self.build.rust_version()) - .env("RUSTC_BOOTSTRAP", "1"); - if let Some(linker) = self.build.linker(host) { + .env("RUSTC_SYSROOT", self.sysroot(compiler)) + .env( + "RUSTDOC_LIBDIR", + self.sysroot_libdir(compiler, self.config.build), + ) + .env("CFG_RELEASE_CHANNEL", &self.config.channel) + .env("RUSTDOC_REAL", self.rustdoc(host)) + .env("RUSTDOC_CRATE_VERSION", self.rust_version()) + .env("RUSTC_BOOTSTRAP", "1"); + if let Some(linker) = self.linker(host) { cmd.env("RUSTC_TARGET_LINKER", linker); } cmd @@ -434,26 +699,48 @@ impl<'a> Builder<'a> { /// rustc compiler, its output will be scoped by `mode`'s output directory, /// it will pass the `--target` flag for the specified `target`, and will be /// executing the Cargo command `cmd`. 
- pub fn cargo(&self, - compiler: Compiler, - mode: Mode, - target: Interned, - cmd: &str) -> Command { + pub fn cargo( + &self, + compiler: Compiler, + mode: Mode, + target: Interned, + cmd: &str, + ) -> Command { let mut cargo = Command::new(&self.initial_cargo); let out_dir = self.stage_out(compiler, mode); - cargo.env("CARGO_TARGET_DIR", out_dir) - .arg(cmd) - .arg("--target").arg(target); + cargo + .env("CARGO_TARGET_DIR", out_dir) + .arg(cmd); - // If we were invoked from `make` then that's already got a jobserver - // set up for us so no need to tell Cargo about jobs all over again. - if env::var_os("MAKEFLAGS").is_none() && env::var_os("MFLAGS").is_none() { - cargo.arg("-j").arg(self.jobs().to_string()); + if cmd != "install" { + cargo.arg("--target") + .arg(target); + } else { + assert_eq!(target, compiler.host); } + // Set a flag for `check` so that certain build scripts can do less work + // (e.g. not building/requiring LLVM). + if cmd == "check" { + cargo.env("RUST_CHECK", "1"); + } + + cargo.arg("-j").arg(self.jobs().to_string()); + // Remove make-related flags to ensure Cargo can correctly set things up + cargo.env_remove("MAKEFLAGS"); + cargo.env_remove("MFLAGS"); + // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005 // Force cargo to output binaries with disambiguating hashes in the name - cargo.env("__CARGO_DEFAULT_LIB_METADATA", &self.config.channel); + let metadata = if compiler.stage == 0 { + // Treat stage0 like special channel, whether it's a normal prior- + // release rustc or a local rebuild with the same version, so we + // never mix these libraries by accident. 
+ "bootstrap" + } else { + &self.config.channel + }; + cargo.env("__CARGO_DEFAULT_LIB_METADATA", &metadata); let stage; if compiler.stage == 0 && self.local_rebuild { @@ -463,49 +750,103 @@ impl<'a> Builder<'a> { stage = compiler.stage; } + let mut extra_args = env::var(&format!("RUSTFLAGS_STAGE_{}", stage)).unwrap_or_default(); + if stage != 0 { + let s = env::var("RUSTFLAGS_STAGE_NOT_0").unwrap_or_default(); + if !extra_args.is_empty() { + extra_args.push_str(" "); + } + extra_args.push_str(&s); + } + + if !extra_args.is_empty() { + cargo.env( + "RUSTFLAGS", + format!( + "{} {}", + env::var("RUSTFLAGS").unwrap_or_default(), + extra_args + ), + ); + } + + let want_rustdoc = self.doc_tests != DocTests::No; + + // We synthetically interpret a stage0 compiler used to build tools as a + // "raw" compiler in that it's the exact snapshot we download. Normally + // the stage0 build means it uses libraries build by the stage0 + // compiler, but for tools we just use the precompiled libraries that + // we've downloaded + let use_snapshot = mode == Mode::ToolBootstrap; + assert!(!use_snapshot || stage == 0 || self.local_rebuild); + + let maybe_sysroot = self.sysroot(compiler); + let sysroot = if use_snapshot { + self.rustc_snapshot_sysroot() + } else { + &maybe_sysroot + }; + let libdir = sysroot.join(libdir(&compiler.host)); + // Customize the compiler we're running. Specify the compiler to cargo // as our shim and then pass it some various options used to configure // how the actual compiler itself is called. 
// // These variables are primarily all read by // src/bootstrap/bin/{rustc.rs,rustdoc.rs} - cargo.env("RUSTBUILD_NATIVE_DIR", self.native_dir(target)) - .env("RUSTC", self.out.join("bootstrap/debug/rustc")) - .env("RUSTC_REAL", self.rustc(compiler)) - .env("RUSTC_STAGE", stage.to_string()) - .env("RUSTC_DEBUG_ASSERTIONS", - self.config.rust_debug_assertions.to_string()) - .env("RUSTC_SYSROOT", self.sysroot(compiler)) - .env("RUSTC_LIBDIR", self.rustc_libdir(compiler)) - .env("RUSTC_RPATH", self.config.rust_rpath.to_string()) - .env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc")) - .env("RUSTDOC_REAL", if cmd == "doc" || cmd == "test" { - self.rustdoc(compiler.host) - } else { - PathBuf::from("/path/to/nowhere/rustdoc/not/required") - }) - .env("TEST_MIRI", self.config.test_miri.to_string()) - .env("RUSTC_ERROR_METADATA_DST", self.extended_error_dir()); - if let Some(n) = self.config.rust_codegen_units { - cargo.env("RUSTC_CODEGEN_UNITS", n.to_string()); - } + cargo + .env("RUSTBUILD_NATIVE_DIR", self.native_dir(target)) + .env("RUSTC", self.out.join("bootstrap/debug/rustc")) + .env("RUSTC_REAL", self.rustc(compiler)) + .env("RUSTC_STAGE", stage.to_string()) + .env( + "RUSTC_DEBUG_ASSERTIONS", + self.config.rust_debug_assertions.to_string(), + ) + .env("RUSTC_SYSROOT", &sysroot) + .env("RUSTC_LIBDIR", &libdir) + .env("RUSTC_RPATH", self.config.rust_rpath.to_string()) + .env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc")) + .env( + "RUSTDOC_REAL", + if cmd == "doc" || (cmd == "test" && want_rustdoc) { + self.rustdoc(compiler.host) + } else { + PathBuf::from("/path/to/nowhere/rustdoc/not/required") + }, + ) + .env("TEST_MIRI", self.config.test_miri.to_string()) + .env("RUSTC_ERROR_METADATA_DST", self.extended_error_dir()); - if let Some(host_linker) = self.build.linker(compiler.host) { + if let Some(host_linker) = self.linker(compiler.host) { cargo.env("RUSTC_HOST_LINKER", host_linker); } - if let Some(target_linker) = self.build.linker(target) { + if let 
Some(target_linker) = self.linker(target) { cargo.env("RUSTC_TARGET_LINKER", target_linker); } - if cmd != "build" { - cargo.env("RUSTDOC_LIBDIR", self.rustc_libdir(self.compiler(2, self.build.build))); + if let Some(ref error_format) = self.config.rustc_error_format { + cargo.env("RUSTC_ERROR_FORMAT", error_format); + } + if cmd != "build" && cmd != "check" && want_rustdoc { + cargo.env("RUSTDOC_LIBDIR", self.sysroot_libdir(compiler, self.config.build)); } - if mode != Mode::Tool { - // Tools don't get debuginfo right now, e.g. cargo and rls don't - // get compiled with debuginfo. - // Adding debuginfo increases their sizes by a factor of 3-4. + if mode.is_tool() { + // Tools like cargo and rls don't get debuginfo by default right now, but this can be + // enabled in the config. Adding debuginfo makes them several times larger. + if self.config.rust_debuginfo_tools { + cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string()); + cargo.env( + "RUSTC_DEBUGINFO_LINES", + self.config.rust_debuginfo_lines.to_string(), + ); + } + } else { cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string()); - cargo.env("RUSTC_DEBUGINFO_LINES", self.config.rust_debuginfo_lines.to_string()); + cargo.env( + "RUSTC_DEBUGINFO_LINES", + self.config.rust_debuginfo_lines.to_string(), + ); cargo.env("RUSTC_FORCE_UNSTABLE", "1"); // Currently the compiler depends on crates from crates.io, and @@ -531,6 +872,10 @@ impl<'a> Builder<'a> { cargo.env("RUSTC_CRT_STATIC", x.to_string()); } + if let Some(x) = self.crt_static(compiler.host) { + cargo.env("RUSTC_HOST_CRT_STATIC", x.to_string()); + } + // Enable usage of unstable features cargo.env("RUSTC_BOOTSTRAP", "1"); self.add_rust_test_threads(&mut cargo); @@ -550,28 +895,43 @@ impl<'a> Builder<'a> { // build scripts in that situation. // // If LLVM support is disabled we need to use the snapshot compiler to compile - // build scripts, as the new compiler doesnt support executables. 
- if mode == Mode::Libstd || !self.build.config.llvm_enabled { - cargo.env("RUSTC_SNAPSHOT", &self.initial_rustc) - .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir()); + // build scripts, as the new compiler doesn't support executables. + if mode == Mode::Std || !self.config.llvm_enabled { + cargo + .env("RUSTC_SNAPSHOT", &self.initial_rustc) + .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir()); } else { - cargo.env("RUSTC_SNAPSHOT", self.rustc(compiler)) - .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_libdir(compiler)); + cargo + .env("RUSTC_SNAPSHOT", self.rustc(compiler)) + .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_libdir(compiler)); } - // Ignore incremental modes except for stage0, since we're - // not guaranteeing correctness across builds if the compiler - // is changing under your feet.` - if self.config.incremental && compiler.stage == 0 { - let incr_dir = self.incremental_dir(compiler); - cargo.env("RUSTC_INCREMENTAL", incr_dir); + if self.config.incremental { + cargo.env("CARGO_INCREMENTAL", "1"); } if let Some(ref on_fail) = self.config.on_fail { cargo.env("RUSTC_ON_FAIL", on_fail); } - cargo.env("RUSTC_VERBOSE", format!("{}", self.verbosity)); + if self.config.print_step_timings { + cargo.env("RUSTC_PRINT_STEP_TIMINGS", "1"); + } + + if self.config.backtrace_on_ice { + cargo.env("RUSTC_BACKTRACE_ON_ICE", "1"); + } + + if self.config.rust_verify_llvm_ir { + cargo.env("RUSTC_VERIFY_LLVM_IR", "1"); + } + + cargo.env("RUSTC_VERBOSE", self.verbosity.to_string()); + + // in std, we want to avoid denying warnings for stage 0 as that makes cfg's painful. + if self.config.deny_warnings && !(mode == Mode::Std && stage == 0) { + cargo.env("RUSTC_DENY_WARNINGS", "1"); + } // Throughout the build Cargo can execute a number of build scripts // compiling C/C++ code and we need to pass compilers, archivers, flags, etc @@ -580,37 +940,64 @@ impl<'a> Builder<'a> { // the options through environment variables that are fetched and understood by both. 
// // FIXME: the guard against msvc shouldn't need to be here - if !target.contains("msvc") { - let cc = self.cc(target); - cargo.env(format!("CC_{}", target), cc) - .env("CC", cc); + if target.contains("msvc") { + if let Some(ref cl) = self.config.llvm_clang_cl { + cargo.env("CC", cl).env("CXX", cl); + } + } else { + let ccache = self.config.ccache.as_ref(); + let ccacheify = |s: &Path| { + let ccache = match ccache { + Some(ref s) => s, + None => return s.display().to_string(), + }; + // FIXME: the cc-rs crate only recognizes the literal strings + // `ccache` and `sccache` when doing caching compilations, so we + // mirror that here. It should probably be fixed upstream to + // accept a new env var or otherwise work with custom ccache + // vars. + match &ccache[..] { + "ccache" | "sccache" => format!("{} {}", ccache, s.display()), + _ => s.display().to_string(), + } + }; + let cc = ccacheify(&self.cc(target)); + cargo.env(format!("CC_{}", target), &cc).env("CC", &cc); let cflags = self.cflags(target).join(" "); - cargo.env(format!("CFLAGS_{}", target), cflags.clone()) - .env("CFLAGS", cflags.clone()); + cargo + .env(format!("CFLAGS_{}", target), cflags.clone()) + .env("CFLAGS", cflags.clone()); if let Some(ar) = self.ar(target) { let ranlib = format!("{} s", ar.display()); - cargo.env(format!("AR_{}", target), ar) - .env("AR", ar) - .env(format!("RANLIB_{}", target), ranlib.clone()) - .env("RANLIB", ranlib); + cargo + .env(format!("AR_{}", target), ar) + .env("AR", ar) + .env(format!("RANLIB_{}", target), ranlib.clone()) + .env("RANLIB", ranlib); } if let Ok(cxx) = self.cxx(target) { - cargo.env(format!("CXX_{}", target), cxx) - .env("CXX", cxx) - .env(format!("CXXFLAGS_{}", target), cflags.clone()) - .env("CXXFLAGS", cflags); + let cxx = ccacheify(&cxx); + cargo + .env(format!("CXX_{}", target), &cxx) + .env("CXX", &cxx) + .env(format!("CXXFLAGS_{}", target), cflags.clone()) + .env("CXXFLAGS", cflags); } } - if mode == Mode::Libstd && self.config.extended && 
compiler.is_final_stage(self) { + if cmd == "build" + && mode == Mode::Std + && self.config.extended + && compiler.is_final_stage(self) + { cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string()); } // For `cargo doc` invocations, make rustdoc print the Rust version into the docs - cargo.env("RUSTDOC_CRATE_VERSION", self.build.rust_version()); + cargo.env("RUSTDOC_CRATE_VERSION", self.rust_version()); // Environment variables *required* throughout the build // @@ -618,23 +1005,60 @@ impl<'a> Builder<'a> { cargo.env("CFG_COMPILER_HOST_TRIPLE", target); // Set this for all builds to make sure doc builds also get it. - cargo.env("CFG_RELEASE_CHANNEL", &self.build.config.channel); + cargo.env("CFG_RELEASE_CHANNEL", &self.config.channel); - if self.is_very_verbose() { + // This one's a bit tricky. As of the time of this writing the compiler + // links to the `winapi` crate on crates.io. This crate provides raw + // bindings to Windows system functions, sort of like libc does for + // Unix. This crate also, however, provides "import libraries" for the + // MinGW targets. There's an import library per dll in the windows + // distribution which is what's linked to. These custom import libraries + // are used because the winapi crate can reference Windows functions not + // present in the MinGW import libraries. + // + // For example MinGW may ship libdbghelp.a, but it may not have + // references to all the functions in the dbghelp dll. Instead the + // custom import library for dbghelp in the winapi crates has all this + // information. + // + // Unfortunately for us though the import libraries are linked by + // default via `-ldylib=winapi_foo`. That is, they're linked with the + // `dylib` type with a `winapi_` prefix (so the winapi ones don't + // conflict with the system MinGW ones). 
This consequently means that + // the binaries we ship of things like rustc_codegen_llvm (aka the rustc_codegen_llvm + // DLL) when linked against *again*, for example with procedural macros + // or plugins, will trigger the propagation logic of `-ldylib`, passing + // `-lwinapi_foo` to the linker again. This isn't actually available in + // our distribution, however, so the link fails. + // + // To solve this problem we tell winapi to not use its bundled import + // libraries. This means that it will link to the system MinGW import + // libraries by default, and the `-ldylib=foo` directives will still get + // passed to the final linker, but they'll look like `-lfoo` which can + // be resolved because MinGW has the import library. The downside is we + // don't get newer functions from Windows, but we don't use any of them + // anyway. + if !mode.is_tool() { + cargo.env("WINAPI_NO_BUNDLED_LIBRARIES", "1"); + } + + for _ in 1..self.verbosity { cargo.arg("-v"); } + + // This must be kept before the thinlto check, as we set codegen units + // to 1 forcibly there. + if let Some(n) = self.config.rust_codegen_units { + cargo.env("RUSTC_CODEGEN_UNITS", n.to_string()); + } + if self.config.rust_optimize { - // FIXME: cargo bench does not accept `--release` - if cmd != "bench" { + // FIXME: cargo bench/install do not accept `--release` + if cmd != "bench" && cmd != "install" { cargo.arg("--release"); } - - if self.config.rust_codegen_units.is_none() && - self.build.is_rust_llvm(compiler.host) - { - cargo.env("RUSTC_THINLTO", "1"); - } } + if self.config.locked_deps { cargo.arg("--locked"); } @@ -647,7 +1071,7 @@ impl<'a> Builder<'a> { cargo } - /// Ensure that a given step is built, returning it's output. This will + /// Ensure that a given step is built, returning its output. This will /// cache the step, so it is safe (and good!) to call this as often as /// needed to ensure that all dependencies are built. 
pub fn ensure(&'a self, step: S) -> S::Output { @@ -655,7 +1079,10 @@ impl<'a> Builder<'a> { let mut stack = self.stack.borrow_mut(); for stack_step in stack.iter() { // should skip - if stack_step.downcast_ref::().map_or(true, |stack_step| *stack_step != step) { + if stack_step + .downcast_ref::() + .map_or(true, |stack_step| *stack_step != step) + { continue; } let mut out = String::new(); @@ -666,21 +1093,694 @@ impl<'a> Builder<'a> { panic!(out); } if let Some(out) = self.cache.get(&step) { - self.build.verbose(&format!("{}c {:?}", " ".repeat(stack.len()), step)); + self.verbose(&format!("{}c {:?}", " ".repeat(stack.len()), step)); + + { + let mut graph = self.graph.borrow_mut(); + let parent = self.parent.get(); + let us = *self + .graph_nodes + .borrow_mut() + .entry(format!("{:?}", step)) + .or_insert_with(|| graph.add_node(format!("{:?}", step))); + if let Some(parent) = parent { + graph.add_edge(parent, us, false); + } + } return out; } - self.build.verbose(&format!("{}> {:?}", " ".repeat(stack.len()), step)); + self.verbose(&format!("{}> {:?}", " ".repeat(stack.len()), step)); stack.push(Box::new(step.clone())); } - let out = step.clone().run(self); + + let prev_parent = self.parent.get(); + + { + let mut graph = self.graph.borrow_mut(); + let parent = self.parent.get(); + let us = *self + .graph_nodes + .borrow_mut() + .entry(format!("{:?}", step)) + .or_insert_with(|| graph.add_node(format!("{:?}", step))); + self.parent.set(Some(us)); + if let Some(parent) = parent { + graph.add_edge(parent, us, true); + } + } + + let (out, dur) = { + let start = Instant::now(); + let zero = Duration::new(0, 0); + let parent = self.time_spent_on_dependencies.replace(zero); + let out = step.clone().run(self); + let dur = start.elapsed(); + let deps = self.time_spent_on_dependencies.replace(parent + dur); + (out, dur - deps) + }; + + self.parent.set(prev_parent); + + if self.config.print_step_timings && dur > Duration::from_millis(100) { + println!( + "[TIMING] {:?} -- 
{}.{:03}", + step, + dur.as_secs(), + dur.subsec_nanos() / 1_000_000 + ); + } + { let mut stack = self.stack.borrow_mut(); let cur_step = stack.pop().expect("step stack empty"); assert_eq!(cur_step.downcast_ref(), Some(&step)); } - self.build.verbose(&format!("{}< {:?}", " ".repeat(self.stack.borrow().len()), step)); + self.verbose(&format!( + "{}< {:?}", + " ".repeat(self.stack.borrow().len()), + step + )); self.cache.put(step, out.clone()); out } } + +#[cfg(test)] +mod __test { + use super::*; + use config::Config; + use std::thread; + + fn configure(host: &[&str], target: &[&str]) -> Config { + let mut config = Config::default_opts(); + // don't save toolstates + config.save_toolstates = None; + config.run_host_only = true; + config.dry_run = true; + // try to avoid spurious failures in dist where we create/delete each others file + let dir = config.out.join("tmp-rustbuild-tests").join( + &thread::current() + .name() + .unwrap_or("unknown") + .replace(":", "-"), + ); + t!(fs::create_dir_all(&dir)); + config.out = dir; + config.build = INTERNER.intern_str("A"); + config.hosts = vec![config.build] + .clone() + .into_iter() + .chain(host.iter().map(|s| INTERNER.intern_str(s))) + .collect::>(); + config.targets = config + .hosts + .clone() + .into_iter() + .chain(target.iter().map(|s| INTERNER.intern_str(s))) + .collect::>(); + config + } + + fn first(v: Vec<(A, B)>) -> Vec { + v.into_iter().map(|(a, _)| a).collect::>() + } + + #[test] + fn dist_baseline() { + let build = Build::new(configure(&[], &[])); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); + + let a = INTERNER.intern_str("A"); + + assert_eq!( + first(builder.cache.all::()), + &[dist::Docs { stage: 2, host: a },] + ); + assert_eq!( + first(builder.cache.all::()), + &[dist::Mingw { host: a },] + ); + assert_eq!( + first(builder.cache.all::()), + &[dist::Rustc { + compiler: Compiler { host: a, stage: 2 } + },] + ); + assert_eq!( + 
first(builder.cache.all::()), + &[dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + },] + ); + assert_eq!(first(builder.cache.all::()), &[dist::Src]); + } + + #[test] + fn dist_with_targets() { + let build = Build::new(configure(&[], &["B"])); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Docs { stage: 2, host: a }, + dist::Docs { stage: 2, host: b }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[dist::Mingw { host: a }, dist::Mingw { host: b },] + ); + assert_eq!( + first(builder.cache.all::()), + &[dist::Rustc { + compiler: Compiler { host: a, stage: 2 } + },] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + ] + ); + assert_eq!(first(builder.cache.all::()), &[dist::Src]); + } + + #[test] + fn dist_with_hosts() { + let build = Build::new(configure(&["B"], &[])); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Docs { stage: 2, host: a }, + dist::Docs { stage: 2, host: b }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[dist::Mingw { host: a }, dist::Mingw { host: b },] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Rustc { + compiler: Compiler { host: a, stage: 2 } + }, + dist::Rustc { + compiler: Compiler { host: b, stage: 2 } + }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + 
target: b, + }, + ] + ); + assert_eq!(first(builder.cache.all::()), &[dist::Src]); + } + + #[test] + fn dist_with_targets_and_hosts() { + let build = Build::new(configure(&["B"], &["C"])); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + let c = INTERNER.intern_str("C"); + + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Docs { stage: 2, host: a }, + dist::Docs { stage: 2, host: b }, + dist::Docs { stage: 2, host: c }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Mingw { host: a }, + dist::Mingw { host: b }, + dist::Mingw { host: c }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Rustc { + compiler: Compiler { host: a, stage: 2 } + }, + dist::Rustc { + compiler: Compiler { host: b, stage: 2 } + }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: c, + }, + ] + ); + assert_eq!(first(builder.cache.all::()), &[dist::Src]); + } + + #[test] + fn dist_with_target_flag() { + let mut config = configure(&["B"], &["C"]); + config.run_host_only = false; // as-if --target=C was passed + let build = Build::new(config); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + let c = INTERNER.intern_str("C"); + + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Docs { stage: 2, host: a }, + dist::Docs { stage: 2, host: b }, + dist::Docs { stage: 2, host: c }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Mingw { host: a }, + dist::Mingw { host: b }, + dist::Mingw { host: c }, + ] 
+ ); + assert_eq!(first(builder.cache.all::()), &[]); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: c, + }, + ] + ); + assert_eq!(first(builder.cache.all::()), &[]); + } + + #[test] + fn dist_with_same_targets_and_hosts() { + let build = Build::new(configure(&["B"], &["B"])); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Docs { stage: 2, host: a }, + dist::Docs { stage: 2, host: b }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[dist::Mingw { host: a }, dist::Mingw { host: b },] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Rustc { + compiler: Compiler { host: a, stage: 2 } + }, + dist::Rustc { + compiler: Compiler { host: b, stage: 2 } + }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + dist::Std { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + ] + ); + assert_eq!(first(builder.cache.all::()), &[dist::Src]); + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Std { + compiler: Compiler { host: a, stage: 0 }, + target: a, + }, + compile::Std { + compiler: Compiler { host: a, stage: 1 }, + target: a, + }, + compile::Std { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + compile::Std { + compiler: Compiler { host: a, stage: 1 }, + target: b, + }, + compile::Std { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Test { + compiler: Compiler { host: a, stage: 0 }, + target: a, + }, + 
compile::Test { + compiler: Compiler { host: a, stage: 1 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 1 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Assemble { + target_compiler: Compiler { host: a, stage: 0 }, + }, + compile::Assemble { + target_compiler: Compiler { host: a, stage: 1 }, + }, + compile::Assemble { + target_compiler: Compiler { host: a, stage: 2 }, + }, + compile::Assemble { + target_compiler: Compiler { host: b, stage: 2 }, + }, + ] + ); + } + + #[test] + fn build_default() { + let build = Build::new(configure(&["B"], &["C"])); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Build), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + let c = INTERNER.intern_str("C"); + + assert!(!builder.cache.all::().is_empty()); + assert!(!builder.cache.all::().is_empty()); + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Rustc { + compiler: Compiler { host: a, stage: 0 }, + target: a, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 1 }, + target: a, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + compile::Rustc { + compiler: Compiler { host: b, stage: 2 }, + target: a, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 0 }, + target: b, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 1 }, + target: b, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + compile::Rustc { + compiler: Compiler { host: b, stage: 2 }, + target: b, + }, + ] + ); + + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Test { + compiler: Compiler { host: a, stage: 0 }, + target: a, + }, + compile::Test { + compiler: 
Compiler { host: a, stage: 1 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: b, stage: 2 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 0 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 1 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: b, stage: 2 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: c, + }, + compile::Test { + compiler: Compiler { host: b, stage: 2 }, + target: c, + }, + ] + ); + } + + #[test] + fn build_with_target_flag() { + let mut config = configure(&["B"], &["C"]); + config.run_host_only = false; + let build = Build::new(config); + let mut builder = Builder::new(&build); + builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Build), &[]); + + let a = INTERNER.intern_str("A"); + let b = INTERNER.intern_str("B"); + let c = INTERNER.intern_str("C"); + + assert!(!builder.cache.all::().is_empty()); + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Assemble { + target_compiler: Compiler { host: a, stage: 0 }, + }, + compile::Assemble { + target_compiler: Compiler { host: a, stage: 1 }, + }, + compile::Assemble { + target_compiler: Compiler { host: b, stage: 1 }, + }, + compile::Assemble { + target_compiler: Compiler { host: a, stage: 2 }, + }, + compile::Assemble { + target_compiler: Compiler { host: b, stage: 2 }, + }, + ] + ); + assert_eq!( + first(builder.cache.all::()), + &[ + compile::Rustc { + compiler: Compiler { host: a, stage: 0 }, + target: a, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 1 }, + target: a, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 0 }, + target: b, + }, + compile::Rustc { + compiler: Compiler { host: a, stage: 1 }, + target: b, + }, + ] + ); + + assert_eq!( + 
first(builder.cache.all::()), + &[ + compile::Test { + compiler: Compiler { host: a, stage: 0 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 1 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: b, stage: 2 }, + target: a, + }, + compile::Test { + compiler: Compiler { host: a, stage: 0 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 1 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: b, stage: 2 }, + target: b, + }, + compile::Test { + compiler: Compiler { host: a, stage: 2 }, + target: c, + }, + compile::Test { + compiler: Compiler { host: b, stage: 2 }, + target: c, + }, + ] + ); + } + + #[test] + fn test_with_no_doc_stage0() { + let mut config = configure(&[], &[]); + config.stage = Some(0); + config.cmd = Subcommand::Test { + paths: vec!["src/libstd".into()], + test_args: vec![], + rustc_args: vec![], + fail_fast: true, + doc_tests: DocTests::No, + bless: false, + compare_mode: None, + }; + + let build = Build::new(config); + let mut builder = Builder::new(&build); + + let host = INTERNER.intern_str("A"); + + builder.run_step_descriptions( + &[StepDescription::from::()], + &["src/libstd".into()], + ); + + // Ensure we don't build any compiler artifacts. 
+ assert!(builder.cache.all::().is_empty()); + assert_eq!( + first(builder.cache.all::()), + &[test::Crate { + compiler: Compiler { host, stage: 0 }, + target: host, + mode: Mode::Std, + test_kind: test::TestKind::Test, + krate: INTERNER.intern_str("std"), + },] + ); + } +} diff --git a/src/bootstrap/cache.rs b/src/bootstrap/cache.rs index c27493158826..bca5ff85ba23 100644 --- a/src/bootstrap/cache.rs +++ b/src/bootstrap/cache.rs @@ -21,6 +21,7 @@ use std::mem; use std::ops::Deref; use std::path::{Path, PathBuf}; use std::sync::Mutex; +use std::cmp::{PartialOrd, Ord, Ordering}; use builder::Step; @@ -154,6 +155,19 @@ impl AsRef for Interned { } } +impl PartialOrd> for Interned { + fn partial_cmp(&self, other: &Self) -> Option { + let l = INTERNER.strs.lock().unwrap(); + l.get(*self).partial_cmp(l.get(*other)) + } +} + +impl Ord for Interned { + fn cmp(&self, other: &Self) -> Ordering { + let l = INTERNER.strs.lock().unwrap(); + l.get(*self).cmp(l.get(*other)) + } +} struct TyIntern { items: Vec, @@ -235,7 +249,7 @@ lazy_static! 
{ pub struct Cache( RefCell, // actually a HashMap> + Box, // actually a HashMap> >> ); @@ -264,4 +278,16 @@ impl Cache { .expect("invalid type mapped"); stepcache.get(step).cloned() } + + #[cfg(test)] + pub fn all(&mut self) -> Vec<(S, S::Output)> { + let cache = self.0.get_mut(); + let type_id = TypeId::of::(); + let mut v = cache.remove(&type_id) + .map(|b| b.downcast::>().expect("correct type")) + .map(|m| m.into_iter().collect::>()) + .unwrap_or_default(); + v.sort_by_key(|&(a, _)| a); + v + } } diff --git a/src/bootstrap/cc_detect.rs b/src/bootstrap/cc_detect.rs index e531fdaf2923..698903f128d4 100644 --- a/src/bootstrap/cc_detect.rs +++ b/src/bootstrap/cc_detect.rs @@ -77,8 +77,19 @@ pub fn find(build: &mut Build) { .collect::>(); for target in targets.into_iter() { let mut cfg = cc::Build::new(); - cfg.cargo_metadata(false).opt_level(0).warnings(false).debug(false) + cfg.cargo_metadata(false).opt_level(2).warnings(false).debug(false) .target(&target).host(&build.build); + match build.crt_static(target) { + Some(a) => { cfg.static_crt(a); } + None => { + if target.contains("msvc") { + cfg.static_crt(true); + } + if target.contains("musl") { + cfg.static_flag(true); + } + } + } let config = build.config.target_config.get(&target); if let Some(cc) = config.and_then(|c| c.cc.as_ref()) { @@ -94,8 +105,9 @@ pub fn find(build: &mut Build) { cc2ar(compiler.path(), &target) }; - build.verbose(&format!("CC_{} = {:?}", &target, compiler.path())); build.cc.insert(target, compiler); + build.verbose(&format!("CC_{} = {:?}", &target, build.cc(target))); + build.verbose(&format!("CFLAGS_{} = {:?}", &target, build.cflags(target))); if let Some(ar) = ar { build.verbose(&format!("AR_{} = {:?}", &target, ar)); build.ar.insert(target, ar); @@ -106,7 +118,7 @@ pub fn find(build: &mut Build) { let hosts = build.hosts.iter().cloned().chain(iter::once(build.build)).collect::>(); for host in hosts.into_iter() { let mut cfg = cc::Build::new(); - 
cfg.cargo_metadata(false).opt_level(0).warnings(false).debug(false).cpp(true) + cfg.cargo_metadata(false).opt_level(2).warnings(false).debug(false).cpp(true) .target(&host).host(&build.build); let config = build.config.target_config.get(&host); if let Some(cxx) = config.and_then(|c| c.cxx.as_ref()) { diff --git a/src/bootstrap/channel.rs b/src/bootstrap/channel.rs index 4e3f3a00b15e..fa2b58fb2daa 100644 --- a/src/bootstrap/channel.rs +++ b/src/bootstrap/channel.rs @@ -24,12 +24,7 @@ use Build; use config::Config; // The version number -pub const CFG_RELEASE_NUM: &str = "1.25.0"; - -// An optional number to put after the label, e.g. '.2' -> '-beta.2' -// Be sure to make this starts with a dot to conform to semver pre-release -// versions (section 9) -pub const CFG_PRERELEASE_VERSION: &str = ".1"; +pub const CFG_RELEASE_NUM: &str = "1.30.0"; pub struct GitInfo { inner: Option, diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index ed110762cb3c..133e5aa37a7d 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -1,4 +1,4 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,1492 +8,279 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Implementation of the test-related targets of the build system. -//! -//! This file implements the various regression test suites that we execute on -//! our CI. +//! Implementation of compiling the compiler and standard library, in "check" mode. 
-use std::collections::HashSet; -use std::env; -use std::ffi::OsString; -use std::iter; -use std::fmt; -use std::fs::{self, File}; -use std::path::{PathBuf, Path}; -use std::process::Command; -use std::io::Read; - -use build_helper::{self, output}; - -use builder::{Kind, RunConfig, ShouldRun, Builder, Compiler, Step}; +use compile::{run_cargo, std_cargo, test_cargo, rustc_cargo, rustc_cargo_env, add_to_sysroot}; +use builder::{RunConfig, Builder, ShouldRun, Step}; +use tool::{self, prepare_tool_cargo, SourceType}; +use {Compiler, Mode}; use cache::{INTERNER, Interned}; -use compile; -use dist; -use native; -use tool::{self, Tool}; -use util::{self, dylib_path, dylib_path_var}; -use {Build, Mode}; -use toolstate::ToolState; - -const ADB_TEST_DIR: &str = "/data/tmp/work"; - -/// The two modes of the test runner; tests or benchmarks. -#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] -pub enum TestKind { - /// Run `cargo test` - Test, - /// Run `cargo bench` - Bench, -} - -impl TestKind { - // Return the cargo subcommand for this test kind - fn subcommand(self) -> &'static str { - match self { - TestKind::Test => "test", - TestKind::Bench => "bench", - } - } -} - -impl fmt::Display for TestKind { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - TestKind::Test => "Testing", - TestKind::Bench => "Benchmarking", - }) - } -} - -fn try_run(build: &Build, cmd: &mut Command) -> bool { - if !build.fail_fast { - if !build.try_run(cmd) { - let mut failures = build.delayed_failures.borrow_mut(); - failures.push(format!("{:?}", cmd)); - return false; - } - } else { - build.run(cmd); - } - true -} - -fn try_run_quiet(build: &Build, cmd: &mut Command) { - if !build.fail_fast { - if !build.try_run_quiet(cmd) { - let mut failures = build.delayed_failures.borrow_mut(); - failures.push(format!("{:?}", cmd)); - } - } else { - build.run_quiet(cmd); - } -} +use std::path::PathBuf; #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct 
Linkcheck { - host: Interned, +pub struct Std { + pub target: Interned, } -impl Step for Linkcheck { - type Output = (); - const ONLY_HOSTS: bool = true; - const DEFAULT: bool = true; - - /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler. - /// - /// This tool in `src/tools` will verify the validity of all our links in the - /// documentation to ensure we don't have a bunch of dead ones. - fn run(self, builder: &Builder) { - let build = builder.build; - let host = self.host; - - println!("Linkcheck ({})", host); - - builder.default_doc(None); - - let _time = util::timeit(); - try_run(build, builder.tool_cmd(Tool::Linkchecker) - .arg(build.out.join(host).join("doc"))); - } - - fn should_run(run: ShouldRun) -> ShouldRun { - let builder = run.builder; - run.path("src/tools/linkchecker").default_condition(builder.build.config.docs) - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Linkcheck { host: run.target }); - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Cargotest { - stage: u32, - host: Interned, -} - -impl Step for Cargotest { - type Output = (); - const ONLY_HOSTS: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/cargotest") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Cargotest { - stage: run.builder.top_stage, - host: run.target, - }); - } - - /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler. - /// - /// This tool in `src/tools` will check out a few Rust projects and run `cargo - /// test` to ensure that we don't regress the test suites there. - fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = builder.compiler(self.stage, self.host); - builder.ensure(compile::Rustc { compiler, target: compiler.host }); - - // Note that this is a short, cryptic, and not scoped directory name. 
This - // is currently to minimize the length of path on Windows where we otherwise - // quickly run into path name limit constraints. - let out_dir = build.out.join("ct"); - t!(fs::create_dir_all(&out_dir)); - - let _time = util::timeit(); - let mut cmd = builder.tool_cmd(Tool::CargoTest); - try_run(build, cmd.arg(&build.initial_cargo) - .arg(&out_dir) - .env("RUSTC", builder.rustc(compiler)) - .env("RUSTDOC", builder.rustdoc(compiler.host))); - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Cargo { - stage: u32, - host: Interned, -} - -impl Step for Cargo { - type Output = (); - const ONLY_HOSTS: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/cargo") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Cargo { - stage: run.builder.top_stage, - host: run.target, - }); - } - - /// Runs `cargo test` for `cargo` packaged with Rust. - fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = builder.compiler(self.stage, self.host); - - builder.ensure(tool::Cargo { compiler, target: self.host }); - let mut cargo = builder.cargo(compiler, Mode::Tool, self.host, "test"); - cargo.arg("--manifest-path").arg(build.src.join("src/tools/cargo/Cargo.toml")); - if !build.fail_fast { - cargo.arg("--no-fail-fast"); - } - - // Don't build tests dynamically, just a pain to work with - cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); - - // Don't run cross-compile tests, we may not have cross-compiled libstd libs - // available. 
- cargo.env("CFG_DISABLE_CROSS_TESTS", "1"); - - try_run(build, cargo.env("PATH", &path_for_cargo(builder, compiler))); - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Rls { - stage: u32, - host: Interned, -} - -impl Step for Rls { - type Output = (); - const ONLY_HOSTS: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/rls") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Rls { - stage: run.builder.top_stage, - host: run.target, - }); - } - - /// Runs `cargo test` for the rls. - fn run(self, builder: &Builder) { - let build = builder.build; - let stage = self.stage; - let host = self.host; - let compiler = builder.compiler(stage, host); - - builder.ensure(tool::Rls { compiler, target: self.host }); - let mut cargo = tool::prepare_tool_cargo(builder, - compiler, - host, - "test", - "src/tools/rls"); - - // Don't build tests dynamically, just a pain to work with - cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); - - builder.add_rustc_lib_path(compiler, &mut cargo); - - if try_run(build, &mut cargo) { - build.save_toolstate("rls", ToolState::TestPass); - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Rustfmt { - stage: u32, - host: Interned, -} - -impl Step for Rustfmt { - type Output = (); - const ONLY_HOSTS: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/rustfmt") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Rustfmt { - stage: run.builder.top_stage, - host: run.target, - }); - } - - /// Runs `cargo test` for rustfmt. 
- fn run(self, builder: &Builder) { - let build = builder.build; - let stage = self.stage; - let host = self.host; - let compiler = builder.compiler(stage, host); - - builder.ensure(tool::Rustfmt { compiler, target: self.host }); - let mut cargo = tool::prepare_tool_cargo(builder, - compiler, - host, - "test", - "src/tools/rustfmt"); - - // Don't build tests dynamically, just a pain to work with - cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); - - builder.add_rustc_lib_path(compiler, &mut cargo); - - if try_run(build, &mut cargo) { - build.save_toolstate("rustfmt", ToolState::TestPass); - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Miri { - stage: u32, - host: Interned, -} - -impl Step for Miri { - type Output = (); - const ONLY_HOSTS: bool = true; - const DEFAULT: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - let test_miri = run.builder.build.config.test_miri; - run.path("src/tools/miri").default_condition(test_miri) - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Miri { - stage: run.builder.top_stage, - host: run.target, - }); - } - - /// Runs `cargo test` for miri. 
- fn run(self, builder: &Builder) { - let build = builder.build; - let stage = self.stage; - let host = self.host; - let compiler = builder.compiler(stage, host); - - if let Some(miri) = builder.ensure(tool::Miri { compiler, target: self.host }) { - let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test"); - cargo.arg("--manifest-path").arg(build.src.join("src/tools/miri/Cargo.toml")); - - // Don't build tests dynamically, just a pain to work with - cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); - // miri tests need to know about the stage sysroot - cargo.env("MIRI_SYSROOT", builder.sysroot(compiler)); - cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler)); - cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler)); - cargo.env("MIRI_PATH", miri); - - builder.add_rustc_lib_path(compiler, &mut cargo); - - if try_run(build, &mut cargo) { - build.save_toolstate("miri", ToolState::TestPass); - } - } else { - eprintln!("failed to test miri: could not build"); - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Clippy { - stage: u32, - host: Interned, -} - -impl Step for Clippy { - type Output = (); - const ONLY_HOSTS: bool = true; - const DEFAULT: bool = false; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/clippy") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Clippy { - stage: run.builder.top_stage, - host: run.target, - }); - } - - /// Runs `cargo test` for clippy. 
- fn run(self, builder: &Builder) { - let build = builder.build; - let stage = self.stage; - let host = self.host; - let compiler = builder.compiler(stage, host); - - if let Some(clippy) = builder.ensure(tool::Clippy { compiler, target: self.host }) { - let mut cargo = builder.cargo(compiler, Mode::Tool, host, "test"); - cargo.arg("--manifest-path").arg(build.src.join("src/tools/clippy/Cargo.toml")); - - // Don't build tests dynamically, just a pain to work with - cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); - // clippy tests need to know about the stage sysroot - cargo.env("SYSROOT", builder.sysroot(compiler)); - cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler)); - cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler)); - let host_libs = builder.stage_out(compiler, Mode::Tool).join(builder.cargo_dir()); - cargo.env("HOST_LIBS", host_libs); - // clippy tests need to find the driver - cargo.env("CLIPPY_DRIVER_PATH", clippy); - - builder.add_rustc_lib_path(compiler, &mut cargo); - - if try_run(build, &mut cargo) { - build.save_toolstate("clippy-driver", ToolState::TestPass); - } - } else { - eprintln!("failed to test clippy: could not build"); - } - } -} - -fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString { - // Configure PATH to find the right rustc. NB. we have to use PATH - // and not RUSTC because the Cargo test suite has tests that will - // fail if rustc is not spelled `rustc`. - let path = builder.sysroot(compiler).join("bin"); - let old_path = env::var_os("PATH").unwrap_or_default(); - env::join_paths(iter::once(path).chain(env::split_paths(&old_path))).expect("") -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Tidy { - host: Interned, -} - -impl Step for Tidy { - type Output = (); - const DEFAULT: bool = true; - const ONLY_HOSTS: bool = true; - const ONLY_BUILD: bool = true; - - /// Runs the `tidy` tool as compiled in `stage` by the `host` compiler. 
- /// - /// This tool in `src/tools` checks up on various bits and pieces of style and - /// otherwise just implements a few lint-like checks that are specific to the - /// compiler itself. - fn run(self, builder: &Builder) { - let build = builder.build; - let host = self.host; - - let _folder = build.fold_output(|| "tidy"); - println!("tidy check ({})", host); - let mut cmd = builder.tool_cmd(Tool::Tidy); - cmd.arg(build.src.join("src")); - if !build.config.vendor { - cmd.arg("--no-vendor"); - } - if build.config.quiet_tests { - cmd.arg("--quiet"); - } - try_run(build, &mut cmd); - } - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/tidy") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Tidy { - host: run.builder.build.build, - }); - } -} - -fn testdir(build: &Build, host: Interned) -> PathBuf { - build.out.join(host).join("test") -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -struct Test { - path: &'static str, - mode: &'static str, - suite: &'static str, -} - -static DEFAULT_COMPILETESTS: &[Test] = &[ - Test { path: "src/test/ui", mode: "ui", suite: "ui" }, - Test { path: "src/test/run-pass", mode: "run-pass", suite: "run-pass" }, - Test { path: "src/test/compile-fail", mode: "compile-fail", suite: "compile-fail" }, - Test { path: "src/test/parse-fail", mode: "parse-fail", suite: "parse-fail" }, - Test { path: "src/test/run-fail", mode: "run-fail", suite: "run-fail" }, - Test { - path: "src/test/run-pass-valgrind", - mode: "run-pass-valgrind", - suite: "run-pass-valgrind" - }, - Test { path: "src/test/mir-opt", mode: "mir-opt", suite: "mir-opt" }, - Test { path: "src/test/codegen", mode: "codegen", suite: "codegen" }, - Test { path: "src/test/codegen-units", mode: "codegen-units", suite: "codegen-units" }, - Test { path: "src/test/incremental", mode: "incremental", suite: "incremental" }, - - // What this runs varies depending on the native platform being apple - Test { path: "src/test/debuginfo", mode: "debuginfo-XXX", 
suite: "debuginfo" }, -]; - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct DefaultCompiletest { - compiler: Compiler, - target: Interned, - mode: &'static str, - suite: &'static str, -} - -impl Step for DefaultCompiletest { +impl Step for Std { type Output = (); const DEFAULT: bool = true; - fn should_run(mut run: ShouldRun) -> ShouldRun { - for test in DEFAULT_COMPILETESTS { - run = run.path(test.path); - } - run - } - - fn make_run(run: RunConfig) { - let compiler = run.builder.compiler(run.builder.top_stage, run.host); - - let test = run.path.map(|path| { - DEFAULT_COMPILETESTS.iter().find(|&&test| { - path.ends_with(test.path) - }).unwrap_or_else(|| { - panic!("make_run in compile test to receive test path, received {:?}", path); - }) - }); - - if let Some(test) = test { - run.builder.ensure(DefaultCompiletest { - compiler, - target: run.target, - mode: test.mode, - suite: test.suite, - }); - } else { - for test in DEFAULT_COMPILETESTS { - run.builder.ensure(DefaultCompiletest { - compiler, - target: run.target, - mode: test.mode, - suite: test.suite - }); - } - } - } - - fn run(self, builder: &Builder) { - builder.ensure(Compiletest { - compiler: self.compiler, - target: self.target, - mode: self.mode, - suite: self.suite, - }) - } -} - -// Also default, but host-only. 
-static HOST_COMPILETESTS: &[Test] = &[ - Test { path: "src/test/ui-fulldeps", mode: "ui", suite: "ui-fulldeps" }, - Test { path: "src/test/run-pass-fulldeps", mode: "run-pass", suite: "run-pass-fulldeps" }, - Test { path: "src/test/run-fail-fulldeps", mode: "run-fail", suite: "run-fail-fulldeps" }, - Test { - path: "src/test/compile-fail-fulldeps", - mode: "compile-fail", - suite: "compile-fail-fulldeps", - }, - Test { path: "src/test/run-make", mode: "run-make", suite: "run-make" }, - Test { path: "src/test/rustdoc", mode: "rustdoc", suite: "rustdoc" }, - - Test { path: "src/test/pretty", mode: "pretty", suite: "pretty" }, - Test { path: "src/test/run-pass/pretty", mode: "pretty", suite: "run-pass" }, - Test { path: "src/test/run-fail/pretty", mode: "pretty", suite: "run-fail" }, - Test { path: "src/test/run-pass-valgrind/pretty", mode: "pretty", suite: "run-pass-valgrind" }, - Test { path: "src/test/run-pass-fulldeps/pretty", mode: "pretty", suite: "run-pass-fulldeps" }, - Test { path: "src/test/run-fail-fulldeps/pretty", mode: "pretty", suite: "run-fail-fulldeps" }, -]; - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct HostCompiletest { - compiler: Compiler, - target: Interned, - mode: &'static str, - suite: &'static str, -} - -impl Step for HostCompiletest { - type Output = (); - const DEFAULT: bool = true; - const ONLY_HOSTS: bool = true; - - fn should_run(mut run: ShouldRun) -> ShouldRun { - for test in HOST_COMPILETESTS { - run = run.path(test.path); - } - run - } - - fn make_run(run: RunConfig) { - let compiler = run.builder.compiler(run.builder.top_stage, run.host); - - let test = run.path.map(|path| { - HOST_COMPILETESTS.iter().find(|&&test| { - path.ends_with(test.path) - }).unwrap_or_else(|| { - panic!("make_run in compile test to receive test path, received {:?}", path); - }) - }); - - if let Some(test) = test { - run.builder.ensure(HostCompiletest { - compiler, - target: run.target, - mode: test.mode, - suite: test.suite, - }); - } 
else { - for test in HOST_COMPILETESTS { - if test.mode == "pretty" { - continue; - } - run.builder.ensure(HostCompiletest { - compiler, - target: run.target, - mode: test.mode, - suite: test.suite - }); - } - } - } - - fn run(self, builder: &Builder) { - builder.ensure(Compiletest { - compiler: self.compiler, - target: self.target, - mode: self.mode, - suite: self.suite, - }) - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -struct Compiletest { - compiler: Compiler, - target: Interned, - mode: &'static str, - suite: &'static str, -} - -impl Step for Compiletest { - type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { - run.never() + run.all_krates("std") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Std { + target: run.target, + }); } - /// Executes the `compiletest` tool to run a suite of tests. - /// - /// Compiles all tests with `compiler` for `target` with the specified - /// compiletest `mode` and `suite` arguments. For example `mode` can be - /// "run-pass" or `suite` can be something like `debuginfo`. fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = self.compiler; let target = self.target; - let mode = self.mode; - let suite = self.suite; + let compiler = builder.compiler(0, builder.config.build); - // Skip codegen tests if they aren't enabled in configuration. 
- if !build.config.codegen_tests && suite == "codegen" { - return; - } + let out_dir = builder.stage_out(compiler, Mode::Std); + builder.clear_if_dirty(&out_dir, &builder.rustc(compiler)); - if suite == "debuginfo" { - // Skip debuginfo tests on MSVC - if build.build.contains("msvc") { - return; - } + let mut cargo = builder.cargo(compiler, Mode::Std, target, "check"); + std_cargo(builder, &compiler, target, &mut cargo); - if mode == "debuginfo-XXX" { - return if build.build.contains("apple") { - builder.ensure(Compiletest { - mode: "debuginfo-lldb", - ..self - }); - } else { - builder.ensure(Compiletest { - mode: "debuginfo-gdb", - ..self - }); - }; - } + let _folder = builder.fold_output(|| format!("stage{}-std", compiler.stage)); + println!("Checking std artifacts ({} -> {})", &compiler.host, target); + run_cargo(builder, + &mut cargo, + &libstd_stamp(builder, compiler, target), + true); - builder.ensure(dist::DebuggerScripts { - sysroot: builder.sysroot(compiler), - host: target - }); - } - - if suite.ends_with("fulldeps") || - // FIXME: Does pretty need librustc compiled? Note that there are - // fulldeps test suites with mode = pretty as well. - mode == "pretty" || - mode == "rustdoc" || - mode == "run-make" { - builder.ensure(compile::Rustc { compiler, target }); - } - - builder.ensure(compile::Test { compiler, target }); - builder.ensure(native::TestHelpers { target }); - builder.ensure(RemoteCopyLibs { compiler, target }); - - let _folder = build.fold_output(|| format!("test_{}", suite)); - println!("Check compiletest suite={} mode={} ({} -> {})", - suite, mode, &compiler.host, target); - let mut cmd = builder.tool_cmd(Tool::Compiletest); - - // compiletest currently has... a lot of arguments, so let's just pass all - // of them! 
- - cmd.arg("--compile-lib-path").arg(builder.rustc_libdir(compiler)); - cmd.arg("--run-lib-path").arg(builder.sysroot_libdir(compiler, target)); - cmd.arg("--rustc-path").arg(builder.rustc(compiler)); - - // Avoid depending on rustdoc when we don't need it. - if mode == "rustdoc" || mode == "run-make" { - cmd.arg("--rustdoc-path").arg(builder.rustdoc(compiler.host)); - } - - cmd.arg("--src-base").arg(build.src.join("src/test").join(suite)); - cmd.arg("--build-base").arg(testdir(build, compiler.host).join(suite)); - cmd.arg("--stage-id").arg(format!("stage{}-{}", compiler.stage, target)); - cmd.arg("--mode").arg(mode); - cmd.arg("--target").arg(target); - cmd.arg("--host").arg(&*compiler.host); - cmd.arg("--llvm-filecheck").arg(build.llvm_filecheck(build.build)); - - if let Some(ref nodejs) = build.config.nodejs { - cmd.arg("--nodejs").arg(nodejs); - } - - let mut flags = vec!["-Crpath".to_string()]; - if build.config.rust_optimize_tests { - flags.push("-O".to_string()); - } - if build.config.rust_debuginfo_tests { - flags.push("-g".to_string()); - } - flags.push("-Zmiri -Zunstable-options".to_string()); - - if let Some(linker) = build.linker(target) { - cmd.arg("--linker").arg(linker); - } - - let hostflags = flags.clone(); - cmd.arg("--host-rustcflags").arg(hostflags.join(" ")); - - let mut targetflags = flags.clone(); - targetflags.push(format!("-Lnative={}", - build.test_helpers_out(target).display())); - cmd.arg("--target-rustcflags").arg(targetflags.join(" ")); - - cmd.arg("--docck-python").arg(build.python()); - - if build.build.ends_with("apple-darwin") { - // Force /usr/bin/python on macOS for LLDB tests because we're loading the - // LLDB plugin's compiled module which only works with the system python - // (namely not Homebrew-installed python) - cmd.arg("--lldb-python").arg("/usr/bin/python"); - } else { - cmd.arg("--lldb-python").arg(build.python()); - } - - if let Some(ref gdb) = build.config.gdb { - cmd.arg("--gdb").arg(gdb); - } - if let Some(ref 
vers) = build.lldb_version { - cmd.arg("--lldb-version").arg(vers); - } - if let Some(ref dir) = build.lldb_python_dir { - cmd.arg("--lldb-python-dir").arg(dir); - } - - cmd.args(&build.config.cmd.test_args()); - - if build.is_verbose() { - cmd.arg("--verbose"); - } - - if build.config.quiet_tests { - cmd.arg("--quiet"); - } - - if build.config.llvm_enabled { - let llvm_config = build.llvm_config(target); - let llvm_version = output(Command::new(&llvm_config).arg("--version")); - cmd.arg("--llvm-version").arg(llvm_version); - if !build.is_rust_llvm(target) { - cmd.arg("--system-llvm"); - } - - // Only pass correct values for these flags for the `run-make` suite as it - // requires that a C++ compiler was configured which isn't always the case. - if suite == "run-make" { - let llvm_components = output(Command::new(&llvm_config).arg("--components")); - let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags")); - cmd.arg("--cc").arg(build.cc(target)) - .arg("--cxx").arg(build.cxx(target).unwrap()) - .arg("--cflags").arg(build.cflags(target).join(" ")) - .arg("--llvm-components").arg(llvm_components.trim()) - .arg("--llvm-cxxflags").arg(llvm_cxxflags.trim()); - if let Some(ar) = build.ar(target) { - cmd.arg("--ar").arg(ar); - } - } - } - if suite == "run-make" && !build.config.llvm_enabled { - println!("Ignoring run-make test suite as they generally dont work without LLVM"); - return; - } - - if suite != "run-make" { - cmd.arg("--cc").arg("") - .arg("--cxx").arg("") - .arg("--cflags").arg("") - .arg("--llvm-components").arg("") - .arg("--llvm-cxxflags").arg(""); - } - - if build.remote_tested(target) { - cmd.arg("--remote-test-client").arg(builder.tool_exe(Tool::RemoteTestClient)); - } - - // Running a C compiler on MSVC requires a few env vars to be set, to be - // sure to set them here. - // - // Note that if we encounter `PATH` we make sure to append to our own `PATH` - // rather than stomp over it. 
- if target.contains("msvc") { - for &(ref k, ref v) in build.cc[&target].env() { - if k != "PATH" { - cmd.env(k, v); - } - } - } - cmd.env("RUSTC_BOOTSTRAP", "1"); - build.add_rust_test_threads(&mut cmd); - - if build.config.sanitizers { - cmd.env("SANITIZER_SUPPORT", "1"); - } - - if build.config.profiler { - cmd.env("PROFILER_SUPPORT", "1"); - } - - cmd.arg("--adb-path").arg("adb"); - cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR); - if target.contains("android") { - // Assume that cc for this target comes from the android sysroot - cmd.arg("--android-cross-path") - .arg(build.cc(target).parent().unwrap().parent().unwrap()); - } else { - cmd.arg("--android-cross-path").arg(""); - } - - build.ci_env.force_coloring_in_ci(&mut cmd); - - let _time = util::timeit(); - try_run(build, &mut cmd); + let libdir = builder.sysroot_libdir(compiler, target); + add_to_sysroot(&builder, &libdir, &libstd_stamp(builder, compiler, target)); } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Docs { - compiler: Compiler, +pub struct Rustc { + pub target: Interned, } -impl Step for Docs { +impl Step for Rustc { type Output = (); - const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/doc") + run.all_krates("rustc-main") } fn make_run(run: RunConfig) { - run.builder.ensure(Docs { - compiler: run.builder.compiler(run.builder.top_stage, run.host), + run.builder.ensure(Rustc { + target: run.target, }); } - /// Run `rustdoc --test` for all documentation in `src/doc`. + /// Build the compiler. /// - /// This will run all tests in our markdown documentation (e.g. the book) - /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to - /// `compiler`. + /// This will build the compiler for a particular stage of the build using + /// the `compiler` targeting the `target` architecture. The artifacts + /// created will also be linked into the sysroot directory. 
fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = self.compiler; - - builder.ensure(compile::Test { compiler, target: compiler.host }); - - // Do a breadth-first traversal of the `src/doc` directory and just run - // tests for all files that end in `*.md` - let mut stack = vec![build.src.join("src/doc")]; - let _time = util::timeit(); - let _folder = build.fold_output(|| "test_docs"); - - while let Some(p) = stack.pop() { - if p.is_dir() { - stack.extend(t!(p.read_dir()).map(|p| t!(p).path())); - continue - } - - if p.extension().and_then(|s| s.to_str()) != Some("md") { - continue; - } - - // The nostarch directory in the book is for no starch, and so isn't - // guaranteed to build. We don't care if it doesn't build, so skip it. - if p.to_str().map_or(false, |p| p.contains("nostarch")) { - continue; - } - - markdown_test(builder, compiler, &p); - } - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct ErrorIndex { - compiler: Compiler, -} - -impl Step for ErrorIndex { - type Output = (); - const DEFAULT: bool = true; - const ONLY_HOSTS: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/error_index_generator") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(ErrorIndex { - compiler: run.builder.compiler(run.builder.top_stage, run.host), - }); - } - - /// Run the error index generator tool to execute the tests located in the error - /// index. - /// - /// The `error_index_generator` tool lives in `src/tools` and is used to - /// generate a markdown file from the error indexes of the code base which is - /// then passed to `rustdoc --test`. 
- fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = self.compiler; - - builder.ensure(compile::Std { compiler, target: compiler.host }); - - let _folder = build.fold_output(|| "test_error_index"); - println!("Testing error-index stage{}", compiler.stage); - - let dir = testdir(build, compiler.host); - t!(fs::create_dir_all(&dir)); - let output = dir.join("error-index.md"); - - let _time = util::timeit(); - build.run(builder.tool_cmd(Tool::ErrorIndex) - .arg("markdown") - .arg(&output) - .env("CFG_BUILD", &build.build) - .env("RUSTC_ERROR_METADATA_DST", build.extended_error_dir())); - - markdown_test(builder, compiler, &output); - } -} - -fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) { - let build = builder.build; - let mut file = t!(File::open(markdown)); - let mut contents = String::new(); - t!(file.read_to_string(&mut contents)); - if !contents.contains("```") { - return; - } - - println!("doc tests for: {}", markdown.display()); - let mut cmd = builder.rustdoc_cmd(compiler.host); - build.add_rust_test_threads(&mut cmd); - cmd.arg("--test"); - cmd.arg(markdown); - cmd.env("RUSTC_BOOTSTRAP", "1"); - - let test_args = build.config.cmd.test_args().join(" "); - cmd.arg("--test-args").arg(test_args); - - if build.config.quiet_tests { - try_run_quiet(build, &mut cmd); - } else { - try_run(build, &mut cmd); - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct CrateLibrustc { - compiler: Compiler, - target: Interned, - test_kind: TestKind, - krate: Option>, -} - -impl Step for CrateLibrustc { - type Output = (); - const DEFAULT: bool = true; - const ONLY_HOSTS: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.krate("rustc-main") - } - - fn make_run(run: RunConfig) { - let builder = run.builder; - let compiler = builder.compiler(builder.top_stage, run.host); - - let make = |name: Option>| { - let test_kind = if builder.kind == Kind::Test { - TestKind::Test - } else if 
builder.kind == Kind::Bench { - TestKind::Bench - } else { - panic!("unexpected builder.kind in crate: {:?}", builder.kind); - }; - - builder.ensure(CrateLibrustc { - compiler, - target: run.target, - test_kind, - krate: name, - }); - }; - - if let Some(path) = run.path { - for (name, krate_path) in builder.crates("rustc-main") { - if path.ends_with(krate_path) { - make(Some(name)); - } - } - } else { - make(None); - } - } - - - fn run(self, builder: &Builder) { - builder.ensure(Crate { - compiler: self.compiler, - target: self.target, - mode: Mode::Librustc, - test_kind: self.test_kind, - krate: self.krate, - }); - } -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Crate { - compiler: Compiler, - target: Interned, - mode: Mode, - test_kind: TestKind, - krate: Option>, -} - -impl Step for Crate { - type Output = (); - const DEFAULT: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.krate("std").krate("test") - } - - fn make_run(run: RunConfig) { - let builder = run.builder; - let compiler = builder.compiler(builder.top_stage, run.host); - - let make = |mode: Mode, name: Option>| { - let test_kind = if builder.kind == Kind::Test { - TestKind::Test - } else if builder.kind == Kind::Bench { - TestKind::Bench - } else { - panic!("unexpected builder.kind in crate: {:?}", builder.kind); - }; - - builder.ensure(Crate { - compiler, - target: run.target, - mode, - test_kind, - krate: name, - }); - }; - - if let Some(path) = run.path { - for (name, krate_path) in builder.crates("std") { - if path.ends_with(krate_path) { - make(Mode::Libstd, Some(name)); - } - } - for (name, krate_path) in builder.crates("test") { - if path.ends_with(krate_path) { - make(Mode::Libtest, Some(name)); - } - } - } else { - make(Mode::Libstd, None); - make(Mode::Libtest, None); - } - } - - /// Run all unit tests plus documentation tests for an entire crate DAG defined - /// by a `Cargo.toml` - /// - /// This is what runs tests for crates like the standard 
library, compiler, etc. - /// It essentially is the driver for running `cargo test`. - /// - /// Currently this runs all tests for a DAG by passing a bunch of `-p foo` - /// arguments, and those arguments are discovered from `cargo metadata`. - fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = self.compiler; + let compiler = builder.compiler(0, builder.config.build); let target = self.target; - let mode = self.mode; - let test_kind = self.test_kind; - let krate = self.krate; - builder.ensure(compile::Test { compiler, target }); - builder.ensure(RemoteCopyLibs { compiler, target }); + let stage_out = builder.stage_out(compiler, Mode::Rustc); + builder.clear_if_dirty(&stage_out, &libstd_stamp(builder, compiler, target)); + builder.clear_if_dirty(&stage_out, &libtest_stamp(builder, compiler, target)); - // If we're not doing a full bootstrap but we're testing a stage2 version of - // libstd, then what we're actually testing is the libstd produced in - // stage1. Reflect that here by updating the compiler that we're working - // with automatically. 
- let compiler = if build.force_use_stage1(compiler, target) { - builder.compiler(1, compiler.host) - } else { - compiler.clone() - }; + let mut cargo = builder.cargo(compiler, Mode::Rustc, target, "check"); + rustc_cargo(builder, &mut cargo); - let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand()); - let (name, root) = match mode { - Mode::Libstd => { - compile::std_cargo(build, &compiler, target, &mut cargo); - ("libstd", "std") - } - Mode::Libtest => { - compile::test_cargo(build, &compiler, target, &mut cargo); - ("libtest", "test") - } - Mode::Librustc => { - builder.ensure(compile::Rustc { compiler, target }); - compile::rustc_cargo(build, target, &mut cargo); - ("librustc", "rustc-main") - } - _ => panic!("can only test libraries"), - }; - let root = INTERNER.intern_string(String::from(root)); - let _folder = build.fold_output(|| { - format!("{}_stage{}-{}", test_kind.subcommand(), compiler.stage, name) + let _folder = builder.fold_output(|| format!("stage{}-rustc", compiler.stage)); + println!("Checking compiler artifacts ({} -> {})", &compiler.host, target); + run_cargo(builder, + &mut cargo, + &librustc_stamp(builder, compiler, target), + true); + + let libdir = builder.sysroot_libdir(compiler, target); + add_to_sysroot(&builder, &libdir, &librustc_stamp(builder, compiler, target)); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CodegenBackend { + pub target: Interned, + pub backend: Interned, +} + +impl Step for CodegenBackend { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.all_krates("rustc_codegen_llvm") + } + + fn make_run(run: RunConfig) { + let backend = run.builder.config.rust_codegen_backends.get(0); + let backend = backend.cloned().unwrap_or_else(|| { + INTERNER.intern_str("llvm") }); - println!("{} {} stage{} ({} -> {})", test_kind, name, compiler.stage, - &compiler.host, target); + 
run.builder.ensure(CodegenBackend { + target: run.target, + backend, + }); + } - // Build up the base `cargo test` command. - // - // Pass in some standard flags then iterate over the graph we've discovered - // in `cargo metadata` with the maps above and figure out what `-p` - // arguments need to get passed. - if test_kind.subcommand() == "test" && !build.fail_fast { - cargo.arg("--no-fail-fast"); - } + fn run(self, builder: &Builder) { + let compiler = builder.compiler(0, builder.config.build); + let target = self.target; + let backend = self.backend; - match krate { - Some(krate) => { - cargo.arg("-p").arg(krate); - } - None => { - let mut visited = HashSet::new(); - let mut next = vec![root]; - while let Some(name) = next.pop() { - // Right now jemalloc and the sanitizer crates are - // target-specific crate in the sense that it's not present - // on all platforms. Custom skip it here for now, but if we - // add more this probably wants to get more generalized. - // - // Also skip `build_helper` as it's not compiled normally - // for target during the bootstrap and it's just meant to be - // a helper crate, not tested. If it leaks through then it - // ends up messing with various mtime calculations and such. - if !name.contains("jemalloc") && - *name != *"build_helper" && - !(name.starts_with("rustc_") && name.ends_with("san")) && - name != "dlmalloc" { - cargo.arg("-p").arg(&format!("{}:0.0.0", name)); - } - for dep in build.crates[&name].deps.iter() { - if visited.insert(dep) { - next.push(*dep); - } - } - } - } - } + let out_dir = builder.cargo_out(compiler, Mode::Codegen, target); + builder.clear_if_dirty(&out_dir, &librustc_stamp(builder, compiler, target)); - // The tests are going to run with the *target* libraries, so we need to - // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent. - // - // Note that to run the compiler we need to run with the *host* libraries, - // but our wrapper scripts arrange for that to be the case anyway. 
- let mut dylib_path = dylib_path(); - dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target))); - cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap()); + let mut cargo = builder.cargo(compiler, Mode::Codegen, target, "check"); + cargo.arg("--manifest-path").arg(builder.src.join("src/librustc_codegen_llvm/Cargo.toml")); + rustc_cargo_env(builder, &mut cargo); - cargo.arg("--"); - cargo.args(&build.config.cmd.test_args()); + // We won't build LLVM if it's not available, as it shouldn't affect `check`. - if build.config.quiet_tests { - cargo.arg("--quiet"); - } + let _folder = builder.fold_output(|| format!("stage{}-rustc_codegen_llvm", compiler.stage)); + run_cargo(builder, + &mut cargo, + &codegen_backend_stamp(builder, compiler, target, backend), + true); + } +} - let _time = util::timeit(); +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Test { + pub target: Interned, +} - if target.contains("emscripten") { - cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), - build.config.nodejs.as_ref().expect("nodejs not configured")); - } else if target.starts_with("wasm32") { - // On the wasm32-unknown-unknown target we're using LTO which is - // incompatible with `-C prefer-dynamic`, so disable that here - cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); +impl Step for Test { + type Output = (); + const DEFAULT: bool = true; - let node = build.config.nodejs.as_ref() - .expect("nodejs not configured"); - let runner = format!("{} {}/src/etc/wasm32-shim.js", - node.display(), - build.src.display()); - cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner); - } else if build.remote_tested(target) { - cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), - format!("{} run", - builder.tool_exe(Tool::RemoteTestClient).display())); - } - try_run(build, &mut cargo); + fn should_run(run: ShouldRun) -> ShouldRun { + run.all_krates("test") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Test { 
+ target: run.target, + }); + } + + fn run(self, builder: &Builder) { + let compiler = builder.compiler(0, builder.config.build); + let target = self.target; + + let out_dir = builder.stage_out(compiler, Mode::Test); + builder.clear_if_dirty(&out_dir, &libstd_stamp(builder, compiler, target)); + + let mut cargo = builder.cargo(compiler, Mode::Test, target, "check"); + test_cargo(builder, &compiler, target, &mut cargo); + + let _folder = builder.fold_output(|| format!("stage{}-test", compiler.stage)); + println!("Checking test artifacts ({} -> {})", &compiler.host, target); + run_cargo(builder, + &mut cargo, + &libtest_stamp(builder, compiler, target), + true); + + let libdir = builder.sysroot_libdir(compiler, target); + add_to_sysroot(builder, &libdir, &libtest_stamp(builder, compiler, target)); } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct Rustdoc { - host: Interned, - test_kind: TestKind, + pub target: Interned, } impl Step for Rustdoc { type Output = (); - const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/librustdoc").path("src/tools/rustdoc") + run.path("src/tools/rustdoc") } fn make_run(run: RunConfig) { - let builder = run.builder; - - let test_kind = if builder.kind == Kind::Test { - TestKind::Test - } else if builder.kind == Kind::Bench { - TestKind::Bench - } else { - panic!("unexpected builder.kind in crate: {:?}", builder.kind); - }; - - builder.ensure(Rustdoc { - host: run.host, - test_kind, + run.builder.ensure(Rustdoc { + target: run.target, }); } fn run(self, builder: &Builder) { - let build = builder.build; - let test_kind = self.test_kind; - - let compiler = builder.compiler(builder.top_stage, self.host); - let target = compiler.host; - - let mut cargo = tool::prepare_tool_cargo(builder, - compiler, - target, - test_kind.subcommand(), - "src/tools/rustdoc"); - let _folder = build.fold_output(|| { - format!("{}_stage{}-rustdoc", 
test_kind.subcommand(), compiler.stage) - }); - println!("{} rustdoc stage{} ({} -> {})", test_kind, compiler.stage, - &compiler.host, target); - - if test_kind.subcommand() == "test" && !build.fail_fast { - cargo.arg("--no-fail-fast"); - } - - cargo.arg("-p").arg("rustdoc:0.0.0"); - - cargo.arg("--"); - cargo.args(&build.config.cmd.test_args()); - - if build.config.quiet_tests { - cargo.arg("--quiet"); - } - - let _time = util::timeit(); - - try_run(build, &mut cargo); - } -} - -fn envify(s: &str) -> String { - s.chars().map(|c| { - match c { - '-' => '_', - c => c, - } - }).flat_map(|c| c.to_uppercase()).collect() -} - -/// Some test suites are run inside emulators or on remote devices, and most -/// of our test binaries are linked dynamically which means we need to ship -/// the standard library and such to the emulator ahead of time. This step -/// represents this and is a dependency of all test suites. -/// -/// Most of the time this is a noop. For some steps such as shipping data to -/// QEMU we have to build our own tools so we've got conditional dependencies -/// on those programs as well. Note that the remote test client is built for -/// the build target (us) and the server is built for the target. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct RemoteCopyLibs { - compiler: Compiler, - target: Interned, -} - -impl Step for RemoteCopyLibs { - type Output = (); - - fn should_run(run: ShouldRun) -> ShouldRun { - run.never() - } - - fn run(self, builder: &Builder) { - let build = builder.build; - let compiler = self.compiler; + let compiler = builder.compiler(0, builder.config.build); let target = self.target; - if !build.remote_tested(target) { - return - } - builder.ensure(compile::Test { compiler, target }); + let stage_out = builder.stage_out(compiler, Mode::ToolRustc); + builder.clear_if_dirty(&stage_out, &libstd_stamp(builder, compiler, target)); + builder.clear_if_dirty(&stage_out, &libtest_stamp(builder, compiler, target)); + builder.clear_if_dirty(&stage_out, &librustc_stamp(builder, compiler, target)); - println!("REMOTE copy libs to emulator ({})", target); - t!(fs::create_dir_all(build.out.join("tmp"))); + let mut cargo = prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + target, + "check", + "src/tools/rustdoc", + SourceType::InTree); - let server = builder.ensure(tool::RemoteTestServer { compiler, target }); + let _folder = builder.fold_output(|| format!("stage{}-rustdoc", compiler.stage)); + println!("Checking rustdoc artifacts ({} -> {})", &compiler.host, target); + run_cargo(builder, + &mut cargo, + &rustdoc_stamp(builder, compiler, target), + true); - // Spawn the emulator and wait for it to come online - let tool = builder.tool_exe(Tool::RemoteTestClient); - let mut cmd = Command::new(&tool); - cmd.arg("spawn-emulator") - .arg(target) - .arg(&server) - .arg(build.out.join("tmp")); - if let Some(rootfs) = build.qemu_rootfs(target) { - cmd.arg(rootfs); - } - build.run(&mut cmd); + let libdir = builder.sysroot_libdir(compiler, target); + add_to_sysroot(&builder, &libdir, &rustdoc_stamp(builder, compiler, target)); - // Push all our dylibs to the emulator - for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) { 
- let f = t!(f); - let name = f.file_name().into_string().unwrap(); - if util::is_dylib(&name) { - build.run(Command::new(&tool) - .arg("push") - .arg(f.path())); - } - } + builder.ensure(tool::CleanTools { + compiler, + target, + cause: Mode::Rustc, + }); } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Distcheck; - -impl Step for Distcheck { - type Output = (); - const ONLY_BUILD: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("distcheck") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Distcheck); - } - - /// Run "distcheck", a 'make check' from a tarball - fn run(self, builder: &Builder) { - let build = builder.build; - - println!("Distcheck"); - let dir = build.out.join("tmp").join("distcheck"); - let _ = fs::remove_dir_all(&dir); - t!(fs::create_dir_all(&dir)); - - // Guarantee that these are built before we begin running. - builder.ensure(dist::PlainSourceTarball); - builder.ensure(dist::Src); - - let mut cmd = Command::new("tar"); - cmd.arg("-xzf") - .arg(builder.ensure(dist::PlainSourceTarball)) - .arg("--strip-components=1") - .current_dir(&dir); - build.run(&mut cmd); - build.run(Command::new("./configure") - .args(&build.config.configure_args) - .arg("--enable-vendor") - .current_dir(&dir)); - build.run(Command::new(build_helper::make(&build.build)) - .arg("check") - .current_dir(&dir)); - - // Now make sure that rust-src has all of libstd's dependencies - println!("Distcheck rust-src"); - let dir = build.out.join("tmp").join("distcheck-src"); - let _ = fs::remove_dir_all(&dir); - t!(fs::create_dir_all(&dir)); - - let mut cmd = Command::new("tar"); - cmd.arg("-xzf") - .arg(builder.ensure(dist::Src)) - .arg("--strip-components=1") - .current_dir(&dir); - build.run(&mut cmd); - - let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml"); - build.run(Command::new(&build.initial_cargo) - .arg("generate-lockfile") - .arg("--manifest-path") - .arg(&toml) - .current_dir(&dir)); - } +/// 
Cargo's output path for the standard library in a given stage, compiled +/// by a particular compiler for the specified target. +pub fn libstd_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Std, target).join(".libstd-check.stamp") } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub struct Bootstrap; - -impl Step for Bootstrap { - type Output = (); - const DEFAULT: bool = true; - const ONLY_HOSTS: bool = true; - const ONLY_BUILD: bool = true; - - /// Test the build system itself - fn run(self, builder: &Builder) { - let build = builder.build; - let mut cmd = Command::new(&build.initial_cargo); - cmd.arg("test") - .current_dir(build.src.join("src/bootstrap")) - .env("CARGO_TARGET_DIR", build.out.join("bootstrap")) - .env("RUSTC_BOOTSTRAP", "1") - .env("RUSTC", &build.initial_rustc); - if !build.fail_fast { - cmd.arg("--no-fail-fast"); - } - cmd.arg("--").args(&build.config.cmd.test_args()); - try_run(build, &mut cmd); - } - - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/bootstrap") - } - - fn make_run(run: RunConfig) { - run.builder.ensure(Bootstrap); - } +/// Cargo's output path for libtest in a given stage, compiled by a particular +/// compiler for the specified target. +pub fn libtest_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Test, target).join(".libtest-check.stamp") +} + +/// Cargo's output path for librustc in a given stage, compiled by a particular +/// compiler for the specified target. +pub fn librustc_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Rustc, target).join(".librustc-check.stamp") +} + +/// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular +/// compiler for the specified target and backend. 
+fn codegen_backend_stamp(builder: &Builder, + compiler: Compiler, + target: Interned, + backend: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Codegen, target) + .join(format!(".librustc_codegen_llvm-{}-check.stamp", backend)) +} + +/// Cargo's output path for rustdoc in a given stage, compiled by a particular +/// compiler for the specified target. +pub fn rustdoc_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::ToolRustc, target) + .join(".rustdoc-check.stamp") } diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs index c6adfc7ffae4..2f8816d111a9 100644 --- a/src/bootstrap/compile.rs +++ b/src/bootstrap/compile.rs @@ -16,6 +16,7 @@ //! compiler. This module is also responsible for assembling the sysroot as it //! goes along from the output of the previous stage. +use std::borrow::Cow; use std::env; use std::fs::{self, File}; use std::io::BufReader; @@ -23,21 +24,20 @@ use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use std::str; -use std::cmp::min; use build_helper::{output, mtime, up_to_date}; use filetime::FileTime; use serde_json; -use util::{exe, libdir, is_dylib, copy, read_stamp_file, CiEnv}; -use {Build, Compiler, Mode}; +use util::{exe, libdir, is_dylib, CiEnv}; +use {Compiler, Mode}; use native; use tool; use cache::{INTERNER, Interned}; use builder::{Step, RunConfig, ShouldRun, Builder}; -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, PartialEq, Eq, Hash)] pub struct Std { pub target: Interned, pub compiler: Compiler, @@ -48,7 +48,7 @@ impl Step for Std { const DEFAULT: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/libstd").krate("std") + run.all_krates("std") } fn make_run(run: RunConfig) { @@ -64,25 +64,34 @@ impl Step for Std { /// using the `compiler` targeting the `target` architecture. 
The artifacts /// created will also be linked into the sysroot directory. fn run(self, builder: &Builder) { - let build = builder.build; let target = self.target; let compiler = self.compiler; + if builder.config.keep_stage.contains(&compiler.stage) { + builder.info("Warning: Using a potentially old libstd. This may not behave well."); + builder.ensure(StdLink { + compiler: compiler, + target_compiler: compiler, + target, + }); + return; + } + builder.ensure(StartupObjects { compiler, target }); - if build.force_use_stage1(compiler, target) { - let from = builder.compiler(1, build.build); + if builder.force_use_stage1(compiler, target) { + let from = builder.compiler(1, builder.config.build); builder.ensure(Std { compiler: from, target, }); - println!("Uplifting stage1 std ({} -> {})", from.host, target); + builder.info(&format!("Uplifting stage1 std ({} -> {})", from.host, target)); // Even if we're not building std this stage, the new sysroot must // still contain the musl startup objects. 
- if target.contains("musl") && !target.contains("mips") { + if target.contains("musl") { let libdir = builder.sysroot_libdir(compiler, target); - copy_musl_third_party_objects(build, target, &libdir); + copy_musl_third_party_objects(builder, target, &libdir); } builder.ensure(StdLink { @@ -93,25 +102,26 @@ impl Step for Std { return; } - let _folder = build.fold_output(|| format!("stage{}-std", compiler.stage)); - println!("Building stage{} std artifacts ({} -> {})", compiler.stage, - &compiler.host, target); - - if target.contains("musl") && !target.contains("mips") { + if target.contains("musl") { let libdir = builder.sysroot_libdir(compiler, target); - copy_musl_third_party_objects(build, target, &libdir); + copy_musl_third_party_objects(builder, target, &libdir); } - let out_dir = build.stage_out(compiler, Mode::Libstd); - build.clear_if_dirty(&out_dir, &builder.rustc(compiler)); - let mut cargo = builder.cargo(compiler, Mode::Libstd, target, "build"); - std_cargo(build, &compiler, target, &mut cargo); - run_cargo(build, + let out_dir = builder.cargo_out(compiler, Mode::Std, target); + builder.clear_if_dirty(&out_dir, &builder.rustc(compiler)); + let mut cargo = builder.cargo(compiler, Mode::Std, target, "build"); + std_cargo(builder, &compiler, target, &mut cargo); + + let _folder = builder.fold_output(|| format!("stage{}-std", compiler.stage)); + builder.info(&format!("Building stage{} std artifacts ({} -> {})", compiler.stage, + &compiler.host, target)); + run_cargo(builder, &mut cargo, - &libstd_stamp(build, compiler, target)); + &libstd_stamp(builder, compiler, target), + false); builder.ensure(StdLink { - compiler: builder.compiler(compiler.stage, build.build), + compiler: builder.compiler(compiler.stage, builder.config.build), target_compiler: compiler, target, }); @@ -124,58 +134,71 @@ impl Step for Std { /// with a glibc-targeting toolchain, given we have the appropriate startup /// files. 
As those shipped with glibc won't work, copy the ones provided by /// musl so we have them on linux-gnu hosts. -fn copy_musl_third_party_objects(build: &Build, +fn copy_musl_third_party_objects(builder: &Builder, target: Interned, into: &Path) { for &obj in &["crt1.o", "crti.o", "crtn.o"] { - copy(&build.musl_root(target).unwrap().join("lib").join(obj), &into.join(obj)); + builder.copy(&builder.musl_root(target).unwrap().join("lib").join(obj), &into.join(obj)); } } /// Configure cargo to compile the standard library, adding appropriate env vars /// and such. -pub fn std_cargo(build: &Build, +pub fn std_cargo(builder: &Builder, compiler: &Compiler, target: Interned, cargo: &mut Command) { - let mut features = build.std_features(); - if let Some(target) = env::var_os("MACOSX_STD_DEPLOYMENT_TARGET") { cargo.env("MACOSX_DEPLOYMENT_TARGET", target); } - // When doing a local rebuild we tell cargo that we're stage1 rather than - // stage0. This works fine if the local rust and being-built rust have the - // same view of what the default allocator is, but fails otherwise. Since - // we don't have a way to express an allocator preference yet, work - // around the issue in the case of a local rebuild with jemalloc disabled. - if compiler.stage == 0 && build.local_rebuild && !build.config.use_jemalloc { - features.push_str(" force_alloc_system"); - } + if builder.no_std(target) == Some(true) { + // for no-std targets we only compile a few no_std crates + cargo.arg("--features").arg("c mem") + .args(&["-p", "alloc"]) + .args(&["-p", "compiler_builtins"]) + .arg("--manifest-path") + .arg(builder.src.join("src/rustc/compiler_builtins_shim/Cargo.toml")); + } else { + let mut features = builder.std_features(); - if compiler.stage != 0 && build.config.sanitizers { - // This variable is used by the sanitizer runtime crates, e.g. 
- // rustc_lsan, to build the sanitizer runtime from C code - // When this variable is missing, those crates won't compile the C code, - // so we don't set this variable during stage0 where llvm-config is - // missing - // We also only build the runtimes when --enable-sanitizers (or its - // config.toml equivalent) is used - cargo.env("LLVM_CONFIG", build.llvm_config(target)); - } - - cargo.arg("--features").arg(features) - .arg("--manifest-path") - .arg(build.src.join("src/libstd/Cargo.toml")); - - if let Some(target) = build.config.target_config.get(&target) { - if let Some(ref jemalloc) = target.jemalloc { - cargo.env("JEMALLOC_OVERRIDE", jemalloc); + // When doing a local rebuild we tell cargo that we're stage1 rather than + // stage0. This works fine if the local rust and being-built rust have the + // same view of what the default allocator is, but fails otherwise. Since + // we don't have a way to express an allocator preference yet, work + // around the issue in the case of a local rebuild with jemalloc disabled. + if compiler.stage == 0 && builder.local_rebuild && !builder.config.use_jemalloc { + features.push_str(" force_alloc_system"); } - } - if target.contains("musl") { - if let Some(p) = build.musl_root(target) { - cargo.env("MUSL_ROOT", p); + + if compiler.stage != 0 && builder.config.sanitizers { + // This variable is used by the sanitizer runtime crates, e.g. 
+ // rustc_lsan, to build the sanitizer runtime from C code + // When this variable is missing, those crates won't compile the C code, + // so we don't set this variable during stage0 where llvm-config is + // missing + // We also only build the runtimes when --enable-sanitizers (or its + // config.toml equivalent) is used + let llvm_config = builder.ensure(native::Llvm { + target: builder.config.build, + emscripten: false, + }); + cargo.env("LLVM_CONFIG", llvm_config); + } + + cargo.arg("--features").arg(features) + .arg("--manifest-path") + .arg(builder.src.join("src/libstd/Cargo.toml")); + + if let Some(target) = builder.config.target_config.get(&target) { + if let Some(ref jemalloc) = target.jemalloc { + cargo.env("JEMALLOC_OVERRIDE", jemalloc); + } + } + if target.contains("musl") { + if let Some(p) = builder.musl_root(target) { + cargo.env("MUSL_ROOT", p); + } } } } @@ -203,35 +226,34 @@ impl Step for StdLink { /// libraries for `target`, and this method will find them in the relevant /// output directory. fn run(self, builder: &Builder) { - let build = builder.build; let compiler = self.compiler; let target_compiler = self.target_compiler; let target = self.target; - println!("Copying stage{} std from stage{} ({} -> {} / {})", + builder.info(&format!("Copying stage{} std from stage{} ({} -> {} / {})", target_compiler.stage, compiler.stage, &compiler.host, target_compiler.host, - target); + target)); let libdir = builder.sysroot_libdir(target_compiler, target); - add_to_sysroot(&libdir, &libstd_stamp(build, compiler, target)); + add_to_sysroot(builder, &libdir, &libstd_stamp(builder, compiler, target)); - if build.config.sanitizers && compiler.stage != 0 && target == "x86_64-apple-darwin" { + if builder.config.sanitizers && compiler.stage != 0 && target == "x86_64-apple-darwin" { // The sanitizers are only built in stage1 or above, so the dylibs will // be missing in stage0 and causes panic. 
See the `std()` function above // for reason why the sanitizers are not built in stage0. - copy_apple_sanitizer_dylibs(&build.native_dir(target), "osx", &libdir); + copy_apple_sanitizer_dylibs(builder, &builder.native_dir(target), "osx", &libdir); } builder.ensure(tool::CleanTools { compiler: target_compiler, target, - mode: Mode::Libstd, + cause: Mode::Std, }); } } -fn copy_apple_sanitizer_dylibs(native_dir: &Path, platform: &str, into: &Path) { +fn copy_apple_sanitizer_dylibs(builder: &Builder, native_dir: &Path, platform: &str, into: &Path) { for &sanitizer in &["asan", "tsan"] { let filename = format!("libclang_rt.{}_{}_dynamic.dylib", sanitizer, platform); let mut src_path = native_dir.join(sanitizer); @@ -239,7 +261,7 @@ fn copy_apple_sanitizer_dylibs(native_dir: &Path, platform: &str, into: &Path) { src_path.push("lib"); src_path.push("darwin"); src_path.push(&filename); - copy(&src_path, &into.join(filename)); + builder.copy(&src_path, &into.join(filename)); } } @@ -270,15 +292,14 @@ impl Step for StartupObjects { /// files, so we just use the nightly snapshot compiler to always build them (as /// no other compilers are guaranteed to be available). 
fn run(self, builder: &Builder) { - let build = builder.build; let for_compiler = self.compiler; let target = self.target; if !target.contains("pc-windows-gnu") { return } - let src_dir = &build.src.join("src/rtstartup"); - let dst_dir = &build.native_dir(target).join("rtstartup"); + let src_dir = &builder.src.join("src/rtstartup"); + let dst_dir = &builder.native_dir(target).join("rtstartup"); let sysroot_dir = &builder.sysroot_libdir(for_compiler, target); t!(fs::create_dir_all(dst_dir)); @@ -286,8 +307,8 @@ impl Step for StartupObjects { let src_file = &src_dir.join(file.to_string() + ".rs"); let dst_file = &dst_dir.join(file.to_string() + ".o"); if !up_to_date(src_file, dst_file) { - let mut cmd = Command::new(&build.initial_rustc); - build.run(cmd.env("RUSTC_BOOTSTRAP", "1") + let mut cmd = Command::new(&builder.initial_rustc); + builder.run(cmd.env("RUSTC_BOOTSTRAP", "1") .arg("--cfg").arg("stage0") .arg("--target").arg(target) .arg("--emit=obj") @@ -295,19 +316,23 @@ impl Step for StartupObjects { .arg(src_file)); } - copy(dst_file, &sysroot_dir.join(file.to_string() + ".o")); + builder.copy(dst_file, &sysroot_dir.join(file.to_string() + ".o")); } for obj in ["crt2.o", "dllcrt2.o"].iter() { - copy(&compiler_file(build.cc(target), obj), &sysroot_dir.join(obj)); + let src = compiler_file(builder, + builder.cc(target), + target, + obj); + builder.copy(&src, &sysroot_dir.join(obj)); } } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, PartialEq, Eq, Hash)] pub struct Test { - pub compiler: Compiler, pub target: Interned, + pub compiler: Compiler, } impl Step for Test { @@ -315,7 +340,7 @@ impl Step for Test { const DEFAULT: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/libtest").krate("test") + run.all_krates("test") } fn make_run(run: RunConfig) { @@ -331,39 +356,51 @@ impl Step for Test { /// the build using the `compiler` targeting the `target` architecture. 
The /// artifacts created will also be linked into the sysroot directory. fn run(self, builder: &Builder) { - let build = builder.build; let target = self.target; let compiler = self.compiler; builder.ensure(Std { compiler, target }); - if build.force_use_stage1(compiler, target) { - builder.ensure(Test { - compiler: builder.compiler(1, build.build), - target, - }); - println!("Uplifting stage1 test ({} -> {})", &build.build, target); + if builder.config.keep_stage.contains(&compiler.stage) { + builder.info("Warning: Using a potentially old libtest. This may not behave well."); builder.ensure(TestLink { - compiler: builder.compiler(1, build.build), + compiler: compiler, target_compiler: compiler, target, }); return; } - let _folder = build.fold_output(|| format!("stage{}-test", compiler.stage)); - println!("Building stage{} test artifacts ({} -> {})", compiler.stage, - &compiler.host, target); - let out_dir = build.stage_out(compiler, Mode::Libtest); - build.clear_if_dirty(&out_dir, &libstd_stamp(build, compiler, target)); - let mut cargo = builder.cargo(compiler, Mode::Libtest, target, "build"); - test_cargo(build, &compiler, target, &mut cargo); - run_cargo(build, + if builder.force_use_stage1(compiler, target) { + builder.ensure(Test { + compiler: builder.compiler(1, builder.config.build), + target, + }); + builder.info( + &format!("Uplifting stage1 test ({} -> {})", builder.config.build, target)); + builder.ensure(TestLink { + compiler: builder.compiler(1, builder.config.build), + target_compiler: compiler, + target, + }); + return; + } + + let out_dir = builder.cargo_out(compiler, Mode::Test, target); + builder.clear_if_dirty(&out_dir, &libstd_stamp(builder, compiler, target)); + let mut cargo = builder.cargo(compiler, Mode::Test, target, "build"); + test_cargo(builder, &compiler, target, &mut cargo); + + let _folder = builder.fold_output(|| format!("stage{}-test", compiler.stage)); + builder.info(&format!("Building stage{} test artifacts ({} -> {})", 
compiler.stage, + &compiler.host, target)); + run_cargo(builder, &mut cargo, - &libtest_stamp(build, compiler, target)); + &libtest_stamp(builder, compiler, target), + false); builder.ensure(TestLink { - compiler: builder.compiler(compiler.stage, build.build), + compiler: builder.compiler(compiler.stage, builder.config.build), target_compiler: compiler, target, }); @@ -371,7 +408,7 @@ impl Step for Test { } /// Same as `std_cargo`, but for libtest -pub fn test_cargo(build: &Build, +pub fn test_cargo(builder: &Builder, _compiler: &Compiler, _target: Interned, cargo: &mut Command) { @@ -379,7 +416,7 @@ pub fn test_cargo(build: &Build, cargo.env("MACOSX_DEPLOYMENT_TARGET", target); } cargo.arg("--manifest-path") - .arg(build.src.join("src/libtest/Cargo.toml")); + .arg(builder.src.join("src/libtest/Cargo.toml")); } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] @@ -398,30 +435,29 @@ impl Step for TestLink { /// Same as `std_link`, only for libtest fn run(self, builder: &Builder) { - let build = builder.build; let compiler = self.compiler; let target_compiler = self.target_compiler; let target = self.target; - println!("Copying stage{} test from stage{} ({} -> {} / {})", + builder.info(&format!("Copying stage{} test from stage{} ({} -> {} / {})", target_compiler.stage, compiler.stage, &compiler.host, target_compiler.host, - target); - add_to_sysroot(&builder.sysroot_libdir(target_compiler, target), - &libtest_stamp(build, compiler, target)); + target)); + add_to_sysroot(builder, &builder.sysroot_libdir(target_compiler, target), + &libtest_stamp(builder, compiler, target)); builder.ensure(tool::CleanTools { compiler: target_compiler, target, - mode: Mode::Libtest, + cause: Mode::Test, }); } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, PartialEq, Eq, Hash)] pub struct Rustc { - pub compiler: Compiler, pub target: Interned, + pub compiler: Compiler, } impl Step for Rustc { @@ -430,7 +466,7 @@ impl Step for Rustc { 
const DEFAULT: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/librustc").krate("rustc-main") + run.all_krates("rustc-main") } fn make_run(run: RunConfig) { @@ -446,24 +482,30 @@ impl Step for Rustc { /// the `compiler` targeting the `target` architecture. The artifacts /// created will also be linked into the sysroot directory. fn run(self, builder: &Builder) { - let build = builder.build; let compiler = self.compiler; let target = self.target; builder.ensure(Test { compiler, target }); - // Build LLVM for our target. This will implicitly build the host LLVM - // if necessary. - builder.ensure(native::Llvm { target }); - - if build.force_use_stage1(compiler, target) { - builder.ensure(Rustc { - compiler: builder.compiler(1, build.build), + if builder.config.keep_stage.contains(&compiler.stage) { + builder.info("Warning: Using a potentially old librustc. This may not behave well."); + builder.ensure(RustcLink { + compiler: compiler, + target_compiler: compiler, target, }); - println!("Uplifting stage1 rustc ({} -> {})", &build.build, target); + return; + } + + if builder.force_use_stage1(compiler, target) { + builder.ensure(Rustc { + compiler: builder.compiler(1, builder.config.build), + target, + }); + builder.info(&format!("Uplifting stage1 rustc ({} -> {})", + builder.config.build, target)); builder.ensure(RustcLink { - compiler: builder.compiler(1, build.build), + compiler: builder.compiler(1, builder.config.build), target_compiler: compiler, target, }); @@ -472,92 +514,71 @@ impl Step for Rustc { // Ensure that build scripts have a std to link against. 
builder.ensure(Std { - compiler: builder.compiler(self.compiler.stage, build.build), - target: build.build, + compiler: builder.compiler(self.compiler.stage, builder.config.build), + target: builder.config.build, }); + let cargo_out = builder.cargo_out(compiler, Mode::Rustc, target); + builder.clear_if_dirty(&cargo_out, &libstd_stamp(builder, compiler, target)); + builder.clear_if_dirty(&cargo_out, &libtest_stamp(builder, compiler, target)); - let _folder = build.fold_output(|| format!("stage{}-rustc", compiler.stage)); - println!("Building stage{} compiler artifacts ({} -> {})", - compiler.stage, &compiler.host, target); + let mut cargo = builder.cargo(compiler, Mode::Rustc, target, "build"); + rustc_cargo(builder, &mut cargo); - let stage_out = builder.stage_out(compiler, Mode::Librustc); - build.clear_if_dirty(&stage_out, &libstd_stamp(build, compiler, target)); - build.clear_if_dirty(&stage_out, &libtest_stamp(build, compiler, target)); - - let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "build"); - rustc_cargo(build, target, &mut cargo); - run_cargo(build, + let _folder = builder.fold_output(|| format!("stage{}-rustc", compiler.stage)); + builder.info(&format!("Building stage{} compiler artifacts ({} -> {})", + compiler.stage, &compiler.host, target)); + run_cargo(builder, &mut cargo, - &librustc_stamp(build, compiler, target)); + &librustc_stamp(builder, compiler, target), + false); builder.ensure(RustcLink { - compiler: builder.compiler(compiler.stage, build.build), + compiler: builder.compiler(compiler.stage, builder.config.build), target_compiler: compiler, target, }); } } -/// Same as `std_cargo`, but for libtest -pub fn rustc_cargo(build: &Build, - target: Interned, - cargo: &mut Command) { - cargo.arg("--features").arg(build.rustc_features()) +pub fn rustc_cargo(builder: &Builder, cargo: &mut Command) { + cargo.arg("--features").arg(builder.rustc_features()) .arg("--manifest-path") - .arg(build.src.join("src/rustc/Cargo.toml")); + 
.arg(builder.src.join("src/rustc/Cargo.toml")); + rustc_cargo_env(builder, cargo); +} +pub fn rustc_cargo_env(builder: &Builder, cargo: &mut Command) { // Set some configuration variables picked up by build scripts and // the compiler alike - cargo.env("CFG_RELEASE", build.rust_release()) - .env("CFG_RELEASE_CHANNEL", &build.config.channel) - .env("CFG_VERSION", build.rust_version()) - .env("CFG_PREFIX", build.config.prefix.clone().unwrap_or_default()); + cargo.env("CFG_RELEASE", builder.rust_release()) + .env("CFG_RELEASE_CHANNEL", &builder.config.channel) + .env("CFG_VERSION", builder.rust_version()) + .env("CFG_PREFIX", builder.config.prefix.clone().unwrap_or_default()) + .env("CFG_CODEGEN_BACKENDS_DIR", &builder.config.rust_codegen_backends_dir); - let libdir_relative = - build.config.libdir.clone().unwrap_or(PathBuf::from("lib")); + let libdir_relative = builder.config.libdir_relative().unwrap_or(Path::new("lib")); cargo.env("CFG_LIBDIR_RELATIVE", libdir_relative); // If we're not building a compiler with debugging information then remove // these two env vars which would be set otherwise. 
- if build.config.rust_debuginfo_only_std { + if builder.config.rust_debuginfo_only_std { cargo.env_remove("RUSTC_DEBUGINFO"); cargo.env_remove("RUSTC_DEBUGINFO_LINES"); } - if let Some(ref ver_date) = build.rust_info.commit_date() { + if let Some(ref ver_date) = builder.rust_info.commit_date() { cargo.env("CFG_VER_DATE", ver_date); } - if let Some(ref ver_hash) = build.rust_info.sha() { + if let Some(ref ver_hash) = builder.rust_info.sha() { cargo.env("CFG_VER_HASH", ver_hash); } - if !build.unstable_features() { + if !builder.unstable_features() { cargo.env("CFG_DISABLE_UNSTABLE_FEATURES", "1"); } - // Flag that rust llvm is in use - if build.is_rust_llvm(target) { - cargo.env("LLVM_RUSTLLVM", "1"); - } - cargo.env("LLVM_CONFIG", build.llvm_config(target)); - let target_config = build.config.target_config.get(&target); - if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) { - cargo.env("CFG_LLVM_ROOT", s); - } - // Building with a static libstdc++ is only supported on linux right now, - // not for MSVC or macOS - if build.config.llvm_static_stdcpp && - !target.contains("freebsd") && - !target.contains("windows") && - !target.contains("apple") { - cargo.env("LLVM_STATIC_STDCPP", - compiler_file(build.cxx(target).unwrap(), "libstdc++.a")); - } - if build.config.llvm_link_shared { - cargo.env("LLVM_LINK_SHARED", "1"); - } - if let Some(ref s) = build.config.rustc_default_linker { + if let Some(ref s) = builder.config.rustc_default_linker { cargo.env("CFG_DEFAULT_LINKER", s); } - if build.config.rustc_parallel_queries { + if builder.config.rustc_parallel_queries { cargo.env("RUSTC_PARALLEL_QUERIES", "1"); } } @@ -578,47 +599,273 @@ impl Step for RustcLink { /// Same as `std_link`, only for librustc fn run(self, builder: &Builder) { - let build = builder.build; let compiler = self.compiler; let target_compiler = self.target_compiler; let target = self.target; - println!("Copying stage{} rustc from stage{} ({} -> {} / {})", + 
builder.info(&format!("Copying stage{} rustc from stage{} ({} -> {} / {})", target_compiler.stage, compiler.stage, &compiler.host, target_compiler.host, - target); - add_to_sysroot(&builder.sysroot_libdir(target_compiler, target), - &librustc_stamp(build, compiler, target)); + target)); + add_to_sysroot(builder, &builder.sysroot_libdir(target_compiler, target), + &librustc_stamp(builder, compiler, target)); builder.ensure(tool::CleanTools { compiler: target_compiler, target, - mode: Mode::Librustc, + cause: Mode::Rustc, }); } } +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CodegenBackend { + pub compiler: Compiler, + pub target: Interned, + pub backend: Interned, +} + +impl Step for CodegenBackend { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.all_krates("rustc_codegen_llvm") + } + + fn make_run(run: RunConfig) { + let backend = run.builder.config.rust_codegen_backends.get(0); + let backend = backend.cloned().unwrap_or_else(|| { + INTERNER.intern_str("llvm") + }); + run.builder.ensure(CodegenBackend { + compiler: run.builder.compiler(run.builder.top_stage, run.host), + target: run.target, + backend, + }); + } + + fn run(self, builder: &Builder) { + let compiler = self.compiler; + let target = self.target; + let backend = self.backend; + + builder.ensure(Rustc { compiler, target }); + + if builder.config.keep_stage.contains(&compiler.stage) { + builder.info("Warning: Using a potentially old codegen backend. \ + This may not behave well."); + // Codegen backends are linked separately from this step today, so we don't do + // anything here. 
+ return; + } + + if builder.force_use_stage1(compiler, target) { + builder.ensure(CodegenBackend { + compiler: builder.compiler(1, builder.config.build), + target, + backend, + }); + return; + } + + let out_dir = builder.cargo_out(compiler, Mode::Codegen, target); + builder.clear_if_dirty(&out_dir, &librustc_stamp(builder, compiler, target)); + + let mut cargo = builder.cargo(compiler, Mode::Codegen, target, "build"); + cargo.arg("--manifest-path") + .arg(builder.src.join("src/librustc_codegen_llvm/Cargo.toml")); + rustc_cargo_env(builder, &mut cargo); + + let features = build_codegen_backend(&builder, &mut cargo, &compiler, target, backend); + + let tmp_stamp = out_dir.join(".tmp.stamp"); + + let _folder = builder.fold_output(|| format!("stage{}-rustc_codegen_llvm", compiler.stage)); + let files = run_cargo(builder, + cargo.arg("--features").arg(features), + &tmp_stamp, + false); + if builder.config.dry_run { + return; + } + let mut files = files.into_iter() + .filter(|f| { + let filename = f.file_name().unwrap().to_str().unwrap(); + is_dylib(filename) && filename.contains("rustc_codegen_llvm-") + }); + let codegen_backend = match files.next() { + Some(f) => f, + None => panic!("no dylibs built for codegen backend?"), + }; + if let Some(f) = files.next() { + panic!("codegen backend built two dylibs:\n{}\n{}", + codegen_backend.display(), + f.display()); + } + let stamp = codegen_backend_stamp(builder, compiler, target, backend); + let codegen_backend = codegen_backend.to_str().unwrap(); + t!(t!(File::create(&stamp)).write_all(codegen_backend.as_bytes())); + } +} + +pub fn build_codegen_backend(builder: &Builder, + cargo: &mut Command, + compiler: &Compiler, + target: Interned, + backend: Interned) -> String { + let mut features = String::new(); + + match &*backend { + "llvm" | "emscripten" => { + // Build LLVM for our target. This will implicitly build the + // host LLVM if necessary. 
+ let llvm_config = builder.ensure(native::Llvm { + target, + emscripten: backend == "emscripten", + }); + + if backend == "emscripten" { + features.push_str(" emscripten"); + } + + builder.info(&format!("Building stage{} codegen artifacts ({} -> {}, {})", + compiler.stage, &compiler.host, target, backend)); + + // Pass down configuration from the LLVM build into the build of + // librustc_llvm and librustc_codegen_llvm. + if builder.is_rust_llvm(target) { + cargo.env("LLVM_RUSTLLVM", "1"); + } + cargo.env("LLVM_CONFIG", &llvm_config); + if backend != "emscripten" { + let target_config = builder.config.target_config.get(&target); + if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) { + cargo.env("CFG_LLVM_ROOT", s); + } + } + // Building with a static libstdc++ is only supported on linux right now, + // not for MSVC or macOS + if builder.config.llvm_static_stdcpp && + !target.contains("freebsd") && + !target.contains("windows") && + !target.contains("apple") { + let file = compiler_file(builder, + builder.cxx(target).unwrap(), + target, + "libstdc++.a"); + cargo.env("LLVM_STATIC_STDCPP", file); + } + if builder.config.llvm_link_shared { + cargo.env("LLVM_LINK_SHARED", "1"); + } + } + _ => panic!("unknown backend: {}", backend), + } + + features +} + +/// Creates the `codegen-backends` folder for a compiler that's about to be +/// assembled as a complete compiler. +/// +/// This will take the codegen artifacts produced by `compiler` and link them +/// into an appropriate location for `target_compiler` to be a functional +/// compiler. +fn copy_codegen_backends_to_sysroot(builder: &Builder, + compiler: Compiler, + target_compiler: Compiler) { + let target = target_compiler.host; + + // Note that this step is different than all the other `*Link` steps in + // that it's not assembling a bunch of libraries but rather is primarily + // moving the codegen backend into place. 
The codegen backend of rustc is + // not linked into the main compiler by default but is rather dynamically + // selected at runtime for inclusion. + // + // Here we're looking for the output dylib of the `CodegenBackend` step and + // we're copying that into the `codegen-backends` folder. + let dst = builder.sysroot_codegen_backends(target_compiler); + t!(fs::create_dir_all(&dst)); + + if builder.config.dry_run { + return; + } + + for backend in builder.config.rust_codegen_backends.iter() { + let stamp = codegen_backend_stamp(builder, compiler, target, *backend); + let mut dylib = String::new(); + t!(t!(File::open(&stamp)).read_to_string(&mut dylib)); + let file = Path::new(&dylib); + let filename = file.file_name().unwrap().to_str().unwrap(); + // change `librustc_codegen_llvm-xxxxxx.so` to `librustc_codegen_llvm-llvm.so` + let target_filename = { + let dash = filename.find("-").unwrap(); + let dot = filename.find(".").unwrap(); + format!("{}-{}{}", + &filename[..dash], + backend, + &filename[dot..]) + }; + builder.copy(&file, &dst.join(target_filename)); + } +} + +fn copy_lld_to_sysroot(builder: &Builder, + target_compiler: Compiler, + lld_install_root: &Path) { + let target = target_compiler.host; + + let dst = builder.sysroot_libdir(target_compiler, target) + .parent() + .unwrap() + .join("bin"); + t!(fs::create_dir_all(&dst)); + + let src_exe = exe("lld", &target); + let dst_exe = exe("rust-lld", &target); + // we prepend this bin directory to the user PATH when linking Rust binaries. To + // avoid shadowing the system LLD we rename the LLD we provide to `rust-lld`. + builder.copy(&lld_install_root.join("bin").join(&src_exe), &dst.join(&dst_exe)); +} + /// Cargo's output path for the standard library in a given stage, compiled /// by a particular compiler for the specified target. 
-pub fn libstd_stamp(build: &Build, compiler: Compiler, target: Interned) -> PathBuf { - build.cargo_out(compiler, Mode::Libstd, target).join(".libstd.stamp") +pub fn libstd_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Std, target).join(".libstd.stamp") } /// Cargo's output path for libtest in a given stage, compiled by a particular /// compiler for the specified target. -pub fn libtest_stamp(build: &Build, compiler: Compiler, target: Interned) -> PathBuf { - build.cargo_out(compiler, Mode::Libtest, target).join(".libtest.stamp") +pub fn libtest_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Test, target).join(".libtest.stamp") } /// Cargo's output path for librustc in a given stage, compiled by a particular /// compiler for the specified target. -pub fn librustc_stamp(build: &Build, compiler: Compiler, target: Interned) -> PathBuf { - build.cargo_out(compiler, Mode::Librustc, target).join(".librustc.stamp") +pub fn librustc_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Rustc, target).join(".librustc.stamp") } -fn compiler_file(compiler: &Path, file: &str) -> PathBuf { - let out = output(Command::new(compiler) - .arg(format!("-print-file-name={}", file))); +/// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular +/// compiler for the specified target and backend. 
+fn codegen_backend_stamp(builder: &Builder, + compiler: Compiler, + target: Interned, + backend: Interned) -> PathBuf { + builder.cargo_out(compiler, Mode::Codegen, target) + .join(format!(".librustc_codegen_llvm-{}.stamp", backend)) +} + +pub fn compiler_file(builder: &Builder, + compiler: &Path, + target: Interned, + file: &str) -> PathBuf { + let mut cmd = Command::new(compiler); + cmd.args(builder.cflags(target)); + cmd.arg(format!("-print-file-name={}", file)); + let out = output(&mut cmd); PathBuf::from(out.trim()) } @@ -641,12 +888,11 @@ impl Step for Sysroot { /// thinks it is by default, but it's the same as the default for stages /// 1-3. fn run(self, builder: &Builder) -> Interned { - let build = builder.build; let compiler = self.compiler; let sysroot = if compiler.stage == 0 { - build.out.join(&compiler.host).join("stage0-sysroot") + builder.out.join(&compiler.host).join("stage0-sysroot") } else { - build.out.join(&compiler.host).join(format!("stage{}", compiler.stage)) + builder.out.join(&compiler.host).join(format!("stage{}", compiler.stage)) }; let _ = fs::remove_dir_all(&sysroot); t!(fs::create_dir_all(&sysroot)); @@ -654,7 +900,7 @@ impl Step for Sysroot { } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, PartialOrd, Ord, Clone, PartialEq, Eq, Hash)] pub struct Assemble { /// The compiler which we will produce in this step. Assemble itself will /// take care of ensuring that the necessary prerequisites to do so exist, @@ -667,85 +913,99 @@ impl Step for Assemble { type Output = Compiler; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/rustc") + run.never() } /// Prepare a new compiler from the artifacts in `stage` /// /// This will assemble a compiler in `build/$host/stage$stage`. The compiler - /// must have been previously produced by the `stage - 1` build.build + /// must have been previously produced by the `stage - 1` builder.build /// compiler. 
fn run(self, builder: &Builder) -> Compiler { - let build = builder.build; let target_compiler = self.target_compiler; if target_compiler.stage == 0 { - assert_eq!(build.build, target_compiler.host, + assert_eq!(builder.config.build, target_compiler.host, "Cannot obtain compiler for non-native build triple at stage 0"); // The stage 0 compiler for the build triple is always pre-built. return target_compiler; } // Get the compiler that we'll use to bootstrap ourselves. - let build_compiler = if target_compiler.host != build.build { - // Build a compiler for the host platform. We cannot use the stage0 - // compiler for the host platform for this because it doesn't have - // the libraries we need. FIXME: Perhaps we should download those - // libraries? It would make builds faster... - // FIXME: It may be faster if we build just a stage 1 - // compiler and then use that to bootstrap this compiler - // forward. - builder.compiler(target_compiler.stage - 1, build.build) - } else { - // Build the compiler we'll use to build the stage requested. This - // may build more than one compiler (going down to stage 0). - builder.compiler(target_compiler.stage - 1, target_compiler.host) - }; + // + // Note that this is where the recursive nature of the bootstrap + // happens, as this will request the previous stage's compiler on + // downwards to stage 0. + // + // Also note that we're building a compiler for the host platform. We + // only assume that we can run `build` artifacts, which means that to + // produce some other architecture compiler we need to start from + // `build` to get there. + // + // FIXME: Perhaps we should download those libraries? + // It would make builds faster... + // + // FIXME: It may be faster if we build just a stage 1 compiler and then + // use that to bootstrap this compiler forward. 
+ let build_compiler = + builder.compiler(target_compiler.stage - 1, builder.config.build); // Build the libraries for this compiler to link to (i.e., the libraries // it uses at runtime). NOTE: Crates the target compiler compiles don't // link to these. (FIXME: Is that correct? It seems to be correct most // of the time but I think we do link to these for stage2/bin compilers // when not performing a full bootstrap). - if builder.build.config.keep_stage.map_or(false, |s| target_compiler.stage <= s) { - builder.verbose("skipping compilation of compiler due to --keep-stage"); - let compiler = build_compiler; - for stage in 0..min(target_compiler.stage, builder.config.keep_stage.unwrap()) { - let target_compiler = builder.compiler(stage, target_compiler.host); - let target = target_compiler.host; - builder.ensure(StdLink { compiler, target_compiler, target }); - builder.ensure(TestLink { compiler, target_compiler, target }); - builder.ensure(RustcLink { compiler, target_compiler, target }); - } - } else { - builder.ensure(Rustc { compiler: build_compiler, target: target_compiler.host }); + builder.ensure(Rustc { + compiler: build_compiler, + target: target_compiler.host, + }); + for &backend in builder.config.rust_codegen_backends.iter() { + builder.ensure(CodegenBackend { + compiler: build_compiler, + target: target_compiler.host, + backend, + }); } + let lld_install = if builder.config.lld_enabled { + Some(builder.ensure(native::Lld { + target: target_compiler.host, + })) + } else { + None + }; + let stage = target_compiler.stage; let host = target_compiler.host; - println!("Assembling stage{} compiler ({})", stage, host); + builder.info(&format!("Assembling stage{} compiler ({})", stage, host)); // Link in all dylibs to the libdir let sysroot = builder.sysroot(target_compiler); let sysroot_libdir = sysroot.join(libdir(&*host)); t!(fs::create_dir_all(&sysroot_libdir)); let src_libdir = builder.sysroot_libdir(build_compiler, host); - for f in 
t!(fs::read_dir(&src_libdir)).map(|f| t!(f)) { + for f in builder.read_dir(&src_libdir) { let filename = f.file_name().into_string().unwrap(); if is_dylib(&filename) { - copy(&f.path(), &sysroot_libdir.join(&filename)); + builder.copy(&f.path(), &sysroot_libdir.join(&filename)); } } - let out_dir = build.cargo_out(build_compiler, Mode::Librustc, host); + copy_codegen_backends_to_sysroot(builder, + build_compiler, + target_compiler); + if let Some(lld_install) = lld_install { + copy_lld_to_sysroot(builder, target_compiler, &lld_install); + } // Link the compiler binary itself into place - let rustc = out_dir.join(exe("rustc", &*host)); + let out_dir = builder.cargo_out(build_compiler, Mode::Rustc, host); + let rustc = out_dir.join(exe("rustc_binary", &*host)); let bindir = sysroot.join("bin"); t!(fs::create_dir_all(&bindir)); let compiler = builder.rustc(target_compiler); let _ = fs::remove_file(&compiler); - copy(&rustc, &compiler); + builder.copy(&rustc, &compiler); target_compiler } @@ -755,10 +1015,10 @@ impl Step for Assemble { /// /// For a particular stage this will link the file listed in `stamp` into the /// `sysroot_dst` provided. -fn add_to_sysroot(sysroot_dst: &Path, stamp: &Path) { +pub fn add_to_sysroot(builder: &Builder, sysroot_dst: &Path, stamp: &Path) { t!(fs::create_dir_all(&sysroot_dst)); - for path in read_stamp_file(stamp) { - copy(&path, &sysroot_dst.join(path.file_name().unwrap())); + for path in builder.read_stamp_file(stamp) { + builder.copy(&path, &sysroot_dst.join(path.file_name().unwrap())); } } @@ -785,25 +1045,13 @@ fn stderr_isatty() -> bool { } } -fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path) { - // Instruct Cargo to give us json messages on stdout, critically leaving - // stderr as piped so we can get those pretty colors. 
- cargo.arg("--message-format").arg("json") - .stdout(Stdio::piped()); - - if stderr_isatty() && build.ci_env == CiEnv::None { - // since we pass message-format=json to cargo, we need to tell the rustc - // wrapper to give us colored output if necessary. This is because we - // only want Cargo's JSON output, not rustcs. - cargo.env("RUSTC_COLOR", "1"); +pub fn run_cargo(builder: &Builder, cargo: &mut Command, stamp: &Path, is_check: bool) + -> Vec +{ + if builder.config.dry_run { + return Vec::new(); } - build.verbose(&format!("running: {:?}", cargo)); - let mut child = match cargo.spawn() { - Ok(child) => child, - Err(e) => panic!("failed to execute command: {:?}\nerror: {}", cargo, e), - }; - // `target_root_dir` looks like $dir/$target/release let target_root_dir = stamp.parent().unwrap(); // `target_deps_dir` looks like $dir/$target/release/deps @@ -818,29 +1066,21 @@ fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path) { // files we need to probe for later. let mut deps = Vec::new(); let mut toplevel = Vec::new(); - let stdout = BufReader::new(child.stdout.take().unwrap()); - for line in stdout.lines() { - let line = t!(line); - let json: serde_json::Value = if line.starts_with("{") { - t!(serde_json::from_str(&line)) - } else { - // If this was informational, just print it out and continue - println!("{}", line); - continue + let ok = stream_cargo(builder, cargo, &mut |msg| { + let filenames = match msg { + CargoMessage::CompilerArtifact { filenames, .. 
} => filenames, + _ => return, }; - if json["reason"].as_str() != Some("compiler-artifact") { - continue - } - for filename in json["filenames"].as_array().unwrap() { - let filename = filename.as_str().unwrap(); + for filename in filenames { // Skip files like executables if !filename.ends_with(".rlib") && !filename.ends_with(".lib") && - !is_dylib(&filename) { - continue + !is_dylib(&filename) && + !(is_check && filename.ends_with(".rmeta")) { + continue; } - let filename = Path::new(filename); + let filename = Path::new(&*filename); // If this was an output file in the "host dir" we don't actually // worry about it, it's not relevant for us. @@ -873,15 +1113,10 @@ fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path) { toplevel.push((file_stem, extension, expected_len)); } - } + }); - // Make sure Cargo actually succeeded after we read all of its stdout. - let status = t!(child.wait()); - if !status.success() { - panic!("command did not execute successfully: {:?}\n\ - expected success, got: {}", - cargo, - status); + if !ok { + panic!("cargo must succeed"); } // Ok now we need to actually find all the files listed in `toplevel`. 
We've @@ -928,8 +1163,8 @@ fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path) { let mut new_contents = Vec::new(); let mut max = None; let mut max_path = None; - for dep in deps { - let mtime = mtime(&dep); + for dep in deps.iter() { + let mtime = mtime(dep); if Some(mtime) > max { max = Some(mtime); max_path = Some(dep.clone()); @@ -940,14 +1175,80 @@ fn run_cargo(build: &Build, cargo: &mut Command, stamp: &Path) { let max = max.unwrap(); let max_path = max_path.unwrap(); if stamp_contents == new_contents && max <= stamp_mtime { - build.verbose(&format!("not updating {:?}; contents equal and {} <= {}", + builder.verbose(&format!("not updating {:?}; contents equal and {:?} <= {:?}", stamp, max, stamp_mtime)); - return + return deps } if max > stamp_mtime { - build.verbose(&format!("updating {:?} as {:?} changed", stamp, max_path)); + builder.verbose(&format!("updating {:?} as {:?} changed", stamp, max_path)); } else { - build.verbose(&format!("updating {:?} as deps changed", stamp)); + builder.verbose(&format!("updating {:?} as deps changed", stamp)); } t!(t!(File::create(stamp)).write_all(&new_contents)); + deps +} + +pub fn stream_cargo( + builder: &Builder, + cargo: &mut Command, + cb: &mut dyn FnMut(CargoMessage), +) -> bool { + if builder.config.dry_run { + return true; + } + // Instruct Cargo to give us json messages on stdout, critically leaving + // stderr as piped so we can get those pretty colors. + cargo.arg("--message-format").arg("json") + .stdout(Stdio::piped()); + + if stderr_isatty() && builder.ci_env == CiEnv::None && + // if the terminal is reported as dumb, then we don't want to enable color for rustc + env::var_os("TERM").map(|t| t != *"dumb").unwrap_or(true) { + // since we pass message-format=json to cargo, we need to tell the rustc + // wrapper to give us colored output if necessary. This is because we + // only want Cargo's JSON output, not rustcs. 
+ cargo.env("RUSTC_COLOR", "1"); + } + + builder.verbose(&format!("running: {:?}", cargo)); + let mut child = match cargo.spawn() { + Ok(child) => child, + Err(e) => panic!("failed to execute command: {:?}\nerror: {}", cargo, e), + }; + + // Spawn Cargo slurping up its JSON output. We'll start building up the + // `deps` array of all files it generated along with a `toplevel` array of + // files we need to probe for later. + let stdout = BufReader::new(child.stdout.take().unwrap()); + for line in stdout.lines() { + let line = t!(line); + match serde_json::from_str::(&line) { + Ok(msg) => cb(msg), + // If this was informational, just print it out and continue + Err(_) => println!("{}", line) + } + } + + // Make sure Cargo actually succeeded after we read all of its stdout. + let status = t!(child.wait()); + if !status.success() { + eprintln!("command did not execute successfully: {:?}\n\ + expected success, got: {}", + cargo, + status); + } + status.success() +} + +#[derive(Deserialize)] +#[serde(tag = "reason", rename_all = "kebab-case")] +pub enum CargoMessage<'a> { + CompilerArtifact { + package_id: Cow<'a, str>, + features: Vec>, + filenames: Vec>, + }, + BuildScriptExecuted { + package_id: Cow<'a, str>, + } } diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 72e75fddc194..43650332d3b6 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -13,17 +13,16 @@ //! This module implements parsing `config.toml` configuration files to tweak //! how the build runs. 
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::env; -use std::fs::File; +use std::fs::{self, File}; use std::io::prelude::*; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::process; use std::cmp; use num_cpus; use toml; -use util::exe; use cache::{INTERNER, Interned}; use flags::Flags; pub use flags::Subcommand; @@ -45,6 +44,7 @@ pub struct Config { pub ninja: bool, pub verbose: usize, pub submodules: bool, + pub fast_submodules: bool, pub compiler_docs: bool, pub docs: bool, pub locked_deps: bool, @@ -52,19 +52,26 @@ pub struct Config { pub target_config: HashMap, Target>, pub full_bootstrap: bool, pub extended: bool, + pub tools: Option>, pub sanitizers: bool, pub profiler: bool, pub ignore_git: bool, + pub exclude: Vec, + pub rustc_error_format: Option, pub run_host_only: bool, pub on_fail: Option, pub stage: Option, - pub keep_stage: Option, + pub keep_stage: Vec, pub src: PathBuf, pub jobs: Option, pub cmd: Subcommand, pub incremental: bool, + pub dry_run: bool, + + pub deny_warnings: bool, + pub backtrace_on_ice: bool, // llvm codegen options pub llvm_enabled: bool, @@ -74,10 +81,15 @@ pub struct Config { pub llvm_version_check: bool, pub llvm_static_stdcpp: bool, pub llvm_link_shared: bool, + pub llvm_clang_cl: Option, pub llvm_targets: Option, pub llvm_experimental_targets: String, pub llvm_link_jobs: Option, + pub lld_enabled: bool, + pub lldb_enabled: bool, + pub llvm_tools_enabled: bool, + // rust codegen options pub rust_optimize: bool, pub rust_codegen_units: Option, @@ -85,12 +97,16 @@ pub struct Config { pub rust_debuginfo: bool, pub rust_debuginfo_lines: bool, pub rust_debuginfo_only_std: bool, + pub rust_debuginfo_tools: bool, pub rust_rpath: bool, pub rustc_parallel_queries: bool, pub rustc_default_linker: Option, pub rust_optimize_tests: bool, pub rust_debuginfo_tests: bool, pub rust_dist_src: bool, + pub rust_codegen_backends: Vec>, + pub rust_codegen_backends_dir: String, + pub 
rust_verify_llvm_ir: bool, pub build: Interned, pub hosts: Vec>, @@ -106,18 +122,21 @@ pub struct Config { pub debug_jemalloc: bool, pub use_jemalloc: bool, pub backtrace: bool, // support for RUST_BACKTRACE + pub wasm_syscall: bool, // misc pub low_priority: bool, pub channel: String, - pub quiet_tests: bool, + pub verbose_tests: bool, pub test_miri: bool, pub save_toolstates: Option, + pub print_step_timings: bool, // Fallback musl-root for all targets pub musl_root: Option, pub prefix: Option, pub sysconfdir: Option, + pub datadir: Option, pub docdir: Option, pub bindir: Option, pub libdir: Option, @@ -132,6 +151,7 @@ pub struct Config { // These are either the stage0 downloaded binaries or the locally installed ones. pub initial_cargo: PathBuf, pub initial_rustc: PathBuf, + pub out: PathBuf, } /// Per-target configuration stored in the global configuration structure. @@ -148,6 +168,7 @@ pub struct Target { pub crt_static: Option, pub musl_root: Option, pub qemu_rootfs: Option, + pub no_std: bool, } /// Structure of the `config.toml` file that configuration is read from. @@ -181,6 +202,7 @@ struct Build { compiler_docs: Option, docs: Option, submodules: Option, + fast_submodules: Option, gdb: Option, locked_deps: Option, vendor: Option, @@ -188,12 +210,14 @@ struct Build { python: Option, full_bootstrap: Option, extended: Option, + tools: Option>, verbose: Option, sanitizers: Option, profiler: Option, openssl_static: Option, configure_args: Option>, local_rebuild: Option, + print_step_timings: Option, } /// TOML representation of various global install decisions. 
@@ -202,13 +226,13 @@ struct Build { struct Install { prefix: Option, sysconfdir: Option, + datadir: Option, docdir: Option, bindir: Option, libdir: Option, mandir: Option, // standard paths, currently unused - datadir: Option, infodir: Option, localstatedir: Option, } @@ -229,6 +253,7 @@ struct Llvm { experimental_targets: Option, link_jobs: Option, link_shared: Option, + clang_cl: Option } #[derive(Deserialize, Default, Clone)] @@ -263,6 +288,7 @@ struct Rust { debuginfo: Option, debuginfo_lines: Option, debuginfo_only_std: Option, + debuginfo_tools: Option, experimental_parallel_queries: Option, debug_jemalloc: Option, use_jemalloc: Option, @@ -277,9 +303,19 @@ struct Rust { ignore_git: Option, debug: Option, dist_src: Option, - quiet_tests: Option, + verbose_tests: Option, test_miri: Option, + incremental: Option, save_toolstates: Option, + codegen_backends: Option>, + codegen_backends_dir: Option, + wasm_syscall: Option, + lld: Option, + lldb: Option, + llvm_tools: Option, + deny_warnings: Option, + backtrace_on_ice: Option, + verify_llvm_ir: Option, } /// TOML representation of how each build target is configured. @@ -299,9 +335,15 @@ struct TomlTarget { } impl Config { - pub fn parse(args: &[String]) -> Config { - let flags = Flags::parse(&args); - let file = flags.config.clone(); + fn path_from_python(var_key: &str) -> PathBuf { + match env::var_os(var_key) { + // Do not trust paths from Python and normalize them slightly (#49785). 
+ Some(var_val) => Path::new(&var_val).components().collect(), + _ => panic!("expected '{}' to be set", var_key), + } + } + + pub fn default_opts() -> Config { let mut config = Config::default(); config.llvm_enabled = true; config.llvm_optimize = true; @@ -311,6 +353,7 @@ impl Config { config.rust_optimize = true; config.rust_optimize_tests = true; config.submodules = true; + config.fast_submodules = true; config.docs = true; config.rust_rpath = true; config.channel = "dev".to_string(); @@ -318,17 +361,46 @@ impl Config { config.ignore_git = false; config.rust_dist_src = true; config.test_miri = false; + config.rust_codegen_backends = vec![INTERNER.intern_str("llvm")]; + config.rust_codegen_backends_dir = "codegen-backends".to_owned(); + config.deny_warnings = true; + // set by bootstrap.py + config.build = INTERNER.intern_str(&env::var("BUILD").expect("'BUILD' to be set")); + config.src = Config::path_from_python("SRC"); + config.out = Config::path_from_python("BUILD_DIR"); + + config.initial_rustc = Config::path_from_python("RUSTC"); + config.initial_cargo = Config::path_from_python("CARGO"); + + config + } + + pub fn parse(args: &[String]) -> Config { + let flags = Flags::parse(&args); + let file = flags.config.clone(); + let mut config = Config::default_opts(); + config.exclude = flags.exclude; + config.rustc_error_format = flags.rustc_error_format; config.on_fail = flags.on_fail; config.stage = flags.stage; - config.src = flags.src; config.jobs = flags.jobs; config.cmd = flags.cmd; config.incremental = flags.incremental; + config.dry_run = flags.dry_run; config.keep_stage = flags.keep_stage; + if let Some(value) = flags.warnings { + config.deny_warnings = value; + } + + if config.dry_run { + let dir = config.out.join("tmp-dry-run"); + t!(fs::create_dir_all(&dir)); + config.out = dir; + } // If --target was specified but --host wasn't specified, don't run any host-only tests. 
- config.run_host_only = flags.host.is_empty() && !flags.target.is_empty(); + config.run_host_only = !(flags.host.is_empty() && !flags.target.is_empty()); let toml = file.map(|file| { let mut f = t!(File::open(&file)); @@ -345,12 +417,7 @@ impl Config { }).unwrap_or_else(|| TomlConfig::default()); let build = toml.build.clone().unwrap_or(Build::default()); - set(&mut config.build, build.build.clone().map(|x| INTERNER.intern_string(x))); - set(&mut config.build, flags.build); - if config.build.is_empty() { - // set by bootstrap.py - config.build = INTERNER.intern_str(&env::var("BUILD").unwrap()); - } + // set by bootstrap.py config.hosts.push(config.build.clone()); for host in build.host.iter() { let host = INTERNER.intern_str(host); @@ -384,21 +451,25 @@ impl Config { set(&mut config.compiler_docs, build.compiler_docs); set(&mut config.docs, build.docs); set(&mut config.submodules, build.submodules); + set(&mut config.fast_submodules, build.fast_submodules); set(&mut config.locked_deps, build.locked_deps); set(&mut config.vendor, build.vendor); set(&mut config.full_bootstrap, build.full_bootstrap); set(&mut config.extended, build.extended); + config.tools = build.tools; set(&mut config.verbose, build.verbose); set(&mut config.sanitizers, build.sanitizers); set(&mut config.profiler, build.profiler); set(&mut config.openssl_static, build.openssl_static); set(&mut config.configure_args, build.configure_args); set(&mut config.local_rebuild, build.local_rebuild); + set(&mut config.print_step_timings, build.print_step_timings); config.verbose = cmp::max(config.verbose, flags.verbose); if let Some(ref install) = toml.install { config.prefix = install.prefix.clone().map(PathBuf::from); config.sysconfdir = install.sysconfdir.clone().map(PathBuf::from); + config.datadir = install.datadir.clone().map(PathBuf::from); config.docdir = install.docdir.clone().map(PathBuf::from); config.bindir = install.bindir.clone().map(PathBuf::from); config.libdir = 
install.libdir.clone().map(PathBuf::from); @@ -410,6 +481,7 @@ impl Config { let mut llvm_assertions = None; let mut debuginfo_lines = None; let mut debuginfo_only_std = None; + let mut debuginfo_tools = None; let mut debug = None; let mut debug_jemalloc = None; let mut debuginfo = None; @@ -437,8 +509,9 @@ impl Config { set(&mut config.llvm_link_shared, llvm.link_shared); config.llvm_targets = llvm.targets.clone(); config.llvm_experimental_targets = llvm.experimental_targets.clone() - .unwrap_or("WebAssembly".to_string()); + .unwrap_or("WebAssembly;RISCV".to_string()); config.llvm_link_jobs = llvm.link_jobs; + config.llvm_clang_cl = llvm.clang_cl.clone(); } if let Some(ref rust) = toml.rust { @@ -447,6 +520,7 @@ impl Config { debuginfo = rust.debuginfo; debuginfo_lines = rust.debuginfo_lines; debuginfo_only_std = rust.debuginfo_only_std; + debuginfo_tools = rust.debuginfo_tools; optimize = rust.optimize; ignore_git = rust.ignore_git; debug_jemalloc = rust.debug_jemalloc; @@ -458,12 +532,31 @@ impl Config { set(&mut config.backtrace, rust.backtrace); set(&mut config.channel, rust.channel.clone()); set(&mut config.rust_dist_src, rust.dist_src); - set(&mut config.quiet_tests, rust.quiet_tests); + set(&mut config.verbose_tests, rust.verbose_tests); set(&mut config.test_miri, rust.test_miri); + // in the case "false" is set explicitly, do not overwrite the command line args + if let Some(true) = rust.incremental { + config.incremental = true; + } + set(&mut config.wasm_syscall, rust.wasm_syscall); + set(&mut config.lld_enabled, rust.lld); + set(&mut config.lldb_enabled, rust.lldb); + set(&mut config.llvm_tools_enabled, rust.llvm_tools); config.rustc_parallel_queries = rust.experimental_parallel_queries.unwrap_or(false); config.rustc_default_linker = rust.default_linker.clone(); config.musl_root = rust.musl_root.clone().map(PathBuf::from); config.save_toolstates = rust.save_toolstates.clone().map(PathBuf::from); + set(&mut config.deny_warnings, 
rust.deny_warnings.or(flags.warnings)); + set(&mut config.backtrace_on_ice, rust.backtrace_on_ice); + set(&mut config.rust_verify_llvm_ir, rust.verify_llvm_ir); + + if let Some(ref backends) = rust.codegen_backends { + config.rust_codegen_backends = backends.iter() + .map(|s| INTERNER.intern_str(s)) + .collect(); + } + + set(&mut config.rust_codegen_backends_dir, rust.codegen_backends_dir.clone()); match rust.codegen_units { Some(0) => config.rust_codegen_units = Some(num_cpus::get() as u32), @@ -477,13 +570,13 @@ impl Config { let mut target = Target::default(); if let Some(ref s) = cfg.llvm_config { - target.llvm_config = Some(env::current_dir().unwrap().join(s)); + target.llvm_config = Some(config.src.join(s)); } if let Some(ref s) = cfg.jemalloc { - target.jemalloc = Some(env::current_dir().unwrap().join(s)); + target.jemalloc = Some(config.src.join(s)); } if let Some(ref s) = cfg.android_ndk { - target.ndk = Some(env::current_dir().unwrap().join(s)); + target.ndk = Some(config.src.join(s)); } target.cc = cfg.cc.clone().map(PathBuf::from); target.cxx = cfg.cxx.clone().map(PathBuf::from); @@ -504,22 +597,12 @@ impl Config { set(&mut config.rust_dist_src, t.src_tarball); } - let cwd = t!(env::current_dir()); - let out = cwd.join("build"); - - let stage0_root = out.join(&config.build).join("stage0/bin"); - config.initial_rustc = match build.rustc { - Some(s) => PathBuf::from(s), - None => stage0_root.join(exe("rustc", &config.build)), - }; - config.initial_cargo = match build.cargo { - Some(s) => PathBuf::from(s), - None => stage0_root.join(exe("cargo", &config.build)), - }; - // Now that we've reached the end of our configuration, infer the // default values for all options that we haven't otherwise stored yet. 
+ set(&mut config.initial_rustc, build.rustc.map(PathBuf::from)); + set(&mut config.initial_cargo, build.cargo.map(PathBuf::from)); + let default = false; config.llvm_assertions = llvm_assertions.unwrap_or(default); @@ -529,6 +612,7 @@ impl Config { }; config.rust_debuginfo_lines = debuginfo_lines.unwrap_or(default); config.rust_debuginfo_only_std = debuginfo_only_std.unwrap_or(default); + config.rust_debuginfo_tools = debuginfo_tools.unwrap_or(false); let default = debug == Some(true); config.debug_jemalloc = debug_jemalloc.unwrap_or(default); @@ -542,6 +626,17 @@ impl Config { config } + /// Try to find the relative path of `libdir`. + pub fn libdir_relative(&self) -> Option<&Path> { + let libdir = self.libdir.as_ref()?; + if libdir.is_relative() { + Some(libdir) + } else { + // Try to make it relative to the prefix. + libdir.strip_prefix(self.prefix.as_ref()?).ok() + } + } + pub fn verbose(&self) -> bool { self.verbose > 0 } diff --git a/src/bootstrap/configure.py b/src/bootstrap/configure.py index aa9fe459e88c..cf7f78eeba04 100755 --- a/src/bootstrap/configure.py +++ b/src/bootstrap/configure.py @@ -44,9 +44,10 @@ o("debug", "rust.debug", "debug mode; disables optimization unless `--enable-opt o("docs", "build.docs", "build standard library documentation") o("compiler-docs", "build.compiler-docs", "build compiler documentation") o("optimize-tests", "rust.optimize-tests", "build tests with optimizations") +o("experimental-parallel-queries", "rust.experimental-parallel-queries", "build rustc with experimental parallelization") o("test-miri", "rust.test-miri", "run miri's test suite") o("debuginfo-tests", "rust.debuginfo-tests", "build tests with debugger metadata") -o("quiet-tests", "rust.quiet-tests", "enable quieter output when running tests") +o("verbose-tests", "rust.verbose-tests", "enable verbose output when running tests") o("ccache", "llvm.ccache", "invoke gcc/clang via ccache to reuse object files between builds") o("sccache", None, "invoke gcc/clang via 
sccache to reuse object files between builds") o("local-rust", None, "use an installed rustc rather than downloading a snapshot") @@ -65,6 +66,9 @@ o("sanitizers", "build.sanitizers", "build the sanitizer runtimes (asan, lsan, m o("dist-src", "rust.dist-src", "when building tarballs enables building a source tarball") o("cargo-openssl-static", "build.openssl-static", "static openssl in cargo") o("profiler", "build.profiler", "build the profiler runtime") +o("emscripten", None, "compile the emscripten backend as well as LLVM") +o("full-tools", None, "enable all tools") +o("lldb", "rust.lldb", "build lldb") # Optimization and debugging options. These may be overridden by the release # channel, etc. @@ -76,6 +80,7 @@ o("llvm-release-debuginfo", "llvm.release-debuginfo", "build LLVM with debugger o("debuginfo", "rust.debuginfo", "build with debugger metadata") o("debuginfo-lines", "rust.debuginfo-lines", "build with line number debugger metadata") o("debuginfo-only-std", "rust.debuginfo-only-std", "build only libstd with debugging information") +o("debuginfo-tools", "rust.debuginfo-tools", "build extended tools with debugging information") o("debug-jemalloc", "rust.debug-jemalloc", "build jemalloc with --enable-debug --enable-fill") v("save-toolstates", "rust.save-toolstates", "save build and test status of external tools into this file") @@ -116,10 +121,16 @@ v("musl-root-arm", "target.arm-unknown-linux-musleabi.musl-root", "arm-unknown-linux-musleabi install directory") v("musl-root-armhf", "target.arm-unknown-linux-musleabihf.musl-root", "arm-unknown-linux-musleabihf install directory") +v("musl-root-armv5te", "target.armv5te-unknown-linux-musleabi.musl-root", + "armv5te-unknown-linux-musleabi install directory") v("musl-root-armv7", "target.armv7-unknown-linux-musleabihf.musl-root", "armv7-unknown-linux-musleabihf install directory") v("musl-root-aarch64", "target.aarch64-unknown-linux-musl.musl-root", "aarch64-unknown-linux-musl install directory") 
+v("musl-root-mips", "target.mips-unknown-linux-musl.musl-root", + "mips-unknown-linux-musl install directory") +v("musl-root-mipsel", "target.mipsel-unknown-linux-musl.musl-root", + "mipsel-unknown-linux-musl install directory") v("qemu-armhf-rootfs", "target.arm-unknown-linux-gnueabihf.qemu-rootfs", "rootfs in qemu testing, you probably don't want to use this") v("qemu-aarch64-rootfs", "target.aarch64-unknown-linux-gnu.qemu-rootfs", @@ -138,6 +149,7 @@ o("jemalloc", "rust.use-jemalloc", "build liballoc with jemalloc") o("full-bootstrap", "build.full-bootstrap", "build three compilers instead of two") o("extended", "build.extended", "build an extended rust tool set") +v("tools", None, "List of extended tools will be installed") v("build", "build.build", "GNUs ./configure syntax LLVM build triple") v("host", None, "GNUs ./configure syntax LLVM host triples") v("target", None, "GNUs ./configure syntax LLVM target triples") @@ -313,10 +325,19 @@ for key in known_args: set('target.{}.llvm-config'.format(build()), value + '/bin/llvm-config') elif option.name == 'jemalloc-root': set('target.{}.jemalloc'.format(build()), value + '/libjemalloc_pic.a') + elif option.name == 'tools': + set('build.tools', value.split(',')) elif option.name == 'host': set('build.host', value.split(',')) elif option.name == 'target': set('build.target', value.split(',')) + elif option.name == 'emscripten': + set('rust.codegen-backends', ['llvm', 'emscripten']) + elif option.name == 'full-tools': + set('rust.codegen-backends', ['llvm', 'emscripten']) + set('rust.lld', True) + set('rust.llvm-tools', True) + set('build.extended', True) elif option.name == 'option-checking': # this was handled above pass @@ -330,7 +351,7 @@ set('build.configure-args', sys.argv[1:]) # all the various comments and whatnot. # # Note that the `target` section is handled separately as we'll duplicate it -# per configure dtarget, so there's a bit of special handling for that here. 
+# per configured target, so there's a bit of special handling for that here. sections = {} cur_section = None sections[None] = [] @@ -412,7 +433,7 @@ for section_key in config: # order that we read it in. p("") p("writing `config.toml` in current directory") -with open('config.toml', 'w') as f: +with bootstrap.output('config.toml') as f: for section in section_order: if section == 'target': for target in targets: @@ -422,7 +443,7 @@ with open('config.toml', 'w') as f: for line in sections[section]: f.write(line + "\n") -with open('Makefile', 'w') as f: +with bootstrap.output('Makefile') as f: contents = os.path.join(rust_dir, 'src', 'bootstrap', 'mk', 'Makefile.in') contents = open(contents).read() contents = contents.replace("$(CFG_SRC_DIR)", rust_dir + '/') diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index 3d2795f04e27..6e473fae3be5 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -20,46 +20,54 @@ use std::env; use std::fs::{self, File}; -use std::io::{self, Read, Write}; +use std::io::{Read, Write}; use std::path::{PathBuf, Path}; use std::process::{Command, Stdio}; use build_helper::output; -use {Build, Compiler, Mode}; +use {Compiler, Mode, LLVM_TOOLS}; use channel; -use util::{cp_r, libdir, is_dylib, cp_filtered, copy, replace_in_file}; +use util::{libdir, is_dylib, exe}; use builder::{Builder, RunConfig, ShouldRun, Step}; use compile; +use native; use tool::{self, Tool}; use cache::{INTERNER, Interned}; +use time; -pub fn pkgname(build: &Build, component: &str) -> String { +pub fn pkgname(builder: &Builder, component: &str) -> String { if component == "cargo" { - format!("{}-{}", component, build.cargo_package_vers()) + format!("{}-{}", component, builder.cargo_package_vers()) } else if component == "rls" { - format!("{}-{}", component, build.rls_package_vers()) + format!("{}-{}", component, builder.rls_package_vers()) + } else if component == "clippy" { + format!("{}-{}", component, builder.clippy_package_vers()) } else if 
component == "rustfmt" { - format!("{}-{}", component, build.rustfmt_package_vers()) + format!("{}-{}", component, builder.rustfmt_package_vers()) + } else if component == "llvm-tools" { + format!("{}-{}", component, builder.llvm_tools_package_vers()) + } else if component == "lldb" { + format!("{}-{}", component, builder.lldb_package_vers()) } else { assert!(component.starts_with("rust")); - format!("{}-{}", component, build.rust_package_vers()) + format!("{}-{}", component, builder.rust_package_vers()) } } -fn distdir(build: &Build) -> PathBuf { - build.out.join("dist") +fn distdir(builder: &Builder) -> PathBuf { + builder.out.join("dist") } -pub fn tmpdir(build: &Build) -> PathBuf { - build.out.join("tmp/dist") +pub fn tmpdir(builder: &Builder) -> PathBuf { + builder.out.join("tmp/dist") } fn rust_installer(builder: &Builder) -> Command { builder.tool_cmd(Tool::RustInstaller) } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Docs { pub stage: u32, pub host: Interned, @@ -68,7 +76,6 @@ pub struct Docs { impl Step for Docs { type Output = PathBuf; const DEFAULT: bool = true; - const ONLY_BUILD_TARGETS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { run.path("src/doc") @@ -83,26 +90,25 @@ impl Step for Docs { /// Builds the `rust-docs` installer component. 
fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; let host = self.host; - let name = pkgname(build, "rust-docs"); + let name = pkgname(builder, "rust-docs"); - println!("Dist docs ({})", host); - if !build.config.docs { - println!("\tskipping - docs disabled"); - return distdir(build).join(format!("{}-{}.tar.gz", name, host)); + builder.info(&format!("Dist docs ({})", host)); + if !builder.config.docs { + builder.info("\tskipping - docs disabled"); + return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); } builder.default_doc(None); - let image = tmpdir(build).join(format!("{}-{}-image", name, host)); + let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); let dst = image.join("share/doc/rust/html"); t!(fs::create_dir_all(&dst)); - let src = build.out.join(host).join("doc"); - cp_r(&src, &dst); + let src = builder.doc_out(host); + builder.cp_r(&src, &dst); let mut cmd = rust_installer(builder); cmd.arg("generate") @@ -110,24 +116,78 @@ impl Step for Docs { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-documentation-is-installed.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rust-docs") .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--bulk-dirs=share/doc/rust/html"); - build.run(&mut cmd); - t!(fs::remove_dir_all(&image)); + builder.run(&mut cmd); + builder.remove_dir(&image); - // As part of this step, *also* copy the docs directory to a directory which - // buildbot typically uploads. 
- if host == build.build { - let dst = distdir(build).join("doc").join(build.rust_package_vers()); - t!(fs::create_dir_all(&dst)); - cp_r(&src, &dst); + distdir(builder).join(format!("{}-{}.tar.gz", name, host)) + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct RustcDocs { + pub stage: u32, + pub host: Interned, +} + +impl Step for RustcDocs { + type Output = PathBuf; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/librustc") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(RustcDocs { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Builds the `rustc-docs` installer component. + fn run(self, builder: &Builder) -> PathBuf { + let host = self.host; + + let name = pkgname(builder, "rustc-docs"); + + builder.info(&format!("Dist compiler docs ({})", host)); + if !builder.config.compiler_docs { + builder.info("\tskipping - compiler docs disabled"); + return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); } - distdir(build).join(format!("{}-{}.tar.gz", name, host)) + builder.default_doc(None); + + let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); + let _ = fs::remove_dir_all(&image); + + let dst = image.join("share/doc/rust/html"); + t!(fs::create_dir_all(&dst)); + let src = builder.compiler_doc_out(host); + builder.cp_r(&src, &dst); + + let mut cmd = rust_installer(builder); + cmd.arg("generate") + .arg("--product-name=Rustc-Documentation") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=Rustc-documentation-is-installed.") + .arg("--image-dir").arg(&image) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) + .arg(format!("--package-name={}-{}", name, host)) + .arg("--component-name=rustc-docs") + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--bulk-dirs=share/doc/rust/html"); + builder.run(&mut cmd); + builder.remove_dir(&image); + + distdir(builder).join(format!("{}-{}.tar.gz", 
name, host)) } } @@ -151,10 +211,10 @@ fn find_files(files: &[&str], path: &[PathBuf]) -> Vec { } fn make_win_dist( - rust_root: &Path, plat_root: &Path, target_triple: Interned, build: &Build + rust_root: &Path, plat_root: &Path, target_triple: Interned, builder: &Builder ) { //Ask gcc where it keeps its stuff - let mut cmd = Command::new(build.cc(target_triple)); + let mut cmd = Command::new(builder.cc(target_triple)); cmd.arg("-print-search-dirs"); let gcc_out = output(&mut cmd); @@ -202,11 +262,14 @@ fn make_win_dist( "libbcrypt.a", "libcomctl32.a", "libcomdlg32.a", + "libcredui.a", "libcrypt32.a", + "libdbghelp.a", "libgdi32.a", "libimagehlp.a", "libiphlpapi.a", "libkernel32.a", + "libmsimg32.a", "libmsvcrt.a", "libodbc32.a", "libole32.a", @@ -214,8 +277,10 @@ fn make_win_dist( "libopengl32.a", "libpsapi.a", "librpcrt4.a", + "libsecur32.a", "libsetupapi.a", "libshell32.a", + "libsynchronization.a", "libuser32.a", "libuserenv.a", "libuuid.a", @@ -231,43 +296,44 @@ fn make_win_dist( let rustc_dlls = find_files(&rustc_dlls, &bin_path); let target_libs = find_files(&target_libs, &lib_path); - fn copy_to_folder(src: &Path, dest_folder: &Path) { - let file_name = src.file_name().unwrap(); - let dest = dest_folder.join(file_name); - copy(src, &dest); - } - - //Copy runtime dlls next to rustc.exe + // Copy runtime dlls next to rustc.exe let dist_bin_dir = rust_root.join("bin/"); fs::create_dir_all(&dist_bin_dir).expect("creating dist_bin_dir failed"); for src in rustc_dlls { - copy_to_folder(&src, &dist_bin_dir); + builder.copy_to_folder(&src, &dist_bin_dir); } //Copy platform tools to platform-specific bin directory let target_bin_dir = plat_root.join("lib").join("rustlib").join(target_triple).join("bin"); fs::create_dir_all(&target_bin_dir).expect("creating target_bin_dir failed"); for src in target_tools { - copy_to_folder(&src, &target_bin_dir); + builder.copy_to_folder(&src, &target_bin_dir); } + // Warn windows-gnu users that the bundled GCC cannot compile C 
files + builder.create( + &target_bin_dir.join("GCC-WARNING.txt"), + "gcc.exe contained in this folder cannot be used for compiling C files - it is only\ + used as a linker. In order to be able to compile projects containing C code use\ + the GCC provided by MinGW or Cygwin." + ); + //Copy platform libs to platform-specific lib directory let target_lib_dir = plat_root.join("lib").join("rustlib").join(target_triple).join("lib"); fs::create_dir_all(&target_lib_dir).expect("creating target_lib_dir failed"); for src in target_libs { - copy_to_folder(&src, &target_lib_dir); + builder.copy_to_folder(&src, &target_lib_dir); } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Mingw { - host: Interned, + pub host: Interned, } impl Step for Mingw { type Output = Option; const DEFAULT: bool = true; - const ONLY_BUILD_TARGETS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { run.never() @@ -282,16 +348,15 @@ impl Step for Mingw { /// This contains all the bits and pieces to run the MinGW Windows targets /// without any extra installed software (e.g. we bundle gcc, libraries, etc). fn run(self, builder: &Builder) -> Option { - let build = builder.build; let host = self.host; if !host.contains("pc-windows-gnu") { return None; } - println!("Dist mingw ({})", host); - let name = pkgname(build, "rust-mingw"); - let image = tmpdir(build).join(format!("{}-{}-image", name, host)); + builder.info(&format!("Dist mingw ({})", host)); + let name = pkgname(builder, "rust-mingw"); + let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); t!(fs::create_dir_all(&image)); @@ -299,7 +364,7 @@ impl Step for Mingw { // thrown away (this contains the runtime DLLs included in the rustc package // above) and the second argument is where to place all the MinGW components // (which is what we want). 
- make_win_dist(&tmpdir(build), &image, host, &build); + make_win_dist(&tmpdir(builder), &image, host, &builder); let mut cmd = rust_installer(builder); cmd.arg("generate") @@ -307,18 +372,18 @@ impl Step for Mingw { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-MinGW-is-installed.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rust-mingw") .arg("--legacy-manifest-dirs=rustlib,cargo"); - build.run(&mut cmd); + builder.run(&mut cmd); t!(fs::remove_dir_all(&image)); - Some(distdir(build).join(format!("{}-{}.tar.gz", name, host))) + Some(distdir(builder).join(format!("{}-{}.tar.gz", name, host))) } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rustc { pub compiler: Compiler, } @@ -327,7 +392,6 @@ impl Step for Rustc { type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - const ONLY_BUILD_TARGETS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { run.path("src/librustc") @@ -341,15 +405,14 @@ impl Step for Rustc { /// Creates the `rustc` installer component. 
fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; let compiler = self.compiler; let host = self.compiler.host; - println!("Dist rustc stage{} ({})", compiler.stage, compiler.host); - let name = pkgname(build, "rustc"); - let image = tmpdir(build).join(format!("{}-{}-image", name, host)); + builder.info(&format!("Dist rustc stage{} ({})", compiler.stage, host)); + let name = pkgname(builder, "rustc"); + let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); - let overlay = tmpdir(build).join(format!("{}-{}-overlay", name, host)); + let overlay = tmpdir(builder).join(format!("{}-{}-overlay", name, host)); let _ = fs::remove_dir_all(&overlay); // Prepare the rustc "image", what will actually end up getting installed @@ -358,17 +421,17 @@ impl Step for Rustc { // Prepare the overlay which is part of the tarball but won't actually be // installed let cp = |file: &str| { - install(&build.src.join(file), &overlay, 0o644); + builder.install(&builder.src.join(file), &overlay, 0o644); }; cp("COPYRIGHT"); cp("LICENSE-APACHE"); cp("LICENSE-MIT"); cp("README.md"); // tiny morsel of metadata is used by rust-packaging - let version = build.rust_version(); - t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); - if let Some(sha) = build.rust_sha() { - t!(t!(File::create(overlay.join("git-commit-hash"))).write_all(sha.as_bytes())); + let version = builder.rust_version(); + builder.create(&overlay.join("version"), &version); + if let Some(sha) = builder.rust_sha() { + builder.create(&overlay.join("git-commit-hash"), &sha); } // On MinGW we've got a few runtime DLL dependencies that we need to @@ -382,11 +445,11 @@ impl Step for Rustc { // install will *also* include the rust-mingw package, which also needs // licenses, so to be safe we just include it here in all MinGW packages. 
if host.contains("pc-windows-gnu") { - make_win_dist(&image, &tmpdir(build), host, build); + make_win_dist(&image, &tmpdir(builder), host, builder); let dst = image.join("share/doc"); t!(fs::create_dir_all(&dst)); - cp_r(&build.src.join("src/etc/third-party"), &dst); + builder.cp_r(&builder.src.join("src/etc/third-party"), &dst); } // Finally, wrap everything up in a nice tarball! @@ -396,58 +459,79 @@ impl Step for Rustc { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-roll.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg("--non-installed-overlay").arg(&overlay) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rustc") .arg("--legacy-manifest-dirs=rustlib,cargo"); - build.run(&mut cmd); - t!(fs::remove_dir_all(&image)); - t!(fs::remove_dir_all(&overlay)); + builder.run(&mut cmd); + builder.remove_dir(&image); + builder.remove_dir(&overlay); - return distdir(build).join(format!("{}-{}.tar.gz", name, host)); + return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); fn prepare_image(builder: &Builder, compiler: Compiler, image: &Path) { let host = compiler.host; - let build = builder.build; let src = builder.sysroot(compiler); let libdir = libdir(&host); // Copy rustc/rustdoc binaries t!(fs::create_dir_all(image.join("bin"))); - cp_r(&src.join("bin"), &image.join("bin")); + builder.cp_r(&src.join("bin"), &image.join("bin")); - install(&builder.rustdoc(compiler.host), &image.join("bin"), 0o755); + builder.install(&builder.rustdoc(compiler.host), &image.join("bin"), 0o755); // Copy runtime DLLs needed by the compiler if libdir != "bin" { - for entry in t!(src.join(libdir).read_dir()).map(|e| t!(e)) { + for entry in builder.read_dir(&src.join(libdir)) { let name = entry.file_name(); if let Some(s) = name.to_str() { if is_dylib(s) { - 
install(&entry.path(), &image.join(libdir), 0o644); + builder.install(&entry.path(), &image.join(libdir), 0o644); } } } } + // Copy over the codegen backends + let backends_src = builder.sysroot_codegen_backends(compiler); + let backends_rel = backends_src.strip_prefix(&src).unwrap(); + let backends_dst = image.join(&backends_rel); + t!(fs::create_dir_all(&backends_dst)); + builder.cp_r(&backends_src, &backends_dst); + + // Copy over lld if it's there + if builder.config.lld_enabled { + let exe = exe("rust-lld", &compiler.host); + let src = builder.sysroot_libdir(compiler, host) + .parent() + .unwrap() + .join("bin") + .join(&exe); + // for the rationale about this rename check `compile::copy_lld_to_sysroot` + let dst = image.join("lib/rustlib") + .join(&*host) + .join("bin") + .join(&exe); + t!(fs::create_dir_all(&dst.parent().unwrap())); + builder.copy(&src, &dst); + } + // Man pages t!(fs::create_dir_all(image.join("share/man/man1"))); - let man_src = build.src.join("src/doc/man"); + let man_src = builder.src.join("src/doc/man"); let man_dst = image.join("share/man/man1"); - let date_output = output(Command::new("date").arg("+%B %Y")); - let month_year = date_output.trim(); + let month_year = t!(time::strftime("%B %Y", &time::now())); // don't use our `bootstrap::util::{copy, cp_r}`, because those try // to hardlink, and we don't want to edit the source templates - for entry_result in t!(fs::read_dir(man_src)) { - let file_entry = t!(entry_result); + for file_entry in builder.read_dir(&man_src) { let page_src = file_entry.path(); let page_dst = man_dst.join(file_entry.file_name()); t!(fs::copy(&page_src, &page_dst)); // template in month/year and version number - replace_in_file(&page_dst, - &[("", month_year), + builder.replace_in_file(&page_dst, + &[("", &month_year), ("", channel::CFG_RELEASE_NUM)]); } @@ -459,7 +543,7 @@ impl Step for Rustc { // Misc license info let cp = |file: &str| { - install(&build.src.join(file), &image.join("share/doc/rust"), 0o644); 
+ builder.install(&builder.src.join(file), &image.join("share/doc/rust"), 0o644); }; cp("COPYRIGHT"); cp("LICENSE-APACHE"); @@ -491,17 +575,16 @@ impl Step for DebuggerScripts { /// Copies debugger scripts for `target` into the `sysroot` specified. fn run(self, builder: &Builder) { - let build = builder.build; let host = self.host; let sysroot = self.sysroot; let dst = sysroot.join("lib/rustlib/etc"); t!(fs::create_dir_all(&dst)); let cp_debugger_script = |file: &str| { - install(&build.src.join("src/etc/").join(file), &dst, 0o644); + builder.install(&builder.src.join("src/etc/").join(file), &dst, 0o644); }; if host.contains("windows-msvc") { // windbg debugger scripts - install(&build.src.join("src/etc/rust-windbg.cmd"), &sysroot.join("bin"), + builder.install(&builder.src.join("src/etc/rust-windbg.cmd"), &sysroot.join("bin"), 0o755); cp_debugger_script("natvis/intrinsic.natvis"); @@ -511,14 +594,14 @@ impl Step for DebuggerScripts { cp_debugger_script("debugger_pretty_printers_common.py"); // gdb debugger scripts - install(&build.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), + builder.install(&builder.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), 0o755); cp_debugger_script("gdb_load_rust_pretty_printers.py"); cp_debugger_script("gdb_rust_pretty_printing.py"); // lldb debugger scripts - install(&build.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), + builder.install(&builder.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), 0o755); cp_debugger_script("lldb_rust_formatters.py"); @@ -526,7 +609,7 @@ impl Step for DebuggerScripts { } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Std { pub compiler: Compiler, pub target: Interned, @@ -535,7 +618,6 @@ pub struct Std { impl Step for Std { type Output = PathBuf; const DEFAULT: bool = true; - const ONLY_BUILD_TARGETS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { run.path("src/libstd") @@ -543,43 +625,53 
@@ impl Step for Std { fn make_run(run: RunConfig) { run.builder.ensure(Std { - compiler: run.builder.compiler(run.builder.top_stage, run.host), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; let compiler = self.compiler; let target = self.target; - let name = pkgname(build, "rust-std"); - println!("Dist std stage{} ({} -> {})", compiler.stage, &compiler.host, target); + let name = pkgname(builder, "rust-std"); + builder.info(&format!("Dist std stage{} ({} -> {})", + compiler.stage, &compiler.host, target)); // The only true set of target libraries came from the build triple, so // let's reduce redundant work by only producing archives from that host. - if compiler.host != build.build { - println!("\tskipping, not a build host"); - return distdir(build).join(format!("{}-{}.tar.gz", name, target)); + if compiler.host != builder.config.build { + builder.info("\tskipping, not a build host"); + return distdir(builder).join(format!("{}-{}.tar.gz", name, target)); } // We want to package up as many target libraries as possible // for the `rust-std` package, so if this is a host target we // depend on librustc and otherwise we just depend on libtest. 
- if build.hosts.iter().any(|t| t == target) { + if builder.hosts.iter().any(|t| t == target) { builder.ensure(compile::Rustc { compiler, target }); } else { - builder.ensure(compile::Test { compiler, target }); + if builder.no_std(target) == Some(true) { + // the `test` doesn't compile for no-std targets + builder.ensure(compile::Std { compiler, target }); + } else { + builder.ensure(compile::Test { compiler, target }); + } } - let image = tmpdir(build).join(format!("{}-{}-image", name, target)); + let image = tmpdir(builder).join(format!("{}-{}-image", name, target)); let _ = fs::remove_dir_all(&image); let dst = image.join("lib/rustlib").join(target); t!(fs::create_dir_all(&dst)); let mut src = builder.sysroot_libdir(compiler, target).to_path_buf(); src.pop(); // Remove the trailing /lib folder from the sysroot_libdir - cp_r(&src, &dst); + builder.cp_filtered(&src, &dst, &|path| { + let name = path.file_name().and_then(|s| s.to_str()); + name != Some(builder.config.rust_codegen_backends_dir.as_str()) && + name != Some("bin") + + }); let mut cmd = rust_installer(builder); cmd.arg("generate") @@ -587,14 +679,14 @@ impl Step for Std { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=std-is-standing-at-the-ready.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, target)) .arg(format!("--component-name=rust-std-{}", target)) .arg("--legacy-manifest-dirs=rustlib,cargo"); - build.run(&mut cmd); - t!(fs::remove_dir_all(&image)); - distdir(build).join(format!("{}-{}.tar.gz", name, target)) + builder.run(&mut cmd); + builder.remove_dir(&image); + distdir(builder).join(format!("{}-{}.tar.gz", name, target)) } } @@ -607,54 +699,52 @@ pub struct Analysis { impl Step for Analysis { type Output = PathBuf; const DEFAULT: bool = true; - const ONLY_BUILD_TARGETS: bool = 
true; fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("analysis").default_condition(builder.build.config.extended) + run.path("analysis").default_condition(builder.config.extended) } fn make_run(run: RunConfig) { run.builder.ensure(Analysis { - compiler: run.builder.compiler(run.builder.top_stage, run.host), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } /// Creates a tarball of save-analysis metadata, if available. fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; let compiler = self.compiler; let target = self.target; - assert!(build.config.extended); - println!("Dist analysis"); - let name = pkgname(build, "rust-analysis"); + assert!(builder.config.extended); + builder.info("Dist analysis"); + let name = pkgname(builder, "rust-analysis"); - if &compiler.host != build.build { - println!("\tskipping, not a build host"); - return distdir(build).join(format!("{}-{}.tar.gz", name, target)); + if &compiler.host != builder.config.build { + builder.info("\tskipping, not a build host"); + return distdir(builder).join(format!("{}-{}.tar.gz", name, target)); } builder.ensure(Std { compiler, target }); // Package save-analysis from stage1 if not doing a full bootstrap, as the // stage2 artifacts is simply copied from stage1 in that case. 
- let compiler = if build.force_use_stage1(compiler, target) { + let compiler = if builder.force_use_stage1(compiler, target) { builder.compiler(1, compiler.host) } else { compiler.clone() }; - let image = tmpdir(build).join(format!("{}-{}-image", name, target)); + let image = tmpdir(builder).join(format!("{}-{}-image", name, target)); - let src = build.stage_out(compiler, Mode::Libstd) - .join(target).join(build.cargo_dir()).join("deps"); + let src = builder.stage_out(compiler, Mode::Std) + .join(target).join(builder.cargo_dir()).join("deps"); let image_src = src.join("save-analysis"); let dst = image.join("lib/rustlib").join(target).join("analysis"); t!(fs::create_dir_all(&dst)); - println!("image_src: {:?}, dst: {:?}", image_src, dst); - cp_r(&image_src, &dst); + builder.info(&format!("image_src: {:?}, dst: {:?}", image_src, dst)); + builder.cp_r(&image_src, &dst); let mut cmd = rust_installer(builder); cmd.arg("generate") @@ -662,18 +752,18 @@ impl Step for Analysis { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=save-analysis-saved.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, target)) .arg(format!("--component-name=rust-analysis-{}", target)) .arg("--legacy-manifest-dirs=rustlib,cargo"); - build.run(&mut cmd); - t!(fs::remove_dir_all(&image)); - distdir(build).join(format!("{}-{}.tar.gz", name, target)) + builder.run(&mut cmd); + builder.remove_dir(&image); + distdir(builder).join(format!("{}-{}.tar.gz", name, target)) } } -fn copy_src_dirs(build: &Build, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) { +fn copy_src_dirs(builder: &Builder, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) { fn filter_fn(exclude_dirs: &[&str], dir: &str, path: &Path) -> bool { let spath = match path.to_str() { Some(path) => path, @@ -712,11 
+802,12 @@ fn copy_src_dirs(build: &Build, src_dirs: &[&str], exclude_dirs: &[&str], dst_di for item in src_dirs { let dst = &dst_dir.join(item); t!(fs::create_dir_all(dst)); - cp_filtered(&build.src.join(item), dst, &|path| filter_fn(exclude_dirs, item, path)); + builder.cp_filtered( + &builder.src.join(item), dst, &|path| filter_fn(exclude_dirs, item, path)); } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Src; impl Step for Src { @@ -724,8 +815,6 @@ impl Step for Src { type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - const ONLY_BUILD_TARGETS: bool = true; - const ONLY_BUILD: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { run.path("src") @@ -737,11 +826,10 @@ impl Step for Src { /// Creates the `rust-src` installer component fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; - println!("Dist src"); + builder.info("Dist src"); - let name = pkgname(build, "rust-src"); - let image = tmpdir(build).join(format!("{}-image", name)); + let name = pkgname(builder, "rust-src"); + let image = tmpdir(builder).join(format!("{}-image", name)); let _ = fs::remove_dir_all(&image); let dst = image.join("lib/rustlib/src"); @@ -770,7 +858,6 @@ impl Step for Src { "src/librustc_msan", "src/librustc_tsan", "src/libstd", - "src/libstd_unicode", "src/libunwind", "src/rustc/compiler_builtins_shim", "src/rustc/libc_shim", @@ -779,15 +866,16 @@ impl Step for Src { "src/libterm", "src/jemalloc", "src/libprofiler_builtins", + "src/stdsimd", ]; let std_src_dirs_exclude = [ "src/libcompiler_builtins/compiler-rt/test", "src/jemalloc/test/unit", ]; - copy_src_dirs(build, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src); + copy_src_dirs(builder, &std_src_dirs[..], &std_src_dirs_exclude[..], &dst_src); for file in src_files.iter() { - copy(&build.src.join(file), &dst_src.join(file)); + builder.copy(&builder.src.join(file), 
&dst_src.join(file)); } // Create source tarball in rust-installer format @@ -797,21 +885,21 @@ impl Step for Src { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Awesome-Source.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg(format!("--package-name={}", name)) .arg("--component-name=rust-src") .arg("--legacy-manifest-dirs=rustlib,cargo"); - build.run(&mut cmd); + builder.run(&mut cmd); - t!(fs::remove_dir_all(&image)); - distdir(build).join(&format!("{}.tar.gz", name)) + builder.remove_dir(&image); + distdir(builder).join(&format!("{}.tar.gz", name)) } } const CARGO_VENDOR_VERSION: &str = "0.1.4"; -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct PlainSourceTarball; impl Step for PlainSourceTarball { @@ -819,8 +907,6 @@ impl Step for PlainSourceTarball { type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - const ONLY_BUILD_TARGETS: bool = true; - const ONLY_BUILD: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; @@ -833,12 +919,11 @@ impl Step for PlainSourceTarball { /// Creates the plain source tarball fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; - println!("Create plain source tarball"); + builder.info("Create plain source tarball"); // Make sure that the root folder of tarball has the correct name - let plain_name = format!("{}-src", pkgname(build, "rustc")); - let plain_dst_src = tmpdir(build).join(&plain_name); + let plain_name = format!("{}-src", pkgname(builder, "rustc")); + let plain_dst_src = tmpdir(builder).join(&plain_name); let _ = fs::remove_dir_all(&plain_dst_src); t!(fs::create_dir_all(&plain_dst_src)); @@ -858,85 +943,74 @@ impl Step for PlainSourceTarball { "src", ]; - copy_src_dirs(build, 
&src_dirs[..], &[], &plain_dst_src); + copy_src_dirs(builder, &src_dirs[..], &[], &plain_dst_src); // Copy the files normally for item in &src_files { - copy(&build.src.join(item), &plain_dst_src.join(item)); + builder.copy(&builder.src.join(item), &plain_dst_src.join(item)); } // Create the version file - write_file(&plain_dst_src.join("version"), build.rust_version().as_bytes()); - if let Some(sha) = build.rust_sha() { - write_file(&plain_dst_src.join("git-commit-hash"), sha.as_bytes()); + builder.create(&plain_dst_src.join("version"), &builder.rust_version()); + if let Some(sha) = builder.rust_sha() { + builder.create(&plain_dst_src.join("git-commit-hash"), &sha); } // If we're building from git sources, we need to vendor a complete distribution. - if build.rust_info.is_git() { + if builder.rust_info.is_git() { // Get cargo-vendor installed, if it isn't already. let mut has_cargo_vendor = false; - let mut cmd = Command::new(&build.initial_cargo); + let mut cmd = Command::new(&builder.initial_cargo); for line in output(cmd.arg("install").arg("--list")).lines() { has_cargo_vendor |= line.starts_with("cargo-vendor "); } if !has_cargo_vendor { - let mut cmd = Command::new(&build.initial_cargo); - cmd.arg("install") - .arg("--force") + let mut cmd = builder.cargo( + builder.compiler(0, builder.config.build), + Mode::ToolBootstrap, + builder.config.build, + "install" + ); + cmd.arg("--force") .arg("--debug") .arg("--vers").arg(CARGO_VENDOR_VERSION) - .arg("cargo-vendor") - .env("RUSTC", &build.initial_rustc); - build.run(&mut cmd); + .arg("cargo-vendor"); + if let Some(dir) = builder.openssl_install_dir(builder.config.build) { + builder.ensure(native::Openssl { + target: builder.config.build, + }); + cmd.env("OPENSSL_DIR", dir); + } + builder.run(&mut cmd); } // Vendor all Cargo dependencies - let mut cmd = Command::new(&build.initial_cargo); + let mut cmd = Command::new(&builder.initial_cargo); cmd.arg("vendor") .current_dir(&plain_dst_src.join("src")); - 
build.run(&mut cmd); + builder.run(&mut cmd); } // Create plain source tarball - let plain_name = format!("rustc-{}-src", build.rust_package_vers()); - let mut tarball = distdir(build).join(&format!("{}.tar.gz", plain_name)); + let plain_name = format!("rustc-{}-src", builder.rust_package_vers()); + let mut tarball = distdir(builder).join(&format!("{}.tar.gz", plain_name)); tarball.set_extension(""); // strip .gz tarball.set_extension(""); // strip .tar if let Some(dir) = tarball.parent() { - t!(fs::create_dir_all(dir)); + builder.create_dir(&dir); } - println!("running installer"); + builder.info("running installer"); let mut cmd = rust_installer(builder); cmd.arg("tarball") .arg("--input").arg(&plain_name) .arg("--output").arg(&tarball) .arg("--work-dir=.") - .current_dir(tmpdir(build)); - build.run(&mut cmd); - distdir(build).join(&format!("{}.tar.gz", plain_name)) + .current_dir(tmpdir(builder)); + builder.run(&mut cmd); + distdir(builder).join(&format!("{}.tar.gz", plain_name)) } } -fn install(src: &Path, dstdir: &Path, perms: u32) { - let dst = dstdir.join(src.file_name().unwrap()); - t!(fs::create_dir_all(dstdir)); - drop(fs::remove_file(&dst)); - { - let mut s = t!(fs::File::open(&src)); - let mut d = t!(fs::File::create(&dst)); - io::copy(&mut s, &mut d).expect("failed to copy"); - } - chmod(&dst, perms); -} - -#[cfg(unix)] -fn chmod(path: &Path, perms: u32) { - use std::os::unix::fs::*; - t!(fs::set_permissions(path, fs::Permissions::from_mode(perms))); -} -#[cfg(windows)] -fn chmod(_path: &Path, _perms: u32) {} - // We have to run a few shell scripts, which choke quite a bit on both `\` // characters and on `C:\` paths, so normalize both of them away. 
pub fn sanitize_sh(path: &Path) -> String { @@ -956,12 +1030,7 @@ pub fn sanitize_sh(path: &Path) -> String { } } -fn write_file(path: &Path, data: &[u8]) { - let mut vf = t!(fs::File::create(path)); - t!(vf.write_all(data)); -} - -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Cargo { pub stage: u32, pub target: Interned, @@ -969,7 +1038,6 @@ pub struct Cargo { impl Step for Cargo { type Output = PathBuf; - const ONLY_BUILD_TARGETS: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { @@ -984,52 +1052,51 @@ impl Step for Cargo { } fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; let stage = self.stage; let target = self.target; - println!("Dist cargo stage{} ({})", stage, target); - let src = build.src.join("src/tools/cargo"); + builder.info(&format!("Dist cargo stage{} ({})", stage, target)); + let src = builder.src.join("src/tools/cargo"); let etc = src.join("src/etc"); - let release_num = build.release_num("cargo"); - let name = pkgname(build, "cargo"); - let version = builder.cargo_info.version(build, &release_num); + let release_num = builder.release_num("cargo"); + let name = pkgname(builder, "cargo"); + let version = builder.cargo_info.version(builder, &release_num); - let tmp = tmpdir(build); + let tmp = tmpdir(builder); let image = tmp.join("cargo-image"); drop(fs::remove_dir_all(&image)); - t!(fs::create_dir_all(&image)); + builder.create_dir(&image); // Prepare the image directory - t!(fs::create_dir_all(image.join("share/zsh/site-functions"))); - t!(fs::create_dir_all(image.join("etc/bash_completion.d"))); + builder.create_dir(&image.join("share/zsh/site-functions")); + builder.create_dir(&image.join("etc/bash_completion.d")); let cargo = builder.ensure(tool::Cargo { - compiler: builder.compiler(stage, build.build), + compiler: builder.compiler(stage, builder.config.build), target }); - install(&cargo, 
&image.join("bin"), 0o755); + builder.install(&cargo, &image.join("bin"), 0o755); for man in t!(etc.join("man").read_dir()) { let man = t!(man); - install(&man.path(), &image.join("share/man/man1"), 0o644); + builder.install(&man.path(), &image.join("share/man/man1"), 0o644); } - install(&etc.join("_cargo"), &image.join("share/zsh/site-functions"), 0o644); - copy(&etc.join("cargo.bashcomp.sh"), + builder.install(&etc.join("_cargo"), &image.join("share/zsh/site-functions"), 0o644); + builder.copy(&etc.join("cargo.bashcomp.sh"), &image.join("etc/bash_completion.d/cargo")); let doc = image.join("share/doc/cargo"); - install(&src.join("README.md"), &doc, 0o644); - install(&src.join("LICENSE-MIT"), &doc, 0o644); - install(&src.join("LICENSE-APACHE"), &doc, 0o644); - install(&src.join("LICENSE-THIRD-PARTY"), &doc, 0o644); + builder.install(&src.join("README.md"), &doc, 0o644); + builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); + builder.install(&src.join("LICENSE-THIRD-PARTY"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("cargo-overlay"); drop(fs::remove_dir_all(&overlay)); - t!(fs::create_dir_all(&overlay)); - install(&src.join("README.md"), &overlay, 0o644); - install(&src.join("LICENSE-MIT"), &overlay, 0o644); - install(&src.join("LICENSE-APACHE"), &overlay, 0o644); - install(&src.join("LICENSE-THIRD-PARTY"), &overlay, 0o644); - t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); + builder.create_dir(&overlay); + builder.install(&src.join("README.md"), &overlay, 0o644); + builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); + builder.install(&src.join("LICENSE-THIRD-PARTY"), &overlay, 0o644); + builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); @@ -1038,18 +1105,18 @@ impl Step for Cargo { 
.arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-roll.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg("--non-installed-overlay").arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--component-name=cargo") .arg("--legacy-manifest-dirs=rustlib,cargo"); - build.run(&mut cmd); - distdir(build).join(format!("{}-{}.tar.gz", name, target)) + builder.run(&mut cmd); + distdir(builder).join(format!("{}-{}.tar.gz", name, target)) } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rls { pub stage: u32, pub target: Interned, @@ -1057,7 +1124,6 @@ pub struct Rls { impl Step for Rls { type Output = Option; - const ONLY_BUILD_TARGETS: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { @@ -1072,18 +1138,17 @@ impl Step for Rls { } fn run(self, builder: &Builder) -> Option { - let build = builder.build; let stage = self.stage; let target = self.target; - assert!(build.config.extended); + assert!(builder.config.extended); - println!("Dist RLS stage{} ({})", stage, target); - let src = build.src.join("src/tools/rls"); - let release_num = build.release_num("rls"); - let name = pkgname(build, "rls"); - let version = build.rls_info.version(build, &release_num); + builder.info(&format!("Dist RLS stage{} ({})", stage, target)); + let src = builder.src.join("src/tools/rls"); + let release_num = builder.release_num("rls"); + let name = pkgname(builder, "rls"); + let version = builder.rls_info.version(builder, &release_num); - let tmp = tmpdir(build); + let tmp = tmpdir(builder); let image = tmp.join("rls-image"); drop(fs::remove_dir_all(&image)); t!(fs::create_dir_all(&image)); @@ -1092,24 +1157,24 @@ impl Step for Rls { // We expect RLS to build, because we've 
exited this step above if tool // state for RLS isn't testing. let rls = builder.ensure(tool::Rls { - compiler: builder.compiler(stage, build.build), - target + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() }).or_else(|| { println!("Unable to build RLS, skipping dist"); None })?; - install(&rls, &image.join("bin"), 0o755); + builder.install(&rls, &image.join("bin"), 0o755); let doc = image.join("share/doc/rls"); - install(&src.join("README.md"), &doc, 0o644); - install(&src.join("LICENSE-MIT"), &doc, 0o644); - install(&src.join("LICENSE-APACHE"), &doc, 0o644); + builder.install(&src.join("README.md"), &doc, 0o644); + builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("rls-overlay"); drop(fs::remove_dir_all(&overlay)); t!(fs::create_dir_all(&overlay)); - install(&src.join("README.md"), &overlay, 0o644); - install(&src.join("LICENSE-MIT"), &overlay, 0o644); - install(&src.join("LICENSE-APACHE"), &overlay, 0o644); - t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); + builder.install(&src.join("README.md"), &overlay, 0o644); + builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); + builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); @@ -1118,20 +1183,101 @@ impl Step for Rls { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=RLS-ready-to-serve.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg("--non-installed-overlay").arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=rls-preview"); - 
build.run(&mut cmd); - Some(distdir(build).join(format!("{}-{}.tar.gz", name, target))) + builder.run(&mut cmd); + Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Clippy { + pub stage: u32, + pub target: Interned, +} -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +impl Step for Clippy { + type Output = Option; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("clippy") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Clippy { + stage: run.builder.top_stage, + target: run.target, + }); + } + + fn run(self, builder: &Builder) -> Option { + let stage = self.stage; + let target = self.target; + assert!(builder.config.extended); + + builder.info(&format!("Dist clippy stage{} ({})", stage, target)); + let src = builder.src.join("src/tools/clippy"); + let release_num = builder.release_num("clippy"); + let name = pkgname(builder, "clippy"); + let version = builder.clippy_info.version(builder, &release_num); + + let tmp = tmpdir(builder); + let image = tmp.join("clippy-image"); + drop(fs::remove_dir_all(&image)); + builder.create_dir(&image); + + // Prepare the image directory + // We expect clippy to build, because we've exited this step above if tool + // state for clippy isn't testing. 
+ let clippy = builder.ensure(tool::Clippy { + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() + }).or_else(|| { println!("Unable to build clippy, skipping dist"); None })?; + let cargoclippy = builder.ensure(tool::CargoClippy { + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() + }).or_else(|| { println!("Unable to build cargo clippy, skipping dist"); None })?; + + builder.install(&clippy, &image.join("bin"), 0o755); + builder.install(&cargoclippy, &image.join("bin"), 0o755); + let doc = image.join("share/doc/clippy"); + builder.install(&src.join("README.md"), &doc, 0o644); + builder.install(&src.join("LICENSE"), &doc, 0o644); + + // Prepare the overlay + let overlay = tmp.join("clippy-overlay"); + drop(fs::remove_dir_all(&overlay)); + t!(fs::create_dir_all(&overlay)); + builder.install(&src.join("README.md"), &overlay, 0o644); + builder.install(&src.join("LICENSE"), &doc, 0o644); + builder.create(&overlay.join("version"), &version); + + // Generate the installer tarball + let mut cmd = rust_installer(builder); + cmd.arg("generate") + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=clippy-ready-to-serve.") + .arg("--image-dir").arg(&image) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) + .arg("--non-installed-overlay").arg(&overlay) + .arg(format!("--package-name={}-{}", name, target)) + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--component-name=clippy-preview"); + + builder.run(&mut cmd); + Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) + } +} + +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rustfmt { pub stage: u32, pub target: Interned, @@ -1139,7 +1285,6 @@ pub struct Rustfmt { impl Step for Rustfmt { type Output = Option; - const ONLY_BUILD_TARGETS: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun) 
-> ShouldRun { @@ -1154,47 +1299,45 @@ impl Step for Rustfmt { } fn run(self, builder: &Builder) -> Option { - let build = builder.build; let stage = self.stage; let target = self.target; - assert!(build.config.extended); - println!("Dist Rustfmt stage{} ({})", stage, target); - let src = build.src.join("src/tools/rustfmt"); - let release_num = build.release_num("rustfmt"); - let name = pkgname(build, "rustfmt"); - let version = build.rustfmt_info.version(build, &release_num); + builder.info(&format!("Dist Rustfmt stage{} ({})", stage, target)); + let src = builder.src.join("src/tools/rustfmt"); + let release_num = builder.release_num("rustfmt"); + let name = pkgname(builder, "rustfmt"); + let version = builder.rustfmt_info.version(builder, &release_num); - let tmp = tmpdir(build); + let tmp = tmpdir(builder); let image = tmp.join("rustfmt-image"); drop(fs::remove_dir_all(&image)); - t!(fs::create_dir_all(&image)); + builder.create_dir(&image); // Prepare the image directory let rustfmt = builder.ensure(tool::Rustfmt { - compiler: builder.compiler(stage, build.build), - target + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() }).or_else(|| { println!("Unable to build Rustfmt, skipping dist"); None })?; let cargofmt = builder.ensure(tool::Cargofmt { - compiler: builder.compiler(stage, build.build), - target + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() }).or_else(|| { println!("Unable to build Cargofmt, skipping dist"); None })?; - install(&rustfmt, &image.join("bin"), 0o755); - install(&cargofmt, &image.join("bin"), 0o755); + builder.install(&rustfmt, &image.join("bin"), 0o755); + builder.install(&cargofmt, &image.join("bin"), 0o755); let doc = image.join("share/doc/rustfmt"); - install(&src.join("README.md"), &doc, 0o644); - install(&src.join("LICENSE-MIT"), &doc, 0o644); - install(&src.join("LICENSE-APACHE"), &doc, 0o644); + builder.install(&src.join("README.md"), 
&doc, 0o644); + builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("rustfmt-overlay"); drop(fs::remove_dir_all(&overlay)); - t!(fs::create_dir_all(&overlay)); - install(&src.join("README.md"), &overlay, 0o644); - install(&src.join("LICENSE-MIT"), &overlay, 0o644); - install(&src.join("LICENSE-APACHE"), &overlay, 0o644); - t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); + builder.create_dir(&overlay); + builder.install(&src.join("README.md"), &overlay, 0o644); + builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); + builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); @@ -1203,44 +1346,19 @@ impl Step for Rustfmt { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=rustfmt-ready-to-fmt.") .arg("--image-dir").arg(&image) - .arg("--work-dir").arg(&tmpdir(build)) - .arg("--output-dir").arg(&distdir(build)) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) .arg("--non-installed-overlay").arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=rustfmt-preview"); - build.run(&mut cmd); - Some(distdir(build).join(format!("{}-{}.tar.gz", name, target))) + builder.run(&mut cmd); + Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } - -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct DontDistWithMiriEnabled; - -impl Step for DontDistWithMiriEnabled { - type Output = PathBuf; - const DEFAULT: bool = true; - - fn should_run(run: ShouldRun) -> ShouldRun { - let build_miri = run.builder.build.config.test_miri; - run.default_condition(build_miri) - } - - fn make_run(run: RunConfig) { - run.builder.ensure(DontDistWithMiriEnabled); - } - - fn 
run(self, _: &Builder) -> PathBuf { - panic!("Do not distribute with miri enabled.\n\ - The distributed libraries would include all MIR (increasing binary size). - The distributed MIR would include validation statements."); - } -} - - -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Extended { stage: u32, host: Interned, @@ -1250,7 +1368,6 @@ pub struct Extended { impl Step for Extended { type Output = (); const DEFAULT: bool = true; - const ONLY_BUILD_TARGETS: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { @@ -1261,18 +1378,17 @@ impl Step for Extended { fn make_run(run: RunConfig) { run.builder.ensure(Extended { stage: run.builder.top_stage, - host: run.host, + host: run.builder.config.build, target: run.target, }); } /// Creates a combined installer for the specified target in the provided stage. fn run(self, builder: &Builder) { - let build = builder.build; let stage = self.stage; let target = self.target; - println!("Dist extended stage{} ({})", stage, target); + builder.info(&format!("Dist extended stage{} ({})", stage, target)); let rustc_installer = builder.ensure(Rustc { compiler: builder.compiler(stage, target), @@ -1280,6 +1396,9 @@ impl Step for Extended { let cargo_installer = builder.ensure(Cargo { stage, target }); let rustfmt_installer = builder.ensure(Rustfmt { stage, target }); let rls_installer = builder.ensure(Rls { stage, target }); + let llvm_tools_installer = builder.ensure(LlvmTools { stage, target }); + let clippy_installer = builder.ensure(Clippy { stage, target }); + let lldb_installer = builder.ensure(Lldb { target }); let mingw_installer = builder.ensure(Mingw { host: target }); let analysis_installer = builder.ensure(Analysis { compiler: builder.compiler(stage, self.host), @@ -1292,21 +1411,21 @@ impl Step for Extended { target, }); - let tmp = tmpdir(build); + let tmp = tmpdir(builder); let overlay = 
tmp.join("extended-overlay"); - let etc = build.src.join("src/etc/installer"); + let etc = builder.src.join("src/etc/installer"); let work = tmp.join("work"); let _ = fs::remove_dir_all(&overlay); - install(&build.src.join("COPYRIGHT"), &overlay, 0o644); - install(&build.src.join("LICENSE-APACHE"), &overlay, 0o644); - install(&build.src.join("LICENSE-MIT"), &overlay, 0o644); - let version = build.rust_version(); - t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); - if let Some(sha) = build.rust_sha() { - t!(t!(File::create(overlay.join("git-commit-hash"))).write_all(sha.as_bytes())); + builder.install(&builder.src.join("COPYRIGHT"), &overlay, 0o644); + builder.install(&builder.src.join("LICENSE-APACHE"), &overlay, 0o644); + builder.install(&builder.src.join("LICENSE-MIT"), &overlay, 0o644); + let version = builder.rust_version(); + builder.create(&overlay.join("version"), &version); + if let Some(sha) = builder.rust_sha() { + builder.create(&overlay.join("git-commit-hash"), &sha); } - install(&etc.join("README.md"), &overlay, 0o644); + builder.install(&etc.join("README.md"), &overlay, 0o644); // When rust-std package split from rustc, we needed to ensure that during // upgrades rustc was upgraded before rust-std. 
To avoid rustc clobbering @@ -1316,10 +1435,13 @@ impl Step for Extended { tarballs.push(rustc_installer); tarballs.push(cargo_installer); tarballs.extend(rls_installer.clone()); + tarballs.extend(clippy_installer.clone()); tarballs.extend(rustfmt_installer.clone()); + tarballs.extend(llvm_tools_installer.clone()); + tarballs.extend(lldb_installer.clone()); tarballs.push(analysis_installer); tarballs.push(std_installer); - if build.config.docs { + if builder.config.docs { tarballs.push(docs_installer); } if target.contains("pc-windows-gnu") { @@ -1337,19 +1459,19 @@ impl Step for Extended { .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-roll.") .arg("--work-dir").arg(&work) - .arg("--output-dir").arg(&distdir(build)) - .arg(format!("--package-name={}-{}", pkgname(build, "rust"), target)) + .arg("--output-dir").arg(&distdir(builder)) + .arg(format!("--package-name={}-{}", pkgname(builder, "rust"), target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--input-tarballs").arg(input_tarballs) .arg("--non-installed-overlay").arg(&overlay); - build.run(&mut cmd); + builder.run(&mut cmd); let mut license = String::new(); - t!(t!(File::open(build.src.join("COPYRIGHT"))).read_to_string(&mut license)); + license += &builder.read(&builder.src.join("COPYRIGHT")); + license += &builder.read(&builder.src.join("LICENSE-APACHE")); + license += &builder.read(&builder.src.join("LICENSE-MIT")); license.push_str("\n"); - t!(t!(File::open(build.src.join("LICENSE-APACHE"))).read_to_string(&mut license)); license.push_str("\n"); - t!(t!(File::open(build.src.join("LICENSE-MIT"))).read_to_string(&mut license)); let rtf = r"{\rtf1\ansi\deff0{\fonttbl{\f0\fnil\fcharset0 Arial;}}\nowwrap\fs18"; let mut rtf = rtf.to_string(); @@ -1384,6 +1506,9 @@ impl Step for Extended { if rls_installer.is_none() { contents = filter(&contents, "rls"); } + if clippy_installer.is_none() { + contents = filter(&contents, "clippy"); + } if rustfmt_installer.is_none() { contents = 
filter(&contents, "rustfmt"); } @@ -1402,14 +1527,14 @@ impl Step for Extended { .arg("--scripts").arg(pkg.join(component)) .arg("--nopayload") .arg(pkg.join(component).with_extension("pkg")); - build.run(&mut cmd); + builder.run(&mut cmd); }; let prepare = |name: &str| { - t!(fs::create_dir_all(pkg.join(name))); - cp_r(&work.join(&format!("{}-{}", pkgname(build, name), target)), + builder.create_dir(&pkg.join(name)); + builder.cp_r(&work.join(&format!("{}-{}", pkgname(builder, name), target)), &pkg.join(name)); - install(&etc.join("pkg/postinstall"), &pkg.join(name), 0o755); + builder.install(&etc.join("pkg/postinstall"), &pkg.join(name), 0o755); pkgbuild(name); }; prepare("rustc"); @@ -1421,22 +1546,25 @@ impl Step for Extended { if rls_installer.is_some() { prepare("rls"); } + if clippy_installer.is_some() { + prepare("clippy"); + } // create an 'uninstall' package - install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755); + builder.install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755); pkgbuild("uninstall"); - t!(fs::create_dir_all(pkg.join("res"))); - t!(t!(File::create(pkg.join("res/LICENSE.txt"))).write_all(license.as_bytes())); - install(&etc.join("gfx/rust-logo.png"), &pkg.join("res"), 0o644); + builder.create_dir(&pkg.join("res")); + builder.create(&pkg.join("res/LICENSE.txt"), &license); + builder.install(&etc.join("gfx/rust-logo.png"), &pkg.join("res"), 0o644); let mut cmd = Command::new("productbuild"); cmd.arg("--distribution").arg(xform(&etc.join("pkg/Distribution.xml"))) .arg("--resources").arg(pkg.join("res")) - .arg(distdir(build).join(format!("{}-{}.pkg", - pkgname(build, "rust"), + .arg(distdir(builder).join(format!("{}-{}.pkg", + pkgname(builder, "rust"), target))) .arg("--package-path").arg(&pkg); - build.run(&mut cmd); + builder.run(&mut cmd); } if target.contains("windows") { @@ -1444,18 +1572,20 @@ impl Step for Extended { let _ = fs::remove_dir_all(&exe); let prepare = |name: &str| { - 
t!(fs::create_dir_all(exe.join(name))); + builder.create_dir(&exe.join(name)); let dir = if name == "rust-std" || name == "rust-analysis" { format!("{}-{}", name, target) } else if name == "rls" { "rls-preview".to_string() + } else if name == "clippy" { + "clippy-preview".to_string() } else { name.to_string() }; - cp_r(&work.join(&format!("{}-{}", pkgname(build, name), target)) + builder.cp_r(&work.join(&format!("{}-{}", pkgname(builder, name), target)) .join(dir), &exe.join(name)); - t!(fs::remove_file(exe.join(name).join("manifest.in"))); + builder.remove(&exe.join(name).join("manifest.in")); }; prepare("rustc"); prepare("cargo"); @@ -1465,15 +1595,18 @@ impl Step for Extended { if rls_installer.is_some() { prepare("rls"); } + if clippy_installer.is_some() { + prepare("clippy"); + } if target.contains("windows-gnu") { prepare("rust-mingw"); } - install(&xform(&etc.join("exe/rust.iss")), &exe, 0o644); - install(&etc.join("exe/modpath.iss"), &exe, 0o644); - install(&etc.join("exe/upgrade.iss"), &exe, 0o644); - install(&etc.join("gfx/rust-logo.ico"), &exe, 0o644); - t!(t!(File::create(exe.join("LICENSE.txt"))).write_all(license.as_bytes())); + builder.install(&xform(&etc.join("exe/rust.iss")), &exe, 0o644); + builder.install(&etc.join("exe/modpath.iss"), &exe, 0o644); + builder.install(&etc.join("exe/upgrade.iss"), &exe, 0o644); + builder.install(&etc.join("gfx/rust-logo.ico"), &exe, 0o644); + builder.create(&exe.join("LICENSE.txt"), &license); // Generate exe installer let mut cmd = Command::new("iscc"); @@ -1482,10 +1615,10 @@ impl Step for Extended { if target.contains("windows-gnu") { cmd.arg("/dMINGW"); } - add_env(build, &mut cmd, target); - build.run(&mut cmd); - install(&exe.join(format!("{}-{}.exe", pkgname(build, "rust"), target)), - &distdir(build), + add_env(builder, &mut cmd, target); + builder.run(&mut cmd); + builder.install(&exe.join(format!("{}-{}.exe", pkgname(builder, "rust"), target)), + &distdir(builder), 0o755); // Generate msi installer @@ 
-1495,7 +1628,7 @@ impl Step for Extended { let light = wix.join("bin/light.exe"); let heat_flags = ["-nologo", "-gg", "-sfrag", "-srd", "-sreg"]; - build.run(Command::new(&heat) + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rustc") @@ -1504,7 +1637,7 @@ impl Step for Extended { .arg("-dr").arg("Rustc") .arg("-var").arg("var.RustcDir") .arg("-out").arg(exe.join("RustcGroup.wxs"))); - build.run(Command::new(&heat) + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-docs") @@ -1514,7 +1647,7 @@ impl Step for Extended { .arg("-var").arg("var.DocsDir") .arg("-out").arg(exe.join("DocsGroup.wxs")) .arg("-t").arg(etc.join("msi/squash-components.xsl"))); - build.run(Command::new(&heat) + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("cargo") @@ -1524,7 +1657,7 @@ impl Step for Extended { .arg("-var").arg("var.CargoDir") .arg("-out").arg(exe.join("CargoGroup.wxs")) .arg("-t").arg(etc.join("msi/remove-duplicates.xsl"))); - build.run(Command::new(&heat) + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-std") @@ -1534,7 +1667,7 @@ impl Step for Extended { .arg("-var").arg("var.StdDir") .arg("-out").arg(exe.join("StdGroup.wxs"))); if rls_installer.is_some() { - build.run(Command::new(&heat) + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rls") @@ -1545,7 +1678,19 @@ impl Step for Extended { .arg("-out").arg(exe.join("RlsGroup.wxs")) .arg("-t").arg(etc.join("msi/remove-duplicates.xsl"))); } - build.run(Command::new(&heat) + if clippy_installer.is_some() { + builder.run(Command::new(&heat) + .current_dir(&exe) + .arg("dir") + .arg("clippy") + .args(&heat_flags) + .arg("-cg").arg("ClippyGroup") + .arg("-dr").arg("Clippy") + .arg("-var").arg("var.ClippyDir") + .arg("-out").arg(exe.join("ClippyGroup.wxs")) + .arg("-t").arg(etc.join("msi/remove-duplicates.xsl"))); + } + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-analysis") @@ -1556,7 
+1701,7 @@ impl Step for Extended { .arg("-out").arg(exe.join("AnalysisGroup.wxs")) .arg("-t").arg(etc.join("msi/remove-duplicates.xsl"))); if target.contains("windows-gnu") { - build.run(Command::new(&heat) + builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-mingw") @@ -1582,15 +1727,18 @@ impl Step for Extended { .arg("-arch").arg(&arch) .arg("-out").arg(&output) .arg(&input); - add_env(build, &mut cmd, target); + add_env(builder, &mut cmd, target); if rls_installer.is_some() { cmd.arg("-dRlsDir=rls"); } + if clippy_installer.is_some() { + cmd.arg("-dClippyDir=clippy"); + } if target.contains("windows-gnu") { cmd.arg("-dGccDir=rust-mingw"); } - build.run(&mut cmd); + builder.run(&mut cmd); }; candle(&xform(&etc.join("msi/rust.wxs"))); candle(&etc.join("msi/ui.wxs")); @@ -1602,17 +1750,20 @@ impl Step for Extended { if rls_installer.is_some() { candle("RlsGroup.wxs".as_ref()); } + if clippy_installer.is_some() { + candle("ClippyGroup.wxs".as_ref()); + } candle("AnalysisGroup.wxs".as_ref()); if target.contains("windows-gnu") { candle("GccGroup.wxs".as_ref()); } - t!(t!(File::create(exe.join("LICENSE.rtf"))).write_all(rtf.as_bytes())); - install(&etc.join("gfx/banner.bmp"), &exe, 0o644); - install(&etc.join("gfx/dialogbg.bmp"), &exe, 0o644); + builder.create(&exe.join("LICENSE.rtf"), &rtf); + builder.install(&etc.join("gfx/banner.bmp"), &exe, 0o644); + builder.install(&etc.join("gfx/dialogbg.bmp"), &exe, 0o644); - let filename = format!("{}-{}.msi", pkgname(build, "rust"), target); + let filename = format!("{}-{}.msi", pkgname(builder, "rust"), target); let mut cmd = Command::new(&light); cmd.arg("-nologo") .arg("-ext").arg("WixUIExtension") @@ -1631,6 +1782,9 @@ impl Step for Extended { if rls_installer.is_some() { cmd.arg("RlsGroup.wixobj"); } + if clippy_installer.is_some() { + cmd.arg("ClippyGroup.wixobj"); + } if target.contains("windows-gnu") { cmd.arg("GccGroup.wixobj"); @@ -1638,27 +1792,28 @@ impl Step for Extended { // ICE57 wrongly 
complains about the shortcuts cmd.arg("-sice:ICE57"); - build.run(&mut cmd); + builder.run(&mut cmd); - t!(fs::rename(exe.join(&filename), distdir(build).join(&filename))); + if !builder.config.dry_run { + t!(fs::rename(exe.join(&filename), distdir(builder).join(&filename))); + } } } } -fn add_env(build: &Build, cmd: &mut Command, target: Interned) { +fn add_env(builder: &Builder, cmd: &mut Command, target: Interned) { let mut parts = channel::CFG_RELEASE_NUM.split('.'); - cmd.env("CFG_RELEASE_INFO", build.rust_version()) + cmd.env("CFG_RELEASE_INFO", builder.rust_version()) .env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM) - .env("CFG_RELEASE", build.rust_release()) - .env("CFG_PRERELEASE_VERSION", channel::CFG_PRERELEASE_VERSION) + .env("CFG_RELEASE", builder.rust_release()) .env("CFG_VER_MAJOR", parts.next().unwrap()) .env("CFG_VER_MINOR", parts.next().unwrap()) .env("CFG_VER_PATCH", parts.next().unwrap()) .env("CFG_VER_BUILD", "0") // just needed to build - .env("CFG_PACKAGE_VERS", build.rust_package_vers()) - .env("CFG_PACKAGE_NAME", pkgname(build, "rust")) + .env("CFG_PACKAGE_VERS", builder.rust_package_vers()) + .env("CFG_PACKAGE_NAME", pkgname(builder, "rust")) .env("CFG_BUILD", target) - .env("CFG_CHANNEL", &build.config.channel); + .env("CFG_CHANNEL", &builder.config.channel); if target.contains("windows-gnu") { cmd.env("CFG_MINGW", "1") @@ -1675,14 +1830,12 @@ fn add_env(build: &Build, cmd: &mut Command, target: Interned) { } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct HashSign; impl Step for HashSign { type Output = (); - const ONLY_BUILD_TARGETS: bool = true; const ONLY_HOSTS: bool = true; - const ONLY_BUILD: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { run.path("hash-and-sign") @@ -1693,15 +1846,17 @@ impl Step for HashSign { } fn run(self, builder: &Builder) { - let build = builder.build; let mut cmd = builder.tool_cmd(Tool::BuildManifest); - let 
sign = build.config.dist_sign_folder.as_ref().unwrap_or_else(|| { + if builder.config.dry_run { + return; + } + let sign = builder.config.dist_sign_folder.as_ref().unwrap_or_else(|| { panic!("\n\nfailed to specify `dist.sign-folder` in `config.toml`\n\n") }); - let addr = build.config.dist_upload_addr.as_ref().unwrap_or_else(|| { + let addr = builder.config.dist_upload_addr.as_ref().unwrap_or_else(|| { panic!("\n\nfailed to specify `dist.upload-addr` in `config.toml`\n\n") }); - let file = build.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| { + let file = builder.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| { panic!("\n\nfailed to specify `dist.gpg-password-file` in `config.toml`\n\n") }); let mut pass = String::new(); @@ -1710,15 +1865,18 @@ impl Step for HashSign { let today = output(Command::new("date").arg("+%Y-%m-%d")); cmd.arg(sign); - cmd.arg(distdir(build)); + cmd.arg(distdir(builder)); cmd.arg(today.trim()); - cmd.arg(build.rust_package_vers()); - cmd.arg(build.package_vers(&build.release_num("cargo"))); - cmd.arg(build.package_vers(&build.release_num("rls"))); - cmd.arg(build.package_vers(&build.release_num("rustfmt"))); + cmd.arg(builder.rust_package_vers()); + cmd.arg(builder.package_vers(&builder.release_num("cargo"))); + cmd.arg(builder.package_vers(&builder.release_num("rls"))); + cmd.arg(builder.package_vers(&builder.release_num("clippy"))); + cmd.arg(builder.package_vers(&builder.release_num("rustfmt"))); + cmd.arg(builder.llvm_tools_package_vers()); + cmd.arg(builder.lldb_package_vers()); cmd.arg(addr); - t!(fs::create_dir_all(distdir(build))); + builder.create_dir(&distdir(builder)); let mut child = t!(cmd.stdin(Stdio::piped()).spawn()); t!(child.stdin.take().unwrap().write_all(pass.as_bytes())); @@ -1726,3 +1884,205 @@ impl Step for HashSign { assert!(status.success()); } } + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct LlvmTools { + pub stage: u32, + pub target: Interned, +} + +impl Step for LlvmTools { + 
type Output = Option; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("llvm-tools") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(LlvmTools { + stage: run.builder.top_stage, + target: run.target, + }); + } + + fn run(self, builder: &Builder) -> Option { + let stage = self.stage; + let target = self.target; + assert!(builder.config.extended); + + /* run only if llvm-config isn't used */ + if let Some(config) = builder.config.target_config.get(&target) { + if let Some(ref _s) = config.llvm_config { + builder.info(&format!("Skipping LlvmTools stage{} ({}): external LLVM", + stage, target)); + return None; + } + } + + builder.info(&format!("Dist LlvmTools stage{} ({})", stage, target)); + let src = builder.src.join("src/llvm"); + let name = pkgname(builder, "llvm-tools"); + + let tmp = tmpdir(builder); + let image = tmp.join("llvm-tools-image"); + drop(fs::remove_dir_all(&image)); + + // Prepare the image directory + let bindir = builder + .llvm_out(target) + .join("bin"); + let dst = image.join("lib/rustlib") + .join(target) + .join("bin"); + t!(fs::create_dir_all(&dst)); + for tool in LLVM_TOOLS { + let exe = bindir.join(exe(tool, &target)); + builder.install(&exe, &dst, 0o755); + } + + // Prepare the overlay + let overlay = tmp.join("llvm-tools-overlay"); + drop(fs::remove_dir_all(&overlay)); + builder.create_dir(&overlay); + builder.install(&src.join("README.txt"), &overlay, 0o644); + builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644); + builder.create(&overlay.join("version"), &builder.llvm_tools_vers()); + + // Generate the installer tarball + let mut cmd = rust_installer(builder); + cmd.arg("generate") + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=llvm-tools-installed.") + .arg("--image-dir").arg(&image) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) + .arg("--non-installed-overlay").arg(&overlay) + 
.arg(format!("--package-name={}-{}", name, target)) + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--component-name=llvm-tools-preview"); + + + builder.run(&mut cmd); + Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) + } +} + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct Lldb { + pub target: Interned, +} + +impl Step for Lldb { + type Output = Option; + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/lldb") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Lldb { + target: run.target, + }); + } + + fn run(self, builder: &Builder) -> Option { + let target = self.target; + + if builder.config.dry_run { + return None; + } + + let bindir = builder + .llvm_out(target) + .join("bin"); + let lldb_exe = bindir.join(exe("lldb", &target)); + if !lldb_exe.exists() { + return None; + } + + builder.info(&format!("Dist Lldb ({})", target)); + let src = builder.src.join("src/tools/lldb"); + let name = pkgname(builder, "lldb"); + + let tmp = tmpdir(builder); + let image = tmp.join("lldb-image"); + drop(fs::remove_dir_all(&image)); + + // Prepare the image directory + let dst = image.join("bin"); + t!(fs::create_dir_all(&dst)); + for program in &["lldb", "lldb-argdumper", "lldb-mi", "lldb-server"] { + let exe = bindir.join(exe(program, &target)); + builder.install(&exe, &dst, 0o755); + } + + // The libraries. 
+ let libdir = builder.llvm_out(target).join("lib"); + let dst = image.join("lib"); + t!(fs::create_dir_all(&dst)); + for entry in t!(fs::read_dir(&libdir)) { + // let entry = t!(entry); + let entry = entry.unwrap(); + if let Ok(name) = entry.file_name().into_string() { + if name.starts_with("liblldb.") && !name.ends_with(".a") { + if t!(entry.file_type()).is_symlink() { + builder.copy_to_folder(&entry.path(), &dst); + } else { + builder.install(&entry.path(), &dst, 0o755); + } + } + } + } + + // The lldb scripts might be installed in lib/python$version + // or in lib64/python$version. If lib64 exists, use it; + // otherwise lib. + let libdir = builder.llvm_out(target).join("lib64"); + let (libdir, libdir_name) = if libdir.exists() { + (libdir, "lib64") + } else { + (builder.llvm_out(target).join("lib"), "lib") + }; + for entry in t!(fs::read_dir(&libdir)) { + let entry = t!(entry); + if let Ok(name) = entry.file_name().into_string() { + if name.starts_with("python") { + let dst = image.join(libdir_name) + .join(entry.file_name()); + t!(fs::create_dir_all(&dst)); + builder.cp_r(&entry.path(), &dst); + break; + } + } + } + + // Prepare the overlay + let overlay = tmp.join("lldb-overlay"); + drop(fs::remove_dir_all(&overlay)); + builder.create_dir(&overlay); + builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644); + builder.create(&overlay.join("version"), &builder.lldb_vers()); + + // Generate the installer tarball + let mut cmd = rust_installer(builder); + cmd.arg("generate") + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=lldb-installed.") + .arg("--image-dir").arg(&image) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) + .arg("--non-installed-overlay").arg(&overlay) + .arg(format!("--package-name={}-{}", name, target)) + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--component-name=lldb-preview"); + + + builder.run(&mut cmd); + 
Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) + } +} diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index d66c01eb4990..ed9b5b1773fa 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Documentation generation for rustbuild. +//! Documentation generation for rustbuilder. //! //! This module implements generation for all bits and pieces of documentation //! for the Rust project. This notably includes suites like the rust book, the @@ -17,6 +17,7 @@ //! Everything here is basically just a shim around calling either `rustbook` or //! `rustdoc`. +use std::collections::HashSet; use std::fs::{self, File}; use std::io::prelude::*; use std::io; @@ -25,11 +26,12 @@ use std::path::{PathBuf, Path}; use Mode; use build_helper::up_to_date; -use util::{cp_r, symlink_dir}; +use util::symlink_dir; use builder::{Builder, Compiler, RunConfig, ShouldRun, Step}; -use tool::Tool; +use tool::{self, prepare_tool_cargo, Tool, SourceType}; use compile; use cache::{INTERNER, Interned}; +use config::Config; macro_rules! book { ($($name:ident, $path:expr, $book_name:expr;)+) => { @@ -45,7 +47,7 @@ macro_rules! book { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path($path).default_condition(builder.build.config.docs) + run.path($path).default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -68,7 +70,8 @@ macro_rules! book { book!( Nomicon, "src/doc/nomicon", "nomicon"; Reference, "src/doc/reference", "reference"; - Rustdoc, "src/doc/rustdoc", "rustdoc"; + RustdocBook, "src/doc/rustdoc", "rustdoc"; + RustcBook, "src/doc/rustc", "rustc"; RustByExample, "src/doc/rust-by-example", "rust-by-example"; ); @@ -92,7 +95,7 @@ impl Step for Rustbook { /// This will not actually generate any documentation if the documentation has /// already been generated. 
fn run(self, builder: &Builder) { - let src = builder.build.src.join("src/doc"); + let src = builder.src.join("src/doc"); builder.ensure(RustbookSrc { target: self.target, name: self.name, @@ -112,7 +115,7 @@ impl Step for UnstableBook { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("src/doc/unstable-book").default_condition(builder.build.config.docs) + run.path("src/doc/unstable-book").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -128,7 +131,7 @@ impl Step for UnstableBook { builder.ensure(RustbookSrc { target: self.target, name: INTERNER.intern_str("unstable-book"), - src: builder.build.md_doc_out(self.target), + src: builder.md_doc_out(self.target), }) } } @@ -145,7 +148,7 @@ impl Step for CargoBook { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("src/tools/cargo/src/doc/book").default_condition(builder.build.config.docs) + run.path("src/tools/cargo/src/doc/book").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -156,22 +159,20 @@ impl Step for CargoBook { } fn run(self, builder: &Builder) { - let build = builder.build; - let target = self.target; let name = self.name; - let src = build.src.join("src/tools/cargo/src/doc/book"); + let src = builder.src.join("src/tools/cargo/src/doc"); - let out = build.doc_out(target); + let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); let out = out.join(name); - println!("Cargo Book ({}) - {}", target, name); + builder.info(&format!("Cargo Book ({}) - {}", target, name)); let _ = fs::remove_dir_all(&out); - build.run(builder.tool_cmd(Tool::Rustbook) + builder.run(builder.tool_cmd(Tool::Rustbook) .arg("build") .arg(&src) .arg("-d") @@ -198,23 +199,23 @@ impl Step for RustbookSrc { /// This will not actually generate any documentation if the documentation has /// already been generated. 
fn run(self, builder: &Builder) { - let build = builder.build; let target = self.target; let name = self.name; let src = self.src; - let out = build.doc_out(target); + let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); let out = out.join(name); let src = src.join(name); let index = out.join("index.html"); let rustbook = builder.tool_exe(Tool::Rustbook); + let mut rustbook_cmd = builder.tool_cmd(Tool::Rustbook); if up_to_date(&src, &index) && up_to_date(&rustbook, &index) { return } - println!("Rustbook ({}) - {}", target, name); + builder.info(&format!("Rustbook ({}) - {}", target, name)); let _ = fs::remove_dir_all(&out); - build.run(builder.tool_cmd(Tool::Rustbook) + builder.run(rustbook_cmd .arg("build") .arg(&src) .arg("-d") @@ -235,12 +236,12 @@ impl Step for TheBook { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("src/doc/book").default_condition(builder.build.config.docs) + run.path("src/doc/book").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { run.builder.ensure(TheBook { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, name: "book", }); @@ -256,7 +257,6 @@ impl Step for TheBook { /// * Index page /// * Redirect pages fn run(self, builder: &Builder) { - let build = builder.build; let compiler = self.compiler; let target = self.target; let name = self.name; @@ -272,6 +272,12 @@ impl Step for TheBook { name: INTERNER.intern_string(format!("{}/second-edition", name)), }); + // build book 2018 edition + builder.ensure(Rustbook { + target, + name: INTERNER.intern_string(format!("{}/2018-edition", name)), + }); + // build the version info page and CSS builder.ensure(Standalone { compiler, @@ -280,12 +286,12 @@ impl Step for TheBook { // build the index page let index = format!("{}/index.md", name); - println!("Documenting book index ({})", target); + 
builder.info(&format!("Documenting book index ({})", target)); invoke_rustdoc(builder, compiler, target, &index); // build the redirect pages - println!("Documenting book redirect pages ({})", target); - for file in t!(fs::read_dir(build.src.join("src/doc/book/redirects"))) { + builder.info(&format!("Documenting book redirect pages ({})", target)); + for file in t!(fs::read_dir(builder.src.join("src/doc/book/redirects"))) { let file = t!(file); let path = file.path(); let path = path.to_str().unwrap(); @@ -296,13 +302,12 @@ impl Step for TheBook { } fn invoke_rustdoc(builder: &Builder, compiler: Compiler, target: Interned, markdown: &str) { - let build = builder.build; - let out = build.doc_out(target); + let out = builder.doc_out(target); - let path = build.src.join("src/doc").join(markdown); + let path = builder.src.join("src/doc").join(markdown); - let favicon = build.src.join("src/doc/favicon.inc"); - let footer = build.src.join("src/doc/footer.inc"); + let favicon = builder.src.join("src/doc/favicon.inc"); + let footer = builder.src.join("src/doc/footer.inc"); let version_info = out.join("version_info.html"); let mut cmd = builder.rustdoc_cmd(compiler.host); @@ -312,6 +317,7 @@ fn invoke_rustdoc(builder: &Builder, compiler: Compiler, target: Interned ShouldRun { let builder = run.builder; - run.path("src/doc").default_condition(builder.build.config.docs) + run.path("src/doc").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { run.builder.ensure(Standalone { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } @@ -353,31 +359,30 @@ impl Step for Standalone { /// /// In the end, this is just a glorified wrapper around rustdoc! 
fn run(self, builder: &Builder) { - let build = builder.build; let target = self.target; let compiler = self.compiler; - println!("Documenting standalone ({})", target); - let out = build.doc_out(target); + builder.info(&format!("Documenting standalone ({})", target)); + let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); - let favicon = build.src.join("src/doc/favicon.inc"); - let footer = build.src.join("src/doc/footer.inc"); - let full_toc = build.src.join("src/doc/full-toc.inc"); - t!(fs::copy(build.src.join("src/doc/rust.css"), out.join("rust.css"))); + let favicon = builder.src.join("src/doc/favicon.inc"); + let footer = builder.src.join("src/doc/footer.inc"); + let full_toc = builder.src.join("src/doc/full-toc.inc"); + t!(fs::copy(builder.src.join("src/doc/rust.css"), out.join("rust.css"))); - let version_input = build.src.join("src/doc/version_info.html.template"); + let version_input = builder.src.join("src/doc/version_info.html.template"); let version_info = out.join("version_info.html"); - if !up_to_date(&version_input, &version_info) { + if !builder.config.dry_run && !up_to_date(&version_input, &version_info) { let mut info = String::new(); t!(t!(File::open(&version_input)).read_to_string(&mut info)); - let info = info.replace("VERSION", &build.rust_release()) - .replace("SHORT_HASH", build.rust_info.sha_short().unwrap_or("")) - .replace("STAMP", build.rust_info.sha().unwrap_or("")); + let info = info.replace("VERSION", &builder.rust_release()) + .replace("SHORT_HASH", builder.rust_info.sha_short().unwrap_or("")) + .replace("STAMP", builder.rust_info.sha().unwrap_or("")); t!(t!(File::create(&version_info)).write_all(info.as_bytes())); } - for file in t!(fs::read_dir(build.src.join("src/doc"))) { + for file in t!(fs::read_dir(builder.src.join("src/doc"))) { let file = t!(file); let path = file.path(); let filename = path.file_name().unwrap().to_str().unwrap(); @@ -392,7 +397,7 @@ impl Step for Standalone { up_to_date(&favicon, &html) && 
up_to_date(&full_toc, &html) && up_to_date(&version_info, &html) && - up_to_date(&rustdoc, &html) { + (builder.config.dry_run || up_to_date(&rustdoc, &html)) { continue } @@ -412,15 +417,15 @@ impl Step for Standalone { } else { cmd.arg("--markdown-css").arg("rust.css"); } - build.run(&mut cmd); + builder.run(&mut cmd); } } } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Std { - stage: u32, - target: Interned, + pub stage: u32, + pub target: Interned, } impl Step for Std { @@ -429,7 +434,7 @@ impl Step for Std { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.krate("std").default_condition(builder.build.config.docs) + run.all_krates("std").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -444,22 +449,21 @@ impl Step for Std { /// This will generate all documentation for the standard library and its /// dependencies. This is largely just a wrapper around `cargo doc`. fn run(self, builder: &Builder) { - let build = builder.build; let stage = self.stage; let target = self.target; - println!("Documenting stage{} std ({})", stage, target); - let out = build.doc_out(target); + builder.info(&format!("Documenting stage{} std ({})", stage, target)); + let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); - let compiler = builder.compiler(stage, build.build); + let compiler = builder.compiler(stage, builder.config.build); let rustdoc = builder.rustdoc(compiler.host); - let compiler = if build.force_use_stage1(compiler, target) { + let compiler = if builder.force_use_stage1(compiler, target) { builder.compiler(1, compiler.host) } else { compiler }; builder.ensure(compile::Std { compiler, target }); - let out_dir = build.stage_out(compiler, Mode::Libstd) + let out_dir = builder.stage_out(compiler, Mode::Std) .join(target).join("doc"); // Here what we're doing is creating a *symlink* (directory junction on @@ -475,30 +479,26 @@ impl Step for Std { // // This way rustdoc generates output directly 
into the output, and rustdoc // will also directly handle merging. - let my_out = build.crate_doc_out(target); - build.clear_if_dirty(&my_out, &rustdoc); - t!(symlink_dir_force(&my_out, &out_dir)); + let my_out = builder.crate_doc_out(target); + builder.clear_if_dirty(&my_out, &rustdoc); + t!(symlink_dir_force(&builder.config, &my_out, &out_dir)); - let mut cargo = builder.cargo(compiler, Mode::Libstd, target, "doc"); - compile::std_cargo(build, &compiler, target, &mut cargo); + let mut cargo = builder.cargo(compiler, Mode::Std, target, "doc"); + compile::std_cargo(builder, &compiler, target, &mut cargo); - // We don't want to build docs for internal std dependencies unless - // in compiler-docs mode. When not in that mode, we whitelist the crates - // for which docs must be built. - if !build.config.compiler_docs { - cargo.arg("--no-deps"); - for krate in &["alloc", "core", "std", "std_unicode"] { - cargo.arg("-p").arg(krate); - // Create all crate output directories first to make sure rustdoc uses - // relative links. - // FIXME: Cargo should probably do this itself. - t!(fs::create_dir_all(out_dir.join(krate))); - } + // Keep a whitelist so we do not build internal stdlib crates, these will be + // build by the rustc step later if enabled. + cargo.arg("--no-deps"); + for krate in &["alloc", "core", "std"] { + cargo.arg("-p").arg(krate); + // Create all crate output directories first to make sure rustdoc uses + // relative links. + // FIXME: Cargo should probably do this itself. 
+ t!(fs::create_dir_all(out_dir.join(krate))); } - - build.run(&mut cargo); - cp_r(&my_out, &out); + builder.run(&mut cargo); + builder.cp_r(&my_out, &out); } } @@ -514,7 +514,7 @@ impl Step for Test { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.krate("test").default_condition(builder.config.compiler_docs) + run.krate("test").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -529,15 +529,14 @@ impl Step for Test { /// This will generate all documentation for libtest and its dependencies. This /// is largely just a wrapper around `cargo doc`. fn run(self, builder: &Builder) { - let build = builder.build; let stage = self.stage; let target = self.target; - println!("Documenting stage{} test ({})", stage, target); - let out = build.doc_out(target); + builder.info(&format!("Documenting stage{} test ({})", stage, target)); + let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); - let compiler = builder.compiler(stage, build.build); + let compiler = builder.compiler(stage, builder.config.build); let rustdoc = builder.rustdoc(compiler.host); - let compiler = if build.force_use_stage1(compiler, target) { + let compiler = if builder.force_use_stage1(compiler, target) { builder.compiler(1, compiler.host) } else { compiler @@ -547,18 +546,95 @@ impl Step for Test { builder.ensure(Std { stage, target }); builder.ensure(compile::Test { compiler, target }); - let out_dir = build.stage_out(compiler, Mode::Libtest) + let out_dir = builder.stage_out(compiler, Mode::Test) .join(target).join("doc"); // See docs in std above for why we symlink - let my_out = build.crate_doc_out(target); - build.clear_if_dirty(&my_out, &rustdoc); - t!(symlink_dir_force(&my_out, &out_dir)); + let my_out = builder.crate_doc_out(target); + builder.clear_if_dirty(&my_out, &rustdoc); + t!(symlink_dir_force(&builder.config, &my_out, &out_dir)); - let mut cargo = builder.cargo(compiler, Mode::Libtest, target, "doc"); - 
compile::test_cargo(build, &compiler, target, &mut cargo); - build.run(&mut cargo); - cp_r(&my_out, &out); + let mut cargo = builder.cargo(compiler, Mode::Test, target, "doc"); + compile::test_cargo(builder, &compiler, target, &mut cargo); + + cargo.arg("--no-deps").arg("-p").arg("test"); + + builder.run(&mut cargo); + builder.cp_r(&my_out, &out); + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct WhitelistedRustc { + stage: u32, + target: Interned, +} + +impl Step for WhitelistedRustc { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let builder = run.builder; + run.krate("rustc-main").default_condition(builder.config.docs) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(WhitelistedRustc { + stage: run.builder.top_stage, + target: run.target, + }); + } + + /// Generate whitelisted compiler crate documentation. + /// + /// This will generate all documentation for crates that are whitelisted + /// to be included in the standard documentation. This documentation is + /// included in the standard Rust documentation, so we should always + /// document it and symlink to merge with the rest of the std and test + /// documentation. We don't build other compiler documentation + /// here as we want to be able to keep it separate from the standard + /// documentation. This is largely just a wrapper around `cargo doc`. 
+ fn run(self, builder: &Builder) { + let stage = self.stage; + let target = self.target; + builder.info(&format!("Documenting stage{} whitelisted compiler ({})", stage, target)); + let out = builder.doc_out(target); + t!(fs::create_dir_all(&out)); + let compiler = builder.compiler(stage, builder.config.build); + let rustdoc = builder.rustdoc(compiler.host); + let compiler = if builder.force_use_stage1(compiler, target) { + builder.compiler(1, compiler.host) + } else { + compiler + }; + + // Build libstd docs so that we generate relative links + builder.ensure(Std { stage, target }); + + builder.ensure(compile::Rustc { compiler, target }); + let out_dir = builder.stage_out(compiler, Mode::Rustc) + .join(target).join("doc"); + + // See docs in std above for why we symlink + let my_out = builder.crate_doc_out(target); + builder.clear_if_dirty(&my_out, &rustdoc); + t!(symlink_dir_force(&builder.config, &my_out, &out_dir)); + + let mut cargo = builder.cargo(compiler, Mode::Rustc, target, "doc"); + compile::rustc_cargo(builder, &mut cargo); + + // We don't want to build docs for internal compiler dependencies in this + // step (there is another step for that). Therefore, we whitelist the crates + // for which docs must be built. + cargo.arg("--no-deps"); + for krate in &["proc_macro"] { + cargo.arg("-p").arg(krate); + } + + builder.run(&mut cargo); + builder.cp_r(&my_out, &out); } } @@ -575,7 +651,7 @@ impl Step for Rustc { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.krate("rustc-main").default_condition(builder.build.config.docs) + run.krate("rustc-main").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -585,55 +661,164 @@ impl Step for Rustc { }); } - /// Generate all compiler documentation. + /// Generate compiler documentation. /// - /// This will generate all documentation for the compiler libraries and their - /// dependencies. This is largely just a wrapper around `cargo doc`. 
+ /// This will generate all documentation for compiler and dependencies. + /// Compiler documentation is distributed separately, so we make sure + /// we do not merge it with the other documentation from std, test and + /// proc_macros. This is largely just a wrapper around `cargo doc`. fn run(self, builder: &Builder) { - let build = builder.build; let stage = self.stage; let target = self.target; - println!("Documenting stage{} compiler ({})", stage, target); - let out = build.doc_out(target); + builder.info(&format!("Documenting stage{} compiler ({})", stage, target)); + + // This is the intended out directory for compiler documentation. + let out = builder.compiler_doc_out(target); t!(fs::create_dir_all(&out)); - let compiler = builder.compiler(stage, build.build); + + // Get the correct compiler for this stage. + let compiler = builder.compiler(stage, builder.config.build); let rustdoc = builder.rustdoc(compiler.host); - let compiler = if build.force_use_stage1(compiler, target) { + let compiler = if builder.force_use_stage1(compiler, target) { builder.compiler(1, compiler.host) } else { compiler }; - // Build libstd docs so that we generate relative links - builder.ensure(Std { stage, target }); - - builder.ensure(compile::Rustc { compiler, target }); - let out_dir = build.stage_out(compiler, Mode::Librustc) - .join(target).join("doc"); - - // See docs in std above for why we symlink - let my_out = build.crate_doc_out(target); - build.clear_if_dirty(&my_out, &rustdoc); - t!(symlink_dir_force(&my_out, &out_dir)); - - let mut cargo = builder.cargo(compiler, Mode::Librustc, target, "doc"); - compile::rustc_cargo(build, target, &mut cargo); - - if build.config.compiler_docs { - // src/rustc/Cargo.toml contains a bin crate called rustc which - // would otherwise overwrite the docs for the real rustc lib crate. 
- cargo.arg("-p").arg("rustc_driver"); - } else { - // Like with libstd above if compiler docs aren't enabled then we're not - // documenting internal dependencies, so we have a whitelist. - cargo.arg("--no-deps"); - for krate in &["proc_macro"] { - cargo.arg("-p").arg(krate); - } + if !builder.config.compiler_docs { + builder.info("\tskipping - compiler/librustdoc docs disabled"); + return; } - build.run(&mut cargo); - cp_r(&my_out, &out); + // Build libstd docs so that we generate relative links. + builder.ensure(Std { stage, target }); + + // Build rustc. + builder.ensure(compile::Rustc { compiler, target }); + + // We do not symlink to the same shared folder that already contains std library + // documentation from previous steps as we do not want to include that. + let out_dir = builder.stage_out(compiler, Mode::Rustc).join(target).join("doc"); + builder.clear_if_dirty(&out, &rustdoc); + t!(symlink_dir_force(&builder.config, &out, &out_dir)); + + // Build cargo command. + let mut cargo = builder.cargo(compiler, Mode::Rustc, target, "doc"); + cargo.env("RUSTDOCFLAGS", "--document-private-items"); + compile::rustc_cargo(builder, &mut cargo); + + // Only include compiler crates, no dependencies of those, such as `libc`. + cargo.arg("--no-deps"); + + // Find dependencies for top level crates. + let mut compiler_crates = HashSet::new(); + for root_crate in &["rustc", "rustc_driver"] { + let interned_root_crate = INTERNER.intern_str(root_crate); + find_compiler_crates(builder, &interned_root_crate, &mut compiler_crates); + } + + for krate in &compiler_crates { + cargo.arg("-p").arg(krate); + } + + builder.run(&mut cargo); + } +} + +fn find_compiler_crates( + builder: &Builder, + name: &Interned, + crates: &mut HashSet> +) { + // Add current crate. + crates.insert(*name); + + // Look for dependencies. 
+ for dep in builder.crates.get(name).unwrap().deps.iter() { + if builder.crates.get(dep).unwrap().is_local(builder) { + find_compiler_crates(builder, dep, crates); + } + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Rustdoc { + stage: u32, + target: Interned, +} + +impl Step for Rustdoc { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.krate("rustdoc-tool") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Rustdoc { + stage: run.builder.top_stage, + target: run.target, + }); + } + + /// Generate compiler documentation. + /// + /// This will generate all documentation for compiler and dependencies. + /// Compiler documentation is distributed separately, so we make sure + /// we do not merge it with the other documentation from std, test and + /// proc_macros. This is largely just a wrapper around `cargo doc`. + fn run(self, builder: &Builder) { + let stage = self.stage; + let target = self.target; + builder.info(&format!("Documenting stage{} rustdoc ({})", stage, target)); + + // This is the intended out directory for compiler documentation. + let out = builder.compiler_doc_out(target); + t!(fs::create_dir_all(&out)); + + // Get the correct compiler for this stage. + let compiler = builder.compiler(stage, builder.config.build); + let rustdoc = builder.rustdoc(compiler.host); + let compiler = if builder.force_use_stage1(compiler, target) { + builder.compiler(1, compiler.host) + } else { + compiler + }; + + if !builder.config.compiler_docs { + builder.info("\tskipping - compiler/librustdoc docs disabled"); + return; + } + + // Build libstd docs so that we generate relative links. + builder.ensure(Std { stage, target }); + + // Build rustdoc. + builder.ensure(tool::Rustdoc { host: compiler.host }); + + // Symlink compiler docs to the output directory of rustdoc documentation. 
+ let out_dir = builder.stage_out(compiler, Mode::ToolRustc) + .join(target) + .join("doc"); + t!(fs::create_dir_all(&out_dir)); + builder.clear_if_dirty(&out, &rustdoc); + t!(symlink_dir_force(&builder.config, &out, &out_dir)); + + // Build cargo command. + let mut cargo = prepare_tool_cargo( + builder, + compiler, + Mode::ToolRustc, + target, + "doc", + "src/tools/rustdoc", + SourceType::InTree, + ); + + cargo.env("RUSTDOCFLAGS", "--document-private-items"); + builder.run(&mut cargo); } } @@ -649,7 +834,7 @@ impl Step for ErrorIndex { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("src/tools/error_index_generator").default_condition(builder.build.config.docs) + run.path("src/tools/error_index_generator").default_condition(builder.config.docs) } fn make_run(run: RunConfig) { @@ -661,21 +846,20 @@ impl Step for ErrorIndex { /// Generates the HTML rendered error-index by running the /// `error_index_generator` tool. fn run(self, builder: &Builder) { - let build = builder.build; let target = self.target; - println!("Documenting error index ({})", target); - let out = build.doc_out(target); + builder.info(&format!("Documenting error index ({})", target)); + let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); let mut index = builder.tool_cmd(Tool::ErrorIndex); index.arg("html"); index.arg(out.join("error-index.html")); // FIXME: shouldn't have to pass this env var - index.env("CFG_BUILD", &build.build) - .env("RUSTC_ERROR_METADATA_DST", build.extended_error_dir()); + index.env("CFG_BUILD", &builder.config.build) + .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir()); - build.run(&mut index); + builder.run(&mut index); } } @@ -691,7 +875,7 @@ impl Step for UnstableBookGen { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("src/tools/unstable-book-gen").default_condition(builder.build.config.docs) + run.path("src/tools/unstable-book-gen").default_condition(builder.config.docs) } 
fn make_run(run: RunConfig) { @@ -701,27 +885,29 @@ impl Step for UnstableBookGen { } fn run(self, builder: &Builder) { - let build = builder.build; let target = self.target; builder.ensure(compile::Std { - compiler: builder.compiler(builder.top_stage, build.build), + compiler: builder.compiler(builder.top_stage, builder.config.build), target, }); - println!("Generating unstable book md files ({})", target); - let out = build.md_doc_out(target).join("unstable-book"); - t!(fs::create_dir_all(&out)); - t!(fs::remove_dir_all(&out)); + builder.info(&format!("Generating unstable book md files ({})", target)); + let out = builder.md_doc_out(target).join("unstable-book"); + builder.create_dir(&out); + builder.remove_dir(&out); let mut cmd = builder.tool_cmd(Tool::UnstableBookGen); - cmd.arg(build.src.join("src")); + cmd.arg(builder.src.join("src")); cmd.arg(out); - build.run(&mut cmd); + builder.run(&mut cmd); } } -fn symlink_dir_force(src: &Path, dst: &Path) -> io::Result<()> { +fn symlink_dir_force(config: &Config, src: &Path, dst: &Path) -> io::Result<()> { + if config.dry_run { + return Ok(()); + } if let Ok(m) = fs::symlink_metadata(dst) { if m.file_type().is_dir() { try!(fs::remove_dir_all(dst)); @@ -734,5 +920,5 @@ fn symlink_dir_force(src: &Path, dst: &Path) -> io::Result<()> { } } - symlink_dir(src, dst) + symlink_dir(config, src, dst) } diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs index b5d51598fab8..60b4d65f4440 100644 --- a/src/bootstrap/flags.rs +++ b/src/bootstrap/flags.rs @@ -13,48 +13,59 @@ //! This module implements the command-line parsing of the build system which //! has various flags to configure how it's run. -use std::env; use std::fs; use std::path::PathBuf; use std::process; use getopts::Options; -use Build; +use builder::Builder; use config::Config; use metadata; -use builder::Builder; +use {Build, DocTests}; use cache::{Interned, INTERNER}; /// Deserialized version of all flags for this compile. 
pub struct Flags { - pub verbose: usize, // verbosity level: 0 == not verbose, 1 == verbose, 2 == very verbose + pub verbose: usize, // number of -v args; each extra -v after the first is passed to Cargo pub on_fail: Option, pub stage: Option, - pub keep_stage: Option, - pub build: Option>, + pub keep_stage: Vec, pub host: Vec>, pub target: Vec>, pub config: Option, - pub src: PathBuf, pub jobs: Option, pub cmd: Subcommand, pub incremental: bool, + pub exclude: Vec, + pub rustc_error_format: Option, + pub dry_run: bool, + + // true => deny + pub warnings: Option, } pub enum Subcommand { Build { paths: Vec, }, + Check { + paths: Vec, + }, Doc { paths: Vec, }, Test { paths: Vec, + /// Whether to automatically update stderr/stdout files + bless: bool, + compare_mode: Option, test_args: Vec, + rustc_args: Vec, fail_fast: bool, + doc_tests: DocTests, }, Bench { paths: Vec, @@ -82,11 +93,13 @@ impl Default for Subcommand { impl Flags { pub fn parse(args: &[String]) -> Flags { let mut extra_help = String::new(); - let mut subcommand_help = format!("\ + let mut subcommand_help = format!( + "\ Usage: x.py [options] [...] 
Subcommands: build Compile either the compiler or libraries + check Compile either the compiler or libraries, using cargo check test Build and run some test suites bench Build and run some benchmarks doc Build documentation @@ -94,7 +107,8 @@ Subcommands: dist Build distribution artifacts install Install distribution artifacts -To learn more about a subcommand, run `./x.py -h`"); +To learn more about a subcommand, run `./x.py -h`" + ); let mut opts = Options::new(); // Options common to all subcommands @@ -104,35 +118,50 @@ To learn more about a subcommand, run `./x.py -h`"); opts.optopt("", "build", "build target of the stage0 compiler", "BUILD"); opts.optmulti("", "host", "host targets to build", "HOST"); opts.optmulti("", "target", "target targets to build", "TARGET"); + opts.optmulti("", "exclude", "build paths to exclude", "PATH"); opts.optopt("", "on-fail", "command to run on failure", "CMD"); - opts.optopt("", "stage", "stage to build", "N"); - opts.optopt("", "keep-stage", "stage to keep without recompiling", "N"); + opts.optflag("", "dry-run", "dry run; don't build anything"); + opts.optopt("", "stage", + "stage to build (indicates compiler to use/test, e.g. stage 0 uses the \ + bootstrap compiler, stage 1 the stage 0 rustc artifacts, etc.)", + "N"); + opts.optmulti("", "keep-stage", "stage(s) to keep without recompiling", "N"); opts.optopt("", "src", "path to the root of the rust checkout", "DIR"); opts.optopt("j", "jobs", "number of jobs to run in parallel", "JOBS"); opts.optflag("h", "help", "print this help message"); + opts.optopt( + "", + "warnings", + "if value is deny, will deny warnings, otherwise use default", + "VALUE", + ); + opts.optopt("", "error-format", "rustc error format", "FORMAT"); // fn usage() - let usage = |exit_code: i32, opts: &Options, subcommand_help: &str, extra_help: &str| -> ! 
{ - println!("{}", opts.usage(subcommand_help)); - if !extra_help.is_empty() { - println!("{}", extra_help); - } - process::exit(exit_code); - }; + let usage = + |exit_code: i32, opts: &Options, subcommand_help: &str, extra_help: &str| -> ! { + println!("{}", opts.usage(subcommand_help)); + if !extra_help.is_empty() { + println!("{}", extra_help); + } + process::exit(exit_code); + }; // We can't use getopt to parse the options until we have completed specifying which // options are valid, but under the current implementation, some options are conditional on // the subcommand. Therefore we must manually identify the subcommand first, so that we can // complete the definition of the options. Then we can use the getopt::Matches object from // there on out. - let subcommand = args.iter().find(|&s| + let subcommand = args.iter().find(|&s| { (s == "build") - || (s == "test") - || (s == "bench") - || (s == "doc") - || (s == "clean") - || (s == "dist") - || (s == "install")); + || (s == "check") + || (s == "test") + || (s == "bench") + || (s == "doc") + || (s == "clean") + || (s == "dist") + || (s == "install") + }); let subcommand = match subcommand { Some(s) => s, None => { @@ -147,13 +176,36 @@ To learn more about a subcommand, run `./x.py -h`"); // Some subcommands get extra options match subcommand.as_str() { - "test" => { + "test" => { opts.optflag("", "no-fail-fast", "Run all tests regardless of failure"); opts.optmulti("", "test-args", "extra arguments", "ARGS"); - }, - "bench" => { opts.optmulti("", "test-args", "extra arguments", "ARGS"); }, - "clean" => { opts.optflag("", "all", "clean all build artifacts"); }, - _ => { }, + opts.optmulti( + "", + "rustc-args", + "extra options to pass the compiler when running tests", + "ARGS", + ); + opts.optflag("", "no-doc", "do not run doc tests"); + opts.optflag("", "doc", "only run doc tests"); + opts.optflag( + "", + "bless", + "update all stderr/stdout files of failing ui tests", + ); + opts.optopt( + "", + 
"compare-mode", + "mode describing what file the actual ui output will be compared to", + "COMPARE MODE", + ); + } + "bench" => { + opts.optmulti("", "test-args", "extra arguments", "ARGS"); + } + "clean" => { + opts.optflag("", "all", "clean all build artifacts"); + } + _ => {} }; // Done specifying what options are possible, so do the getopts parsing @@ -173,21 +225,24 @@ To learn more about a subcommand, run `./x.py -h`"); if check_subcommand != subcommand { pass_sanity_check = false; } - }, + } None => { pass_sanity_check = false; } } if !pass_sanity_check { println!("{}\n", subcommand_help); - println!("Sorry, I couldn't figure out which subcommand you were trying to specify.\n\ - You may need to move some options to after the subcommand.\n"); + println!( + "Sorry, I couldn't figure out which subcommand you were trying to specify.\n\ + You may need to move some options to after the subcommand.\n" + ); process::exit(1); } // Extra help text for some commands match subcommand.as_str() { "build" => { - subcommand_help.push_str("\n + subcommand_help.push_str( + "\n Arguments: This subcommand accepts a number of paths to directories to the crates and/or artifacts to compile. For example: @@ -206,29 +261,56 @@ Arguments: ./x.py build --stage 1 src/libtest - This will first build everything once (like --stage 0 without further + This will first build everything once (like `--stage 0` without further arguments would), and then use the compiler built in stage 0 to build src/libtest and its dependencies. - Once this is done, build/$ARCH/stage1 contains a usable compiler."); + Once this is done, build/$ARCH/stage1 contains a usable compiler.", + ); + } + "check" => { + subcommand_help.push_str( + "\n +Arguments: + This subcommand accepts a number of paths to directories to the crates + and/or artifacts to compile. 
For example: + + ./x.py check src/libcore + ./x.py check src/libcore src/libproc_macro + + If no arguments are passed then the complete artifacts are compiled: std, test, and rustc. Note + also that since we use `cargo check`, by default this will automatically enable incremental + compilation, so there's no need to pass it separately, though it won't hurt. We also completely + ignore the stage passed, as there's no way to compile in non-stage 0 without actually building + the compiler.", + ); } "test" => { - subcommand_help.push_str("\n + subcommand_help.push_str( + "\n Arguments: This subcommand accepts a number of paths to directories to tests that should be compiled and run. For example: ./x.py test src/test/run-pass ./x.py test src/libstd --test-args hash_map - ./x.py test src/libstd --stage 0 + ./x.py test src/libstd --stage 0 --no-doc + ./x.py test src/test/ui --bless + ./x.py test src/test/ui --compare-mode nll + + Note that `test src/test/* --stage N` does NOT depend on `build src/rustc --stage N`; + just like `build src/libstd --stage N` it tests the compiler produced by the previous + stage. If no arguments are passed then the complete artifacts for that stage are compiled and tested. ./x.py test - ./x.py test --stage 1"); + ./x.py test --stage 1", + ); } "doc" => { - subcommand_help.push_str("\n + subcommand_help.push_str( + "\n Arguments: This subcommand accepts a number of paths to directories of documentation to build. For example: @@ -240,13 +322,16 @@ Arguments: If no arguments are passed then everything is documented: ./x.py doc - ./x.py doc --stage 1"); + ./x.py doc --stage 1", + ); } - _ => { } + _ => {} }; // Get any optional paths which occur after the subcommand - let cwd = t!(env::current_dir()); - let paths = matches.free[1..].iter().map(|p| cwd.join(p)).collect::>(); + let paths = matches.free[1..] 
+ .iter() + .map(|p| p.into()) + .collect::>(); let cfg_file = matches.opt_str("config").map(PathBuf::from).or_else(|| { if fs::metadata("config.toml").is_ok() { @@ -265,9 +350,12 @@ Arguments: let maybe_rules_help = Builder::get_help(&build, subcommand.as_str()); extra_help.push_str(maybe_rules_help.unwrap_or_default().as_str()); } else if subcommand.as_str() != "clean" { - extra_help.push_str(format!( - "Run `./x.py {} -h -v` to see a list of available paths.", - subcommand).as_str()); + extra_help.push_str( + format!( + "Run `./x.py {} -h -v` to see a list of available paths.", + subcommand + ).as_str(), + ); } // User passed in -h/--help? @@ -276,25 +364,28 @@ Arguments: } let cmd = match subcommand.as_str() { - "build" => { - Subcommand::Build { paths: paths } - } - "test" => { - Subcommand::Test { - paths, - test_args: matches.opt_strs("test-args"), - fail_fast: !matches.opt_present("no-fail-fast"), - } - } - "bench" => { - Subcommand::Bench { - paths, - test_args: matches.opt_strs("test-args"), - } - } - "doc" => { - Subcommand::Doc { paths: paths } - } + "build" => Subcommand::Build { paths: paths }, + "check" => Subcommand::Check { paths: paths }, + "test" => Subcommand::Test { + paths, + bless: matches.opt_present("bless"), + compare_mode: matches.opt_str("compare-mode"), + test_args: matches.opt_strs("test-args"), + rustc_args: matches.opt_strs("rustc-args"), + fail_fast: !matches.opt_present("no-fail-fast"), + doc_tests: if matches.opt_present("doc") { + DocTests::Only + } else if matches.opt_present("no-doc") { + DocTests::No + } else { + DocTests::Yes + }, + }, + "bench" => Subcommand::Bench { + paths, + test_args: matches.opt_strs("test-args"), + }, + "doc" => Subcommand::Doc { paths: paths }, "clean" => { if paths.len() > 0 { println!("\nclean does not take a path argument\n"); @@ -305,48 +396,39 @@ Arguments: all: matches.opt_present("all"), } } - "dist" => { - Subcommand::Dist { - paths, - } - } - "install" => { - Subcommand::Install { - paths, - 
} - } + "dist" => Subcommand::Dist { paths }, + "install" => Subcommand::Install { paths }, _ => { usage(1, &opts, &subcommand_help, &extra_help); } }; - - let mut stage = matches.opt_str("stage").map(|j| j.parse().unwrap()); - - if matches.opt_present("incremental") && stage.is_none() { - stage = Some(1); - } - - let cwd = t!(env::current_dir()); - let src = matches.opt_str("src").map(PathBuf::from) - .or_else(|| env::var_os("SRC").map(PathBuf::from)) - .unwrap_or(cwd); - Flags { verbose: matches.opt_count("verbose"), - stage, + stage: matches.opt_str("stage").map(|j| j.parse().unwrap()), + dry_run: matches.opt_present("dry-run"), on_fail: matches.opt_str("on-fail"), - keep_stage: matches.opt_str("keep-stage").map(|j| j.parse().unwrap()), - build: matches.opt_str("build").map(|s| INTERNER.intern_string(s)), + rustc_error_format: matches.opt_str("error-format"), + keep_stage: matches.opt_strs("keep-stage") + .into_iter().map(|j| j.parse().unwrap()) + .collect(), host: split(matches.opt_strs("host")) - .into_iter().map(|x| INTERNER.intern_string(x)).collect::>(), + .into_iter() + .map(|x| INTERNER.intern_string(x)) + .collect::>(), target: split(matches.opt_strs("target")) - .into_iter().map(|x| INTERNER.intern_string(x)).collect::>(), + .into_iter() + .map(|x| INTERNER.intern_string(x)) + .collect::>(), config: cfg_file, - src, jobs: matches.opt_str("jobs").map(|j| j.parse().unwrap()), cmd, incremental: matches.opt_present("incremental"), + exclude: split(matches.opt_strs("exclude")) + .into_iter() + .map(|p| p.into()) + .collect::>(), + warnings: matches.opt_str("warnings").map(|v| v == "deny"), } } } @@ -354,22 +436,60 @@ Arguments: impl Subcommand { pub fn test_args(&self) -> Vec<&str> { match *self { - Subcommand::Test { ref test_args, .. } | - Subcommand::Bench { ref test_args, .. } => { - test_args.iter().flat_map(|s| s.split_whitespace()).collect() + Subcommand::Test { ref test_args, .. } | Subcommand::Bench { ref test_args, .. 
} => { + test_args + .iter() + .flat_map(|s| s.split_whitespace()) + .collect() } _ => Vec::new(), } } + pub fn rustc_args(&self) -> Vec<&str> { + match *self { + Subcommand::Test { ref rustc_args, .. } => rustc_args + .iter() + .flat_map(|s| s.split_whitespace()) + .collect(), + _ => Vec::new(), + } + } + pub fn fail_fast(&self) -> bool { match *self { Subcommand::Test { fail_fast, .. } => fail_fast, _ => false, } } + + pub fn doc_tests(&self) -> DocTests { + match *self { + Subcommand::Test { doc_tests, .. } => doc_tests, + _ => DocTests::Yes, + } + } + + pub fn bless(&self) -> bool { + match *self { + Subcommand::Test { bless, .. } => bless, + _ => false, + } + } + + pub fn compare_mode(&self) -> Option<&str> { + match *self { + Subcommand::Test { + ref compare_mode, .. + } => compare_mode.as_ref().map(|s| &s[..]), + _ => None, + } + } } fn split(s: Vec) -> Vec { - s.iter().flat_map(|s| s.split(',')).map(|s| s.to_string()).collect() + s.iter() + .flat_map(|s| s.split(',')) + .map(|s| s.to_string()) + .collect() } diff --git a/src/bootstrap/install.rs b/src/bootstrap/install.rs index 743f32ece99c..cb28698aa3d6 100644 --- a/src/bootstrap/install.rs +++ b/src/bootstrap/install.rs @@ -22,6 +22,7 @@ use dist::{self, pkgname, sanitize_sh, tmpdir}; use builder::{Builder, RunConfig, ShouldRun, Step}; use cache::Interned; +use config::Config; pub fn install_docs(builder: &Builder, stage: u32, host: Interned) { install_sh(builder, "docs", "rust-docs", stage, Some(host)); @@ -38,6 +39,9 @@ pub fn install_cargo(builder: &Builder, stage: u32, host: Interned) { pub fn install_rls(builder: &Builder, stage: u32, host: Interned) { install_sh(builder, "rls", "rls", stage, Some(host)); } +pub fn install_clippy(builder: &Builder, stage: u32, host: Interned) { + install_sh(builder, "clippy", "clippy", stage, Some(host)); +} pub fn install_rustfmt(builder: &Builder, stage: u32, host: Interned) { install_sh(builder, "rustfmt", "rustfmt", stage, Some(host)); @@ -61,23 +65,27 @@ fn 
install_sh( stage: u32, host: Option> ) { - let build = builder.build; - println!("Install {} stage{} ({:?})", package, stage, host); + builder.info(&format!("Install {} stage{} ({:?})", package, stage, host)); let prefix_default = PathBuf::from("/usr/local"); let sysconfdir_default = PathBuf::from("/etc"); - let docdir_default = PathBuf::from("share/doc/rust"); + let datadir_default = PathBuf::from("share"); + let docdir_default = datadir_default.join("doc/rust"); let bindir_default = PathBuf::from("bin"); let libdir_default = PathBuf::from("lib"); - let mandir_default = PathBuf::from("share/man"); - let prefix = build.config.prefix.as_ref().unwrap_or(&prefix_default); - let sysconfdir = build.config.sysconfdir.as_ref().unwrap_or(&sysconfdir_default); - let docdir = build.config.docdir.as_ref().unwrap_or(&docdir_default); - let bindir = build.config.bindir.as_ref().unwrap_or(&bindir_default); - let libdir = build.config.libdir.as_ref().unwrap_or(&libdir_default); - let mandir = build.config.mandir.as_ref().unwrap_or(&mandir_default); + let mandir_default = datadir_default.join("man"); + let prefix = builder.config.prefix.as_ref().map_or(prefix_default, |p| { + fs::canonicalize(p).unwrap_or_else(|_| panic!("could not canonicalize {}", p.display())) + }); + let sysconfdir = builder.config.sysconfdir.as_ref().unwrap_or(&sysconfdir_default); + let datadir = builder.config.datadir.as_ref().unwrap_or(&datadir_default); + let docdir = builder.config.docdir.as_ref().unwrap_or(&docdir_default); + let bindir = builder.config.bindir.as_ref().unwrap_or(&bindir_default); + let libdir = builder.config.libdir.as_ref().unwrap_or(&libdir_default); + let mandir = builder.config.mandir.as_ref().unwrap_or(&mandir_default); let sysconfdir = prefix.join(sysconfdir); + let datadir = prefix.join(datadir); let docdir = prefix.join(docdir); let bindir = prefix.join(bindir); let libdir = prefix.join(libdir); @@ -87,31 +95,33 @@ fn install_sh( let prefix = add_destdir(&prefix, &destdir); let 
sysconfdir = add_destdir(&sysconfdir, &destdir); + let datadir = add_destdir(&datadir, &destdir); let docdir = add_destdir(&docdir, &destdir); let bindir = add_destdir(&bindir, &destdir); let libdir = add_destdir(&libdir, &destdir); let mandir = add_destdir(&mandir, &destdir); - let empty_dir = build.out.join("tmp/empty_dir"); + let empty_dir = builder.out.join("tmp/empty_dir"); t!(fs::create_dir_all(&empty_dir)); let package_name = if let Some(host) = host { - format!("{}-{}", pkgname(build, name), host) + format!("{}-{}", pkgname(builder, name), host) } else { - pkgname(build, name) + pkgname(builder, name) }; let mut cmd = Command::new("sh"); cmd.current_dir(&empty_dir) - .arg(sanitize_sh(&tmpdir(build).join(&package_name).join("install.sh"))) + .arg(sanitize_sh(&tmpdir(builder).join(&package_name).join("install.sh"))) .arg(format!("--prefix={}", sanitize_sh(&prefix))) .arg(format!("--sysconfdir={}", sanitize_sh(&sysconfdir))) + .arg(format!("--datadir={}", sanitize_sh(&datadir))) .arg(format!("--docdir={}", sanitize_sh(&docdir))) .arg(format!("--bindir={}", sanitize_sh(&bindir))) .arg(format!("--libdir={}", sanitize_sh(&libdir))) .arg(format!("--mandir={}", sanitize_sh(&mandir))) .arg("--disable-ldconfig"); - build.run(&mut cmd); + builder.run(&mut cmd); t!(fs::remove_dir_all(&empty_dir)); } @@ -144,10 +154,22 @@ macro_rules! install { pub host: Interned, } + impl $name { + #[allow(dead_code)] + fn should_build(config: &Config) -> bool { + config.extended && config.tools.as_ref() + .map_or(true, |t| t.contains($path)) + } + + #[allow(dead_code)] + fn should_install(builder: &Builder) -> bool { + builder.config.tools.as_ref().map_or(false, |t| t.contains($path)) + } + } + impl Step for $name { type Output = (); const DEFAULT: bool = true; - const ONLY_BUILD_TARGETS: bool = true; const ONLY_HOSTS: bool = $only_hosts; $(const $c: bool = true;)* @@ -160,7 +182,7 @@ macro_rules! 
install { run.builder.ensure($name { stage: run.builder.top_stage, target: run.target, - host: run.host, + host: run.builder.config.build, }); } @@ -177,7 +199,7 @@ install!((self, builder, _config), install_docs(builder, self.stage, self.target); }; Std, "src/libstd", true, only_hosts: true, { - for target in &builder.build.targets { + for target in &builder.targets { builder.ensure(dist::Std { compiler: builder.compiler(self.stage, self.host), target: *target @@ -185,35 +207,42 @@ install!((self, builder, _config), install_std(builder, self.stage, *target); } }; - Cargo, "cargo", _config.extended, only_hosts: true, { + Cargo, "cargo", Self::should_build(_config), only_hosts: true, { builder.ensure(dist::Cargo { stage: self.stage, target: self.target }); install_cargo(builder, self.stage, self.target); }; - Rls, "rls", _config.extended, only_hosts: true, { - if builder.ensure(dist::Rls { stage: self.stage, target: self.target }).is_some() { + Rls, "rls", Self::should_build(_config), only_hosts: true, { + if builder.ensure(dist::Rls { stage: self.stage, target: self.target }).is_some() || + Self::should_install(builder) { install_rls(builder, self.stage, self.target); } else { - println!("skipping Install RLS stage{} ({})", self.stage, self.target); + builder.info(&format!("skipping Install RLS stage{} ({})", self.stage, self.target)); } }; - Rustfmt, "rustfmt", _config.extended, only_hosts: true, { - if builder.ensure(dist::Rustfmt { stage: self.stage, target: self.target }).is_some() { + Clippy, "clippy", Self::should_build(_config), only_hosts: true, { + if builder.ensure(dist::Clippy { stage: self.stage, target: self.target }).is_some() || + Self::should_install(builder) { + install_clippy(builder, self.stage, self.target); + } else { + builder.info(&format!("skipping Install clippy stage{} ({})", self.stage, self.target)); + } + }; + Rustfmt, "rustfmt", Self::should_build(_config), only_hosts: true, { + if builder.ensure(dist::Rustfmt { stage: self.stage, 
target: self.target }).is_some() || + Self::should_install(builder) { install_rustfmt(builder, self.stage, self.target); } else { - println!("skipping Install Rustfmt stage{} ({})", self.stage, self.target); + builder.info( + &format!("skipping Install Rustfmt stage{} ({})", self.stage, self.target)); } }; - Analysis, "analysis", _config.extended, only_hosts: false, { + Analysis, "analysis", Self::should_build(_config), only_hosts: false, { builder.ensure(dist::Analysis { compiler: builder.compiler(self.stage, self.host), target: self.target }); install_analysis(builder, self.stage, self.target); }; - Src, "src", _config.extended, only_hosts: true, { - builder.ensure(dist::Src); - install_src(builder, self.stage); - }, ONLY_BUILD; Rustc, "src/librustc", true, only_hosts: true, { builder.ensure(dist::Rustc { compiler: builder.compiler(self.stage, self.target), @@ -221,3 +250,32 @@ install!((self, builder, _config), install_rustc(builder, self.stage, self.target); }; ); + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Src { + pub stage: u32, +} + +impl Step for Src { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let config = &run.builder.config; + let cond = config.extended && + config.tools.as_ref().map_or(true, |t| t.contains("src")); + run.path("src").default_condition(cond) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Src { + stage: run.builder.top_stage, + }); + } + + fn run(self, builder: &Builder) { + builder.ensure(dist::Src); + install_src(builder, self.stage); + } +} diff --git a/src/bootstrap/job.rs b/src/bootstrap/job.rs index fa3ba02482f5..6445ce8da332 100644 --- a/src/bootstrap/job.rs +++ b/src/bootstrap/job.rs @@ -122,12 +122,10 @@ struct JOBOBJECT_BASIC_LIMIT_INFORMATION { } pub unsafe fn setup(build: &mut Build) { - // Tell Windows to not show any UI on errors (such as not finding a required dll - // during startup or terminating 
abnormally). This is important for running tests, - // since some of them use abnormal termination by design. - // This mode is inherited by all child processes. - let mode = SetErrorMode(SEM_NOGPFAULTERRORBOX); // read inherited flags - SetErrorMode(mode | SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); + // Enable the Windows Error Reporting dialog which msys disables, + // so we can JIT debug rustc + let mode = SetErrorMode(0); + SetErrorMode(mode & !SEM_NOGPFAULTERRORBOX); // Create a new job object for us to use let job = CreateJobObjectW(0 as *mut _, 0 as *const _); diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index 948bf29bbacc..5bb475e07ba8 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -113,9 +113,10 @@ //! More documentation can be found in each respective module below, and you can //! also check out the `src/bootstrap/README.md` file for more information. +#![deny(bare_trait_objects)] #![deny(warnings)] -#![allow(stable_features)] -#![feature(associated_consts)] +#![feature(core_intrinsics)] +#![feature(drain_filter)] #[macro_use] extern crate build_helper; @@ -130,26 +131,40 @@ extern crate cc; extern crate getopts; extern crate num_cpus; extern crate toml; +extern crate time; +extern crate petgraph; + +#[cfg(test)] +#[macro_use] +extern crate pretty_assertions; #[cfg(unix)] extern crate libc; -use std::cell::RefCell; +use std::cell::{RefCell, Cell}; use std::collections::{HashSet, HashMap}; use std::env; -use std::fs::{self, File}; -use std::io::Read; +use std::fs::{self, OpenOptions, File}; +use std::io::{self, Seek, SeekFrom, Write, Read}; use std::path::{PathBuf, Path}; use std::process::{self, Command}; use std::slice; +use std::str; + +#[cfg(unix)] +use std::os::unix::fs::symlink as symlink_file; +#[cfg(windows)] +use std::os::windows::fs::symlink_file; use build_helper::{run_silent, run_suppressed, try_run_silent, try_run_suppressed, output, mtime}; +use filetime::FileTime; use util::{exe, libdir, OutputFolder, 
CiEnv}; mod cc_detect; mod channel; mod check; +mod test; mod clean; mod compile; mod metadata; @@ -169,7 +184,7 @@ mod toolstate; #[cfg(windows)] mod job; -#[cfg(unix)] +#[cfg(all(unix, not(target_os = "haiku")))] mod job { use libc; @@ -180,7 +195,7 @@ mod job { } } -#[cfg(not(any(unix, windows)))] +#[cfg(any(target_os = "haiku", not(any(unix, windows))))] mod job { pub unsafe fn setup(_build: &mut ::Build) { } @@ -191,17 +206,36 @@ use flags::Subcommand; use cache::{Interned, INTERNER}; use toolstate::ToolState; +const LLVM_TOOLS: &[&str] = &[ + "llvm-nm", // used to inspect binaries; it shows symbol names, their sizes and visibility + "llvm-objcopy", // used to transform ELFs into binary format which flashing tools consume + "llvm-objdump", // used to disassemble programs + "llvm-profdata", // used to inspect and merge files generated by profiles + "llvm-size", // used to prints the size of the linker sections of a program + "llvm-strip", // used to discard symbols from binary files to reduce their size +]; + /// A structure representing a Rust compiler. /// /// Each compiler has a `stage` that it is associated with and a `host` that /// corresponds to the platform the compiler runs on. This structure is used as /// a parameter to many methods below. -#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] +#[derive(Eq, PartialOrd, Ord, PartialEq, Clone, Copy, Hash, Debug)] pub struct Compiler { stage: u32, host: Interned, } +#[derive(PartialEq, Eq, Copy, Clone, Debug)] +pub enum DocTests { + // Default, run normal tests and doc tests. + Yes, + // Do not run any doc tests. + No, + // Only run doc tests. + Only, +} + /// Global configuration for the build system. /// /// This structure transitively contains all configuration for the build system. 
@@ -222,9 +256,11 @@ pub struct Build { rust_info: channel::GitInfo, cargo_info: channel::GitInfo, rls_info: channel::GitInfo, + clippy_info: channel::GitInfo, rustfmt_info: channel::GitInfo, local_rebuild: bool, fail_fast: bool, + doc_tests: DocTests, verbosity: usize, // Targets for which to build. @@ -250,13 +286,19 @@ pub struct Build { is_sudo: bool, ci_env: CiEnv, delayed_failures: RefCell>, + prerelease_version: Cell>, + tool_artifacts: RefCell, + HashMap)> + >>, } #[derive(Debug)] struct Crate { name: Interned, version: String, - deps: Vec>, + deps: HashSet>, + id: String, path: PathBuf, doc_step: String, build_step: String, @@ -264,23 +306,56 @@ struct Crate { bench_step: String, } +impl Crate { + fn is_local(&self, build: &Build) -> bool { + self.path.starts_with(&build.config.src) && + !self.path.to_string_lossy().ends_with("_shim") + } + + fn local_path(&self, build: &Build) -> PathBuf { + assert!(self.is_local(build)); + self.path.strip_prefix(&build.config.src).unwrap().into() + } +} + /// The various "modes" of invoking Cargo. /// /// These entries currently correspond to the various output directories of the /// build system, with each mod generating output in a different directory. -#[derive(Debug, Hash, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Hash, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] pub enum Mode { /// Build the standard library, placing output in the "stageN-std" directory. - Libstd, + Std, /// Build libtest, placing output in the "stageN-test" directory. - Libtest, + Test, - /// Build librustc and compiler libraries, placing output in the "stageN-rustc" directory. - Librustc, + /// Build librustc, and compiler libraries, placing output in the "stageN-rustc" directory. + Rustc, - /// Build some tool, placing output in the "stageN-tools" directory. - Tool, + /// Build codegen libraries, placing output in the "stageN-codegen" directory + Codegen, + + /// Build some tools, placing output in the "stageN-tools" directory. 
The + /// "other" here is for miscellaneous sets of tools that are built using the + /// bootstrap compiler in its entirety (target libraries and all). + /// Typically these tools compile with stable Rust. + ToolBootstrap, + + /// Compile a tool which uses all libraries we compile (up to rustc). + /// Doesn't use the stage0 compiler libraries like "other", and includes + /// tools like rustdoc, cargo, rls, etc. + ToolStd, + ToolRustc, +} + +impl Mode { + pub fn is_tool(&self) -> bool { + match self { + Mode::ToolBootstrap | Mode::ToolRustc | Mode::ToolStd => true, + _ => false + } + } } impl Build { @@ -289,9 +364,8 @@ impl Build { /// /// By default all build output will be placed in the current directory. pub fn new(config: Config) -> Build { - let cwd = t!(env::current_dir()); let src = config.src.clone(); - let out = cwd.join("build"); + let out = config.out.clone(); let is_sudo = match env::var_os("SUDO_USER") { Some(sudo_user) => { @@ -305,13 +379,15 @@ impl Build { let rust_info = channel::GitInfo::new(&config, &src); let cargo_info = channel::GitInfo::new(&config, &src.join("src/tools/cargo")); let rls_info = channel::GitInfo::new(&config, &src.join("src/tools/rls")); + let clippy_info = channel::GitInfo::new(&config, &src.join("src/tools/clippy")); let rustfmt_info = channel::GitInfo::new(&config, &src.join("src/tools/rustfmt")); - Build { + let mut build = Build { initial_rustc: config.initial_rustc.clone(), initial_cargo: config.initial_cargo.clone(), local_rebuild: config.local_rebuild, fail_fast: config.cmd.fail_fast(), + doc_tests: config.cmd.doc_tests(), verbosity: config.verbose, build: config.build, @@ -325,6 +401,7 @@ impl Build { rust_info, cargo_info, rls_info, + clippy_info, rustfmt_info, cc: HashMap::new(), cxx: HashMap::new(), @@ -335,7 +412,32 @@ impl Build { is_sudo, ci_env: CiEnv::current(), delayed_failures: RefCell::new(Vec::new()), + prerelease_version: Cell::new(None), + tool_artifacts: Default::default(), + }; + + 
build.verbose("finding compilers"); + cc_detect::find(&mut build); + build.verbose("running sanity check"); + sanity::check(&mut build); + + // If local-rust is the same major.minor as the current version, then force a + // local-rebuild + let local_version_verbose = output( + Command::new(&build.initial_rustc).arg("--version").arg("--verbose")); + let local_release = local_version_verbose + .lines().filter(|x| x.starts_with("release:")) + .next().unwrap().trim_left_matches("release:").trim(); + let my_version = channel::CFG_RELEASE_NUM; + if local_release.split('.').take(2).eq(my_version.split('.').take(2)) { + build.verbose(&format!("auto-detected local-rebuild {}", local_release)); + build.local_rebuild = true; } + + build.verbose("learning about cargo"); + metadata::build(&mut build); + + build } pub fn build_triple(&self) -> &[Interned] { @@ -354,25 +456,28 @@ impl Build { return clean::clean(self, all); } - self.verbose("finding compilers"); - cc_detect::find(self); - self.verbose("running sanity check"); - sanity::check(self); - // If local-rust is the same major.minor as the current version, then force a local-rebuild - let local_version_verbose = output( - Command::new(&self.initial_rustc).arg("--version").arg("--verbose")); - let local_release = local_version_verbose - .lines().filter(|x| x.starts_with("release:")) - .next().unwrap().trim_left_matches("release:").trim(); - let my_version = channel::CFG_RELEASE_NUM; - if local_release.split('.').take(2).eq(my_version.split('.').take(2)) { - self.verbose(&format!("auto-detected local-rebuild {}", local_release)); - self.local_rebuild = true; + { + let builder = builder::Builder::new(&self); + if let Some(path) = builder.paths.get(0) { + if path == Path::new("nonexistent/path/to/trigger/cargo/metadata") { + return; + } + } } - self.verbose("learning about cargo"); - metadata::build(self); - builder::Builder::run(&self); + if !self.config.dry_run { + { + self.config.dry_run = true; + let builder = 
builder::Builder::new(&self); + builder.execute_cli(); + } + self.config.dry_run = false; + let builder = builder::Builder::new(&self); + builder.execute_cli(); + } else { + let builder = builder::Builder::new(&self); + let _ = builder.execute_cli(); + } // Check for postponed failures from `test --no-fail-fast`. let failures = self.delayed_failures.borrow(); @@ -420,6 +525,9 @@ impl Build { if self.config.profiler { features.push_str(" profiler"); } + if self.config.wasm_syscall { + features.push_str(" wasm_syscall"); + } features } @@ -429,9 +537,6 @@ impl Build { if self.config.use_jemalloc { features.push_str(" jemalloc"); } - if self.config.llvm_enabled { - features.push_str(" llvm"); - } features } @@ -447,22 +552,19 @@ impl Build { out } - /// Get the directory for incremental by-products when using the - /// given compiler. - fn incremental_dir(&self, compiler: Compiler) -> PathBuf { - self.out.join(&*compiler.host).join(format!("stage{}-incremental", compiler.stage)) - } - /// Returns the root directory for all output generated in a particular /// stage when running with a particular host compiler. /// /// The mode indicates what the root directory is for. 
fn stage_out(&self, compiler: Compiler, mode: Mode) -> PathBuf { let suffix = match mode { - Mode::Libstd => "-std", - Mode::Libtest => "-test", - Mode::Tool => "-tools", - Mode::Librustc => "-rustc", + Mode::Std => "-std", + Mode::Test => "-test", + Mode::Rustc => "-rustc", + Mode::Codegen => "-codegen", + Mode::ToolBootstrap => "-bootstrap-tools", + Mode::ToolStd => "-tools", + Mode::ToolRustc => "-tools", }; self.out.join(&*compiler.host) .join(format!("stage{}{}", compiler.stage, suffix)) @@ -486,11 +588,24 @@ impl Build { self.out.join(&*target).join("llvm") } + fn emscripten_llvm_out(&self, target: Interned) -> PathBuf { + self.out.join(&*target).join("llvm-emscripten") + } + + fn lld_out(&self, target: Interned) -> PathBuf { + self.out.join(&*target).join("lld") + } + /// Output directory for all documentation for a target fn doc_out(&self, target: Interned) -> PathBuf { self.out.join(&*target).join("doc") } + /// Output directory for all documentation for a target + fn compiler_doc_out(&self, target: Interned) -> PathBuf { + self.out.join(&*target).join("compiler-doc") + } + /// Output directory for some generated md crate documentation for a target (temporary) fn md_doc_out(&self, target: Interned) -> Interned { INTERNER.intern_path(self.out.join(&*target).join("md-doc")) @@ -513,20 +628,6 @@ impl Build { } } - /// Returns the path to `llvm-config` for the specified target. - /// - /// If a custom `llvm-config` was specified for target then that's returned - /// instead. 
- fn llvm_config(&self, target: Interned) -> PathBuf { - let target_config = self.config.target_config.get(&target); - if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) { - s.clone() - } else { - self.llvm_out(self.config.build).join("bin") - .join(exe("llvm-config", &*target)) - } - } - /// Returns the path to `FileCheck` binary for the specified target fn llvm_filecheck(&self, target: Interned) -> PathBuf { let target_config = self.config.target_config.get(&target); @@ -535,12 +636,20 @@ impl Build { Path::new(llvm_bindir.trim()).join(exe("FileCheck", &*target)) } else { let base = self.llvm_out(self.config.build).join("build"); - let exe = exe("FileCheck", &*target); - if !self.config.ninja && self.config.build.contains("msvc") { - base.join("Release/bin").join(exe) + let base = if !self.config.ninja && self.config.build.contains("msvc") { + if self.config.llvm_optimize { + if self.config.llvm_release_debuginfo { + base.join("RelWithDebInfo") + } else { + base.join("Release") + } + } else { + base.join("Debug") + } } else { - base.join("bin").join(exe) - } + base + }; + base.join("bin").join(exe("FileCheck", &*target)) } } @@ -564,18 +673,24 @@ impl Build { /// Returns the libdir of the snapshot compiler. fn rustc_snapshot_libdir(&self) -> PathBuf { + self.rustc_snapshot_sysroot().join(libdir(&self.config.build)) + } + + /// Returns the sysroot of the snapshot compiler. + fn rustc_snapshot_sysroot(&self) -> &Path { self.initial_rustc.parent().unwrap().parent().unwrap() - .join(libdir(&self.config.build)) } /// Runs a command, printing out nice contextual information if it fails. fn run(&self, cmd: &mut Command) { + if self.config.dry_run { return; } self.verbose(&format!("running: {:?}", cmd)); run_silent(cmd) } /// Runs a command, printing out nice contextual information if it fails. 
fn run_quiet(&self, cmd: &mut Command) { + if self.config.dry_run { return; } self.verbose(&format!("running: {:?}", cmd)); run_suppressed(cmd) } @@ -584,6 +699,7 @@ impl Build { /// Exits if the command failed to execute at all, otherwise returns its /// `status.success()`. fn try_run(&self, cmd: &mut Command) -> bool { + if self.config.dry_run { return true; } self.verbose(&format!("running: {:?}", cmd)); try_run_silent(cmd) } @@ -592,6 +708,7 @@ impl Build { /// Exits if the command failed to execute at all, otherwise returns its /// `status.success()`. fn try_run_quiet(&self, cmd: &mut Command) -> bool { + if self.config.dry_run { return true; } self.verbose(&format!("running: {:?}", cmd)); try_run_suppressed(cmd) } @@ -600,10 +717,6 @@ impl Build { self.verbosity > 0 } - pub fn is_very_verbose(&self) -> bool { - self.verbosity > 1 - } - /// Prints a message if this build is configured in verbose mode. fn verbose(&self, msg: &str) { if self.is_verbose() { @@ -611,6 +724,11 @@ impl Build { } } + fn info(&self, msg: &str) { + if self.config.dry_run { return; } + println!("{}", msg); + } + /// Returns the number of parallel jobs that have been configured for this /// build. fn jobs(&self) -> u32 { @@ -664,13 +782,15 @@ impl Build { } } - /// Returns the path to the linker for the given target if it needs to be overriden. + /// Returns the path to the linker for the given target if it needs to be overridden. 
fn linker(&self, target: Interned) -> Option<&Path> { if let Some(linker) = self.config.target_config.get(&target) .and_then(|c| c.linker.as_ref()) { Some(linker) } else if target != self.config.build && - !target.contains("msvc") && !target.contains("emscripten") { + !target.contains("msvc") && + !target.contains("emscripten") && + !target.contains("wasm32") { Some(self.cc(target)) } else { None @@ -695,6 +815,12 @@ impl Build { .map(|p| &**p) } + /// Returns true if this is a no-std `target`, if defined + fn no_std(&self, target: Interned) -> Option { + self.config.target_config.get(&target) + .map(|t| t.no_std) + } + /// Returns whether the target will be tested using the `remote-test-client` /// and `remote-test-server` binaries. fn remote_tested(&self, target: Interned) -> bool { @@ -774,12 +900,63 @@ impl Build { fn release(&self, num: &str) -> String { match &self.config.channel[..] { "stable" => num.to_string(), - "beta" => format!("{}-beta{}", num, channel::CFG_PRERELEASE_VERSION), + "beta" => if self.rust_info.is_git() { + format!("{}-beta.{}", num, self.beta_prerelease_version()) + } else { + format!("{}-beta", num) + }, "nightly" => format!("{}-nightly", num), _ => format!("{}-dev", num), } } + fn beta_prerelease_version(&self) -> u32 { + if let Some(s) = self.prerelease_version.get() { + return s + } + + let beta = output( + Command::new("git") + .arg("ls-remote") + .arg("origin") + .arg("beta") + .current_dir(&self.src) + ); + let beta = beta.trim().split_whitespace().next().unwrap(); + let master = output( + Command::new("git") + .arg("ls-remote") + .arg("origin") + .arg("master") + .current_dir(&self.src) + ); + let master = master.trim().split_whitespace().next().unwrap(); + + // Figure out where the current beta branch started. 
+ let base = output( + Command::new("git") + .arg("merge-base") + .arg(beta) + .arg(master) + .current_dir(&self.src), + ); + let base = base.trim(); + + // Next figure out how many merge commits happened since we branched off + // beta. That's our beta number! + let count = output( + Command::new("git") + .arg("rev-list") + .arg("--count") + .arg("--merges") + .arg(format!("{}...HEAD", base)) + .current_dir(&self.src), + ); + let n = count.trim().parse().unwrap(); + self.prerelease_version.set(Some(n)); + n + } + /// Returns the value of `release` above for Rust itself. fn rust_release(&self) -> String { self.release(channel::CFG_RELEASE_NUM) @@ -815,11 +992,32 @@ impl Build { self.package_vers(&self.release_num("rls")) } + /// Returns the value of `package_vers` above for clippy + fn clippy_package_vers(&self) -> String { + self.package_vers(&self.release_num("clippy")) + } + /// Returns the value of `package_vers` above for rustfmt fn rustfmt_package_vers(&self) -> String { self.package_vers(&self.release_num("rustfmt")) } + fn llvm_tools_package_vers(&self) -> String { + self.package_vers(&self.rust_version()) + } + + fn llvm_tools_vers(&self) -> String { + self.rust_version() + } + + fn lldb_package_vers(&self) -> String { + self.package_vers(&self.rust_version()) + } + + fn lldb_vers(&self) -> String { + self.rust_version() + } + /// Returns the `version` string associated with this compiler for Rust /// itself. /// @@ -865,7 +1063,7 @@ impl Build { pub fn fold_output(&self, name: F) -> Option where D: Into, F: FnOnce() -> D { - if self.ci_env == CiEnv::Travis { + if !self.config.dry_run && self.ci_env == CiEnv::Travis { Some(OutputFolder::new(name().into())) } else { None @@ -896,29 +1094,194 @@ impl Build { } } - /// Get a list of crates from a root crate. 
- /// - /// Returns Vec<(crate, path to crate, is_root_crate)> - fn crates(&self, root: &str) -> Vec<(Interned, &Path)> { - let interned = INTERNER.intern_string(root.to_owned()); + fn in_tree_crates(&self, root: &str) -> Vec<&Crate> { let mut ret = Vec::new(); - let mut list = vec![interned]; + let mut list = vec![INTERNER.intern_str(root)]; let mut visited = HashSet::new(); while let Some(krate) = list.pop() { let krate = &self.crates[&krate]; - // If we can't strip prefix, then out-of-tree path - let path = krate.path.strip_prefix(&self.src).unwrap_or(&krate.path); - ret.push((krate.name, path)); - for dep in &krate.deps { - if visited.insert(dep) && dep != "build_helper" { - list.push(*dep); + if krate.is_local(self) { + ret.push(krate); + for dep in &krate.deps { + if visited.insert(dep) && dep != "build_helper" { + list.push(*dep); + } } } } ret } + + fn read_stamp_file(&self, stamp: &Path) -> Vec { + if self.config.dry_run { + return Vec::new(); + } + + let mut paths = Vec::new(); + let mut contents = Vec::new(); + t!(t!(File::open(stamp)).read_to_end(&mut contents)); + // This is the method we use for extracting paths from the stamp file passed to us. See + // run_cargo for more information (in compile.rs). + for part in contents.split(|b| *b == 0) { + if part.is_empty() { + continue + } + let path = PathBuf::from(t!(str::from_utf8(part))); + paths.push(path); + } + paths + } + + /// Copies a file from `src` to `dst` + pub fn copy(&self, src: &Path, dst: &Path) { + if self.config.dry_run { return; } + let _ = fs::remove_file(&dst); + let metadata = t!(src.symlink_metadata()); + if metadata.file_type().is_symlink() { + let link = t!(fs::read_link(src)); + t!(symlink_file(link, dst)); + } else if let Ok(()) = fs::hard_link(src, dst) { + // Attempt to "easy copy" by creating a hard link + // (symlinks don't work on windows), but if that fails + // just fall back to a slow `copy` operation. 
+ } else { + if let Err(e) = fs::copy(src, dst) { + panic!("failed to copy `{}` to `{}`: {}", src.display(), + dst.display(), e) + } + t!(fs::set_permissions(dst, metadata.permissions())); + let atime = FileTime::from_last_access_time(&metadata); + let mtime = FileTime::from_last_modification_time(&metadata); + t!(filetime::set_file_times(dst, atime, mtime)); + } + } + + /// Search-and-replaces within a file. (Not maximally efficiently: allocates a + /// new string for each replacement.) + pub fn replace_in_file(&self, path: &Path, replacements: &[(&str, &str)]) { + if self.config.dry_run { return; } + let mut contents = String::new(); + let mut file = t!(OpenOptions::new().read(true).write(true).open(path)); + t!(file.read_to_string(&mut contents)); + for &(target, replacement) in replacements { + contents = contents.replace(target, replacement); + } + t!(file.seek(SeekFrom::Start(0))); + t!(file.set_len(0)); + t!(file.write_all(contents.as_bytes())); + } + + /// Copies the `src` directory recursively to `dst`. Both are assumed to exist + /// when this function is called. + pub fn cp_r(&self, src: &Path, dst: &Path) { + if self.config.dry_run { return; } + for f in t!(fs::read_dir(src)) { + let f = t!(f); + let path = f.path(); + let name = path.file_name().unwrap(); + let dst = dst.join(name); + if t!(f.file_type()).is_dir() { + t!(fs::create_dir_all(&dst)); + self.cp_r(&path, &dst); + } else { + let _ = fs::remove_file(&dst); + self.copy(&path, &dst); + } + } + } + + /// Copies the `src` directory recursively to `dst`. Both are assumed to exist + /// when this function is called. Unwanted files or directories can be skipped + /// by returning `false` from the filter function. 
+ pub fn cp_filtered(&self, src: &Path, dst: &Path, filter: &dyn Fn(&Path) -> bool) { + // Immediately recurse with an empty relative path + self.recurse_(src, dst, Path::new(""), filter) + } + + // Inner function does the actual work + fn recurse_(&self, src: &Path, dst: &Path, relative: &Path, filter: &dyn Fn(&Path) -> bool) { + for f in self.read_dir(src) { + let path = f.path(); + let name = path.file_name().unwrap(); + let dst = dst.join(name); + let relative = relative.join(name); + // Only copy file or directory if the filter function returns true + if filter(&relative) { + if t!(f.file_type()).is_dir() { + let _ = fs::remove_dir_all(&dst); + self.create_dir(&dst); + self.recurse_(&path, &dst, &relative, filter); + } else { + let _ = fs::remove_file(&dst); + self.copy(&path, &dst); + } + } + } + } + + fn copy_to_folder(&self, src: &Path, dest_folder: &Path) { + let file_name = src.file_name().unwrap(); + let dest = dest_folder.join(file_name); + self.copy(src, &dest); + } + + fn install(&self, src: &Path, dstdir: &Path, perms: u32) { + if self.config.dry_run { return; } + let dst = dstdir.join(src.file_name().unwrap()); + t!(fs::create_dir_all(dstdir)); + drop(fs::remove_file(&dst)); + { + let mut s = t!(fs::File::open(&src)); + let mut d = t!(fs::File::create(&dst)); + io::copy(&mut s, &mut d).expect("failed to copy"); + } + chmod(&dst, perms); + } + + fn create(&self, path: &Path, s: &str) { + if self.config.dry_run { return; } + t!(fs::write(path, s)); + } + + fn read(&self, path: &Path) -> String { + if self.config.dry_run { return String::new(); } + t!(fs::read_to_string(path)) + } + + fn create_dir(&self, dir: &Path) { + if self.config.dry_run { return; } + t!(fs::create_dir_all(dir)) + } + + fn remove_dir(&self, dir: &Path) { + if self.config.dry_run { return; } + t!(fs::remove_dir_all(dir)) + } + + fn read_dir(&self, dir: &Path) -> impl Iterator { + let iter = match fs::read_dir(dir) { + Ok(v) => v, + Err(_) if self.config.dry_run => return 
vec![].into_iter(), + Err(err) => panic!("could not read dir {:?}: {:?}", dir, err), + }; + iter.map(|e| t!(e)).collect::>().into_iter() + } + + fn remove(&self, f: &Path) { + if self.config.dry_run { return; } + fs::remove_file(f).unwrap_or_else(|_| panic!("failed to remove {:?}", f)); + } } +#[cfg(unix)] +fn chmod(path: &Path, perms: u32) { + use std::os::unix::fs::*; + t!(fs::set_permissions(path, fs::Permissions::from_mode(perms))); +} +#[cfg(windows)] +fn chmod(_path: &Path, _perms: u32) {} + + impl<'a> Compiler { pub fn with_stage(mut self, stage: u32) -> Compiler { self.stage = stage; diff --git a/src/bootstrap/metadata.rs b/src/bootstrap/metadata.rs index 5f1df1d26e27..fa0b1983510b 100644 --- a/src/bootstrap/metadata.rs +++ b/src/bootstrap/metadata.rs @@ -11,6 +11,7 @@ use std::collections::HashMap; use std::process::Command; use std::path::PathBuf; +use std::collections::HashSet; use build_helper::output; use serde_json; @@ -45,45 +46,17 @@ struct ResolveNode { } pub fn build(build: &mut Build) { - build_krate(build, "src/libstd"); - build_krate(build, "src/libtest"); - build_krate(build, "src/rustc"); -} + let mut resolves = Vec::new(); + build_krate(&build.std_features(), build, &mut resolves, "src/libstd"); + build_krate("", build, &mut resolves, "src/libtest"); + build_krate(&build.rustc_features(), build, &mut resolves, "src/rustc"); -fn build_krate(build: &mut Build, krate: &str) { - // Run `cargo metadata` to figure out what crates we're testing. - // - // Down below we're going to call `cargo test`, but to test the right set - // of packages we're going to have to know what `-p` arguments to pass it - // to know what crates to test. Here we run `cargo metadata` to learn about - // the dependency graph and what `-p` arguments there are. 
- let mut cargo = Command::new(&build.initial_cargo); - cargo.arg("metadata") - .arg("--format-version").arg("1") - .arg("--manifest-path").arg(build.src.join(krate).join("Cargo.toml")); - let output = output(&mut cargo); - let output: Output = serde_json::from_str(&output).unwrap(); - let mut id2name = HashMap::new(); - for package in output.packages { - if package.source.is_none() { - let name = INTERNER.intern_string(package.name); - id2name.insert(package.id, name); - let mut path = PathBuf::from(package.manifest_path); - path.pop(); - build.crates.insert(name, Crate { - build_step: format!("build-crate-{}", name), - doc_step: format!("doc-crate-{}", name), - test_step: format!("test-crate-{}", name), - bench_step: format!("bench-crate-{}", name), - name, - version: package.version, - deps: Vec::new(), - path, - }); - } + let mut id2name = HashMap::with_capacity(build.crates.len()); + for (name, krate) in build.crates.iter() { + id2name.insert(krate.id.clone(), name.clone()); } - for node in output.resolve.nodes { + for node in resolves { let name = match id2name.get(&node.id) { Some(name) => name, None => continue, @@ -95,7 +68,42 @@ fn build_krate(build: &mut Build, krate: &str) { Some(dep) => dep, None => continue, }; - krate.deps.push(*dep); + krate.deps.insert(*dep); } } } + +fn build_krate(features: &str, build: &mut Build, resolves: &mut Vec, krate: &str) { + // Run `cargo metadata` to figure out what crates we're testing. + // + // Down below we're going to call `cargo test`, but to test the right set + // of packages we're going to have to know what `-p` arguments to pass it + // to know what crates to test. Here we run `cargo metadata` to learn about + // the dependency graph and what `-p` arguments there are. 
+ let mut cargo = Command::new(&build.initial_cargo); + cargo.arg("metadata") + .arg("--format-version").arg("1") + .arg("--features").arg(features) + .arg("--manifest-path").arg(build.src.join(krate).join("Cargo.toml")); + let output = output(&mut cargo); + let output: Output = serde_json::from_str(&output).unwrap(); + for package in output.packages { + if package.source.is_none() { + let name = INTERNER.intern_string(package.name); + let mut path = PathBuf::from(package.manifest_path); + path.pop(); + build.crates.insert(name, Crate { + build_step: format!("build-crate-{}", name), + doc_step: format!("doc-crate-{}", name), + test_step: format!("test-crate-{}", name), + bench_step: format!("bench-crate-{}", name), + name, + version: package.version, + id: package.id, + deps: HashSet::new(), + path, + }); + } + } + resolves.extend(output.resolve.nodes); +} diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in index 925a361f0b22..bcf2f6a675e0 100644 --- a/src/bootstrap/mk/Makefile.in +++ b/src/bootstrap/mk/Makefile.in @@ -16,6 +16,12 @@ Q := @ BOOTSTRAP_ARGS := endif +ifdef EXCLUDE_CARGO +AUX_ARGS := +else +AUX_ARGS := src/tools/cargo src/tools/cargotest +endif + BOOTSTRAP := $(CFG_PYTHON) $(CFG_SRC_DIR)src/bootstrap/bootstrap.py all: @@ -52,14 +58,13 @@ check: $(Q)$(BOOTSTRAP) test $(BOOTSTRAP_ARGS) check-aux: $(Q)$(BOOTSTRAP) test \ - src/tools/cargo \ - src/tools/cargotest \ src/test/pretty \ src/test/run-pass/pretty \ src/test/run-fail/pretty \ src/test/run-pass-valgrind/pretty \ src/test/run-pass-fulldeps/pretty \ src/test/run-fail-fulldeps/pretty \ + $(AUX_ARGS) \ $(BOOTSTRAP_ARGS) check-bootstrap: $(Q)$(CFG_PYTHON) $(CFG_SRC_DIR)src/bootstrap/bootstrap_test.py @@ -80,5 +85,12 @@ check-stage2-T-arm-linux-androideabi-H-x86_64-unknown-linux-gnu: check-stage2-T-x86_64-unknown-linux-musl-H-x86_64-unknown-linux-gnu: $(Q)$(BOOTSTRAP) test --target x86_64-unknown-linux-musl +TESTS_IN_2 := src/test/run-pass src/test/compile-fail 
src/test/run-pass-fulldeps + +appveyor-subset-1: + $(Q)$(BOOTSTRAP) test $(TESTS_IN_2:%=--exclude %) +appveyor-subset-2: + $(Q)$(BOOTSTRAP) test $(TESTS_IN_2) + .PHONY: dist diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index ba8cf3a8e2eb..c99347aa94e6 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -22,15 +22,14 @@ use std::env; use std::ffi::OsString; use std::fs::{self, File}; use std::io::{Read, Write}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process::Command; use build_helper::output; use cmake; use cc; -use Build; -use util; +use util::{self, exe}; use build_helper::up_to_date; use builder::{Builder, RunConfig, ShouldRun, Step}; use cache::Interned; @@ -38,44 +37,60 @@ use cache::Interned; #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Llvm { pub target: Interned, + pub emscripten: bool, } impl Step for Llvm { - type Output = (); + type Output = PathBuf; // path to llvm-config + const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/llvm") + run.path("src/llvm").path("src/llvm-emscripten") } fn make_run(run: RunConfig) { - run.builder.ensure(Llvm { target: run.target }) + let emscripten = run.path.ends_with("llvm-emscripten"); + run.builder.ensure(Llvm { + target: run.target, + emscripten, + }); } /// Compile LLVM for `target`. - fn run(self, builder: &Builder) { - let build = builder.build; + fn run(self, builder: &Builder) -> PathBuf { let target = self.target; - - // If we're not compiling for LLVM bail out here. - if !build.config.llvm_enabled { - return; - } + let emscripten = self.emscripten; // If we're using a custom LLVM bail out here, but we can only use a // custom LLVM for the build triple. 
- if let Some(config) = build.config.target_config.get(&target) { - if let Some(ref s) = config.llvm_config { - return check_llvm_version(build, s); + if !self.emscripten { + if let Some(config) = builder.config.target_config.get(&target) { + if let Some(ref s) = config.llvm_config { + check_llvm_version(builder, s); + return s.to_path_buf() + } } } - let rebuild_trigger = build.src.join("src/rustllvm/llvm-rebuild-trigger"); + let rebuild_trigger = builder.src.join("src/rustllvm/llvm-rebuild-trigger"); let mut rebuild_trigger_contents = String::new(); t!(t!(File::open(&rebuild_trigger)).read_to_string(&mut rebuild_trigger_contents)); - let out_dir = build.llvm_out(target); + let (out_dir, llvm_config_ret_dir) = if emscripten { + let dir = builder.emscripten_llvm_out(target); + let config_dir = dir.join("bin"); + (dir, config_dir) + } else { + let mut dir = builder.llvm_out(builder.config.build); + if !builder.config.build.contains("msvc") || builder.config.ninja { + dir.push("build"); + } + (builder.llvm_out(target), dir.join("bin")) + }; let done_stamp = out_dir.join("llvm-finished-building"); + let build_llvm_config = llvm_config_ret_dir + .join(exe("llvm-config", &*builder.config.build)); if done_stamp.exists() { let mut done_contents = String::new(); t!(t!(File::open(&done_stamp)).read_to_string(&mut done_contents)); @@ -83,40 +98,46 @@ impl Step for Llvm { // If LLVM was already built previously and contents of the rebuild-trigger file // didn't change from the previous build, then no action is required. 
if done_contents == rebuild_trigger_contents { - return + return build_llvm_config } } - let _folder = build.fold_output(|| "llvm"); - println!("Building LLVM for {}", target); - let _time = util::timeit(); + let _folder = builder.fold_output(|| "llvm"); + let descriptor = if emscripten { "Emscripten " } else { "" }; + builder.info(&format!("Building {}LLVM for {}", descriptor, target)); + let _time = util::timeit(&builder); t!(fs::create_dir_all(&out_dir)); // http://llvm.org/docs/CMake.html - let mut cfg = cmake::Config::new(build.src.join("src/llvm")); - if build.config.ninja { - cfg.generator("Ninja"); - } + let root = if self.emscripten { "src/llvm-emscripten" } else { "src/llvm" }; + let mut cfg = cmake::Config::new(builder.src.join(root)); - let profile = match (build.config.llvm_optimize, build.config.llvm_release_debuginfo) { + let profile = match (builder.config.llvm_optimize, builder.config.llvm_release_debuginfo) { (false, _) => "Debug", (true, false) => "Release", (true, true) => "RelWithDebInfo", }; - // NOTE: remember to also update `config.toml.example` when changing the defaults! - let llvm_targets = match build.config.llvm_targets { - Some(ref s) => s, - None => "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430;Sparc;NVPTX;Hexagon", + // NOTE: remember to also update `config.toml.example` when changing the + // defaults! + let llvm_targets = if self.emscripten { + "JSBackend" + } else { + match builder.config.llvm_targets { + Some(ref s) => s, + None => "X86;ARM;AArch64;Mips;PowerPC;SystemZ;MSP430;Sparc;NVPTX;Hexagon", + } }; - let llvm_exp_targets = &build.config.llvm_experimental_targets; + let llvm_exp_targets = if self.emscripten { + "" + } else { + &builder.config.llvm_experimental_targets[..] 
+ }; - let assertions = if build.config.llvm_assertions {"ON"} else {"OFF"}; + let assertions = if builder.config.llvm_assertions {"ON"} else {"OFF"}; - cfg.target(&target) - .host(&build.build) - .out_dir(&out_dir) + cfg.out_dir(&out_dir) .profile(profile) .define("LLVM_ENABLE_ASSERTIONS", assertions) .define("LLVM_TARGETS_TO_BUILD", llvm_targets) @@ -128,17 +149,42 @@ impl Step for Llvm { .define("WITH_POLLY", "OFF") .define("LLVM_ENABLE_TERMINFO", "OFF") .define("LLVM_ENABLE_LIBEDIT", "OFF") - .define("LLVM_PARALLEL_COMPILE_JOBS", build.jobs().to_string()) + .define("LLVM_PARALLEL_COMPILE_JOBS", builder.jobs().to_string()) .define("LLVM_TARGET_ARCH", target.split('-').next().unwrap()) .define("LLVM_DEFAULT_TARGET_TRIPLE", target); + // By default, LLVM will automatically find OCaml and, if it finds it, + // install the LLVM bindings in LLVM_OCAML_INSTALL_PATH, which defaults + // to /usr/bin/ocaml. + // This causes problem for non-root builds of Rust. Side-step the issue + // by setting LLVM_OCAML_INSTALL_PATH to a relative path, so it installs + // in the prefix. + cfg.define("LLVM_OCAML_INSTALL_PATH", + env::var_os("LLVM_OCAML_INSTALL_PATH").unwrap_or_else(|| "usr/lib/ocaml".into())); + + let want_lldb = builder.config.lldb_enabled && !self.emscripten; // This setting makes the LLVM tools link to the dynamic LLVM library, // which saves both memory during parallel links and overall disk space // for the tools. We don't distribute any of those tools, so this is // just a local concern. However, it doesn't work well everywhere. 
- if target.contains("linux-gnu") || target.contains("apple-darwin") { - cfg.define("LLVM_LINK_LLVM_DYLIB", "ON"); + // + // If we are shipping llvm tools then we statically link them LLVM + if (target.contains("linux-gnu") || target.contains("apple-darwin")) && + !builder.config.llvm_tools_enabled && + !want_lldb { + cfg.define("LLVM_LINK_LLVM_DYLIB", "ON"); + } + + // For distribution we want the LLVM tools to be *statically* linked to libstdc++ + if builder.config.llvm_tools_enabled || want_lldb { + if !target.contains("windows") { + if target.contains("apple") { + cfg.define("CMAKE_EXE_LINKER_FLAGS", "-static-libstdc++"); + } else { + cfg.define("CMAKE_EXE_LINKER_FLAGS", "-Wl,-Bsymbolic -static-libstdc++"); + } + } } if target.contains("msvc") { @@ -152,19 +198,33 @@ impl Step for Llvm { cfg.define("LLVM_BUILD_32_BITS", "ON"); } - if let Some(num_linkers) = build.config.llvm_link_jobs { + if want_lldb { + cfg.define("LLVM_EXTERNAL_CLANG_SOURCE_DIR", builder.src.join("src/tools/clang")); + cfg.define("LLVM_EXTERNAL_LLDB_SOURCE_DIR", builder.src.join("src/tools/lldb")); + // For the time being, disable code signing. + cfg.define("LLDB_CODESIGN_IDENTITY", ""); + } else { + // LLDB requires libxml2; but otherwise we want it to be disabled. + // See https://github.com/rust-lang/rust/pull/50104 + cfg.define("LLVM_ENABLE_LIBXML2", "OFF"); + } + + if let Some(num_linkers) = builder.config.llvm_link_jobs { if num_linkers > 0 { cfg.define("LLVM_PARALLEL_LINK_JOBS", num_linkers.to_string()); } } // http://llvm.org/docs/HowToCrossCompileLLVM.html - if target != build.build { - builder.ensure(Llvm { target: build.build }); + if target != builder.config.build && !emscripten { + builder.ensure(Llvm { + target: builder.config.build, + emscripten: false, + }); // FIXME: if the llvm root for the build triple is overridden then we // should use llvm-tblgen from there, also should verify that it // actually exists most of the time in normal installs of LLVM. 
- let host = build.llvm_out(build.build).join("bin/llvm-tblgen"); + let host = builder.llvm_out(builder.config.build).join("bin/llvm-tblgen"); cfg.define("CMAKE_CROSSCOMPILING", "True") .define("LLVM_TABLEGEN", &host); @@ -174,96 +234,227 @@ impl Step for Llvm { cfg.define("CMAKE_SYSTEM_NAME", "FreeBSD"); } - cfg.define("LLVM_NATIVE_BUILD", build.llvm_out(build.build).join("build")); + cfg.define("LLVM_NATIVE_BUILD", builder.llvm_out(builder.config.build).join("build")); } - let sanitize_cc = |cc: &Path| { - if target.contains("msvc") { - OsString::from(cc.to_str().unwrap().replace("\\", "/")) - } else { - cc.as_os_str().to_owned() - } - }; - - let configure_compilers = |cfg: &mut cmake::Config| { - // MSVC with CMake uses msbuild by default which doesn't respect these - // vars that we'd otherwise configure. In that case we just skip this - // entirely. - if target.contains("msvc") && !build.config.ninja { - return - } - - let cc = build.cc(target); - let cxx = build.cxx(target).unwrap(); - - // Handle msvc + ninja + ccache specially (this is what the bots use) - if target.contains("msvc") && - build.config.ninja && - build.config.ccache.is_some() { - let mut cc = env::current_exe().expect("failed to get cwd"); - cc.set_file_name("sccache-plus-cl.exe"); - - cfg.define("CMAKE_C_COMPILER", sanitize_cc(&cc)) - .define("CMAKE_CXX_COMPILER", sanitize_cc(&cc)); - cfg.env("SCCACHE_PATH", - build.config.ccache.as_ref().unwrap()) - .env("SCCACHE_TARGET", target); - - // If ccache is configured we inform the build a little differently hwo - // to invoke ccache while also invoking our compilers. 
- } else if let Some(ref ccache) = build.config.ccache { - cfg.define("CMAKE_C_COMPILER", ccache) - .define("CMAKE_C_COMPILER_ARG1", sanitize_cc(cc)) - .define("CMAKE_CXX_COMPILER", ccache) - .define("CMAKE_CXX_COMPILER_ARG1", sanitize_cc(cxx)); - } else { - cfg.define("CMAKE_C_COMPILER", sanitize_cc(cc)) - .define("CMAKE_CXX_COMPILER", sanitize_cc(cxx)); - } - - cfg.build_arg("-j").build_arg(build.jobs().to_string()); - cfg.define("CMAKE_C_FLAGS", build.cflags(target).join(" ")); - cfg.define("CMAKE_CXX_FLAGS", build.cflags(target).join(" ")); - if let Some(ar) = build.ar(target) { - if ar.is_absolute() { - // LLVM build breaks if `CMAKE_AR` is a relative path, for some reason it - // tries to resolve this path in the LLVM build directory. - cfg.define("CMAKE_AR", sanitize_cc(ar)); - } - } - }; - - configure_compilers(&mut cfg); - - if env::var_os("SCCACHE_ERROR_LOG").is_some() { - cfg.env("RUST_LOG", "sccache=warn"); - } + configure_cmake(builder, target, &mut cfg, false); // FIXME: we don't actually need to build all LLVM tools and all LLVM // libraries here, e.g. we just want a few components and a few // tools. Figure out how to filter them down and only build the right // tools and libs on all platforms. 
+ + if builder.config.dry_run { + return build_llvm_config; + } + cfg.build(); t!(t!(File::create(&done_stamp)).write_all(rebuild_trigger_contents.as_bytes())); + + build_llvm_config } } -fn check_llvm_version(build: &Build, llvm_config: &Path) { - if !build.config.llvm_version_check { +fn check_llvm_version(builder: &Builder, llvm_config: &Path) { + if !builder.config.llvm_version_check { return } + if builder.config.dry_run { + return; + } + let mut cmd = Command::new(llvm_config); let version = output(cmd.arg("--version")); let mut parts = version.split('.').take(2) .filter_map(|s| s.parse::().ok()); - if let (Some(major), Some(minor)) = (parts.next(), parts.next()) { - if major > 3 || (major == 3 && minor >= 9) { + if let (Some(major), Some(_minor)) = (parts.next(), parts.next()) { + if major >= 5 { return } } - panic!("\n\nbad LLVM version: {}, need >=3.9\n\n", version) + panic!("\n\nbad LLVM version: {}, need >=5.0\n\n", version) +} + +fn configure_cmake(builder: &Builder, + target: Interned, + cfg: &mut cmake::Config, + building_dist_binaries: bool) { + if builder.config.ninja { + cfg.generator("Ninja"); + } + cfg.target(&target) + .host(&builder.config.build); + + let sanitize_cc = |cc: &Path| { + if target.contains("msvc") { + OsString::from(cc.to_str().unwrap().replace("\\", "/")) + } else { + cc.as_os_str().to_owned() + } + }; + + // MSVC with CMake uses msbuild by default which doesn't respect these + // vars that we'd otherwise configure. In that case we just skip this + // entirely. 
+ if target.contains("msvc") && !builder.config.ninja { + return + } + + let (cc, cxx) = match builder.config.llvm_clang_cl { + Some(ref cl) => (cl.as_ref(), cl.as_ref()), + None => (builder.cc(target), builder.cxx(target).unwrap()), + }; + + // Handle msvc + ninja + ccache specially (this is what the bots use) + if target.contains("msvc") && + builder.config.ninja && + builder.config.ccache.is_some() + { + let mut wrap_cc = env::current_exe().expect("failed to get cwd"); + wrap_cc.set_file_name("sccache-plus-cl.exe"); + + cfg.define("CMAKE_C_COMPILER", sanitize_cc(&wrap_cc)) + .define("CMAKE_CXX_COMPILER", sanitize_cc(&wrap_cc)); + cfg.env("SCCACHE_PATH", + builder.config.ccache.as_ref().unwrap()) + .env("SCCACHE_TARGET", target) + .env("SCCACHE_CC", &cc) + .env("SCCACHE_CXX", &cxx); + + // Building LLVM on MSVC can be a little ludicrous at times. We're so far + // off the beaten path here that I'm not really sure this is even half + // supported any more. Here we're trying to: + // + // * Build LLVM on MSVC + // * Build LLVM with `clang-cl` instead of `cl.exe` + // * Build a project with `sccache` + // * Build for 32-bit as well + // * Build with Ninja + // + // For `cl.exe` there are different binaries to compile 32/64 bit which + // we use but for `clang-cl` there's only one which internally + // multiplexes via flags. As a result it appears that CMake's detection + // of a compiler's architecture and such on MSVC **doesn't** pass any + // custom flags we pass in CMAKE_CXX_FLAGS below. This means that if we + // use `clang-cl.exe` it's always diagnosed as a 64-bit compiler which + // definitely causes problems since all the env vars are pointing to + // 32-bit libraries. + // + // To hack around this... again... we pass an argument that's + // unconditionally passed in the sccache shim. This'll get CMake to + // correctly diagnose it's doing a 32-bit compilation and LLVM will + // internally configure itself appropriately. 
+ if builder.config.llvm_clang_cl.is_some() && target.contains("i686") { + cfg.env("SCCACHE_EXTRA_ARGS", "-m32"); + } + + // If ccache is configured we inform the build a little differently how + // to invoke ccache while also invoking our compilers. + } else if let Some(ref ccache) = builder.config.ccache { + cfg.define("CMAKE_C_COMPILER", ccache) + .define("CMAKE_C_COMPILER_ARG1", sanitize_cc(cc)) + .define("CMAKE_CXX_COMPILER", ccache) + .define("CMAKE_CXX_COMPILER_ARG1", sanitize_cc(cxx)); + } else { + cfg.define("CMAKE_C_COMPILER", sanitize_cc(cc)) + .define("CMAKE_CXX_COMPILER", sanitize_cc(cxx)); + } + + cfg.build_arg("-j").build_arg(builder.jobs().to_string()); + cfg.define("CMAKE_C_FLAGS", builder.cflags(target).join(" ")); + let mut cxxflags = builder.cflags(target).join(" "); + if building_dist_binaries { + if builder.config.llvm_static_stdcpp && !target.contains("windows") { + cxxflags.push_str(" -static-libstdc++"); + } + } + cfg.define("CMAKE_CXX_FLAGS", cxxflags); + if let Some(ar) = builder.ar(target) { + if ar.is_absolute() { + // LLVM build breaks if `CMAKE_AR` is a relative path, for some reason it + // tries to resolve this path in the LLVM build directory. + cfg.define("CMAKE_AR", sanitize_cc(ar)); + } + } + + if env::var_os("SCCACHE_ERROR_LOG").is_some() { + cfg.env("RUST_LOG", "sccache=warn"); + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Lld { + pub target: Interned, +} + +impl Step for Lld { + type Output = PathBuf; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/lld") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Lld { target: run.target }); + } + + /// Compile LLVM for `target`. 
+ fn run(self, builder: &Builder) -> PathBuf { + if builder.config.dry_run { + return PathBuf::from("lld-out-dir-test-gen"); + } + let target = self.target; + + let llvm_config = builder.ensure(Llvm { + target: self.target, + emscripten: false, + }); + + let out_dir = builder.lld_out(target); + let done_stamp = out_dir.join("lld-finished-building"); + if done_stamp.exists() { + return out_dir + } + + let _folder = builder.fold_output(|| "lld"); + builder.info(&format!("Building LLD for {}", target)); + let _time = util::timeit(&builder); + t!(fs::create_dir_all(&out_dir)); + + let mut cfg = cmake::Config::new(builder.src.join("src/tools/lld")); + configure_cmake(builder, target, &mut cfg, true); + + // This is an awful, awful hack. Discovered when we migrated to using + // clang-cl to compile LLVM/LLD it turns out that LLD, when built out of + // tree, will execute `llvm-config --cmakedir` and then tell CMake about + // that directory for later processing. Unfortunately if this path has + // forward slashes in it (which it basically always does on Windows) + // then CMake will hit a syntax error later on as... something isn't + // escaped it seems? + // + // Instead of attempting to fix this problem in upstream CMake and/or + // LLVM/LLD we just hack around it here. This thin wrapper will take the + // output from llvm-config and replace all instances of `\` with `/` to + // ensure we don't hit the same bugs with escaping. It means that you + // can't build on a system where your paths require `\` on Windows, but + // there's probably a lot of reasons you can't do that other than this. 
+ let llvm_config_shim = env::current_exe() + .unwrap() + .with_file_name("llvm-config-wrapper"); + cfg.out_dir(&out_dir) + .profile("Release") + .env("LLVM_CONFIG_REAL", llvm_config) + .define("LLVM_CONFIG_PATH", llvm_config_shim) + .define("LLVM_INCLUDE_TESTS", "OFF"); + + cfg.build(); + + t!(File::create(&done_stamp)); + out_dir + } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] @@ -275,7 +466,7 @@ impl Step for TestHelpers { type Output = (); fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/rt/rust_test_helpers.c") + run.path("src/test/auxiliary/rust_test_helpers.c") } fn make_run(run: RunConfig) { @@ -285,16 +476,18 @@ impl Step for TestHelpers { /// Compiles the `rust_test_helpers.c` library which we used in various /// `run-pass` test suites for ABI testing. fn run(self, builder: &Builder) { - let build = builder.build; + if builder.config.dry_run { + return; + } let target = self.target; - let dst = build.test_helpers_out(target); - let src = build.src.join("src/rt/rust_test_helpers.c"); + let dst = builder.test_helpers_out(target); + let src = builder.src.join("src/test/auxiliary/rust_test_helpers.c"); if up_to_date(&src, &dst.join("librust_test_helpers.a")) { return } - let _folder = build.fold_output(|| "build_test_helpers"); - println!("Building test helpers"); + let _folder = builder.fold_output(|| "build_test_helpers"); + builder.info("Building test helpers"); t!(fs::create_dir_all(&dst)); let mut cfg = cc::Build::new(); @@ -302,27 +495,27 @@ impl Step for TestHelpers { // extra configuration, so inform gcc of these compilers. Note, though, that // on MSVC we still need gcc's detection of env vars (ugh). 
if !target.contains("msvc") { - if let Some(ar) = build.ar(target) { + if let Some(ar) = builder.ar(target) { cfg.archiver(ar); } - cfg.compiler(build.cc(target)); + cfg.compiler(builder.cc(target)); } cfg.cargo_metadata(false) .out_dir(&dst) .target(&target) - .host(&build.build) + .host(&builder.config.build) .opt_level(0) .warnings(false) .debug(false) - .file(build.src.join("src/rt/rust_test_helpers.c")) + .file(builder.src.join("src/test/auxiliary/rust_test_helpers.c")) .compile("rust_test_helpers"); } } -const OPENSSL_VERS: &'static str = "1.0.2m"; +const OPENSSL_VERS: &'static str = "1.0.2n"; const OPENSSL_SHA256: &'static str = - "8c6ff15ec6b319b50788f42c7abc2890c08ba5a1cdcd3810eb9092deada37b0f"; + "370babb75f278c39e0c50e8c4e7493bc0f18db6867478341a832a982fd15a8fe"; #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Openssl { @@ -337,9 +530,11 @@ impl Step for Openssl { } fn run(self, builder: &Builder) { - let build = builder.build; + if builder.config.dry_run { + return; + } let target = self.target; - let out = match build.openssl_dir(target) { + let out = match builder.openssl_dir(target) { Some(dir) => dir, None => return, }; @@ -375,7 +570,8 @@ impl Step for Openssl { } // Ensure the hash is correct. 
- let mut shasum = if target.contains("apple") || build.build.contains("netbsd") { + let mut shasum = if target.contains("apple") || + builder.config.build.contains("netbsd") { let mut cmd = Command::new("shasum"); cmd.arg("-a").arg("256"); cmd @@ -408,10 +604,10 @@ impl Step for Openssl { t!(fs::rename(&tmp, &tarball)); } let obj = out.join(format!("openssl-{}", OPENSSL_VERS)); - let dst = build.openssl_install_dir(target).unwrap(); + let dst = builder.openssl_install_dir(target).unwrap(); drop(fs::remove_dir_all(&obj)); drop(fs::remove_dir_all(&dst)); - build.run(Command::new("tar").arg("zxf").arg(&tarball).current_dir(&out)); + builder.run(Command::new("tar").arg("zxf").arg(&tarball).current_dir(&out)); let mut configure = Command::new("perl"); configure.arg(obj.join("Configure")); @@ -424,11 +620,14 @@ impl Step for Openssl { "aarch64-linux-android" => "linux-aarch64", "aarch64-unknown-linux-gnu" => "linux-aarch64", "aarch64-unknown-linux-musl" => "linux-aarch64", + "aarch64-unknown-netbsd" => "BSD-generic64", "arm-linux-androideabi" => "android", "arm-unknown-linux-gnueabi" => "linux-armv4", "arm-unknown-linux-gnueabihf" => "linux-armv4", + "armv6-unknown-netbsd-eabihf" => "BSD-generic32", "armv7-linux-androideabi" => "android-armv7", "armv7-unknown-linux-gnueabihf" => "linux-armv4", + "armv7-unknown-netbsd-eabihf" => "BSD-generic32", "i586-unknown-linux-gnu" => "linux-elf", "i586-unknown-linux-musl" => "linux-elf", "i686-apple-darwin" => "darwin-i386-cc", @@ -442,9 +641,13 @@ impl Step for Openssl { "mips64el-unknown-linux-gnuabi64" => "linux64-mips64", "mipsel-unknown-linux-gnu" => "linux-mips32", "powerpc-unknown-linux-gnu" => "linux-ppc", + "powerpc-unknown-linux-gnuspe" => "linux-ppc", + "powerpc-unknown-netbsd" => "BSD-generic32", "powerpc64-unknown-linux-gnu" => "linux-ppc64", "powerpc64le-unknown-linux-gnu" => "linux-ppc64le", + "powerpc64le-unknown-linux-musl" => "linux-ppc64le", "s390x-unknown-linux-gnu" => "linux64-s390x", + 
"sparc-unknown-linux-gnu" => "linux-sparcv9", "sparc64-unknown-linux-gnu" => "linux64-sparcv9", "sparc64-unknown-netbsd" => "BSD-sparc64", "x86_64-apple-darwin" => "darwin64-x86_64-cc", @@ -452,13 +655,14 @@ impl Step for Openssl { "x86_64-unknown-freebsd" => "BSD-x86_64", "x86_64-unknown-dragonfly" => "BSD-x86_64", "x86_64-unknown-linux-gnu" => "linux-x86_64", + "x86_64-unknown-linux-gnux32" => "linux-x32", "x86_64-unknown-linux-musl" => "linux-x86_64", "x86_64-unknown-netbsd" => "BSD-x86_64", _ => panic!("don't know how to configure OpenSSL for {}", target), }; configure.arg(os); - configure.env("CC", build.cc(target)); - for flag in build.cflags(target) { + configure.env("CC", builder.cc(target)); + for flag in builder.cflags(target) { configure.arg(flag); } // There is no specific os target for android aarch64 or x86_64, @@ -470,7 +674,7 @@ impl Step for Openssl { if target == "sparc64-unknown-netbsd" { // Need -m64 to get assembly generated correctly for sparc64. configure.arg("-m64"); - if build.build.contains("netbsd") { + if builder.config.build.contains("netbsd") { // Disable sparc64 asm on NetBSD builders, it uses // m4(1)'s -B flag, which NetBSD m4 does not support. 
configure.arg("no-asm"); @@ -483,12 +687,12 @@ impl Step for Openssl { configure.arg("no-asm"); } configure.current_dir(&obj); - println!("Configuring openssl for {}", target); - build.run_quiet(&mut configure); - println!("Building openssl for {}", target); - build.run_quiet(Command::new("make").arg("-j1").current_dir(&obj)); - println!("Installing openssl for {}", target); - build.run_quiet(Command::new("make").arg("install").current_dir(&obj)); + builder.info(&format!("Configuring openssl for {}", target)); + builder.run_quiet(&mut configure); + builder.info(&format!("Building openssl for {}", target)); + builder.run_quiet(Command::new("make").arg("-j1").current_dir(&obj)); + builder.info(&format!("Installing openssl for {}", target)); + builder.run_quiet(Command::new("make").arg("install").arg("-j1").current_dir(&obj)); let mut f = t!(File::create(&stamp)); t!(f.write_all(OPENSSL_VERS.as_bytes())); diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index a8b43ad3c308..c2610de23beb 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -140,14 +140,18 @@ pub fn check(build: &mut Build) { continue; } - cmd_finder.must_have(build.cc(*target)); - if let Some(ar) = build.ar(*target) { - cmd_finder.must_have(ar); + if !build.config.dry_run { + cmd_finder.must_have(build.cc(*target)); + if let Some(ar) = build.ar(*target) { + cmd_finder.must_have(ar); + } } } for host in &build.hosts { - cmd_finder.must_have(build.cxx(*host).unwrap()); + if !build.config.dry_run { + cmd_finder.must_have(build.cxx(*host).unwrap()); + } // The msvc hosts don't use jemalloc, turn it off globally to // avoid packaging the dummy liballoc_jemalloc on that platform. 
@@ -169,13 +173,26 @@ pub fn check(build: &mut Build) { panic!("the iOS target is only supported on macOS"); } + if target.contains("-none-") { + if build.no_std(*target).is_none() { + let target = build.config.target_config.entry(target.clone()) + .or_default(); + + target.no_std = true; + } + + if build.no_std(*target) == Some(false) { + panic!("All the *-none-* targets are no-std targets") + } + } + // Make sure musl-root is valid - if target.contains("musl") && !target.contains("mips") { + if target.contains("musl") { // If this is a native target (host is also musl) and no musl-root is given, // fall back to the system toolchain in /usr before giving up if build.musl_root(*target).is_none() && build.config.build == *target { let target = build.config.target_config.entry(target.clone()) - .or_insert(Default::default()); + .or_default(); target.musl_root = Some("/usr".into()); } match build.musl_root(*target) { diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs new file mode 100644 index 000000000000..f762d9414cff --- /dev/null +++ b/src/bootstrap/test.rs @@ -0,0 +1,1967 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of the test-related targets of the build system. +//! +//! This file implements the various regression test suites that we execute on +//! our CI. 
+ +use std::env; +use std::ffi::OsString; +use std::fmt; +use std::fs::{self, File}; +use std::io::Read; +use std::iter; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use build_helper::{self, output}; + +use builder::{Builder, Compiler, Kind, RunConfig, ShouldRun, Step}; +use cache::{Interned, INTERNER}; +use compile; +use dist; +use flags::Subcommand; +use native; +use tool::{self, Tool, SourceType}; +use toolstate::ToolState; +use util::{self, dylib_path, dylib_path_var}; +use Crate as CargoCrate; +use {DocTests, Mode}; + +const ADB_TEST_DIR: &str = "/data/tmp/work"; + +/// The two modes of the test runner; tests or benchmarks. +#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)] +pub enum TestKind { + /// Run `cargo test` + Test, + /// Run `cargo bench` + Bench, +} + +impl From for TestKind { + fn from(kind: Kind) -> Self { + match kind { + Kind::Test => TestKind::Test, + Kind::Bench => TestKind::Bench, + _ => panic!("unexpected kind in crate: {:?}", kind), + } + } +} + +impl TestKind { + // Return the cargo subcommand for this test kind + fn subcommand(self) -> &'static str { + match self { + TestKind::Test => "test", + TestKind::Bench => "bench", + } + } +} + +impl fmt::Display for TestKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + TestKind::Test => "Testing", + TestKind::Bench => "Benchmarking", + }) + } +} + +fn try_run(builder: &Builder, cmd: &mut Command) -> bool { + if !builder.fail_fast { + if !builder.try_run(cmd) { + let mut failures = builder.delayed_failures.borrow_mut(); + failures.push(format!("{:?}", cmd)); + return false; + } + } else { + builder.run(cmd); + } + true +} + +fn try_run_quiet(builder: &Builder, cmd: &mut Command) -> bool { + if !builder.fail_fast { + if !builder.try_run_quiet(cmd) { + let mut failures = builder.delayed_failures.borrow_mut(); + failures.push(format!("{:?}", cmd)); + return false; + } + } else { + builder.run_quiet(cmd); + } + true +} + 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Linkcheck { + host: Interned, +} + +impl Step for Linkcheck { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; + + /// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler. + /// + /// This tool in `src/tools` will verify the validity of all our links in the + /// documentation to ensure we don't have a bunch of dead ones. + fn run(self, builder: &Builder) { + let host = self.host; + + builder.info(&format!("Linkcheck ({})", host)); + + builder.default_doc(None); + + let _time = util::timeit(&builder); + try_run( + builder, + builder + .tool_cmd(Tool::Linkchecker) + .arg(builder.out.join(host).join("doc")), + ); + } + + fn should_run(run: ShouldRun) -> ShouldRun { + let builder = run.builder; + run.path("src/tools/linkchecker") + .default_condition(builder.config.docs) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Linkcheck { host: run.target }); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Cargotest { + stage: u32, + host: Interned, +} + +impl Step for Cargotest { + type Output = (); + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/cargotest") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Cargotest { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler. + /// + /// This tool in `src/tools` will check out a few Rust projects and run `cargo + /// test` to ensure that we don't regress the test suites there. + fn run(self, builder: &Builder) { + let compiler = builder.compiler(self.stage, self.host); + builder.ensure(compile::Rustc { + compiler, + target: compiler.host, + }); + + // Note that this is a short, cryptic, and not scoped directory name. 
This + // is currently to minimize the length of path on Windows where we otherwise + // quickly run into path name limit constraints. + let out_dir = builder.out.join("ct"); + t!(fs::create_dir_all(&out_dir)); + + let _time = util::timeit(&builder); + let mut cmd = builder.tool_cmd(Tool::CargoTest); + try_run( + builder, + cmd.arg(&builder.initial_cargo) + .arg(&out_dir) + .env("RUSTC", builder.rustc(compiler)) + .env("RUSTDOC", builder.rustdoc(compiler.host)), + ); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Cargo { + stage: u32, + host: Interned, +} + +impl Step for Cargo { + type Output = (); + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/cargo") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Cargo { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs `cargo test` for `cargo` packaged with Rust. + fn run(self, builder: &Builder) { + let compiler = builder.compiler(self.stage, self.host); + + builder.ensure(tool::Cargo { + compiler, + target: self.host, + }); + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + self.host, + "test", + "src/tools/cargo", + SourceType::Submodule); + + if !builder.fail_fast { + cargo.arg("--no-fail-fast"); + } + + // Don't run cross-compile tests, we may not have cross-compiled libstd libs + // available. + cargo.env("CFG_DISABLE_CROSS_TESTS", "1"); + + try_run( + builder, + cargo.env("PATH", &path_for_cargo(builder, compiler)), + ); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Rls { + stage: u32, + host: Interned, +} + +impl Step for Rls { + type Output = (); + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/rls") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Rls { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs `cargo test` for the rls. 
+ fn run(self, builder: &Builder) { + let stage = self.stage; + let host = self.host; + let compiler = builder.compiler(stage, host); + + let build_result = builder.ensure(tool::Rls { + compiler, + target: self.host, + extra_features: Vec::new(), + }); + if build_result.is_none() { + eprintln!("failed to test rls: could not build"); + return; + } + + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + host, + "test", + "src/tools/rls", + SourceType::Submodule); + + // Copy `src/tools/rls/test_data` to a writable drive. + let test_workspace_path = builder.out.join("rls-test-data"); + let test_data_path = test_workspace_path.join("test_data"); + builder.create_dir(&test_data_path); + builder.cp_r(&builder.src.join("src/tools/rls/test_data"), &test_data_path); + cargo.env("RLS_TEST_WORKSPACE_DIR", test_workspace_path); + + builder.add_rustc_lib_path(compiler, &mut cargo); + cargo.arg("--") + .args(builder.config.cmd.test_args()); + + if try_run(builder, &mut cargo) { + builder.save_toolstate("rls", ToolState::TestPass); + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Rustfmt { + stage: u32, + host: Interned, +} + +impl Step for Rustfmt { + type Output = (); + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/rustfmt") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Rustfmt { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs `cargo test` for rustfmt. 
+ fn run(self, builder: &Builder) { + let stage = self.stage; + let host = self.host; + let compiler = builder.compiler(stage, host); + + let build_result = builder.ensure(tool::Rustfmt { + compiler, + target: self.host, + extra_features: Vec::new(), + }); + if build_result.is_none() { + eprintln!("failed to test rustfmt: could not build"); + return; + } + + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + host, + "test", + "src/tools/rustfmt", + SourceType::Submodule); + + let dir = testdir(builder, compiler.host); + t!(fs::create_dir_all(&dir)); + cargo.env("RUSTFMT_TEST_DIR", dir); + + builder.add_rustc_lib_path(compiler, &mut cargo); + + if try_run(builder, &mut cargo) { + builder.save_toolstate("rustfmt", ToolState::TestPass); + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Miri { + stage: u32, + host: Interned, +} + +impl Step for Miri { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + let test_miri = run.builder.config.test_miri; + run.path("src/tools/miri").default_condition(test_miri) + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Miri { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs `cargo test` for miri. 
+ fn run(self, builder: &Builder) { + let stage = self.stage; + let host = self.host; + let compiler = builder.compiler(stage, host); + + let miri = builder.ensure(tool::Miri { + compiler, + target: self.host, + extra_features: Vec::new(), + }); + if let Some(miri) = miri { + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + host, + "test", + "src/tools/miri", + SourceType::Submodule); + + // miri tests need to know about the stage sysroot + cargo.env("MIRI_SYSROOT", builder.sysroot(compiler)); + cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler)); + cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler)); + cargo.env("MIRI_PATH", miri); + + builder.add_rustc_lib_path(compiler, &mut cargo); + + if try_run(builder, &mut cargo) { + builder.save_toolstate("miri", ToolState::TestPass); + } + } else { + eprintln!("failed to test miri: could not build"); + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Clippy { + stage: u32, + host: Interned, +} + +impl Step for Clippy { + type Output = (); + const ONLY_HOSTS: bool = true; + const DEFAULT: bool = false; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/clippy") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Clippy { + stage: run.builder.top_stage, + host: run.target, + }); + } + + /// Runs `cargo test` for clippy. 
+ fn run(self, builder: &Builder) { + let stage = self.stage; + let host = self.host; + let compiler = builder.compiler(stage, host); + + let clippy = builder.ensure(tool::Clippy { + compiler, + target: self.host, + extra_features: Vec::new(), + }); + if let Some(clippy) = clippy { + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + host, + "test", + "src/tools/clippy", + SourceType::Submodule); + + // clippy tests need to know about the stage sysroot + cargo.env("SYSROOT", builder.sysroot(compiler)); + cargo.env("RUSTC_TEST_SUITE", builder.rustc(compiler)); + cargo.env("RUSTC_LIB_PATH", builder.rustc_libdir(compiler)); + let host_libs = builder + .stage_out(compiler, Mode::ToolRustc) + .join(builder.cargo_dir()); + cargo.env("HOST_LIBS", host_libs); + // clippy tests need to find the driver + cargo.env("CLIPPY_DRIVER_PATH", clippy); + + builder.add_rustc_lib_path(compiler, &mut cargo); + + if try_run(builder, &mut cargo) { + builder.save_toolstate("clippy-driver", ToolState::TestPass); + } + } else { + eprintln!("failed to test clippy: could not build"); + } + } +} + +fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString { + // Configure PATH to find the right rustc. NB. we have to use PATH + // and not RUSTC because the Cargo test suite has tests that will + // fail if rustc is not spelled `rustc`. 
+ let path = builder.sysroot(compiler).join("bin"); + let old_path = env::var_os("PATH").unwrap_or_default(); + env::join_paths(iter::once(path).chain(env::split_paths(&old_path))).expect("") +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct RustdocTheme { + pub compiler: Compiler, +} + +impl Step for RustdocTheme { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/rustdoc-themes") + } + + fn make_run(run: RunConfig) { + let compiler = run.builder.compiler(run.builder.top_stage, run.host); + + run.builder.ensure(RustdocTheme { compiler: compiler }); + } + + fn run(self, builder: &Builder) { + let rustdoc = builder.out.join("bootstrap/debug/rustdoc"); + let mut cmd = builder.tool_cmd(Tool::RustdocTheme); + cmd.arg(rustdoc.to_str().unwrap()) + .arg( + builder + .src + .join("src/librustdoc/html/static/themes") + .to_str() + .unwrap(), + ) + .env("RUSTC_STAGE", self.compiler.stage.to_string()) + .env("RUSTC_SYSROOT", builder.sysroot(self.compiler)) + .env( + "RUSTDOC_LIBDIR", + builder.sysroot_libdir(self.compiler, self.compiler.host), + ) + .env("CFG_RELEASE_CHANNEL", &builder.config.channel) + .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host)) + .env("RUSTDOC_CRATE_VERSION", builder.rust_version()) + .env("RUSTC_BOOTSTRAP", "1"); + if let Some(linker) = builder.linker(self.compiler.host) { + cmd.env("RUSTC_TARGET_LINKER", linker); + } + try_run(builder, &mut cmd); + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct RustdocJS { + pub host: Interned, + pub target: Interned, +} + +impl Step for RustdocJS { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/test/rustdoc-js") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(RustdocJS { + host: run.host, + target: run.target, + }); + } + + fn run(self, builder: 
&Builder) { + if let Some(ref nodejs) = builder.config.nodejs { + let mut command = Command::new(nodejs); + command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]); + builder.ensure(::doc::Std { + target: self.target, + stage: builder.top_stage, + }); + builder.run(&mut command); + } else { + builder.info(&format!( + "No nodejs found, skipping \"src/test/rustdoc-js\" tests" + )); + } + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct RustdocUi { + pub host: Interned, + pub target: Interned, + pub compiler: Compiler, +} + +impl Step for RustdocUi { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/test/rustdoc-ui") + } + + fn make_run(run: RunConfig) { + let compiler = run.builder.compiler(run.builder.top_stage, run.host); + run.builder.ensure(RustdocUi { + host: run.host, + target: run.target, + compiler, + }); + } + + fn run(self, builder: &Builder) { + builder.ensure(Compiletest { + compiler: self.compiler, + target: self.target, + mode: "ui", + suite: "rustdoc-ui", + path: None, + compare_mode: None, + }) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Tidy; + +impl Step for Tidy { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + /// Runs the `tidy` tool. + /// + /// This tool in `src/tools` checks up on various bits and pieces of style and + /// otherwise just implements a few lint-like checks that are specific to the + /// compiler itself. 
+ fn run(self, builder: &Builder) { + let mut cmd = builder.tool_cmd(Tool::Tidy); + cmd.arg(builder.src.join("src")); + cmd.arg(&builder.initial_cargo); + if !builder.config.vendor { + cmd.arg("--no-vendor"); + } + if !builder.config.verbose_tests { + cmd.arg("--quiet"); + } + + let _folder = builder.fold_output(|| "tidy"); + builder.info(&format!("tidy check")); + try_run(builder, &mut cmd); + } + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/tidy") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Tidy); + } +} + +fn testdir(builder: &Builder, host: Interned) -> PathBuf { + builder.out.join(host).join("test") +} + +macro_rules! default_test { + ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => { + test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: false }); + } +} + +macro_rules! default_test_with_compare_mode { + ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, + compare_mode: $compare_mode:expr }) => { + test_with_compare_mode!($name { path: $path, mode: $mode, suite: $suite, default: true, + host: false, compare_mode: $compare_mode }); + } +} + +macro_rules! host_test { + ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr }) => { + test!($name { path: $path, mode: $mode, suite: $suite, default: true, host: true }); + } +} + +macro_rules! test { + ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr, + host: $host:expr }) => { + test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default, + host: $host, compare_mode: None }); + } +} + +macro_rules! 
test_with_compare_mode { + ($name:ident { path: $path:expr, mode: $mode:expr, suite: $suite:expr, default: $default:expr, + host: $host:expr, compare_mode: $compare_mode:expr }) => { + test_definitions!($name { path: $path, mode: $mode, suite: $suite, default: $default, + host: $host, compare_mode: Some($compare_mode) }); + } +} + +macro_rules! test_definitions { + ($name:ident { + path: $path:expr, + mode: $mode:expr, + suite: $suite:expr, + default: $default:expr, + host: $host:expr, + compare_mode: $compare_mode:expr + }) => { + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct $name { + pub compiler: Compiler, + pub target: Interned, + } + + impl Step for $name { + type Output = (); + const DEFAULT: bool = $default; + const ONLY_HOSTS: bool = $host; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.suite_path($path) + } + + fn make_run(run: RunConfig) { + let compiler = run.builder.compiler(run.builder.top_stage, run.host); + + run.builder.ensure($name { + compiler, + target: run.target, + }); + } + + fn run(self, builder: &Builder) { + builder.ensure(Compiletest { + compiler: self.compiler, + target: self.target, + mode: $mode, + suite: $suite, + path: Some($path), + compare_mode: $compare_mode, + }) + } + } + } +} + +default_test_with_compare_mode!(Ui { + path: "src/test/ui", + mode: "ui", + suite: "ui", + compare_mode: "nll" +}); + +default_test!(RunPass { + path: "src/test/run-pass", + mode: "run-pass", + suite: "run-pass" +}); + +default_test!(CompileFail { + path: "src/test/compile-fail", + mode: "compile-fail", + suite: "compile-fail" +}); + +default_test!(ParseFail { + path: "src/test/parse-fail", + mode: "parse-fail", + suite: "parse-fail" +}); + +default_test!(RunFail { + path: "src/test/run-fail", + mode: "run-fail", + suite: "run-fail" +}); + +default_test!(RunPassValgrind { + path: "src/test/run-pass-valgrind", + mode: "run-pass-valgrind", + suite: "run-pass-valgrind" +}); + +default_test!(MirOpt { + path: "src/test/mir-opt", + 
mode: "mir-opt", + suite: "mir-opt" +}); + +default_test!(Codegen { + path: "src/test/codegen", + mode: "codegen", + suite: "codegen" +}); + +default_test!(CodegenUnits { + path: "src/test/codegen-units", + mode: "codegen-units", + suite: "codegen-units" +}); + +default_test!(Incremental { + path: "src/test/incremental", + mode: "incremental", + suite: "incremental" +}); + +default_test!(Debuginfo { + path: "src/test/debuginfo", + // What this runs varies depending on the native platform being apple + mode: "debuginfo-XXX", + suite: "debuginfo" +}); + +host_test!(UiFullDeps { + path: "src/test/ui-fulldeps", + mode: "ui", + suite: "ui-fulldeps" +}); + +host_test!(RunPassFullDeps { + path: "src/test/run-pass-fulldeps", + mode: "run-pass", + suite: "run-pass-fulldeps" +}); + +host_test!(RunFailFullDeps { + path: "src/test/run-fail-fulldeps", + mode: "run-fail", + suite: "run-fail-fulldeps" +}); + +host_test!(CompileFailFullDeps { + path: "src/test/compile-fail-fulldeps", + mode: "compile-fail", + suite: "compile-fail-fulldeps" +}); + +host_test!(IncrementalFullDeps { + path: "src/test/incremental-fulldeps", + mode: "incremental", + suite: "incremental-fulldeps" +}); + +host_test!(Rustdoc { + path: "src/test/rustdoc", + mode: "rustdoc", + suite: "rustdoc" +}); + +test!(Pretty { + path: "src/test/pretty", + mode: "pretty", + suite: "pretty", + default: false, + host: true +}); +test!(RunPassPretty { + path: "src/test/run-pass/pretty", + mode: "pretty", + suite: "run-pass", + default: false, + host: true +}); +test!(RunFailPretty { + path: "src/test/run-fail/pretty", + mode: "pretty", + suite: "run-fail", + default: false, + host: true +}); +test!(RunPassValgrindPretty { + path: "src/test/run-pass-valgrind/pretty", + mode: "pretty", + suite: "run-pass-valgrind", + default: false, + host: true +}); +test!(RunPassFullDepsPretty { + path: "src/test/run-pass-fulldeps/pretty", + mode: "pretty", + suite: "run-pass-fulldeps", + default: false, + host: true +}); 
+test!(RunFailFullDepsPretty { + path: "src/test/run-fail-fulldeps/pretty", + mode: "pretty", + suite: "run-fail-fulldeps", + default: false, + host: true +}); + +default_test!(RunMake { + path: "src/test/run-make", + mode: "run-make", + suite: "run-make" +}); + +host_test!(RunMakeFullDeps { + path: "src/test/run-make-fulldeps", + mode: "run-make", + suite: "run-make-fulldeps" +}); + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +struct Compiletest { + compiler: Compiler, + target: Interned, + mode: &'static str, + suite: &'static str, + path: Option<&'static str>, + compare_mode: Option<&'static str>, +} + +impl Step for Compiletest { + type Output = (); + + fn should_run(run: ShouldRun) -> ShouldRun { + run.never() + } + + /// Executes the `compiletest` tool to run a suite of tests. + /// + /// Compiles all tests with `compiler` for `target` with the specified + /// compiletest `mode` and `suite` arguments. For example `mode` can be + /// "run-pass" or `suite` can be something like `debuginfo`. + fn run(self, builder: &Builder) { + let compiler = self.compiler; + let target = self.target; + let mode = self.mode; + let suite = self.suite; + + // Path for test suite + let suite_path = self.path.unwrap_or(""); + + // Skip codegen tests if they aren't enabled in configuration. + if !builder.config.codegen_tests && suite == "codegen" { + return; + } + + if suite == "debuginfo" { + // Skip debuginfo tests on MSVC + if builder.config.build.contains("msvc") { + return; + } + + if mode == "debuginfo-XXX" { + return if builder.config.build.contains("apple") { + builder.ensure(Compiletest { + mode: "debuginfo-lldb", + ..self + }); + } else { + builder.ensure(Compiletest { + mode: "debuginfo-gdb", + ..self + }); + }; + } + + builder.ensure(dist::DebuggerScripts { + sysroot: builder.sysroot(compiler), + host: target, + }); + } + + if suite.ends_with("fulldeps") || + // FIXME: Does pretty need librustc compiled? 
Note that there are + // fulldeps test suites with mode = pretty as well. + mode == "pretty" + { + builder.ensure(compile::Rustc { compiler, target }); + } + + if builder.no_std(target) == Some(true) { + // the `test` doesn't compile for no-std targets + builder.ensure(compile::Std { compiler, target }); + } else { + builder.ensure(compile::Test { compiler, target }); + } + + if builder.no_std(target) == Some(true) { + // for no_std run-make (e.g. thumb*), + // we need a host compiler which is called by cargo. + builder.ensure(compile::Std { compiler, target: compiler.host }); + } + + builder.ensure(native::TestHelpers { target }); + builder.ensure(RemoteCopyLibs { compiler, target }); + + let mut cmd = builder.tool_cmd(Tool::Compiletest); + + // compiletest currently has... a lot of arguments, so let's just pass all + // of them! + + cmd.arg("--compile-lib-path") + .arg(builder.rustc_libdir(compiler)); + cmd.arg("--run-lib-path") + .arg(builder.sysroot_libdir(compiler, target)); + cmd.arg("--rustc-path").arg(builder.rustc(compiler)); + + let is_rustdoc_ui = suite.ends_with("rustdoc-ui"); + + // Avoid depending on rustdoc when we don't need it. 
+ if mode == "rustdoc" + || (mode == "run-make" && suite.ends_with("fulldeps")) + || (mode == "ui" && is_rustdoc_ui) + { + cmd.arg("--rustdoc-path") + .arg(builder.rustdoc(compiler.host)); + } + + cmd.arg("--src-base") + .arg(builder.src.join("src/test").join(suite)); + cmd.arg("--build-base") + .arg(testdir(builder, compiler.host).join(suite)); + cmd.arg("--stage-id") + .arg(format!("stage{}-{}", compiler.stage, target)); + cmd.arg("--mode").arg(mode); + cmd.arg("--target").arg(target); + cmd.arg("--host").arg(&*compiler.host); + cmd.arg("--llvm-filecheck") + .arg(builder.llvm_filecheck(builder.config.build)); + + if builder.config.cmd.bless() { + cmd.arg("--bless"); + } + + let compare_mode = builder.config.cmd.compare_mode().or(self.compare_mode); + + if let Some(ref nodejs) = builder.config.nodejs { + cmd.arg("--nodejs").arg(nodejs); + } + + let mut flags = if is_rustdoc_ui { + Vec::new() + } else { + vec!["-Crpath".to_string()] + }; + if !is_rustdoc_ui { + if builder.config.rust_optimize_tests { + flags.push("-O".to_string()); + } + if builder.config.rust_debuginfo_tests { + flags.push("-g".to_string()); + } + } + flags.push("-Zunstable-options".to_string()); + flags.push(builder.config.cmd.rustc_args().join(" ")); + + if let Some(linker) = builder.linker(target) { + cmd.arg("--linker").arg(linker); + } + + let hostflags = flags.clone(); + cmd.arg("--host-rustcflags").arg(hostflags.join(" ")); + + let mut targetflags = flags.clone(); + targetflags.push(format!( + "-Lnative={}", + builder.test_helpers_out(target).display() + )); + cmd.arg("--target-rustcflags").arg(targetflags.join(" ")); + + cmd.arg("--docck-python").arg(builder.python()); + + if builder.config.build.ends_with("apple-darwin") { + // Force /usr/bin/python on macOS for LLDB tests because we're loading the + // LLDB plugin's compiled module which only works with the system python + // (namely not Homebrew-installed python) + cmd.arg("--lldb-python").arg("/usr/bin/python"); + } else { + 
cmd.arg("--lldb-python").arg(builder.python()); + } + + if let Some(ref gdb) = builder.config.gdb { + cmd.arg("--gdb").arg(gdb); + } + if let Some(ref vers) = builder.lldb_version { + cmd.arg("--lldb-version").arg(vers); + } + if let Some(ref dir) = builder.lldb_python_dir { + cmd.arg("--lldb-python-dir").arg(dir); + } + + // Get paths from cmd args + let paths = match &builder.config.cmd { + Subcommand::Test { ref paths, .. } => &paths[..], + _ => &[], + }; + + // Get test-args by stripping suite path + let mut test_args: Vec<&str> = paths + .iter() + .map(|p| { + match p.strip_prefix(".") { + Ok(path) => path, + Err(_) => p, + } + }) + .filter(|p| p.starts_with(suite_path) && p.is_file()) + .map(|p| p.strip_prefix(suite_path).unwrap().to_str().unwrap()) + .collect(); + + test_args.append(&mut builder.config.cmd.test_args()); + + cmd.args(&test_args); + + if builder.is_verbose() { + cmd.arg("--verbose"); + } + + if !builder.config.verbose_tests { + cmd.arg("--quiet"); + } + + if builder.config.llvm_enabled { + let llvm_config = builder.ensure(native::Llvm { + target: builder.config.build, + emscripten: false, + }); + if !builder.config.dry_run { + let llvm_version = output(Command::new(&llvm_config).arg("--version")); + cmd.arg("--llvm-version").arg(llvm_version); + } + if !builder.is_rust_llvm(target) { + cmd.arg("--system-llvm"); + } + + // Only pass correct values for these flags for the `run-make` suite as it + // requires that a C++ compiler was configured which isn't always the case. 
+ if !builder.config.dry_run && suite == "run-make-fulldeps" { + let llvm_components = output(Command::new(&llvm_config).arg("--components")); + let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags")); + cmd.arg("--cc") + .arg(builder.cc(target)) + .arg("--cxx") + .arg(builder.cxx(target).unwrap()) + .arg("--cflags") + .arg(builder.cflags(target).join(" ")) + .arg("--llvm-components") + .arg(llvm_components.trim()) + .arg("--llvm-cxxflags") + .arg(llvm_cxxflags.trim()); + if let Some(ar) = builder.ar(target) { + cmd.arg("--ar").arg(ar); + } + } + } + if suite == "run-make-fulldeps" && !builder.config.llvm_enabled { + builder.info(&format!( + "Ignoring run-make test suite as they generally don't work without LLVM" + )); + return; + } + + if suite != "run-make-fulldeps" { + cmd.arg("--cc") + .arg("") + .arg("--cxx") + .arg("") + .arg("--cflags") + .arg("") + .arg("--llvm-components") + .arg("") + .arg("--llvm-cxxflags") + .arg(""); + } + + if builder.remote_tested(target) { + cmd.arg("--remote-test-client") + .arg(builder.tool_exe(Tool::RemoteTestClient)); + } + + // Running a C compiler on MSVC requires a few env vars to be set, to be + // sure to set them here. + // + // Note that if we encounter `PATH` we make sure to append to our own `PATH` + // rather than stomp over it. 
+ if target.contains("msvc") { + for &(ref k, ref v) in builder.cc[&target].env() { + if k != "PATH" { + cmd.env(k, v); + } + } + } + cmd.env("RUSTC_BOOTSTRAP", "1"); + builder.add_rust_test_threads(&mut cmd); + + if builder.config.sanitizers { + cmd.env("SANITIZER_SUPPORT", "1"); + } + + if builder.config.profiler { + cmd.env("PROFILER_SUPPORT", "1"); + } + + cmd.env("RUST_TEST_TMPDIR", builder.out.join("tmp")); + + cmd.arg("--adb-path").arg("adb"); + cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR); + if target.contains("android") { + // Assume that cc for this target comes from the android sysroot + cmd.arg("--android-cross-path") + .arg(builder.cc(target).parent().unwrap().parent().unwrap()); + } else { + cmd.arg("--android-cross-path").arg(""); + } + + builder.ci_env.force_coloring_in_ci(&mut cmd); + + let _folder = builder.fold_output(|| format!("test_{}", suite)); + builder.info(&format!( + "Check compiletest suite={} mode={} ({} -> {})", + suite, mode, &compiler.host, target + )); + let _time = util::timeit(&builder); + try_run(builder, &mut cmd); + + if let Some(compare_mode) = compare_mode { + cmd.arg("--compare-mode").arg(compare_mode); + let _folder = builder.fold_output(|| format!("test_{}_{}", suite, compare_mode)); + builder.info(&format!( + "Check compiletest suite={} mode={} compare_mode={} ({} -> {})", + suite, mode, compare_mode, &compiler.host, target + )); + let _time = util::timeit(&builder); + try_run(builder, &mut cmd); + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +struct DocTest { + compiler: Compiler, + path: &'static str, + name: &'static str, + is_ext_doc: bool, +} + +impl Step for DocTest { + type Output = (); + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.never() + } + + /// Run `rustdoc --test` for all documentation in `src/doc`. + /// + /// This will run all tests in our markdown documentation (e.g. the book) + /// located in `src/doc`. 
The `rustdoc` that's run is the one that sits next to + /// `compiler`. + fn run(self, builder: &Builder) { + let compiler = self.compiler; + + builder.ensure(compile::Test { + compiler, + target: compiler.host, + }); + + // Do a breadth-first traversal of the `src/doc` directory and just run + // tests for all files that end in `*.md` + let mut stack = vec![builder.src.join(self.path)]; + let _time = util::timeit(&builder); + let _folder = builder.fold_output(|| format!("test_{}", self.name)); + + let mut files = Vec::new(); + while let Some(p) = stack.pop() { + if p.is_dir() { + stack.extend(t!(p.read_dir()).map(|p| t!(p).path())); + continue; + } + + if p.extension().and_then(|s| s.to_str()) != Some("md") { + continue; + } + + // The nostarch directory in the book is for no starch, and so isn't + // guaranteed to build. We don't care if it doesn't build, so skip it. + if p.to_str().map_or(false, |p| p.contains("nostarch")) { + continue; + } + + files.push(p); + } + + files.sort(); + + let mut toolstate = ToolState::TestPass; + for file in files { + if !markdown_test(builder, compiler, &file) { + toolstate = ToolState::TestFail; + } + } + if self.is_ext_doc { + builder.save_toolstate(self.name, toolstate); + } + } +} + +macro_rules! 
test_book { + ($($name:ident, $path:expr, $book_name:expr, default=$default:expr;)+) => { + $( + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] + pub struct $name { + compiler: Compiler, + } + + impl Step for $name { + type Output = (); + const DEFAULT: bool = $default; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path($path) + } + + fn make_run(run: RunConfig) { + run.builder.ensure($name { + compiler: run.builder.compiler(run.builder.top_stage, run.host), + }); + } + + fn run(self, builder: &Builder) { + builder.ensure(DocTest { + compiler: self.compiler, + path: $path, + name: $book_name, + is_ext_doc: !$default, + }); + } + } + )+ + } +} + +test_book!( + Nomicon, "src/doc/nomicon", "nomicon", default=false; + Reference, "src/doc/reference", "reference", default=false; + RustdocBook, "src/doc/rustdoc", "rustdoc", default=true; + RustcBook, "src/doc/rustc", "rustc", default=true; + RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false; + TheBook, "src/doc/book", "book", default=false; + UnstableBook, "src/doc/unstable-book", "unstable-book", default=true; +); + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct ErrorIndex { + compiler: Compiler, +} + +impl Step for ErrorIndex { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/tools/error_index_generator") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(ErrorIndex { + compiler: run.builder.compiler(run.builder.top_stage, run.host), + }); + } + + /// Run the error index generator tool to execute the tests located in the error + /// index. + /// + /// The `error_index_generator` tool lives in `src/tools` and is used to + /// generate a markdown file from the error indexes of the code base which is + /// then passed to `rustdoc --test`. 
+ fn run(self, builder: &Builder) { + let compiler = self.compiler; + + builder.ensure(compile::Std { + compiler, + target: compiler.host, + }); + + let dir = testdir(builder, compiler.host); + t!(fs::create_dir_all(&dir)); + let output = dir.join("error-index.md"); + + let mut tool = builder.tool_cmd(Tool::ErrorIndex); + tool.arg("markdown") + .arg(&output) + .env("CFG_BUILD", &builder.config.build) + .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir()); + + let _folder = builder.fold_output(|| "test_error_index"); + builder.info(&format!("Testing error-index stage{}", compiler.stage)); + let _time = util::timeit(&builder); + builder.run(&mut tool); + markdown_test(builder, compiler, &output); + } +} + +fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool { + match File::open(markdown) { + Ok(mut file) => { + let mut contents = String::new(); + t!(file.read_to_string(&mut contents)); + if !contents.contains("```") { + return true; + } + } + Err(_) => {} + } + + builder.info(&format!("doc tests for: {}", markdown.display())); + let mut cmd = builder.rustdoc_cmd(compiler.host); + builder.add_rust_test_threads(&mut cmd); + cmd.arg("--test"); + cmd.arg(markdown); + cmd.env("RUSTC_BOOTSTRAP", "1"); + + let test_args = builder.config.cmd.test_args().join(" "); + cmd.arg("--test-args").arg(test_args); + + if builder.config.verbose_tests { + try_run(builder, &mut cmd) + } else { + try_run_quiet(builder, &mut cmd) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CrateLibrustc { + compiler: Compiler, + target: Interned, + test_kind: TestKind, + krate: Interned, +} + +impl Step for CrateLibrustc { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.krate("rustc-main") + } + + fn make_run(run: RunConfig) { + let builder = run.builder; + let compiler = builder.compiler(builder.top_stage, run.host); + + for krate in 
builder.in_tree_crates("rustc-main") { + if run.path.ends_with(&krate.path) { + let test_kind = builder.kind.into(); + + builder.ensure(CrateLibrustc { + compiler, + target: run.target, + test_kind, + krate: krate.name, + }); + } + } + } + + fn run(self, builder: &Builder) { + builder.ensure(Crate { + compiler: self.compiler, + target: self.target, + mode: Mode::Rustc, + test_kind: self.test_kind, + krate: self.krate, + }); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CrateNotDefault { + compiler: Compiler, + target: Interned, + test_kind: TestKind, + krate: &'static str, +} + +impl Step for CrateNotDefault { + type Output = (); + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/liballoc_jemalloc") + .path("src/librustc_asan") + .path("src/librustc_lsan") + .path("src/librustc_msan") + .path("src/librustc_tsan") + } + + fn make_run(run: RunConfig) { + let builder = run.builder; + let compiler = builder.compiler(builder.top_stage, run.host); + + let test_kind = builder.kind.into(); + + builder.ensure(CrateNotDefault { + compiler, + target: run.target, + test_kind, + krate: match run.path { + _ if run.path.ends_with("src/liballoc_jemalloc") => "alloc_jemalloc", + _ if run.path.ends_with("src/librustc_asan") => "rustc_asan", + _ if run.path.ends_with("src/librustc_lsan") => "rustc_lsan", + _ if run.path.ends_with("src/librustc_msan") => "rustc_msan", + _ if run.path.ends_with("src/librustc_tsan") => "rustc_tsan", + _ => panic!("unexpected path {:?}", run.path), + }, + }); + } + + fn run(self, builder: &Builder) { + builder.ensure(Crate { + compiler: self.compiler, + target: self.target, + mode: Mode::Std, + test_kind: self.test_kind, + krate: INTERNER.intern_str(self.krate), + }); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Crate { + pub compiler: Compiler, + pub target: Interned, + pub mode: Mode, + pub test_kind: TestKind, + pub krate: Interned, +} + +impl Step for Crate { + type 
Output = (); + const DEFAULT: bool = true; + + fn should_run(mut run: ShouldRun) -> ShouldRun { + let builder = run.builder; + run = run.krate("test"); + for krate in run.builder.in_tree_crates("std") { + if krate.is_local(&run.builder) + && !krate.name.contains("jemalloc") + && !(krate.name.starts_with("rustc_") && krate.name.ends_with("san")) + && krate.name != "dlmalloc" + { + run = run.path(krate.local_path(&builder).to_str().unwrap()); + } + } + run + } + + fn make_run(run: RunConfig) { + let builder = run.builder; + let compiler = builder.compiler(builder.top_stage, run.host); + + let make = |mode: Mode, krate: &CargoCrate| { + let test_kind = builder.kind.into(); + + builder.ensure(Crate { + compiler, + target: run.target, + mode, + test_kind, + krate: krate.name, + }); + }; + + for krate in builder.in_tree_crates("std") { + if run.path.ends_with(&krate.local_path(&builder)) { + make(Mode::Std, krate); + } + } + for krate in builder.in_tree_crates("test") { + if run.path.ends_with(&krate.local_path(&builder)) { + make(Mode::Test, krate); + } + } + } + + /// Run all unit tests plus documentation tests for a given crate defined + /// by a `Cargo.toml` (single manifest) + /// + /// This is what runs tests for crates like the standard library, compiler, etc. + /// It essentially is the driver for running `cargo test`. + /// + /// Currently this runs all tests for a DAG by passing a bunch of `-p foo` + /// arguments, and those arguments are discovered from `cargo metadata`. + fn run(self, builder: &Builder) { + let compiler = self.compiler; + let target = self.target; + let mode = self.mode; + let test_kind = self.test_kind; + let krate = self.krate; + + builder.ensure(compile::Test { compiler, target }); + builder.ensure(RemoteCopyLibs { compiler, target }); + + // If we're not doing a full bootstrap but we're testing a stage2 version of + // libstd, then what we're actually testing is the libstd produced in + // stage1. 
Reflect that here by updating the compiler that we're working + // with automatically. + let compiler = if builder.force_use_stage1(compiler, target) { + builder.compiler(1, compiler.host) + } else { + compiler.clone() + }; + + let mut cargo = builder.cargo(compiler, mode, target, test_kind.subcommand()); + match mode { + Mode::Std => { + compile::std_cargo(builder, &compiler, target, &mut cargo); + } + Mode::Test => { + compile::test_cargo(builder, &compiler, target, &mut cargo); + } + Mode::Rustc => { + builder.ensure(compile::Rustc { compiler, target }); + compile::rustc_cargo(builder, &mut cargo); + } + _ => panic!("can only test libraries"), + }; + + // Build up the base `cargo test` command. + // + // Pass in some standard flags then iterate over the graph we've discovered + // in `cargo metadata` with the maps above and figure out what `-p` + // arguments need to get passed. + if test_kind.subcommand() == "test" && !builder.fail_fast { + cargo.arg("--no-fail-fast"); + } + match builder.doc_tests { + DocTests::Only => { + cargo.arg("--doc"); + } + DocTests::No => { + cargo.args(&["--lib", "--bins", "--examples", "--tests", "--benches"]); + } + DocTests::Yes => {} + } + + cargo.arg("-p").arg(krate); + + // The tests are going to run with the *target* libraries, so we need to + // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent. + // + // Note that to run the compiler we need to run with the *host* libraries, + // but our wrapper scripts arrange for that to be the case anyway. 
+ let mut dylib_path = dylib_path(); + dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target))); + cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap()); + + cargo.arg("--"); + cargo.args(&builder.config.cmd.test_args()); + + if !builder.config.verbose_tests { + cargo.arg("--quiet"); + } + + if target.contains("emscripten") { + cargo.env( + format!("CARGO_TARGET_{}_RUNNER", envify(&target)), + builder + .config + .nodejs + .as_ref() + .expect("nodejs not configured"), + ); + } else if target.starts_with("wasm32") { + // Warn about running tests without the `wasm_syscall` feature enabled. + // The javascript shim implements the syscall interface so that test + // output can be correctly reported. + if !builder.config.wasm_syscall { + builder.info(&format!( + "Libstd was built without `wasm_syscall` feature enabled: \ + test output may not be visible." + )); + } + + // On the wasm32-unknown-unknown target we're using LTO which is + // incompatible with `-C prefer-dynamic`, so disable that here + cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); + + let node = builder + .config + .nodejs + .as_ref() + .expect("nodejs not configured"); + let runner = format!( + "{} {}/src/etc/wasm32-shim.js", + node.display(), + builder.src.display() + ); + cargo.env(format!("CARGO_TARGET_{}_RUNNER", envify(&target)), &runner); + } else if builder.remote_tested(target) { + cargo.env( + format!("CARGO_TARGET_{}_RUNNER", envify(&target)), + format!("{} run", builder.tool_exe(Tool::RemoteTestClient).display()), + ); + } + + let _folder = builder.fold_output(|| { + format!( + "{}_stage{}-{}", + test_kind.subcommand(), + compiler.stage, + krate + ) + }); + builder.info(&format!( + "{} {} stage{} ({} -> {})", + test_kind, krate, compiler.stage, &compiler.host, target + )); + let _time = util::timeit(&builder); + try_run(builder, &mut cargo); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct CrateRustdoc { + host: Interned, + test_kind: 
TestKind, +} + +impl Step for CrateRustdoc { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun) -> ShouldRun { + run.paths(&["src/librustdoc", "src/tools/rustdoc"]) + } + + fn make_run(run: RunConfig) { + let builder = run.builder; + + let test_kind = builder.kind.into(); + + builder.ensure(CrateRustdoc { + host: run.host, + test_kind, + }); + } + + fn run(self, builder: &Builder) { + let test_kind = self.test_kind; + + let compiler = builder.compiler(builder.top_stage, self.host); + let target = compiler.host; + builder.ensure(compile::Rustc { compiler, target }); + + let mut cargo = tool::prepare_tool_cargo(builder, + compiler, + Mode::ToolRustc, + target, + test_kind.subcommand(), + "src/tools/rustdoc", + SourceType::InTree); + if test_kind.subcommand() == "test" && !builder.fail_fast { + cargo.arg("--no-fail-fast"); + } + + cargo.arg("-p").arg("rustdoc:0.0.0"); + + cargo.arg("--"); + cargo.args(&builder.config.cmd.test_args()); + + if !builder.config.verbose_tests { + cargo.arg("--quiet"); + } + + let _folder = builder + .fold_output(|| format!("{}_stage{}-rustdoc", test_kind.subcommand(), compiler.stage)); + builder.info(&format!( + "{} rustdoc stage{} ({} -> {})", + test_kind, compiler.stage, &compiler.host, target + )); + let _time = util::timeit(&builder); + + try_run(builder, &mut cargo); + } +} + +fn envify(s: &str) -> String { + s.chars() + .map(|c| match c { + '-' => '_', + c => c, + }) + .flat_map(|c| c.to_uppercase()) + .collect() +} + +/// Some test suites are run inside emulators or on remote devices, and most +/// of our test binaries are linked dynamically which means we need to ship +/// the standard library and such to the emulator ahead of time. This step +/// represents this and is a dependency of all test suites. +/// +/// Most of the time this is a noop. 
For some steps such as shipping data to +/// QEMU we have to build our own tools so we've got conditional dependencies +/// on those programs as well. Note that the remote test client is built for +/// the build target (us) and the server is built for the target. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct RemoteCopyLibs { + compiler: Compiler, + target: Interned, +} + +impl Step for RemoteCopyLibs { + type Output = (); + + fn should_run(run: ShouldRun) -> ShouldRun { + run.never() + } + + fn run(self, builder: &Builder) { + let compiler = self.compiler; + let target = self.target; + if !builder.remote_tested(target) { + return; + } + + builder.ensure(compile::Test { compiler, target }); + + builder.info(&format!("REMOTE copy libs to emulator ({})", target)); + t!(fs::create_dir_all(builder.out.join("tmp"))); + + let server = builder.ensure(tool::RemoteTestServer { + compiler: compiler.with_stage(0), + target, + }); + + // Spawn the emulator and wait for it to come online + let tool = builder.tool_exe(Tool::RemoteTestClient); + let mut cmd = Command::new(&tool); + cmd.arg("spawn-emulator") + .arg(target) + .arg(&server) + .arg(builder.out.join("tmp")); + if let Some(rootfs) = builder.qemu_rootfs(target) { + cmd.arg(rootfs); + } + builder.run(&mut cmd); + + // Push all our dylibs to the emulator + for f in t!(builder.sysroot_libdir(compiler, target).read_dir()) { + let f = t!(f); + let name = f.file_name().into_string().unwrap(); + if util::is_dylib(&name) { + builder.run(Command::new(&tool).arg("push").arg(f.path())); + } + } + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Distcheck; + +impl Step for Distcheck { + type Output = (); + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("distcheck") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Distcheck); + } + + /// Run "distcheck", a 'make check' from a tarball + fn run(self, builder: &Builder) { + builder.info(&format!("Distcheck")); + let dir = 
builder.out.join("tmp").join("distcheck"); + let _ = fs::remove_dir_all(&dir); + t!(fs::create_dir_all(&dir)); + + // Guarantee that these are built before we begin running. + builder.ensure(dist::PlainSourceTarball); + builder.ensure(dist::Src); + + let mut cmd = Command::new("tar"); + cmd.arg("-xzf") + .arg(builder.ensure(dist::PlainSourceTarball)) + .arg("--strip-components=1") + .current_dir(&dir); + builder.run(&mut cmd); + builder.run( + Command::new("./configure") + .args(&builder.config.configure_args) + .arg("--enable-vendor") + .current_dir(&dir), + ); + builder.run( + Command::new(build_helper::make(&builder.config.build)) + .arg("check") + .current_dir(&dir), + ); + + // Now make sure that rust-src has all of libstd's dependencies + builder.info(&format!("Distcheck rust-src")); + let dir = builder.out.join("tmp").join("distcheck-src"); + let _ = fs::remove_dir_all(&dir); + t!(fs::create_dir_all(&dir)); + + let mut cmd = Command::new("tar"); + cmd.arg("-xzf") + .arg(builder.ensure(dist::Src)) + .arg("--strip-components=1") + .current_dir(&dir); + builder.run(&mut cmd); + + let toml = dir.join("rust-src/lib/rustlib/src/rust/src/libstd/Cargo.toml"); + builder.run( + Command::new(&builder.initial_cargo) + .arg("generate-lockfile") + .arg("--manifest-path") + .arg(&toml) + .current_dir(&dir), + ); + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct Bootstrap; + +impl Step for Bootstrap { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + /// Test the build system itself + fn run(self, builder: &Builder) { + let mut cmd = Command::new(&builder.initial_cargo); + cmd.arg("test") + .current_dir(builder.src.join("src/bootstrap")) + .env("RUSTFLAGS", "-Cdebuginfo=2") + .env("CARGO_TARGET_DIR", builder.out.join("bootstrap")) + .env("RUSTC_BOOTSTRAP", "1") + .env("RUSTC", &builder.initial_rustc); + if let Some(flags) = option_env!("RUSTFLAGS") { + // Use the same rustc flags for testing as for "normal" 
compilation, + // so that Cargo doesn’t recompile the entire dependency graph every time: + // https://github.com/rust-lang/rust/issues/49215 + cmd.env("RUSTFLAGS", flags); + } + if !builder.fail_fast { + cmd.arg("--no-fail-fast"); + } + cmd.arg("--").args(&builder.config.cmd.test_args()); + // rustbuild tests are racy on directory creation so just run them one at a time. + // Since there's not many this shouldn't be a problem. + cmd.arg("--test-threads=1"); + try_run(builder, &mut cmd); + } + + fn should_run(run: ShouldRun) -> ShouldRun { + run.path("src/bootstrap") + } + + fn make_run(run: RunConfig) { + run.builder.ensure(Bootstrap); + } +} diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs index ea055cb5d1b9..23ef031dcb70 100644 --- a/src/bootstrap/tool.rs +++ b/src/bootstrap/tool.rs @@ -10,13 +10,15 @@ use std::fs; use std::env; +use std::iter; use std::path::PathBuf; use std::process::{Command, exit}; +use std::collections::HashSet; use Mode; use Compiler; use builder::{Step, RunConfig, ShouldRun, Builder}; -use util::{copy, exe, add_lib_path}; +use util::{exe, add_lib_path}; use compile::{self, libtest_stamp, libstd_stamp, librustc_stamp}; use native; use channel::GitInfo; @@ -27,7 +29,7 @@ use toolstate::ToolState; pub struct CleanTools { pub compiler: Compiler, pub target: Interned, - pub mode: Mode, + pub cause: Mode, } impl Step for CleanTools { @@ -38,50 +40,57 @@ impl Step for CleanTools { } fn run(self, builder: &Builder) { - let build = builder.build; let compiler = self.compiler; let target = self.target; - let mode = self.mode; + let cause = self.cause; // This is for the original compiler, but if we're forced to use stage 1, then // std/test/rustc stamps won't exist in stage 2, so we need to get those from stage 1, since // we copy the libs forward. 
- let tools_dir = build.stage_out(compiler, Mode::Tool); + let tools_dir = builder.stage_out(compiler, Mode::ToolRustc); let compiler = if builder.force_use_stage1(compiler, target) { builder.compiler(1, compiler.host) } else { compiler }; - for &cur_mode in &[Mode::Libstd, Mode::Libtest, Mode::Librustc] { + for &cur_mode in &[Mode::Std, Mode::Test, Mode::Rustc] { let stamp = match cur_mode { - Mode::Libstd => libstd_stamp(build, compiler, target), - Mode::Libtest => libtest_stamp(build, compiler, target), - Mode::Librustc => librustc_stamp(build, compiler, target), + Mode::Std => libstd_stamp(builder, compiler, target), + Mode::Test => libtest_stamp(builder, compiler, target), + Mode::Rustc => librustc_stamp(builder, compiler, target), _ => panic!(), }; - if build.clear_if_dirty(&tools_dir, &stamp) { + if builder.clear_if_dirty(&tools_dir, &stamp) { break; } // If we are a rustc tool, and std changed, we also need to clear ourselves out -- our // dependencies depend on std. Therefore, we iterate up until our own mode. - if mode == cur_mode { + if cause == cur_mode { break; } } } } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub enum SourceType { + InTree, + Submodule, +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] struct ToolBuild { compiler: Compiler, target: Interned, tool: &'static str, path: &'static str, mode: Mode, - is_ext_tool: bool, + is_optional_tool: bool, + source_type: SourceType, + extra_features: Vec, } impl Step for ToolBuild { @@ -96,42 +105,146 @@ impl Step for ToolBuild { /// This will build the specified tool with the specified `host` compiler in /// `stage` into the normal cargo output directory. 
fn run(self, builder: &Builder) -> Option { - let build = builder.build; let compiler = self.compiler; let target = self.target; let tool = self.tool; let path = self.path; - let is_ext_tool = self.is_ext_tool; + let is_optional_tool = self.is_optional_tool; match self.mode { - Mode::Libstd => builder.ensure(compile::Std { compiler, target }), - Mode::Libtest => builder.ensure(compile::Test { compiler, target }), - Mode::Librustc => builder.ensure(compile::Rustc { compiler, target }), - Mode::Tool => panic!("unexpected Mode::Tool for tool build") + Mode::ToolRustc => { + builder.ensure(compile::Rustc { compiler, target }) + } + Mode::ToolStd => { + builder.ensure(compile::Std { compiler, target }) + } + Mode::ToolBootstrap => {} // uses downloaded stage0 compiler libs + _ => panic!("unexpected Mode for tool build") } - let _folder = build.fold_output(|| format!("stage{}-{}", compiler.stage, tool)); - println!("Building stage{} tool {} ({})", compiler.stage, tool, target); + let mut cargo = prepare_tool_cargo( + builder, + compiler, + self.mode, + target, + "build", + path, + self.source_type, + ); + cargo.arg("--features").arg(self.extra_features.join(" ")); - let mut cargo = prepare_tool_cargo(builder, compiler, target, "build", path); - let is_expected = build.try_run(&mut cargo); - build.save_toolstate(tool, if is_expected { + let _folder = builder.fold_output(|| format!("stage{}-{}", compiler.stage, tool)); + builder.info(&format!("Building stage{} tool {} ({})", compiler.stage, tool, target)); + let mut duplicates = Vec::new(); + let is_expected = compile::stream_cargo(builder, &mut cargo, &mut |msg| { + // Only care about big things like the RLS/Cargo for now + match tool { + | "rls" + | "cargo" + | "clippy-driver" + => {} + + _ => return, + } + let (id, features, filenames) = match msg { + compile::CargoMessage::CompilerArtifact { + package_id, + features, + filenames + } => { + (package_id, features, filenames) + } + _ => return, + }; + let features = 
features.iter().map(|s| s.to_string()).collect::>(); + + for path in filenames { + let val = (tool, PathBuf::from(&*path), features.clone()); + // we're only interested in deduplicating rlibs for now + if val.1.extension().and_then(|s| s.to_str()) != Some("rlib") { + continue + } + + // Don't worry about libs that turn out to be host dependencies + // or build scripts, we only care about target dependencies that + // are in `deps`. + if let Some(maybe_target) = val.1 + .parent() // chop off file name + .and_then(|p| p.parent()) // chop off `deps` + .and_then(|p| p.parent()) // chop off `release` + .and_then(|p| p.file_name()) + .and_then(|p| p.to_str()) + { + if maybe_target != &*target { + continue + } + } + + let mut artifacts = builder.tool_artifacts.borrow_mut(); + let prev_artifacts = artifacts + .entry(target) + .or_default(); + if let Some(prev) = prev_artifacts.get(&*id) { + if prev.1 != val.1 { + duplicates.push(( + id.to_string(), + val, + prev.clone(), + )); + } + return + } + prev_artifacts.insert(id.to_string(), val); + } + }); + + if is_expected && duplicates.len() != 0 { + println!("duplicate artfacts found when compiling a tool, this \ + typically means that something was recompiled because \ + a transitive dependency has different features activated \ + than in a previous build:\n"); + println!("the following dependencies are duplicated although they \ + have the same features enabled:"); + for (id, cur, prev) in duplicates.drain_filter(|(_, cur, prev)| cur.2 == prev.2) { + println!(" {}", id); + // same features + println!(" `{}` ({:?})\n `{}` ({:?})", cur.0, cur.1, prev.0, prev.1); + } + println!("the following dependencies have different features:"); + for (id, cur, prev) in duplicates { + println!(" {}", id); + let cur_features: HashSet<_> = cur.2.into_iter().collect(); + let prev_features: HashSet<_> = prev.2.into_iter().collect(); + println!(" `{}` additionally enabled features {:?} at {:?}", + cur.0, &cur_features - &prev_features, cur.1); + 
println!(" `{}` additionally enabled features {:?} at {:?}", + prev.0, &prev_features - &cur_features, prev.1); + } + println!(""); + println!("to fix this you will probably want to edit the local \ + src/tools/rustc-workspace-hack/Cargo.toml crate, as \ + that will update the dependency graph to ensure that \ + these crates all share the same feature set"); + panic!("tools should not compile multiple copies of the same crate"); + } + + builder.save_toolstate(tool, if is_expected { ToolState::TestFail } else { ToolState::BuildFail }); if !is_expected { - if !is_ext_tool { + if !is_optional_tool { exit(1); } else { return None; } } else { - let cargo_out = build.cargo_out(compiler, Mode::Tool, target) + let cargo_out = builder.cargo_out(compiler, self.mode, target) .join(exe(tool, &compiler.host)); - let bin = build.tools_dir(compiler).join(exe(tool, &compiler.host)); - copy(&cargo_out, &bin); + let bin = builder.tools_dir(compiler).join(exe(tool, &compiler.host)); + builder.copy(&cargo_out, &bin); Some(bin) } } @@ -140,20 +253,25 @@ impl Step for ToolBuild { pub fn prepare_tool_cargo( builder: &Builder, compiler: Compiler, + mode: Mode, target: Interned, command: &'static str, path: &'static str, + source_type: SourceType, ) -> Command { - let build = builder.build; - let mut cargo = builder.cargo(compiler, Mode::Tool, target, command); - let dir = build.src.join(path); + let mut cargo = builder.cargo(compiler, mode, target, command); + let dir = builder.src.join(path); cargo.arg("--manifest-path").arg(dir.join("Cargo.toml")); // We don't want to build tools dynamically as they'll be running across // stages and such and it's just easier if they're not dynamically linked. 
cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1"); - if let Some(dir) = build.openssl_install_dir(target) { + if source_type == SourceType::Submodule { + cargo.env("RUSTC_EXTERNAL_TOOL", "1"); + } + + if let Some(dir) = builder.openssl_install_dir(target) { cargo.env("OPENSSL_STATIC", "1"); cargo.env("OPENSSL_DIR", dir); cargo.env("LIBZ_SYS_STATIC", "1"); @@ -163,10 +281,10 @@ pub fn prepare_tool_cargo( // own copy cargo.env("LZMA_API_STATIC", "1"); - cargo.env("CFG_RELEASE_CHANNEL", &build.config.channel); - cargo.env("CFG_VERSION", build.rust_version()); + cargo.env("CFG_RELEASE_CHANNEL", &builder.config.channel); + cargo.env("CFG_VERSION", builder.rust_version()); - let info = GitInfo::new(&build.config, &dir); + let info = GitInfo::new(&builder.config, &dir); if let Some(sha) = info.sha() { cargo.env("CFG_COMMIT_HASH", sha); } @@ -180,22 +298,39 @@ pub fn prepare_tool_cargo( } macro_rules! tool { - ($($name:ident, $path:expr, $tool_name:expr, $mode:expr;)+) => { - #[derive(Copy, Clone)] + ($($name:ident, $path:expr, $tool_name:expr, $mode:expr + $(,llvm_tools = $llvm:expr)* $(,is_external_tool = $external:expr)*;)+) => { + #[derive(Copy, PartialEq, Eq, Clone)] pub enum Tool { $( $name, )+ } + impl Tool { + pub fn get_mode(&self) -> Mode { + let mode = match self { + $(Tool::$name => $mode,)+ + }; + mode + } + + /// Whether this tool requires LLVM to run + pub fn uses_llvm_tools(&self) -> bool { + match self { + $(Tool::$name => false $(|| $llvm)*,)+ + } + } + } + impl<'a> Builder<'a> { pub fn tool_exe(&self, tool: Tool) -> PathBuf { let stage = self.tool_default_stage(tool); match tool { $(Tool::$name => self.ensure($name { - compiler: self.compiler(stage, self.build.build), - target: self.build.build, + compiler: self.compiler(stage, self.config.build), + target: self.config.build, }), )+ } @@ -229,7 +364,7 @@ macro_rules! 
tool { fn make_run(run: RunConfig) { run.builder.ensure($name { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } @@ -241,7 +376,13 @@ macro_rules! tool { tool: $tool_name, mode: $mode, path: $path, - is_ext_tool: false, + is_optional_tool: false, + source_type: if false $(|| $external)* { + SourceType::Submodule + } else { + SourceType::InTree + }, + extra_features: Vec::new(), }).expect("expected to build -- essential tool") } } @@ -250,16 +391,18 @@ macro_rules! tool { } tool!( - Rustbook, "src/tools/rustbook", "rustbook", Mode::Librustc; - ErrorIndex, "src/tools/error_index_generator", "error_index_generator", Mode::Librustc; - UnstableBookGen, "src/tools/unstable-book-gen", "unstable-book-gen", Mode::Libstd; - Tidy, "src/tools/tidy", "tidy", Mode::Libstd; - Linkchecker, "src/tools/linkchecker", "linkchecker", Mode::Libstd; - CargoTest, "src/tools/cargotest", "cargotest", Mode::Libstd; - Compiletest, "src/tools/compiletest", "compiletest", Mode::Libtest; - BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::Libstd; - RemoteTestClient, "src/tools/remote-test-client", "remote-test-client", Mode::Libstd; - RustInstaller, "src/tools/rust-installer", "fabricate", Mode::Libstd; + Rustbook, "src/tools/rustbook", "rustbook", Mode::ToolBootstrap; + ErrorIndex, "src/tools/error_index_generator", "error_index_generator", Mode::ToolRustc; + UnstableBookGen, "src/tools/unstable-book-gen", "unstable-book-gen", Mode::ToolBootstrap; + Tidy, "src/tools/tidy", "tidy", Mode::ToolBootstrap; + Linkchecker, "src/tools/linkchecker", "linkchecker", Mode::ToolBootstrap; + CargoTest, "src/tools/cargotest", "cargotest", Mode::ToolBootstrap; + Compiletest, "src/tools/compiletest", "compiletest", Mode::ToolBootstrap, llvm_tools = true; + BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::ToolBootstrap; + 
RemoteTestClient, "src/tools/remote-test-client", "remote-test-client", Mode::ToolBootstrap; + RustInstaller, "src/tools/rust-installer", "fabricate", Mode::ToolBootstrap, + is_external_tool = true; + RustdocTheme, "src/tools/rustdoc-themes", "rustdoc-themes", Mode::ToolBootstrap; ); #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] @@ -277,7 +420,7 @@ impl Step for RemoteTestServer { fn make_run(run: RunConfig) { run.builder.ensure(RemoteTestServer { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } @@ -287,9 +430,11 @@ impl Step for RemoteTestServer { compiler: self.compiler, target: self.target, tool: "remote-test-server", - mode: Mode::Libstd, + mode: Mode::ToolStd, path: "src/tools/remote-test-server", - is_ext_tool: false, + is_optional_tool: false, + source_type: SourceType::InTree, + extra_features: Vec::new(), }).expect("expected to build -- essential tool") } } @@ -315,43 +460,51 @@ impl Step for Rustdoc { } fn run(self, builder: &Builder) -> PathBuf { - let build = builder.build; let target_compiler = builder.compiler(builder.top_stage, self.host); let target = target_compiler.host; let build_compiler = if target_compiler.stage == 0 { - builder.compiler(0, builder.build.build) + builder.compiler(0, builder.config.build) } else if target_compiler.stage >= 2 { // Past stage 2, we consider the compiler to be ABI-compatible and hence capable of // building rustdoc itself. - builder.compiler(target_compiler.stage, builder.build.build) + builder.compiler(target_compiler.stage, builder.config.build) } else { // Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise // we'd have stageN/bin/rustc and stageN/bin/rustdoc be effectively different stage // compilers, which isn't what we want. 
- builder.compiler(target_compiler.stage - 1, builder.build.build) + builder.compiler(target_compiler.stage - 1, builder.config.build) }; builder.ensure(compile::Rustc { compiler: build_compiler, target }); + builder.ensure(compile::Rustc { + compiler: build_compiler, + target: builder.config.build, + }); - let _folder = build.fold_output(|| format!("stage{}-rustdoc", target_compiler.stage)); - println!("Building rustdoc for stage{} ({})", target_compiler.stage, target_compiler.host); - - let mut cargo = prepare_tool_cargo(builder, - build_compiler, - target, - "build", - "src/tools/rustdoc"); + let mut cargo = prepare_tool_cargo( + builder, + build_compiler, + Mode::ToolRustc, + target, + "build", + "src/tools/rustdoc", + SourceType::InTree, + ); // Most tools don't get debuginfo, but rustdoc should. cargo.env("RUSTC_DEBUGINFO", builder.config.rust_debuginfo.to_string()) .env("RUSTC_DEBUGINFO_LINES", builder.config.rust_debuginfo_lines.to_string()); - build.run(&mut cargo); + let _folder = builder.fold_output(|| format!("stage{}-rustdoc", target_compiler.stage)); + builder.info(&format!("Building rustdoc for stage{} ({})", + target_compiler.stage, target_compiler.host)); + builder.run(&mut cargo); + // Cargo adds a number of paths to the dylib search path on windows, which results in // the wrong rustdoc being executed. To avoid the conflicting rustdocs, we name the "tool" // rustdoc a different name. - let tool_rustdoc = build.cargo_out(build_compiler, Mode::Tool, target) - .join(exe("rustdoc-tool-binary", &target_compiler.host)); + let tool_rustdoc = builder.cargo_out(build_compiler, Mode::ToolRustc, target) + .join(exe("rustdoc_tool_binary", &target_compiler.host)); // don't create a stage0-sysroot/bin directory. 
if target_compiler.stage > 0 { @@ -360,7 +513,7 @@ impl Step for Rustdoc { t!(fs::create_dir_all(&bindir)); let bin_rustdoc = bindir.join(exe("rustdoc", &*target_compiler.host)); let _ = fs::remove_file(&bin_rustdoc); - copy(&tool_rustdoc, &bin_rustdoc); + builder.copy(&tool_rustdoc, &bin_rustdoc); bin_rustdoc } else { tool_rustdoc @@ -381,12 +534,12 @@ impl Step for Cargo { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path("src/tools/cargo").default_condition(builder.build.config.extended) + run.path("src/tools/cargo").default_condition(builder.config.extended) } fn make_run(run: RunConfig) { run.builder.ensure(Cargo { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } @@ -399,15 +552,17 @@ impl Step for Cargo { // compiler to be available, so we need to depend on that. builder.ensure(compile::Rustc { compiler: self.compiler, - target: builder.build.build, + target: builder.config.build, }); builder.ensure(ToolBuild { compiler: self.compiler, target: self.target, tool: "cargo", - mode: Mode::Librustc, + mode: Mode::ToolRustc, path: "src/tools/cargo", - is_ext_tool: false, + is_optional_tool: false, + source_type: SourceType::Submodule, + extra_features: Vec::new(), }).expect("expected to build -- essential tool") } } @@ -420,10 +575,11 @@ macro_rules! tool_extended { $tool_name:expr, $extra_deps:block;)+) => { $( - #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] + #[derive(Debug, Clone, Hash, PartialEq, Eq)] pub struct $name { pub compiler: Compiler, pub target: Interned, + pub extra_features: Vec, } impl Step for $name { @@ -433,25 +589,29 @@ macro_rules! 
tool_extended { fn should_run(run: ShouldRun) -> ShouldRun { let builder = run.builder; - run.path($path).default_condition(builder.build.config.extended) + run.path($path).default_condition(builder.config.extended) } fn make_run(run: RunConfig) { run.builder.ensure($name { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.build.build), + compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, + extra_features: Vec::new(), }); } - fn run($sel, $builder: &Builder) -> Option { + #[allow(unused_mut)] + fn run(mut $sel, $builder: &Builder) -> Option { $extra_deps $builder.ensure(ToolBuild { compiler: $sel.compiler, target: $sel.target, tool: $tool_name, - mode: Mode::Librustc, + mode: Mode::ToolRustc, path: $path, - is_ext_tool: true, + extra_features: $sel.extra_features, + is_optional_tool: true, + source_type: SourceType::Submodule, }) } } @@ -461,16 +621,32 @@ macro_rules! tool_extended { tool_extended!((self, builder), Cargofmt, rustfmt, "src/tools/rustfmt", "cargo-fmt", {}; + CargoClippy, clippy, "src/tools/clippy", "cargo-clippy", { + // Clippy depends on procedural macros (serde), which requires a full host + // compiler to be available, so we need to depend on that. + builder.ensure(compile::Rustc { + compiler: self.compiler, + target: builder.config.build, + }); + }; Clippy, clippy, "src/tools/clippy", "clippy-driver", { // Clippy depends on procedural macros (serde), which requires a full host // compiler to be available, so we need to depend on that. 
builder.ensure(compile::Rustc { compiler: self.compiler, - target: builder.build.build, + target: builder.config.build, }); }; Miri, miri, "src/tools/miri", "miri", {}; Rls, rls, "src/tools/rls", "rls", { + let clippy = builder.ensure(Clippy { + compiler: self.compiler, + target: self.target, + extra_features: Vec::new(), + }); + if clippy.is_some() { + self.extra_features.push("clippy".to_owned()); + } builder.ensure(native::Openssl { target: self.target, }); @@ -478,7 +654,7 @@ tool_extended!((self, builder), // compiler to be available, so we need to depend on that. builder.ensure(compile::Rustc { compiler: self.compiler, - target: builder.build.build, + target: builder.config.build, }); }; Rustfmt, rustfmt, "src/tools/rustfmt", "rustfmt", {}; @@ -489,8 +665,8 @@ impl<'a> Builder<'a> { /// `host`. pub fn tool_cmd(&self, tool: Tool) -> Command { let mut cmd = Command::new(self.tool_exe(tool)); - let compiler = self.compiler(self.tool_default_stage(tool), self.build.build); - self.prepare_tool_cmd(compiler, &mut cmd); + let compiler = self.compiler(self.tool_default_stage(tool), self.config.build); + self.prepare_tool_cmd(compiler, tool, &mut cmd); cmd } @@ -498,11 +674,15 @@ impl<'a> Builder<'a> { /// /// Notably this munges the dynamic library lookup path to point to the /// right location to run `compiler`. - fn prepare_tool_cmd(&self, compiler: Compiler, cmd: &mut Command) { + fn prepare_tool_cmd(&self, compiler: Compiler, tool: Tool, cmd: &mut Command) { let host = &compiler.host; - let mut paths: Vec = vec![ - PathBuf::from(&self.sysroot_libdir(compiler, compiler.host)), - self.cargo_out(compiler, Mode::Tool, *host).join("deps"), + let mut lib_paths: Vec = vec![ + if compiler.stage == 0 && tool != Tool::ErrorIndex { + self.build.rustc_snapshot_libdir() + } else { + PathBuf::from(&self.sysroot_libdir(compiler, compiler.host)) + }, + self.cargo_out(compiler, tool.get_mode(), *host).join("deps"), ]; // On MSVC a tool may invoke a C compiler (e.g. 
compiletest in run-make @@ -517,11 +697,48 @@ impl<'a> Builder<'a> { } for path in env::split_paths(v) { if !curpaths.contains(&path) { - paths.push(path); + lib_paths.push(path); } } } } - add_lib_path(paths, cmd); + + // Add the llvm/bin directory to PATH since it contains lots of + // useful, platform-independent tools + if tool.uses_llvm_tools() { + if let Some(llvm_bin_path) = self.llvm_bin_path() { + if host.contains("windows") { + // On Windows, PATH and the dynamic library path are the same, + // so we just add the LLVM bin path to lib_path + lib_paths.push(llvm_bin_path); + } else { + let old_path = env::var_os("PATH").unwrap_or_default(); + let new_path = env::join_paths(iter::once(llvm_bin_path) + .chain(env::split_paths(&old_path))) + .expect("Could not add LLVM bin path to PATH"); + cmd.env("PATH", new_path); + } + } + } + + add_lib_path(lib_paths, cmd); + } + + fn llvm_bin_path(&self) -> Option { + if self.config.llvm_enabled && !self.config.dry_run { + let llvm_config = self.ensure(native::Llvm { + target: self.config.build, + emscripten: false, + }); + + // Add the llvm/bin directory to PATH since it contains lots of + // useful, platform-independent tools + let llvm_bin_path = llvm_config.parent() + .expect("Expected llvm-config to be contained in directory"); + assert!(llvm_bin_path.is_dir()); + Some(llvm_bin_path.to_path_buf()) + } else { + None + } } } diff --git a/src/bootstrap/util.rs b/src/bootstrap/util.rs index 07941e588387..be03796921af 100644 --- a/src/bootstrap/util.rs +++ b/src/bootstrap/util.rs @@ -15,13 +15,14 @@ use std::env; use std::str; -use std::fs::{self, File, OpenOptions}; -use std::io::{self, Read, Write, Seek, SeekFrom}; +use std::fs; +use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process::Command; use std::time::{SystemTime, Instant}; -use filetime::{self, FileTime}; +use config::Config; +use builder::Builder; /// Returns the `name` as the filename of a static library for `target`. 
pub fn staticlib(name: &str, target: &str) -> String { @@ -32,102 +33,6 @@ pub fn staticlib(name: &str, target: &str) -> String { } } -/// Copies a file from `src` to `dst` -pub fn copy(src: &Path, dst: &Path) { - let _ = fs::remove_file(&dst); - // Attempt to "easy copy" by creating a hard link (symlinks don't work on - // windows), but if that fails just fall back to a slow `copy` operation. - if let Ok(()) = fs::hard_link(src, dst) { - return - } - if let Err(e) = fs::copy(src, dst) { - panic!("failed to copy `{}` to `{}`: {}", src.display(), - dst.display(), e) - } - let metadata = t!(src.metadata()); - t!(fs::set_permissions(dst, metadata.permissions())); - let atime = FileTime::from_last_access_time(&metadata); - let mtime = FileTime::from_last_modification_time(&metadata); - t!(filetime::set_file_times(dst, atime, mtime)); -} - -/// Search-and-replaces within a file. (Not maximally efficiently: allocates a -/// new string for each replacement.) -pub fn replace_in_file(path: &Path, replacements: &[(&str, &str)]) { - let mut contents = String::new(); - let mut file = t!(OpenOptions::new().read(true).write(true).open(path)); - t!(file.read_to_string(&mut contents)); - for &(target, replacement) in replacements { - contents = contents.replace(target, replacement); - } - t!(file.seek(SeekFrom::Start(0))); - t!(file.set_len(0)); - t!(file.write_all(contents.as_bytes())); -} - -pub fn read_stamp_file(stamp: &Path) -> Vec { - let mut paths = Vec::new(); - let mut contents = Vec::new(); - t!(t!(File::open(stamp)).read_to_end(&mut contents)); - // This is the method we use for extracting paths from the stamp file passed to us. See - // run_cargo for more information (in compile.rs). - for part in contents.split(|b| *b == 0) { - if part.is_empty() { - continue - } - let path = PathBuf::from(t!(str::from_utf8(part))); - paths.push(path); - } - paths -} - -/// Copies the `src` directory recursively to `dst`. Both are assumed to exist -/// when this function is called. 
-pub fn cp_r(src: &Path, dst: &Path) { - for f in t!(fs::read_dir(src)) { - let f = t!(f); - let path = f.path(); - let name = path.file_name().unwrap(); - let dst = dst.join(name); - if t!(f.file_type()).is_dir() { - t!(fs::create_dir_all(&dst)); - cp_r(&path, &dst); - } else { - let _ = fs::remove_file(&dst); - copy(&path, &dst); - } - } -} - -/// Copies the `src` directory recursively to `dst`. Both are assumed to exist -/// when this function is called. Unwanted files or directories can be skipped -/// by returning `false` from the filter function. -pub fn cp_filtered(src: &Path, dst: &Path, filter: &Fn(&Path) -> bool) { - // Inner function does the actual work - fn recurse(src: &Path, dst: &Path, relative: &Path, filter: &Fn(&Path) -> bool) { - for f in t!(fs::read_dir(src)) { - let f = t!(f); - let path = f.path(); - let name = path.file_name().unwrap(); - let dst = dst.join(name); - let relative = relative.join(name); - // Only copy file or directory if the filter function returns true - if filter(&relative) { - if t!(f.file_type()).is_dir() { - let _ = fs::remove_dir_all(&dst); - t!(fs::create_dir(&dst)); - recurse(&path, &dst, &relative, filter); - } else { - let _ = fs::remove_file(&dst); - copy(&path, &dst); - } - } - } - } - // Immediately recurse with an empty relative path - recurse(src, dst, Path::new(""), filter) -} - /// Given an executable called `name`, return the filename for the /// executable for a particular target. pub fn exe(name: &str, target: &str) -> String { @@ -187,34 +92,34 @@ pub fn push_exe_path(mut buf: PathBuf, components: &[&str]) -> PathBuf { file.push_str(".exe"); } - for c in components { - buf.push(c); - } - + buf.extend(components); buf.push(file); buf } -pub struct TimeIt(Instant); +pub struct TimeIt(bool, Instant); /// Returns an RAII structure that prints out how long it took to drop. 
-pub fn timeit() -> TimeIt { - TimeIt(Instant::now()) +pub fn timeit(builder: &Builder) -> TimeIt { + TimeIt(builder.config.dry_run, Instant::now()) } impl Drop for TimeIt { fn drop(&mut self) { - let time = self.0.elapsed(); - println!("\tfinished in {}.{:03}", - time.as_secs(), - time.subsec_nanos() / 1_000_000); + let time = self.1.elapsed(); + if !self.0 { + println!("\tfinished in {}.{:03}", + time.as_secs(), + time.subsec_nanos() / 1_000_000); + } } } /// Symlinks two directories, using junctions on Windows and normal symlinks on /// Unix. -pub fn symlink_dir(src: &Path, dest: &Path) -> io::Result<()> { +pub fn symlink_dir(config: &Config, src: &Path, dest: &Path) -> io::Result<()> { + if config.dry_run { return Ok(()); } let _ = fs::remove_dir(dest); return symlink_dir_inner(src, dest); @@ -288,6 +193,7 @@ pub fn symlink_dir(src: &Path, dest: &Path) -> io::Result<()> { nOutBufferSize: DWORD, lpBytesReturned: LPDWORD, lpOverlapped: LPOVERLAPPED) -> BOOL; + fn CloseHandle(hObject: HANDLE) -> BOOL; } fn to_u16s>(s: S) -> io::Result> { @@ -341,11 +247,13 @@ pub fn symlink_dir(src: &Path, dest: &Path) -> io::Result<()> { &mut ret, ptr::null_mut()); - if res == 0 { + let out = if res == 0 { Err(io::Error::last_os_error()) } else { Ok(()) - } + }; + CloseHandle(h); + out } } } diff --git a/src/build_helper/Cargo.toml b/src/build_helper/Cargo.toml index f8ade0616a57..01d704f816bb 100644 --- a/src/build_helper/Cargo.toml +++ b/src/build_helper/Cargo.toml @@ -6,6 +6,3 @@ authors = ["The Rust Project Developers"] [lib] name = "build_helper" path = "lib.rs" - -[dependencies] -filetime = "0.1" diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs index 363bbd795442..1cbb8e49bfa1 100644 --- a/src/build_helper/lib.rs +++ b/src/build_helper/lib.rs @@ -8,16 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![deny(warnings)] - -extern crate filetime; - use std::fs::File; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; -use std::{fs, env}; - -use filetime::FileTime; +use std::time::{SystemTime, UNIX_EPOCH}; +use std::{env, fs}; +use std::thread; /// A helper macro to `unwrap` a result except also print out details like: /// @@ -29,10 +25,12 @@ use filetime::FileTime; /// using a `Result` with `try!`, but this may change one day... #[macro_export] macro_rules! t { - ($e:expr) => (match $e { - Ok(e) => e, - Err(e) => panic!("{} failed with {}", stringify!($e), e), - }) + ($e:expr) => { + match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {}", stringify!($e), e), + } + }; } pub fn run(cmd: &mut Command) { @@ -49,14 +47,17 @@ pub fn run_silent(cmd: &mut Command) { pub fn try_run_silent(cmd: &mut Command) -> bool { let status = match cmd.status() { Ok(status) => status, - Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", - cmd, e)), + Err(e) => fail(&format!( + "failed to execute command: {:?}\nerror: {}", + cmd, e + )), }; if !status.success() { - println!("\n\ncommand did not execute successfully: {:?}\n\ - expected success, got: {}\n\n", - cmd, - status); + println!( + "\n\ncommand did not execute successfully: {:?}\n\ + expected success, got: {}\n\n", + cmd, status + ); } status.success() } @@ -70,18 +71,22 @@ pub fn run_suppressed(cmd: &mut Command) { pub fn try_run_suppressed(cmd: &mut Command) -> bool { let output = match cmd.output() { Ok(status) => status, - Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", - cmd, e)), + Err(e) => fail(&format!( + "failed to execute command: {:?}\nerror: {}", + cmd, e + )), }; if !output.status.success() { - println!("\n\ncommand did not execute successfully: {:?}\n\ - expected success, got: {}\n\n\ - stdout ----\n{}\n\ - stderr ----\n{}\n\n", - cmd, - output.status, - String::from_utf8_lossy(&output.stdout), - String::from_utf8_lossy(&output.stderr)); + 
println!( + "\n\ncommand did not execute successfully: {:?}\n\ + expected success, got: {}\n\n\ + stdout ----\n{}\n\ + stderr ----\n{}\n\n", + cmd, + output.status, + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr) + ); } output.status.success() } @@ -97,9 +102,9 @@ pub fn gnu_target(target: &str) -> String { } pub fn make(host: &str) -> PathBuf { - if host.contains("bitrig") || host.contains("dragonfly") || - host.contains("freebsd") || host.contains("netbsd") || - host.contains("openbsd") { + if host.contains("bitrig") || host.contains("dragonfly") || host.contains("freebsd") + || host.contains("netbsd") || host.contains("openbsd") + { PathBuf::from("gmake") } else { PathBuf::from("make") @@ -109,23 +114,27 @@ pub fn make(host: &str) -> PathBuf { pub fn output(cmd: &mut Command) -> String { let output = match cmd.stderr(Stdio::inherit()).output() { Ok(status) => status, - Err(e) => fail(&format!("failed to execute command: {:?}\nerror: {}", - cmd, e)), + Err(e) => fail(&format!( + "failed to execute command: {:?}\nerror: {}", + cmd, e + )), }; if !output.status.success() { - panic!("command did not execute successfully: {:?}\n\ - expected success, got: {}", - cmd, - output.status); + panic!( + "command did not execute successfully: {:?}\n\ + expected success, got: {}", + cmd, output.status + ); } String::from_utf8(output.stdout).unwrap() } pub fn rerun_if_changed_anything_in_dir(dir: &Path) { - let mut stack = dir.read_dir().unwrap() - .map(|e| e.unwrap()) - .filter(|e| &*e.file_name() != ".git") - .collect::>(); + let mut stack = dir.read_dir() + .unwrap() + .map(|e| e.unwrap()) + .filter(|e| &*e.file_name() != ".git") + .collect::>(); while let Some(entry) = stack.pop() { let path = entry.path(); if entry.file_type().unwrap().is_dir() { @@ -137,10 +146,10 @@ pub fn rerun_if_changed_anything_in_dir(dir: &Path) { } /// Returns the last-modified time for `path`, or zero if it doesn't exist. 
-pub fn mtime(path: &Path) -> FileTime { - fs::metadata(path).map(|f| { - FileTime::from_last_modification_time(&f) - }).unwrap_or(FileTime::zero()) +pub fn mtime(path: &Path) -> SystemTime { + fs::metadata(path) + .and_then(|f| f.modified()) + .unwrap_or(UNIX_EPOCH) } /// Returns whether `dst` is up to date given that the file or files in `src` @@ -157,9 +166,9 @@ pub fn up_to_date(src: &Path, dst: &Path) -> bool { Err(e) => panic!("source {:?} failed to get metadata: {}", src, e), }; if meta.is_dir() { - dir_up_to_date(src, &threshold) + dir_up_to_date(src, threshold) } else { - FileTime::from_last_modification_time(&meta) <= threshold + meta.modified().unwrap_or(UNIX_EPOCH) <= threshold } } @@ -171,7 +180,9 @@ pub struct NativeLibBoilerplate { impl Drop for NativeLibBoilerplate { fn drop(&mut self) { - t!(File::create(self.out_dir.join("rustbuild.timestamp"))); + if !thread::panicking() { + t!(File::create(self.out_dir.join("rustbuild.timestamp"))); + } } } @@ -181,11 +192,12 @@ impl Drop for NativeLibBoilerplate { // If Err is returned, then everything is up-to-date and further build actions can be skipped. // Timestamps are created automatically when the result of `native_lib_boilerplate` goes out // of scope, so all the build actions should be completed until then. 
-pub fn native_lib_boilerplate(src_name: &str, - out_name: &str, - link_name: &str, - search_subdir: &str) - -> Result { +pub fn native_lib_boilerplate( + src_name: &str, + out_name: &str, + link_name: &str, + search_subdir: &str, +) -> Result { let current_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap()); let src_dir = current_dir.join("..").join(src_name); rerun_if_changed_anything_in_dir(&src_dir); @@ -198,41 +210,59 @@ pub fn native_lib_boilerplate(src_name: &str, } else { println!("cargo:rustc-link-lib=static={}", link_name); } - println!("cargo:rustc-link-search=native={}", out_dir.join(search_subdir).display()); + println!( + "cargo:rustc-link-search=native={}", + out_dir.join(search_subdir).display() + ); let timestamp = out_dir.join("rustbuild.timestamp"); if !up_to_date(Path::new("build.rs"), ×tamp) || !up_to_date(&src_dir, ×tamp) { - Ok(NativeLibBoilerplate { src_dir: src_dir, out_dir: out_dir }) + Ok(NativeLibBoilerplate { + src_dir: src_dir, + out_dir: out_dir, + }) } else { Err(()) } } -pub fn sanitizer_lib_boilerplate(sanitizer_name: &str) -> Result { - let (link_name, search_path) = match &*env::var("TARGET").unwrap() { +pub fn sanitizer_lib_boilerplate(sanitizer_name: &str) + -> Result<(NativeLibBoilerplate, String), ()> +{ + let (link_name, search_path, dynamic) = match &*env::var("TARGET").unwrap() { "x86_64-unknown-linux-gnu" => ( format!("clang_rt.{}-x86_64", sanitizer_name), "build/lib/linux", + false, ), "x86_64-apple-darwin" => ( - format!("dylib=clang_rt.{}_osx_dynamic", sanitizer_name), + format!("clang_rt.{}_osx_dynamic", sanitizer_name), "build/lib/darwin", + true, ), _ => return Err(()), }; - native_lib_boilerplate("libcompiler_builtins/compiler-rt", - sanitizer_name, - &link_name, - search_path) + let to_link = if dynamic { + format!("dylib={}", link_name) + } else { + format!("static={}", link_name) + }; + let lib = native_lib_boilerplate( + "libcompiler_builtins/compiler-rt", + sanitizer_name, + &to_link, + search_path, + 
)?; + Ok((lib, link_name)) } -fn dir_up_to_date(src: &Path, threshold: &FileTime) -> bool { +fn dir_up_to_date(src: &Path, threshold: SystemTime) -> bool { t!(fs::read_dir(src)).map(|e| t!(e)).all(|e| { let meta = t!(e.metadata()); if meta.is_dir() { dir_up_to_date(&e.path(), threshold) } else { - FileTime::from_last_modification_time(&meta) < *threshold + meta.modified().unwrap_or(UNIX_EPOCH) < threshold } }) } diff --git a/src/ci/docker/arm-android/Dockerfile b/src/ci/docker/arm-android/Dockerfile index f2773a720cfb..e10ccd56a4a5 100644 --- a/src/ci/docker/arm-android/Dockerfile +++ b/src/ci/docker/arm-android/Dockerfile @@ -31,9 +31,7 @@ ENV PATH=$PATH:/android/sdk/platform-tools ENV TARGETS=arm-linux-androideabi -ENV RUST_CONFIGURE_ARGS \ - --target=$TARGETS \ - --arm-linux-androideabi-ndk=/android/ndk/arm-14 +ENV RUST_CONFIGURE_ARGS --arm-linux-androideabi-ndk=/android/ndk/arm-14 ENV SCRIPT python2.7 ../x.py test --target $TARGETS diff --git a/src/ci/docker/armhf-gnu/Dockerfile b/src/ci/docker/armhf-gnu/Dockerfile index 191f8e3a2895..2b7624d53ee0 100644 --- a/src/ci/docker/armhf-gnu/Dockerfile +++ b/src/ci/docker/armhf-gnu/Dockerfile @@ -76,9 +76,7 @@ RUN curl -O http://ftp.nl.debian.org/debian/dists/jessie/main/installer-armhf/cu COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh -ENV RUST_CONFIGURE_ARGS \ - --target=arm-unknown-linux-gnueabihf \ - --qemu-armhf-rootfs=/tmp/rootfs +ENV RUST_CONFIGURE_ARGS --qemu-armhf-rootfs=/tmp/rootfs ENV SCRIPT python2.7 ../x.py test --target arm-unknown-linux-gnueabihf ENV NO_CHANGE_USER=1 diff --git a/src/ci/docker/asmjs/Dockerfile b/src/ci/docker/asmjs/Dockerfile index 07849a20d004..cb85cf3d9e9f 100644 --- a/src/ci/docker/asmjs/Dockerfile +++ b/src/ci/docker/asmjs/Dockerfile @@ -29,6 +29,11 @@ ENV EM_CONFIG=/emsdk-portable/.emscripten ENV TARGETS=asmjs-unknown-emscripten -ENV RUST_CONFIGURE_ARGS --target=$TARGETS +ENV RUST_CONFIGURE_ARGS --enable-emscripten --disable-optimize-tests -ENV SCRIPT python2.7 ../x.py 
test --target $TARGETS +ENV SCRIPT python2.7 ../x.py test --target $TARGETS \ + src/test/run-pass \ + src/test/run-fail \ + src/libstd \ + src/liballoc \ + src/libcore diff --git a/src/ci/docker/disabled/aarch64-gnu/Dockerfile b/src/ci/docker/disabled/aarch64-gnu/Dockerfile index fedb4094c8aa..b2a3ba3ec260 100644 --- a/src/ci/docker/disabled/aarch64-gnu/Dockerfile +++ b/src/ci/docker/disabled/aarch64-gnu/Dockerfile @@ -74,7 +74,6 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV RUST_CONFIGURE_ARGS \ - --target=aarch64-unknown-linux-gnu \ --qemu-aarch64-rootfs=/tmp/rootfs ENV SCRIPT python2.7 ../x.py test --target aarch64-unknown-linux-gnu ENV NO_CHANGE_USER=1 diff --git a/src/ci/docker/disabled/dist-aarch64-android/Dockerfile b/src/ci/docker/disabled/dist-aarch64-android/Dockerfile index ce5e8cfaf095..a7903b6f4250 100644 --- a/src/ci/docker/disabled/dist-aarch64-android/Dockerfile +++ b/src/ci/docker/disabled/dist-aarch64-android/Dockerfile @@ -14,8 +14,6 @@ ENV DEP_Z_ROOT=/android/ndk/arm64-21/sysroot/usr/ ENV HOSTS=aarch64-linux-android ENV RUST_CONFIGURE_ARGS \ - --host=$HOSTS \ - --target=$HOSTS \ --aarch64-linux-android-ndk=/android/ndk/arm64-21 \ --disable-rpath \ --enable-extended \ diff --git a/src/ci/docker/disabled/dist-armebv7r-none-eabihf/Dockerfile b/src/ci/docker/disabled/dist-armebv7r-none-eabihf/Dockerfile new file mode 100644 index 000000000000..34c6e640abb7 --- /dev/null +++ b/src/ci/docker/disabled/dist-armebv7r-none-eabihf/Dockerfile @@ -0,0 +1,36 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + sudo \ + xz-utils \ + bzip2 \ + libssl-dev \ + pkg-config + + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV BASE_URL=https://releases.linaro.org/components/toolchain/binaries/latest/armeb-eabi/ +ENV GCC_LINARO=gcc-linaro-7.2.1-2017.11-x86_64_armeb-eabi + +RUN curl -sL 
$BASE_URL/$GCC_LINARO.tar.xz | tar -xJ + +ENV PATH=$PATH:/$GCC_LINARO/bin + +ENV TARGET=armebv7r-none-eabihf + +ENV CC_armebv7r_none_eabihf=armeb-eabi-gcc \ + CFLAGS_armebv7r_none_eabihf="-march=armv7-r" + +ENV RUST_CONFIGURE_ARGS --disable-docs + +ENV SCRIPT python2.7 ../x.py dist --target $TARGET diff --git a/src/ci/docker/disabled/dist-armv7-android/Dockerfile b/src/ci/docker/disabled/dist-armv7-android/Dockerfile index 3177fa2147fa..c02a5e5a0954 100644 --- a/src/ci/docker/disabled/dist-armv7-android/Dockerfile +++ b/src/ci/docker/disabled/dist-armv7-android/Dockerfile @@ -20,8 +20,6 @@ ENV DEP_Z_ROOT=/android/ndk/arm-14/sysroot/usr/ ENV HOSTS=armv7-linux-androideabi ENV RUST_CONFIGURE_ARGS \ - --host=$HOSTS \ - --target=$HOSTS \ --armv7-linux-androideabi-ndk=/android/ndk/arm \ --disable-rpath \ --enable-extended \ diff --git a/src/ci/docker/disabled/dist-i686-android/Dockerfile b/src/ci/docker/disabled/dist-i686-android/Dockerfile index ace9c4feb4f3..04e83a431c45 100644 --- a/src/ci/docker/disabled/dist-i686-android/Dockerfile +++ b/src/ci/docker/disabled/dist-i686-android/Dockerfile @@ -20,8 +20,6 @@ ENV DEP_Z_ROOT=/android/ndk/x86-14/sysroot/usr/ ENV HOSTS=i686-linux-android ENV RUST_CONFIGURE_ARGS \ - --host=$HOSTS \ - --target=$HOSTS \ --i686-linux-android-ndk=/android/ndk/x86 \ --disable-rpath \ --enable-extended \ diff --git a/src/ci/docker/disabled/dist-sparc64-linux/Dockerfile b/src/ci/docker/disabled/dist-sparc64-linux/Dockerfile new file mode 100644 index 000000000000..952c265a1390 --- /dev/null +++ b/src/ci/docker/disabled/dist-sparc64-linux/Dockerfile @@ -0,0 +1,26 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + sudo \ + gdb \ + xz-utils \ + g++-sparc64-linux-gnu \ + libssl-dev \ + pkg-config + + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV HOSTS=sparc64-unknown-linux-gnu + +ENV 
RUST_CONFIGURE_ARGS --enable-extended --disable-docs +ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/disabled/dist-x86_64-android/Dockerfile b/src/ci/docker/disabled/dist-x86_64-android/Dockerfile index 322d26f0adc4..937301864cd0 100644 --- a/src/ci/docker/disabled/dist-x86_64-android/Dockerfile +++ b/src/ci/docker/disabled/dist-x86_64-android/Dockerfile @@ -14,8 +14,6 @@ ENV DEP_Z_ROOT=/android/ndk/x86_64-21/sysroot/usr/ ENV HOSTS=x86_64-linux-android ENV RUST_CONFIGURE_ARGS \ - --host=$HOSTS \ - --target=$HOSTS \ --x86_64-linux-android-ndk=/android/ndk/x86_64-21 \ --disable-rpath \ --enable-extended \ diff --git a/src/ci/docker/disabled/dist-x86_64-dragonfly/Dockerfile b/src/ci/docker/disabled/dist-x86_64-dragonfly/Dockerfile index f3509efdb988..dbff9e32e131 100644 --- a/src/ci/docker/disabled/dist-x86_64-dragonfly/Dockerfile +++ b/src/ci/docker/disabled/dist-x86_64-dragonfly/Dockerfile @@ -32,5 +32,5 @@ ENV \ ENV HOSTS=x86_64-unknown-dragonfly -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile b/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile index 621976b5cbe3..440afd7c97f5 100644 --- a/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile +++ b/src/ci/docker/disabled/dist-x86_64-haiku/Dockerfile @@ -42,8 +42,8 @@ RUN sh /scripts/sccache.sh ENV HOST=x86_64-unknown-haiku ENV TARGET=target.$HOST -ENV RUST_CONFIGURE_ARGS --host=$HOST --target=$HOST --disable-jemalloc \ +ENV RUST_CONFIGURE_ARGS --disable-jemalloc \ --set=$TARGET.cc=x86_64-unknown-haiku-gcc \ --set=$TARGET.cxx=x86_64-unknown-haiku-g++ \ --set=$TARGET.llvm-config=/bin/llvm-config-haiku -ENV SCRIPT python2.7 ../x.py dist +ENV SCRIPT python2.7 ../x.py dist --host=$HOST --target=$HOST diff --git a/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile 
b/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile index ed19939545f6..f4c25f791bc3 100644 --- a/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile +++ b/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile @@ -18,5 +18,5 @@ ENV \ CC_x86_64_unknown_redox=x86_64-unknown-redox-gcc \ CXX_x86_64_unknown_redox=x86_64-unknown-redox-g++ -ENV RUST_CONFIGURE_ARGS --target=x86_64-unknown-redox --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended ENV SCRIPT python2.7 ../x.py dist --target x86_64-unknown-redox diff --git a/src/ci/docker/disabled/wasm32-exp/Dockerfile b/src/ci/docker/disabled/wasm32-exp/Dockerfile index 8653b0e8b465..420d47b314c0 100644 --- a/src/ci/docker/disabled/wasm32-exp/Dockerfile +++ b/src/ci/docker/disabled/wasm32-exp/Dockerfile @@ -30,6 +30,6 @@ ENV EM_CONFIG=/root/.emscripten ENV TARGETS=wasm32-experimental-emscripten -ENV RUST_CONFIGURE_ARGS --target=$TARGETS --experimental-targets=WebAssembly +ENV RUST_CONFIGURE_ARGS --experimental-targets=WebAssembly ENV SCRIPT python2.7 ../x.py test --target $TARGETS diff --git a/src/ci/docker/disabled/wasm32/Dockerfile b/src/ci/docker/disabled/wasm32/Dockerfile index 7f6f8ae08827..6ac90d17450a 100644 --- a/src/ci/docker/disabled/wasm32/Dockerfile +++ b/src/ci/docker/disabled/wasm32/Dockerfile @@ -29,7 +29,4 @@ ENV BINARYEN_ROOT=/emsdk-portable/clang/e1.37.13_64bit/binaryen/ ENV EM_CONFIG=/emsdk-portable/.emscripten ENV TARGETS=wasm32-unknown-emscripten - -ENV RUST_CONFIGURE_ARGS --target=$TARGETS - ENV SCRIPT python2.7 ../x.py test --target $TARGETS diff --git a/src/ci/docker/dist-aarch64-linux/Dockerfile b/src/ci/docker/dist-aarch64-linux/Dockerfile index 841d3012125f..cddfa557f6ae 100644 --- a/src/ci/docker/dist-aarch64-linux/Dockerfile +++ b/src/ci/docker/dist-aarch64-linux/Dockerfile @@ -32,5 +32,5 @@ ENV CC_aarch64_unknown_linux_gnu=aarch64-unknown-linux-gnueabi-gcc \ ENV HOSTS=aarch64-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS 
--enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-android/Dockerfile b/src/ci/docker/dist-android/Dockerfile index 5d7545a3c2a9..e00c23dac89b 100644 --- a/src/ci/docker/dist-android/Dockerfile +++ b/src/ci/docker/dist-android/Dockerfile @@ -21,13 +21,13 @@ ENV TARGETS=$TARGETS,aarch64-linux-android ENV TARGETS=$TARGETS,x86_64-linux-android ENV RUST_CONFIGURE_ARGS \ - --target=$TARGETS \ --enable-extended \ --arm-linux-androideabi-ndk=/android/ndk/arm-14 \ --armv7-linux-androideabi-ndk=/android/ndk/arm-14 \ --i686-linux-android-ndk=/android/ndk/x86-14 \ --aarch64-linux-android-ndk=/android/ndk/arm64-21 \ - --x86_64-linux-android-ndk=/android/ndk/x86_64-21 + --x86_64-linux-android-ndk=/android/ndk/x86_64-21 \ + --disable-docs ENV SCRIPT python2.7 ../x.py dist --target $TARGETS diff --git a/src/ci/docker/dist-arm-linux/Dockerfile b/src/ci/docker/dist-arm-linux/Dockerfile index ecd5090ea05f..6ddc5c1e04ae 100644 --- a/src/ci/docker/dist-arm-linux/Dockerfile +++ b/src/ci/docker/dist-arm-linux/Dockerfile @@ -32,5 +32,5 @@ ENV CC_arm_unknown_linux_gnueabi=arm-unknown-linux-gnueabi-gcc \ ENV HOSTS=arm-unknown-linux-gnueabi -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-armhf-linux/Dockerfile b/src/ci/docker/dist-armhf-linux/Dockerfile index 5bbd17bd4142..e4d4b2feeec4 100644 --- a/src/ci/docker/dist-armhf-linux/Dockerfile +++ b/src/ci/docker/dist-armhf-linux/Dockerfile @@ -32,5 +32,5 @@ ENV CC_arm_unknown_linux_gnueabihf=arm-unknown-linux-gnueabihf-gcc \ ENV HOSTS=arm-unknown-linux-gnueabihf -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-armv7-linux/Dockerfile 
b/src/ci/docker/dist-armv7-linux/Dockerfile index ea9034d71788..99fe7bd7b8f7 100644 --- a/src/ci/docker/dist-armv7-linux/Dockerfile +++ b/src/ci/docker/dist-armv7-linux/Dockerfile @@ -32,5 +32,5 @@ ENV CC_armv7_unknown_linux_gnueabihf=armv7-unknown-linux-gnueabihf-gcc \ ENV HOSTS=armv7-unknown-linux-gnueabihf -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-i586-gnu-i586-i686-musl/Dockerfile b/src/ci/docker/dist-i586-gnu-i586-i686-musl/Dockerfile index 4c9d4b3ba78e..ba2d32a9296b 100644 --- a/src/ci/docker/dist-i586-gnu-i586-i686-musl/Dockerfile +++ b/src/ci/docker/dist-i586-gnu-i586-i686-musl/Dockerfile @@ -18,10 +18,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ WORKDIR /build/ COPY scripts/musl.sh /build/ -RUN CC=gcc CFLAGS="-m32 -fPIC -Wa,-mrelax-relocations=no" \ +RUN CC=gcc CFLAGS="-m32 -Wa,-mrelax-relocations=no" \ CXX=g++ CXXFLAGS="-m32 -Wa,-mrelax-relocations=no" \ bash musl.sh i686 --target=i686 && \ - CC=gcc CFLAGS="-march=pentium -m32 -fPIC -Wa,-mrelax-relocations=no" \ + CC=gcc CFLAGS="-march=pentium -m32 -Wa,-mrelax-relocations=no" \ CXX=g++ CXXFLAGS="-march=pentium -m32 -Wa,-mrelax-relocations=no" \ bash musl.sh i586 --target=i586 && \ rm -rf /build @@ -30,10 +30,10 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV RUST_CONFIGURE_ARGS \ - --target=i686-unknown-linux-musl,i586-unknown-linux-gnu \ --musl-root-i586=/musl-i586 \ --musl-root-i686=/musl-i686 \ - --enable-extended + --enable-extended \ + --disable-docs # Newer binutils broke things on some vms/distros (i.e., linking against # unknown relocs disabled by the following flag), so we need to go out of our @@ -42,12 +42,9 @@ ENV RUST_CONFIGURE_ARGS \ # See: https://github.com/rust-lang/rust/issues/34978 ENV CFLAGS_i686_unknown_linux_musl=-Wa,-mrelax-relocations=no ENV 
CFLAGS_i586_unknown_linux_gnu=-Wa,-mrelax-relocations=no -# FIXME remove -Wl,-melf_i386 after cc is updated to include -# https://github.com/alexcrichton/cc-rs/pull/281 -ENV CFLAGS_i586_unknown_linux_musl="-Wa,-mrelax-relocations=no -Wl,-melf_i386" +ENV CFLAGS_i586_unknown_linux_musl=-Wa,-mrelax-relocations=no -ENV TARGETS=i586-unknown-linux-gnu -ENV TARGETS=$TARGETS,i686-unknown-linux-musl +ENV TARGETS=i586-unknown-linux-gnu,i686-unknown-linux-musl ENV SCRIPT \ python2.7 ../x.py test --target $TARGETS && \ diff --git a/src/ci/docker/dist-i686-freebsd/Dockerfile b/src/ci/docker/dist-i686-freebsd/Dockerfile index 686afc97289b..6f6a663a3309 100644 --- a/src/ci/docker/dist-i686-freebsd/Dockerfile +++ b/src/ci/docker/dist-i686-freebsd/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:18.04 RUN apt-get update && apt-get install -y --no-install-recommends \ clang \ @@ -29,5 +29,5 @@ ENV \ ENV HOSTS=i686-unknown-freebsd -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-i686-linux/Dockerfile b/src/ci/docker/dist-i686-linux/Dockerfile index a5d776af19da..d591fb28f36e 100644 --- a/src/ci/docker/dist-i686-linux/Dockerfile +++ b/src/ci/docker/dist-i686-linux/Dockerfile @@ -29,13 +29,13 @@ ENV PATH=/rustroot/bin:$PATH ENV LD_LIBRARY_PATH=/rustroot/lib64:/rustroot/lib ENV PKG_CONFIG_PATH=/rustroot/lib/pkgconfig WORKDIR /tmp -COPY dist-i686-linux/shared.sh dist-i686-linux/build-binutils.sh /tmp/ +COPY dist-x86_64-linux/shared.sh /tmp/ # We need a build of openssl which supports SNI to download artifacts from # static.rust-lang.org. This'll be used to link into libcurl below (and used # later as well), so build a copy of OpenSSL with dynamic libraries into our # generic root. 
-COPY dist-i686-linux/build-openssl.sh /tmp/ +COPY dist-x86_64-linux/build-openssl.sh /tmp/ RUN ./build-openssl.sh # The `curl` binary on CentOS doesn't support SNI which is needed for fetching @@ -44,36 +44,43 @@ RUN ./build-openssl.sh # # Note that we also disable a bunch of optional features of curl that we don't # really need. -COPY dist-i686-linux/build-curl.sh /tmp/ +COPY dist-x86_64-linux/build-curl.sh /tmp/ RUN ./build-curl.sh # binutils < 2.22 has a bug where the 32-bit executables it generates # immediately segfault in Rust, so we need to install our own binutils. # # See https://github.com/rust-lang/rust/issues/20440 for more info +COPY dist-x86_64-linux/build-binutils.sh /tmp/ RUN ./build-binutils.sh -# Need a newer version of gcc than centos has to compile LLVM nowadays -COPY dist-i686-linux/build-gcc.sh /tmp/ -RUN ./build-gcc.sh - -# CentOS 5.5 has Python 2.4 by default, but LLVM needs 2.7+ -COPY dist-i686-linux/build-python.sh /tmp/ -RUN ./build-python.sh - -# Apparently CentOS 5.5 desn't have `git` in yum, but we're gonna need it for -# cloning, so download and build it here. -COPY dist-i686-linux/build-git.sh /tmp/ -RUN ./build-git.sh - # libssh2 (a dependency of Cargo) requires cmake 2.8.11 or higher but CentOS # only has 2.6.4, so build our own -COPY dist-i686-linux/build-cmake.sh /tmp/ +COPY dist-x86_64-linux/build-cmake.sh /tmp/ RUN ./build-cmake.sh +# Need a newer version of gcc than centos has to compile LLVM nowadays +COPY dist-x86_64-linux/build-gcc.sh /tmp/ +RUN ./build-gcc.sh + +# CentOS 5.5 has Python 2.4 by default, but LLVM needs 2.7+ +COPY dist-x86_64-linux/build-python.sh /tmp/ +RUN ./build-python.sh + +# Now build LLVM+Clang 6, afterwards configuring further compilations to use the +# clang/clang++ compilers. +COPY dist-x86_64-linux/build-clang.sh /tmp/ +RUN ./build-clang.sh +ENV CC=clang CXX=clang++ + +# Apparently CentOS 5.5 desn't have `git` in yum, but we're gonna need it for +# cloning, so download and build it here. 
+COPY dist-x86_64-linux/build-git.sh /tmp/ +RUN ./build-git.sh + # for sanitizers, we need kernel headers files newer than the ones CentOS ships # with so we install newer ones here -COPY dist-i686-linux/build-headers.sh /tmp/ +COPY dist-x86_64-linux/build-headers.sh /tmp/ RUN ./build-headers.sh COPY scripts/sccache.sh /scripts/ @@ -82,14 +89,23 @@ RUN sh /scripts/sccache.sh ENV HOSTS=i686-unknown-linux-gnu ENV RUST_CONFIGURE_ARGS \ - --host=$HOSTS \ - --enable-extended \ + --enable-full-tools \ --enable-sanitizers \ - --enable-profiler -ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS + --enable-profiler \ + --set target.i686-unknown-linux-gnu.linker=clang \ + --build=i686-unknown-linux-gnu +ENV SCRIPT python2.7 ../x.py dist --build $HOSTS --host $HOSTS --target $HOSTS +ENV CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_LINKER=clang -# This is the only builder which will create source tarballs -ENV DIST_SRC 1 +# This was added when we switched from gcc to clang. It's not clear why this is +# needed unfortunately, but without this the stage1 bootstrap segfaults +# somewhere inside of a build script. The build ends up just hanging instead of +# actually killing the process that segfaulted, but if the process is run +# manually in a debugger the segfault is immediately seen as well as the +# misaligned stack access. +# +# Added in #50200 there's some more logs there +ENV CFLAGS -mstackrealign # When we build cargo in this container, we don't want it to use the system # libcurl, instead it should compile its own. diff --git a/src/ci/docker/dist-i686-linux/build-binutils.sh b/src/ci/docker/dist-i686-linux/build-binutils.sh deleted file mode 100755 index f4bdbd80d0ed..000000000000 --- a/src/ci/docker/dist-i686-linux/build-binutils.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -source shared.sh - -curl https://ftp.gnu.org/gnu/binutils/binutils-2.25.1.tar.bz2 | tar xfj - - -mkdir binutils-build -cd binutils-build -hide_output ../binutils-2.25.1/configure --prefix=/rustroot -hide_output make -j10 -hide_output make install - -cd .. -rm -rf binutils-build -rm -rf binutils-2.25.1 diff --git a/src/ci/docker/dist-i686-linux/build-cmake.sh b/src/ci/docker/dist-i686-linux/build-cmake.sh deleted file mode 100755 index 9a3763d421ad..000000000000 --- a/src/ci/docker/dist-i686-linux/build-cmake.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex -source shared.sh - -curl https://cmake.org/files/v3.6/cmake-3.6.3.tar.gz | tar xzf - - -mkdir cmake-build -cd cmake-build -hide_output ../cmake-3.6.3/configure --prefix=/rustroot -hide_output make -j10 -hide_output make install - -cd .. -rm -rf cmake-build -rm -rf cmake-3.6.3 diff --git a/src/ci/docker/dist-i686-linux/build-curl.sh b/src/ci/docker/dist-i686-linux/build-curl.sh deleted file mode 100755 index edf3175b81c4..000000000000 --- a/src/ci/docker/dist-i686-linux/build-curl.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. 
This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex -source shared.sh - -VERSION=7.51.0 - -curl http://cool.haxx.se/download/curl-$VERSION.tar.bz2 | tar xjf - - -mkdir curl-build -cd curl-build -hide_output ../curl-$VERSION/configure \ - --prefix=/rustroot \ - --with-ssl=/rustroot \ - --disable-sspi \ - --disable-gopher \ - --disable-smtp \ - --disable-smb \ - --disable-imap \ - --disable-pop3 \ - --disable-tftp \ - --disable-telnet \ - --disable-manual \ - --disable-dict \ - --disable-rtsp \ - --disable-ldaps \ - --disable-ldap -hide_output make -j10 -hide_output make install - -cd .. -rm -rf curl-build -rm -rf curl-$VERSION -yum erase -y curl diff --git a/src/ci/docker/dist-i686-linux/build-gcc.sh b/src/ci/docker/dist-i686-linux/build-gcc.sh deleted file mode 100755 index 6b991bb59e4b..000000000000 --- a/src/ci/docker/dist-i686-linux/build-gcc.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex - -source shared.sh - -GCC=4.8.5 - -curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - -cd gcc-$GCC -./contrib/download_prerequisites -mkdir ../gcc-build -cd ../gcc-build -hide_output ../gcc-$GCC/configure \ - --prefix=/rustroot \ - --enable-languages=c,c++ -hide_output make -j10 -hide_output make install -ln -nsf gcc /rustroot/bin/cc - -cd .. 
-rm -rf gcc-build -rm -rf gcc-$GCC -yum erase -y gcc gcc-c++ binutils diff --git a/src/ci/docker/dist-i686-linux/build-git.sh b/src/ci/docker/dist-i686-linux/build-git.sh deleted file mode 100755 index ff62a68629a8..000000000000 --- a/src/ci/docker/dist-i686-linux/build-git.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex -source shared.sh - -curl https://www.kernel.org/pub/software/scm/git/git-2.10.0.tar.gz | tar xzf - - -cd git-2.10.0 -make configure -hide_output ./configure --prefix=/rustroot -hide_output make -j10 -hide_output make install - -cd .. -rm -rf git-2.10.0 diff --git a/src/ci/docker/dist-i686-linux/build-headers.sh b/src/ci/docker/dist-i686-linux/build-headers.sh deleted file mode 100755 index 2f15114d6f98..000000000000 --- a/src/ci/docker/dist-i686-linux/build-headers.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex -source shared.sh - -curl https://cdn.kernel.org/pub/linux/kernel/v3.x/linux-3.2.84.tar.xz | unxz | tar x - -cd linux-3.2.84 -hide_output make mrproper -hide_output make INSTALL_HDR_PATH=dest headers_install - -find dest/include \( -name .install -o -name ..install.cmd \) -delete -yes | cp -fr dest/include/* /usr/include - -cd .. 
-rm -rf linux-3.2.84 diff --git a/src/ci/docker/dist-i686-linux/build-openssl.sh b/src/ci/docker/dist-i686-linux/build-openssl.sh deleted file mode 100755 index e7226ace020b..000000000000 --- a/src/ci/docker/dist-i686-linux/build-openssl.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex -source shared.sh - -VERSION=1.0.2k -URL=https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/openssl-$VERSION.tar.gz - -curl $URL | tar xzf - - -cd openssl-$VERSION -hide_output ./config --prefix=/rustroot shared -fPIC -hide_output make -j10 -hide_output make install -cd .. -rm -rf openssl-$VERSION - -# Make the system cert collection available to the new install. -ln -nsf /etc/pki/tls/cert.pem /rustroot/ssl/ diff --git a/src/ci/docker/dist-i686-linux/build-python.sh b/src/ci/docker/dist-i686-linux/build-python.sh deleted file mode 100755 index c6b8cdde4b9a..000000000000 --- a/src/ci/docker/dist-i686-linux/build-python.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -set -ex -source shared.sh - -curl https://www.python.org/ftp/python/2.7.12/Python-2.7.12.tgz | \ - tar xzf - - -mkdir python-build -cd python-build - -# Gotta do some hackery to tell python about our custom OpenSSL build, but other -# than that fairly normal. 
-CFLAGS='-I /rustroot/include' LDFLAGS='-L /rustroot/lib -L /rustroot/lib64' \ - hide_output ../Python-2.7.12/configure --prefix=/rustroot -hide_output make -j10 -hide_output make install - -cd .. -rm -rf python-build -rm -rf Python-2.7.12 diff --git a/src/ci/docker/dist-i686-linux/shared.sh b/src/ci/docker/dist-i686-linux/shared.sh deleted file mode 100644 index 97e6d2908cf8..000000000000 --- a/src/ci/docker/dist-i686-linux/shared.sh +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -hide_output() { - set +x - on_err=" -echo ERROR: An error was encountered with the build. -cat /tmp/build.log -exit 1 -" - trap "$on_err" ERR - bash -c "while true; do sleep 30; echo \$(date) - building ...; done" & - PING_LOOP_PID=$! 
- $@ &> /tmp/build.log - trap - ERR - kill $PING_LOOP_PID - set -x -} diff --git a/src/ci/docker/dist-mips-linux/Dockerfile b/src/ci/docker/dist-mips-linux/Dockerfile index 94a3cf8a3820..466def1f80fb 100644 --- a/src/ci/docker/dist-mips-linux/Dockerfile +++ b/src/ci/docker/dist-mips-linux/Dockerfile @@ -22,5 +22,5 @@ RUN sh /scripts/sccache.sh ENV HOSTS=mips-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-mips64-linux/Dockerfile b/src/ci/docker/dist-mips64-linux/Dockerfile index 0b0dfff1fe36..2205b733e99f 100644 --- a/src/ci/docker/dist-mips64-linux/Dockerfile +++ b/src/ci/docker/dist-mips64-linux/Dockerfile @@ -21,5 +21,5 @@ RUN sh /scripts/sccache.sh ENV HOSTS=mips64-unknown-linux-gnuabi64 -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-mips64el-linux/Dockerfile b/src/ci/docker/dist-mips64el-linux/Dockerfile index 1810b1cdc5ab..f1d9dad46ea3 100644 --- a/src/ci/docker/dist-mips64el-linux/Dockerfile +++ b/src/ci/docker/dist-mips64el-linux/Dockerfile @@ -22,5 +22,5 @@ RUN sh /scripts/sccache.sh ENV HOSTS=mips64el-unknown-linux-gnuabi64 -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-mipsel-linux/Dockerfile b/src/ci/docker/dist-mipsel-linux/Dockerfile index f5be07484758..ee73e29c76e3 100644 --- a/src/ci/docker/dist-mipsel-linux/Dockerfile +++ b/src/ci/docker/dist-mipsel-linux/Dockerfile @@ -21,5 +21,5 @@ RUN sh /scripts/sccache.sh ENV HOSTS=mipsel-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended 
--disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-powerpc-linux/Dockerfile b/src/ci/docker/dist-powerpc-linux/Dockerfile index 14ce3654fce7..f03aff060c10 100644 --- a/src/ci/docker/dist-powerpc-linux/Dockerfile +++ b/src/ci/docker/dist-powerpc-linux/Dockerfile @@ -34,7 +34,7 @@ ENV \ ENV HOSTS=powerpc-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS # FIXME(#36150) this will fail the bootstrap. Probably means something bad is diff --git a/src/ci/docker/dist-powerpc64-linux/Dockerfile b/src/ci/docker/dist-powerpc64-linux/Dockerfile index 1f6e83e2f49e..bb30210c0563 100644 --- a/src/ci/docker/dist-powerpc64-linux/Dockerfile +++ b/src/ci/docker/dist-powerpc64-linux/Dockerfile @@ -35,5 +35,5 @@ ENV \ ENV HOSTS=powerpc64-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-powerpc64le-linux/Dockerfile b/src/ci/docker/dist-powerpc64le-linux/Dockerfile index d4677e180609..ee9e45504835 100644 --- a/src/ci/docker/dist-powerpc64le-linux/Dockerfile +++ b/src/ci/docker/dist-powerpc64le-linux/Dockerfile @@ -32,5 +32,5 @@ ENV \ ENV HOSTS=powerpc64le-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-s390x-linux/Dockerfile b/src/ci/docker/dist-s390x-linux/Dockerfile index 39478e92f7c9..7ba6fe643c2a 100644 --- a/src/ci/docker/dist-s390x-linux/Dockerfile +++ b/src/ci/docker/dist-s390x-linux/Dockerfile @@ -34,5 +34,5 @@ ENV \ ENV HOSTS=s390x-unknown-linux-gnu -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV 
RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-various-1/Dockerfile b/src/ci/docker/dist-various-1/Dockerfile index 0f08bcddd388..bfc5e712f76c 100644 --- a/src/ci/docker/dist-various-1/Dockerfile +++ b/src/ci/docker/dist-various-1/Dockerfile @@ -20,7 +20,9 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ bzip2 \ patch \ libssl-dev \ - pkg-config + pkg-config \ + gcc-arm-none-eabi \ + libnewlib-arm-none-eabi WORKDIR /build @@ -30,8 +32,21 @@ RUN ./build-rumprun.sh COPY dist-various-1/install-x86_64-redox.sh /build RUN ./install-x86_64-redox.sh +COPY dist-various-1/install-mips-musl.sh /build +RUN ./install-mips-musl.sh + +COPY dist-various-1/install-mipsel-musl.sh /build +RUN ./install-mipsel-musl.sh + +# Suppress some warnings in the openwrt toolchains we downloaded +ENV STAGING_DIR=/tmp + COPY scripts/musl.sh /build RUN env \ + CC=arm-linux-gnueabi-gcc CFLAGS="-march=armv5te -marm -mfloat-abi=soft" \ + CXX=arm-linux-gnueabi-g++ CXXFLAGS="-march=armv5te -marm -mfloat-abi=soft" \ + bash musl.sh armv5te && \ + env \ CC=arm-linux-gnueabi-gcc CFLAGS="-march=armv6 -marm" \ CXX=arm-linux-gnueabi-g++ CXXFLAGS="-march=armv6 -marm" \ bash musl.sh arm && \ @@ -47,13 +62,28 @@ RUN env \ CC=aarch64-linux-gnu-gcc \ CXX=aarch64-linux-gnu-g++ \ bash musl.sh aarch64 && \ + env \ + CC=mips-openwrt-linux-gcc \ + CXX=mips-openwrt-linux-g++ \ + bash musl.sh mips && \ + env \ + CC=mipsel-openwrt-linux-gcc \ + CXX=mipsel-openwrt-linux-g++ \ + bash musl.sh mipsel && \ rm -rf /build/* -COPY dist-various-1/install-mips-musl.sh /build -RUN ./install-mips-musl.sh +# FIXME(mozilla/sccache#235) this shouldn't be necessary but is currently +# necessary to disambiguate the mips compiler with the mipsel compiler. We want +# to give these two wrapper scripts (currently identical ones) different hashes +# to ensure that sccache understands that they're different compilers. 
+RUN \ + echo "# a" >> /usr/local/mips-linux-musl/bin/mips-openwrt-linux-musl-wrapper.sh && \ + echo "# b" >> /usr/local/mipsel-linux-musl/bin/mipsel-openwrt-linux-musl-wrapper.sh -COPY dist-various-1/install-mipsel-musl.sh /build -RUN ./install-mipsel-musl.sh +ENV RUN_MAKE_TARGETS=thumbv6m-none-eabi +ENV RUN_MAKE_TARGETS=$RUN_MAKE_TARGETS,thumbv7m-none-eabi +ENV RUN_MAKE_TARGETS=$RUN_MAKE_TARGETS,thumbv7em-none-eabi +ENV RUN_MAKE_TARGETS=$RUN_MAKE_TARGETS,thumbv7em-none-eabihf ENV TARGETS=asmjs-unknown-emscripten ENV TARGETS=$TARGETS,wasm32-unknown-emscripten @@ -63,31 +93,36 @@ ENV TARGETS=$TARGETS,mipsel-unknown-linux-musl ENV TARGETS=$TARGETS,arm-unknown-linux-musleabi ENV TARGETS=$TARGETS,arm-unknown-linux-musleabihf ENV TARGETS=$TARGETS,armv5te-unknown-linux-gnueabi +ENV TARGETS=$TARGETS,armv5te-unknown-linux-musleabi ENV TARGETS=$TARGETS,armv7-unknown-linux-musleabihf ENV TARGETS=$TARGETS,aarch64-unknown-linux-musl ENV TARGETS=$TARGETS,sparc64-unknown-linux-gnu ENV TARGETS=$TARGETS,x86_64-unknown-redox +ENV TARGETS=$TARGETS,thumbv6m-none-eabi +ENV TARGETS=$TARGETS,thumbv7m-none-eabi +ENV TARGETS=$TARGETS,thumbv7em-none-eabi +ENV TARGETS=$TARGETS,thumbv7em-none-eabihf +ENV TARGETS=$TARGETS,riscv32imac-unknown-none-elf -# FIXME: remove armv5te vars after https://github.com/alexcrichton/cc-rs/issues/271 -# get fixed and cc update ENV CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ CC_sparc64_unknown_linux_gnu=sparc64-linux-gnu-gcc \ - CC_x86_64_unknown_redox=x86_64-unknown-redox-gcc \ - CC_armv5te_unknown_linux_gnueabi=arm-linux-gnueabi-gcc \ - CFLAGS_armv5te_unknown_linux_gnueabi="-march=armv5te -marm -mfloat-abi=soft" - -# Suppress some warnings in the openwrt toolchains we downloaded -ENV STAGING_DIR=/tmp + CC_x86_64_unknown_redox=x86_64-unknown-redox-gcc ENV RUST_CONFIGURE_ARGS \ - --enable-extended \ - --target=$TARGETS \ + --musl-root-armv5te=/musl-armv5te \ --musl-root-arm=/musl-arm \ 
--musl-root-armhf=/musl-armhf \ --musl-root-armv7=/musl-armv7 \ - --musl-root-aarch64=/musl-aarch64 -ENV SCRIPT python2.7 ../x.py dist --target $TARGETS + --musl-root-aarch64=/musl-aarch64 \ + --musl-root-mips=/musl-mips \ + --musl-root-mipsel=/musl-mipsel \ + --enable-emscripten \ + --disable-docs + +ENV SCRIPT \ + python2.7 ../x.py test --target $RUN_MAKE_TARGETS src/test/run-make && \ + python2.7 ../x.py dist --target $TARGETS # sccache COPY scripts/sccache.sh /scripts/ diff --git a/src/ci/docker/dist-various-2/Dockerfile b/src/ci/docker/dist-various-2/Dockerfile index d8f09bf47a49..7adb32efa1d4 100644 --- a/src/ci/docker/dist-various-2/Dockerfile +++ b/src/ci/docker/dist-various-2/Dockerfile @@ -34,12 +34,12 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV \ - AR_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-ar \ - CC_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-clang \ - CXX_x86_64_unknown_fuchsia=x86_64-unknown-fuchsia-clang++ \ - AR_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-ar \ - CC_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-clang \ - CXX_aarch64_unknown_fuchsia=aarch64-unknown-fuchsia-clang++ \ + AR_x86_64_fuchsia=x86_64-fuchsia-ar \ + CC_x86_64_fuchsia=x86_64-fuchsia-clang \ + CXX_x86_64_fuchsia=x86_64-fuchsia-clang++ \ + AR_aarch64_fuchsia=aarch64-fuchsia-ar \ + CC_aarch64_fuchsia=aarch64-fuchsia-clang \ + CXX_aarch64_fuchsia=aarch64-fuchsia-clang++ \ AR_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-ar \ CC_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-gcc \ CXX_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-g++ \ @@ -47,20 +47,13 @@ ENV \ CC_x86_64_sun_solaris=x86_64-sun-solaris2.10-gcc \ CXX_x86_64_sun_solaris=x86_64-sun-solaris2.10-g++ -# FIXME(EdSchouten): Remove this once cc ≥1.0.4 has been merged. It can -# automatically pick the right compiler path. 
-ENV \ - AR_x86_64_unknown_cloudabi=x86_64-unknown-cloudabi-ar \ - CC_x86_64_unknown_cloudabi=x86_64-unknown-cloudabi-clang \ - CXX_x86_64_unknown_cloudabi=x86_64-unknown-cloudabi-clang++ - -ENV TARGETS=x86_64-unknown-fuchsia -ENV TARGETS=$TARGETS,aarch64-unknown-fuchsia +ENV TARGETS=x86_64-fuchsia +ENV TARGETS=$TARGETS,aarch64-fuchsia ENV TARGETS=$TARGETS,sparcv9-sun-solaris ENV TARGETS=$TARGETS,wasm32-unknown-unknown ENV TARGETS=$TARGETS,x86_64-sun-solaris ENV TARGETS=$TARGETS,x86_64-unknown-linux-gnux32 ENV TARGETS=$TARGETS,x86_64-unknown-cloudabi -ENV RUST_CONFIGURE_ARGS --target=$TARGETS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --target $TARGETS diff --git a/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh b/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh index d64da4366399..8c04d849e8d0 100755 --- a/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh +++ b/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh @@ -40,12 +40,6 @@ ln -s ../lib/llvm-5.0/bin/clang /usr/bin/${target}-c++ ln -s ../lib/llvm-5.0/bin/lld /usr/bin/${target}-ld ln -s ../../${target} /usr/lib/llvm-5.0/${target} -# FIXME(EdSchouten): Remove this once cc ≥1.0.4 has been merged. It -# can make use of ${target}-cc and ${target}-c++, without incorrectly -# assuming it's MSVC. -ln -s ../lib/llvm-5.0/bin/clang /usr/bin/${target}-clang -ln -s ../lib/llvm-5.0/bin/clang /usr/bin/${target}-clang++ - # Install the C++ runtime libraries from CloudABI Ports. 
echo deb https://nuxi.nl/distfiles/cloudabi-ports/debian/ cloudabi cloudabi > \ /etc/apt/sources.list.d/cloudabi.list diff --git a/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh b/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh index ef8f0c37f8c3..ec19f7c4f45d 100755 --- a/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh +++ b/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh @@ -39,7 +39,7 @@ build() { esac hide_output make -j$(getconf _NPROCESSORS_ONLN) $tgt - dst=/usr/local/${arch}-unknown-fuchsia + dst=/usr/local/${arch}-fuchsia mkdir -p $dst cp -a build-${tgt}/sysroot/include $dst/ cp -a build-${tgt}/sysroot/lib $dst/ @@ -55,11 +55,11 @@ rm -rf zircon for arch in x86_64 aarch64; do for tool in clang clang++; do - cat >/usr/local/bin/${arch}-unknown-fuchsia-${tool} </usr/local/bin/${arch}-fuchsia-${tool} < or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex + +source shared.sh + +LLVM=6.0.0 + +mkdir clang +cd clang + +curl https://releases.llvm.org/$LLVM/llvm-$LLVM.src.tar.xz | \ + xz -d | \ + tar xf - + +cd llvm-$LLVM.src + +mkdir -p tools/clang + +curl https://releases.llvm.org/$LLVM/cfe-$LLVM.src.tar.xz | \ + xz -d | \ + tar xf - -C tools/clang --strip-components=1 + +mkdir ../clang-build +cd ../clang-build + +# For whatever reason the default set of include paths for clang is different +# than that of gcc. As a result we need to manually include our sysroot's +# include path, /rustroot/include, to clang's default include path. +# +# Alsow there's this weird oddity with gcc where there's an 'include-fixed' +# directory that it generates. It turns out [1] that Centos 5's headers are so +# old that they're incompatible with modern C semantics. While gcc automatically +# fixes that clang doesn't account for this. Tell clang to manually include the +# fixed headers so we can successfully compile code later on. 
+# +# [1]: https://sourceware.org/ml/crossgcc/2008-11/msg00028.html +INC="/rustroot/include" +INC="$INC:/rustroot/lib/gcc/x86_64-unknown-linux-gnu/4.8.5/include-fixed" +INC="$INC:/usr/include" + +hide_output \ + cmake ../llvm-$LLVM.src \ + -DCMAKE_C_COMPILER=/rustroot/bin/gcc \ + -DCMAKE_CXX_COMPILER=/rustroot/bin/g++ \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=/rustroot \ + -DLLVM_TARGETS_TO_BUILD=X86 \ + -DC_INCLUDE_DIRS="$INC" + +hide_output make -j10 +hide_output make install + +cd ../.. +rm -rf clang diff --git a/src/ci/docker/dist-x86_64-linux/build-gcc.sh b/src/ci/docker/dist-x86_64-linux/build-gcc.sh index 6b991bb59e4b..62ea2506f4ef 100755 --- a/src/ci/docker/dist-x86_64-linux/build-gcc.sh +++ b/src/ci/docker/dist-x86_64-linux/build-gcc.sh @@ -17,6 +17,23 @@ GCC=4.8.5 curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - cd gcc-$GCC + +# FIXME(#49246): Remove the `sed` below. +# +# On 2018 March 21st, two Travis builders' cache for Docker are suddenly invalidated. Normally this +# is fine, because we just need to rebuild the Docker image. However, it reveals a network issue: +# downloading from `ftp://gcc.gnu.org/` from Travis (using passive mode) often leads to "Connection +# timed out" error, and even when the download completed, the file is usually corrupted. This causes +# nothing to be landed that day. +# +# We observed that the `gcc-4.8.5.tar.bz2` above can be downloaded successfully, so as a stability +# improvement we try to download from the HTTPS mirror instead. Turns out this uncovered the third +# bug: the host `gcc.gnu.org` and `cygwin.com` share the same IP, and the TLS certificate of the +# latter host is presented to `wget`! Therefore, we choose to download from the insecure HTTP server +# instead here. 
+# +sed -i'' 's|ftp://gcc\.gnu\.org/|http://gcc.gnu.org/|g' ./contrib/download_prerequisites + ./contrib/download_prerequisites mkdir ../gcc-build cd ../gcc-build @@ -25,7 +42,6 @@ hide_output ../gcc-$GCC/configure \ --enable-languages=c,c++ hide_output make -j10 hide_output make install -ln -nsf gcc /rustroot/bin/cc cd .. rm -rf gcc-build diff --git a/src/ci/docker/dist-x86_64-linux/build-git.sh b/src/ci/docker/dist-x86_64-linux/build-git.sh index ff62a68629a8..aa31f50ba034 100755 --- a/src/ci/docker/dist-x86_64-linux/build-git.sh +++ b/src/ci/docker/dist-x86_64-linux/build-git.sh @@ -12,7 +12,7 @@ set -ex source shared.sh -curl https://www.kernel.org/pub/software/scm/git/git-2.10.0.tar.gz | tar xzf - +curl -L https://www.kernel.org/pub/software/scm/git/git-2.10.0.tar.gz | tar xzf - cd git-2.10.0 make configure diff --git a/src/ci/docker/dist-x86_64-musl/Dockerfile b/src/ci/docker/dist-x86_64-musl/Dockerfile index 77a55b33e41b..06f8a2fbba83 100644 --- a/src/ci/docker/dist-x86_64-musl/Dockerfile +++ b/src/ci/docker/dist-x86_64-musl/Dockerfile @@ -21,7 +21,7 @@ WORKDIR /build/ COPY scripts/musl.sh /build/ # We need to mitigate rust-lang/rust#34978 when compiling musl itself as well RUN CC=gcc \ - CFLAGS="-fPIC -Wa,-mrelax-relocations=no" \ + CFLAGS="-Wa,-mrelax-relocations=no" \ CXX=g++ \ CXXFLAGS="-Wa,-mrelax-relocations=no" \ bash musl.sh x86_64 && rm -rf /build @@ -30,9 +30,9 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV RUST_CONFIGURE_ARGS \ - --target=x86_64-unknown-linux-musl \ --musl-root-x86_64=/musl-x86_64 \ - --enable-extended + --enable-extended \ + --disable-docs # Newer binutils broke things on some vms/distros (i.e., linking against # unknown relocs disabled by the following flag), so we need to go out of our diff --git a/src/ci/docker/dist-x86_64-netbsd/Dockerfile b/src/ci/docker/dist-x86_64-netbsd/Dockerfile index 4fd2503c31bb..a17a7ebc03dd 100644 --- a/src/ci/docker/dist-x86_64-netbsd/Dockerfile +++ 
b/src/ci/docker/dist-x86_64-netbsd/Dockerfile @@ -33,5 +33,5 @@ ENV \ ENV HOSTS=x86_64-unknown-netbsd -ENV RUST_CONFIGURE_ARGS --host=$HOSTS --enable-extended +ENV RUST_CONFIGURE_ARGS --enable-extended --disable-docs ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh b/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh index 5b4314d57e6c..e730dd86087f 100755 --- a/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh +++ b/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh @@ -54,7 +54,7 @@ cd usr/src # The options, in order, do the following # * this is an unprivileged build # * output to a predictable location -# * disable various uneeded stuff +# * disable various unneeded stuff MKUNPRIVED=yes TOOLDIR=/x-tools/x86_64-unknown-netbsd \ MKSHARE=no MKDOC=no MKHTML=no MKINFO=no MKKMOD=no MKLINT=no MKMAN=no MKNLS=no MKPROFILE=no \ hide_output ./build.sh -j10 -m amd64 tools diff --git a/src/ci/docker/mingw-check/Dockerfile b/src/ci/docker/mingw-check/Dockerfile new file mode 100644 index 000000000000..aab339f399c5 --- /dev/null +++ b/src/ci/docker/mingw-check/Dockerfile @@ -0,0 +1,23 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + sudo \ + gdb \ + xz-utils \ + libssl-dev \ + pkg-config \ + mingw-w64 + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV RUN_CHECK_WITH_PARALLEL_QUERIES 1 +ENV SCRIPT python2.7 ../x.py check --target=i686-pc-windows-gnu --host=i686-pc-windows-gnu diff --git a/src/ci/docker/run.sh b/src/ci/docker/run.sh index f743c976f91a..3a7714d32fa1 100755 --- a/src/ci/docker/run.sh +++ b/src/ci/docker/run.sh @@ -27,6 +27,23 @@ travis_fold start build_docker travis_time_start if [ -f "$docker_dir/$image/Dockerfile" ]; then + if [ "$CI" != "" ]; then + cksum=$(find $docker_dir/$image $docker_dir/scripts 
-type f | \ + sort | \ + xargs cat | \ + sha512sum | \ + awk '{print $1}') + s3url="s3://$SCCACHE_BUCKET/docker/$cksum" + url="https://s3-us-west-1.amazonaws.com/$SCCACHE_BUCKET/docker/$cksum" + echo "Attempting to download $s3url" + rm -f /tmp/rustci_docker_cache + set +e + retry curl -f -L -C - -o /tmp/rustci_docker_cache "$url" + loaded_images=$(docker load -i /tmp/rustci_docker_cache | sed 's/.* sha/sha/') + set -e + echo "Downloaded containers:\n$loaded_images" + fi + dockerfile="$docker_dir/$image/Dockerfile" if [ -x /usr/bin/cygpath ]; then context="`cygpath -w $docker_dir`" @@ -40,6 +57,23 @@ if [ -f "$docker_dir/$image/Dockerfile" ]; then -t rust-ci \ -f "$dockerfile" \ "$context" + + if [ "$s3url" != "" ]; then + digest=$(docker inspect rust-ci --format '{{.Id}}') + echo "Built container $digest" + if ! grep -q "$digest" <(echo "$loaded_images"); then + echo "Uploading finished image to $s3url" + set +e + docker history -q rust-ci | \ + grep -v missing | \ + xargs docker save | \ + gzip | \ + aws s3 cp - $s3url + set -e + else + echo "Looks like docker image is the same as before, not uploading" + fi + fi elif [ -f "$docker_dir/disabled/$image/Dockerfile" ]; then if [ -n "$TRAVIS_OS_NAME" ]; then echo Cannot run disabled images on travis! 
@@ -65,6 +99,7 @@ objdir=$root_dir/obj mkdir -p $HOME/.cargo mkdir -p $objdir/tmp +mkdir -p $objdir/cores args= if [ "$SCCACHE_BUCKET" != "" ]; then @@ -72,8 +107,6 @@ if [ "$SCCACHE_BUCKET" != "" ]; then args="$args --env SCCACHE_REGION" args="$args --env AWS_ACCESS_KEY_ID" args="$args --env AWS_SECRET_ACCESS_KEY" - args="$args --env SCCACHE_ERROR_LOG=/tmp/sccache/sccache.log" - args="$args --volume $objdir/tmp:/tmp/sccache" else mkdir -p $HOME/.cache/sccache args="$args --env SCCACHE_DIR=/sccache --volume $HOME/.cache/sccache:/sccache" @@ -100,6 +133,7 @@ exec docker \ --env TRAVIS \ --env TRAVIS_BRANCH \ --env TOOLSTATE_REPO_ACCESS_TOKEN \ + --env CI_JOB_NAME="${CI_JOB_NAME-$IMAGE}" \ --volume "$HOME/.cargo:/cargo" \ --volume "$HOME/rustsrc:$HOME/rustsrc" \ --init \ diff --git a/src/ci/docker/scripts/musl.sh b/src/ci/docker/scripts/musl.sh index 7a7233216a35..4ca7389d6d1a 100644 --- a/src/ci/docker/scripts/musl.sh +++ b/src/ci/docker/scripts/musl.sh @@ -30,15 +30,23 @@ exit 1 TAG=$1 shift -MUSL=musl-1.1.18 +export CFLAGS="-fPIC $CFLAGS" + +# FIXME: remove the patch when upate to 1.1.20 +MUSL=musl-1.1.19 # may have been downloaded in a previous run if [ ! -d $MUSL ]; then curl https://www.musl-libc.org/releases/$MUSL.tar.gz | tar xzf - + # Patch to fix https://github.com/rust-lang/rust/issues/48967 + cd $MUSL && \ + curl "https://git.musl-libc.org/cgit/musl/patch/?id=610c5a8524c3d6cd3ac5a5f1231422e7648a3791" |\ + patch -p1 && \ + cd - fi cd $MUSL -./configure --disable-shared --prefix=/musl-$TAG $@ +./configure --enable-optimize --enable-debug --disable-shared --prefix=/musl-$TAG $@ if [ "$TAG" = "i586" -o "$TAG" = "i686" ]; then hide_output make -j$(nproc) AR=ar RANLIB=ranlib else @@ -49,42 +57,12 @@ hide_output make clean cd .. -LLVM=39 +LLVM=60 + # may have been downloaded in a previous run if [ ! 
-d libunwind-release_$LLVM ]; then curl -L https://github.com/llvm-mirror/llvm/archive/release_$LLVM.tar.gz | tar xzf - curl -L https://github.com/llvm-mirror/libunwind/archive/release_$LLVM.tar.gz | tar xzf - - # Whoa what's this mysterious patch we're applying to libunwind! Why are we - # swapping the values of ESP/EBP in libunwind?! - # - # Discovered in #35599 it turns out that the vanilla build of libunwind is not - # suitable for unwinding i686 musl. After some investigation it ended up - # looking like the register values for ESP/EBP were indeed incorrect (swapped) - # in the source. Similar commits in libunwind (r280099 and r282589) have noticed - # this for other platforms, and we just need to realize it for musl linux as - # well. - # - # More technical info can be found at #35599 - cd libunwind-release_$LLVM - patch -Np1 << EOF -diff --git a/include/libunwind.h b/include/libunwind.h -index c5b9633..1360eb2 100644 ---- a/include/libunwind.h -+++ b/include/libunwind.h -@@ -151,8 +151,8 @@ enum { - UNW_X86_ECX = 1, - UNW_X86_EDX = 2, - UNW_X86_EBX = 3, -- UNW_X86_EBP = 4, -- UNW_X86_ESP = 5, -+ UNW_X86_ESP = 4, -+ UNW_X86_EBP = 5, - UNW_X86_ESI = 6, - UNW_X86_EDI = 7 - }; -fi -EOF - cd .. 
fi mkdir libunwind-build diff --git a/src/ci/docker/scripts/sccache.sh b/src/ci/docker/scripts/sccache.sh index ce2d45563f7b..da52d0831811 100644 --- a/src/ci/docker/scripts/sccache.sh +++ b/src/ci/docker/scripts/sccache.sh @@ -13,6 +13,6 @@ set -ex curl -fo /usr/local/bin/sccache \ - https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-05-12-sccache-x86_64-unknown-linux-musl + https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2018-04-02-sccache-x86_64-unknown-linux-musl chmod +x /usr/local/bin/sccache diff --git a/src/ci/docker/wasm32-unknown/Dockerfile b/src/ci/docker/wasm32-unknown/Dockerfile index dc1727b7014c..56eda5480715 100644 --- a/src/ci/docker/wasm32-unknown/Dockerfile +++ b/src/ci/docker/wasm32-unknown/Dockerfile @@ -22,10 +22,17 @@ RUN sh /scripts/sccache.sh ENV TARGETS=wasm32-unknown-unknown ENV RUST_CONFIGURE_ARGS \ - --target=$TARGETS \ - --set build.nodejs=/node-v9.2.0-linux-x64/bin/node + --set build.nodejs=/node-v9.2.0-linux-x64/bin/node \ + --set rust.lld + +# Some run-make tests have assertions about code size, and enabling debug +# assertions in libstd causes the binary to be much bigger than it would +# otherwise normally be. 
We already test libstd with debug assertions in lots of +# other contexts as well +ENV NO_DEBUG_ASSERTIONS=1 ENV SCRIPT python2.7 /checkout/x.py test --target $TARGETS \ + src/test/run-make \ src/test/ui \ src/test/run-pass \ src/test/compile-fail \ @@ -33,4 +40,3 @@ ENV SCRIPT python2.7 /checkout/x.py test --target $TARGETS \ src/test/mir-opt \ src/test/codegen-units \ src/libcore \ - src/libstd_unicode/ \ diff --git a/src/ci/docker/x86_64-gnu-debug/Dockerfile b/src/ci/docker/x86_64-gnu-debug/Dockerfile index 95d41028595f..bdde7ad7fe85 100644 --- a/src/ci/docker/x86_64-gnu-debug/Dockerfile +++ b/src/ci/docker/x86_64-gnu-debug/Dockerfile @@ -16,6 +16,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh +ENV RUN_CHECK_WITH_PARALLEL_QUERIES 1 ENV RUST_CONFIGURE_ARGS \ --build=x86_64-unknown-linux-gnu \ --enable-debug \ diff --git a/src/ci/docker/x86_64-gnu-incremental/Dockerfile b/src/ci/docker/x86_64-gnu-incremental/Dockerfile deleted file mode 100644 index 7304ed6015cc..000000000000 --- a/src/ci/docker/x86_64-gnu-incremental/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - g++ \ - make \ - file \ - curl \ - ca-certificates \ - python2.7 \ - git \ - cmake \ - sudo \ - gdb \ - xz-utils - -COPY scripts/sccache.sh /scripts/ -RUN sh /scripts/sccache.sh - -ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu -ENV RUSTFLAGS -Zincremental=/tmp/rust-incr-cache -ENV RUST_CHECK_TARGET check -ENV CARGO_INCREMENTAL 0 diff --git a/src/ci/docker/x86_64-gnu-llvm-3.9/Dockerfile b/src/ci/docker/x86_64-gnu-llvm-3.9/Dockerfile deleted file mode 100644 index 6b8186048988..000000000000 --- a/src/ci/docker/x86_64-gnu-llvm-3.9/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM ubuntu:16.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - g++ \ - make \ - file \ - curl \ - ca-certificates \ - python2.7 \ 
- git \ - cmake \ - sudo \ - gdb \ - llvm-3.9-tools \ - libedit-dev \ - zlib1g-dev \ - xz-utils - -COPY scripts/sccache.sh /scripts/ -RUN sh /scripts/sccache.sh - -# using llvm-link-shared due to libffi issues -- see #34486 -ENV RUST_CONFIGURE_ARGS \ - --build=x86_64-unknown-linux-gnu \ - --llvm-root=/usr/lib/llvm-3.9 \ - --enable-llvm-link-shared -ENV RUST_CHECK_TARGET check diff --git a/src/ci/docker/x86_64-gnu-llvm-5.0/Dockerfile b/src/ci/docker/x86_64-gnu-llvm-5.0/Dockerfile new file mode 100644 index 000000000000..4f90c5097260 --- /dev/null +++ b/src/ci/docker/x86_64-gnu-llvm-5.0/Dockerfile @@ -0,0 +1,27 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + sudo \ + gdb \ + llvm-5.0-tools \ + libedit-dev \ + zlib1g-dev \ + xz-utils + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +# using llvm-link-shared due to libffi issues -- see #34486 +ENV RUST_CONFIGURE_ARGS \ + --build=x86_64-unknown-linux-gnu \ + --llvm-root=/usr/lib/llvm-5.0 \ + --enable-llvm-link-shared +ENV RUST_CHECK_TARGET check diff --git a/src/ci/docker/x86_64-gnu-tools/Dockerfile b/src/ci/docker/x86_64-gnu-tools/Dockerfile index 8975d419d205..bab9145cbcb9 100644 --- a/src/ci/docker/x86_64-gnu-tools/Dockerfile +++ b/src/ci/docker/x86_64-gnu-tools/Dockerfile @@ -17,6 +17,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh +COPY x86_64-gnu-tools/checkregression.py /tmp/ COPY x86_64-gnu-tools/checktools.sh /tmp/ COPY x86_64-gnu-tools/repo.sh /tmp/ diff --git a/src/ci/docker/x86_64-gnu-tools/checkregression.py b/src/ci/docker/x86_64-gnu-tools/checkregression.py new file mode 100755 index 000000000000..208aab434ce1 --- /dev/null +++ b/src/ci/docker/x86_64-gnu-tools/checkregression.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2018 The Rust 
Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +import sys +import json + +if __name__ == '__main__': + os_name = sys.argv[1] + toolstate_file = sys.argv[2] + current_state = sys.argv[3] + verb = sys.argv[4] # 'regressed' or 'changed' + + with open(toolstate_file, 'r') as f: + toolstate = json.load(f) + with open(current_state, 'r') as f: + current = json.load(f) + + regressed = False + for cur in current: + tool = cur['tool'] + state = cur[os_name] + new_state = toolstate.get(tool, '') + if verb == 'regressed': + updated = new_state < state + elif verb == 'changed': + updated = new_state != state + else: + print('Unknown verb {}'.format(updated)) + sys.exit(2) + if updated: + print( + 'The state of "{}" has {} from "{}" to "{}"' + .format(tool, verb, state, new_state) + ) + regressed = True + + if regressed: + sys.exit(1) diff --git a/src/ci/docker/x86_64-gnu-tools/checktools.sh b/src/ci/docker/x86_64-gnu-tools/checktools.sh index b9268fe62ed0..d876cb7f37a4 100755 --- a/src/ci/docker/x86_64-gnu-tools/checktools.sh +++ b/src/ci/docker/x86_64-gnu-tools/checktools.sh @@ -17,52 +17,109 @@ TOOLSTATE_FILE="$(realpath $2)" OS="$3" COMMIT="$(git rev-parse HEAD)" CHANGED_FILES="$(git diff --name-status HEAD HEAD^)" +SIX_WEEK_CYCLE="$(( ($(date +%s) / 86400 - 20) % 42 ))" +# ^ Number of days after the last promotion of beta. +# Its value is 41 on the Tuesday where "Promote master to beta (T-2)" happens. +# The Wednesday after this has value 0. +# We track this value to prevent regressing tools in the last week of the 6-week cycle. 
touch "$TOOLSTATE_FILE" +# Try to test all the tools and store the build/test success in the TOOLSTATE_FILE + set +e python2.7 "$X_PY" test --no-fail-fast \ + src/doc/book \ + src/doc/nomicon \ + src/doc/reference \ + src/doc/rust-by-example \ + src/tools/clippy \ src/tools/rls \ src/tools/rustfmt \ src/tools/miri \ - src/tools/clippy + set -e cat "$TOOLSTATE_FILE" +echo -# If this PR is intended to update one of these tools, do not let the build pass -# when they do not test-pass. -for TOOL in rls rustfmt miri clippy; do - echo "Verifying status of $TOOL..." - if echo "$CHANGED_FILES" | grep -q "^M[[:blank:]]src/tools/$TOOL$"; then - echo "This PR updated 'src/tools/$TOOL', verifying if status is 'test-pass'..." - if grep -vq '"'"$TOOL"'[^"]*":"test-pass"' "$TOOLSTATE_FILE"; then +# This function checks that if a tool's submodule changed, the tool's state must improve +verify_status() { + echo "Verifying status of $1..." + if echo "$CHANGED_FILES" | grep -q "^M[[:blank:]]$2$"; then + echo "This PR updated '$2', verifying if status is 'test-pass'..." + if grep -vq '"'"$1"'":"test-pass"' "$TOOLSTATE_FILE"; then echo - echo "⚠️ We detected that this PR updated '$TOOL', but its tests failed." + echo "⚠️ We detected that this PR updated '$1', but its tests failed." echo - echo "If you do intend to update '$TOOL', please check the error messages above and" + echo "If you do intend to update '$1', please check the error messages above and" echo "commit another update." echo - echo "If you do NOT intend to update '$TOOL', please ensure you did not accidentally" - echo "change the submodule at 'src/tools/$TOOL'. You may ask your reviewer for the" + echo "If you do NOT intend to update '$1', please ensure you did not accidentally" + echo "change the submodule at '$2'. You may ask your reviewer for the" echo "proper steps." exit 3 fi fi -done +} -if [ "$RUST_RELEASE_CHANNEL" = nightly -a -n "${TOOLSTATE_REPO_ACCESS_TOKEN+is_set}" ]; then - . 
"$(dirname $0)/repo.sh" - MESSAGE_FILE=$(mktemp -t msg.XXXXXX) - echo "($OS CI update)" > "$MESSAGE_FILE" - commit_toolstate_change "$MESSAGE_FILE" \ +# deduplicates the submodule check and the assertion that on beta some tools MUST be passing +check_dispatch() { + if [ "$1" = submodule_changed ]; then + # ignore $2 (branch id) + verify_status $3 $4 + elif [ "$2" = beta ]; then + echo "Requiring test passing for $3..." + if grep -q '"'"$3"'":"\(test\|build\)-fail"' "$TOOLSTATE_FILE"; then + exit 4 + fi + fi +} + +# list all tools here +status_check() { + check_dispatch $1 beta book src/doc/book + check_dispatch $1 beta nomicon src/doc/nomicon + check_dispatch $1 beta reference src/doc/reference + check_dispatch $1 beta rust-by-example src/doc/rust-by-example + check_dispatch $1 beta rls src/tools/rls + check_dispatch $1 beta rustfmt src/tools/rustfmt + check_dispatch $1 beta clippy-driver src/tools/clippy + # these tools are not required for beta to successfully branch + check_dispatch $1 nightly miri src/tools/miri +} + +# If this PR is intended to update one of these tools, do not let the build pass +# when they do not test-pass. + +status_check "submodule_changed" + +CHECK_NOT="$(readlink -f "$(dirname $0)/checkregression.py")" +change_toolstate() { + # only update the history + if python2.7 "$CHECK_NOT" "$OS" "$TOOLSTATE_FILE" "_data/latest.json" changed; then + echo 'Toolstate is not changed. Not updating.' + else + if [ $SIX_WEEK_CYCLE -ge 35 ]; then + python2.7 "$CHECK_NOT" "$OS" "$TOOLSTATE_FILE" "_data/latest.json" regressed + fi sed -i "1 a\\ $COMMIT\t$(cat "$TOOLSTATE_FILE") " "history/$OS.tsv" - rm -f "$MESSAGE_FILE" + fi +} + +if [ "$RUST_RELEASE_CHANNEL" = nightly ]; then + if [ -n "${TOOLSTATE_REPO_ACCESS_TOKEN+is_set}" ]; then + . 
"$(dirname $0)/repo.sh" + MESSAGE_FILE=$(mktemp -t msg.XXXXXX) + echo "($OS CI update)" > "$MESSAGE_FILE" + commit_toolstate_change "$MESSAGE_FILE" change_toolstate + rm -f "$MESSAGE_FILE" + fi exit 0 fi -if grep -q fail "$TOOLSTATE_FILE"; then - exit 4 -fi +# abort compilation if an important tool doesn't build +# (this code is reachable if not on the nightly channel) +status_check "beta_required" diff --git a/src/ci/docker/x86_64-gnu-tools/repo.sh b/src/ci/docker/x86_64-gnu-tools/repo.sh index c10afef753e8..807e6fb7b642 100644 --- a/src/ci/docker/x86_64-gnu-tools/repo.sh +++ b/src/ci/docker/x86_64-gnu-tools/repo.sh @@ -60,7 +60,7 @@ commit_toolstate_change() { OLDFLAGS="$-" set -eu - git config --global user.email '34210020+rust-toolstate-update@users.noreply.github.com' + git config --global user.email '7378925+rust-toolstate-update@users.noreply.github.com' git config --global user.name 'Rust Toolstate Update' git config --global credential.helper store printf 'https://%s:x-oauth-basic@github.com\n' "$TOOLSTATE_REPO_ACCESS_TOKEN" \ diff --git a/src/ci/init_repo.sh b/src/ci/init_repo.sh index e073a3d99c15..f2664e6d196c 100755 --- a/src/ci/init_repo.sh +++ b/src/ci/init_repo.sh @@ -17,6 +17,7 @@ ci_dir=$(cd $(dirname $0) && pwd) . "$ci_dir/shared.sh" travis_fold start init_repo +travis_time_start REPO_DIR="$1" CACHE_DIR="$2" @@ -36,49 +37,45 @@ fi rm -rf "$CACHE_DIR" mkdir "$CACHE_DIR" -travis_fold start update_cache -travis_time_start +# On the beta channel we'll be automatically calculating the prerelease version +# via the git history, so unshallow our shallow clone from CI. 
+if grep -q RUST_RELEASE_CHANNEL=beta src/ci/run.sh; then + git fetch origin --unshallow beta master +fi -# Update the cache (a pristine copy of the rust source master) -retry sh -c "rm -rf $cache_src_dir && mkdir -p $cache_src_dir && \ - git clone --depth 1 https://github.com/rust-lang/rust.git $cache_src_dir" -(cd $cache_src_dir && git rm src/llvm) -retry sh -c "cd $cache_src_dir && \ - git submodule deinit -f . && git submodule sync && git submodule update --init" +function fetch_submodule { + local module=$1 + local cached="download-${module//\//-}.tar.gz" + retry sh -c "rm -f $cached && \ + curl -sSL -o $cached $2" + mkdir $module + touch "$module/.git" + tar -C $module --strip-components=1 -xf $cached + rm $cached +} -travis_fold end update_cache -travis_time_finish - -travis_fold start update_submodules -travis_time_start - -# Update the submodules of the repo we're in, using the pristine repo as -# a cache for any object files -# No, `git submodule foreach` won't work: -# http://stackoverflow.com/questions/12641469/list-submodules-in-a-git-repository +included="src/llvm src/llvm-emscripten src/doc/book src/doc/rust-by-example" modules="$(git config --file .gitmodules --get-regexp '\.path$' | cut -d' ' -f2)" -for module in $modules; do - if [ "$module" = src/llvm ]; then - commit="$(git ls-tree HEAD src/llvm | awk '{print $3}')" - git rm src/llvm - retry sh -c "rm -f $commit.tar.gz && \ - curl -sSL -O https://github.com/rust-lang/llvm/archive/$commit.tar.gz" - tar -C src/ -xf "$commit.tar.gz" - rm "$commit.tar.gz" - mv "src/llvm-$commit" src/llvm +modules=($modules) +use_git="" +urls="$(git config --file .gitmodules --get-regexp '\.url$' | cut -d' ' -f2)" +urls=($urls) +for i in ${!modules[@]}; do + module=${modules[$i]} + if [[ " $included " = *" $module "* ]]; then + commit="$(git ls-tree HEAD $module | awk '{print $3}')" + git rm $module + url=${urls[$i]} + url=${url/\.git/} + fetch_submodule $module "$url/archive/$commit.tar.gz" & continue + else + 
use_git="$use_git $module" fi - if [ ! -e "$cache_src_dir/$module/.git" ]; then - echo "WARNING: $module not found in pristine repo" - retry sh -c "git submodule deinit -f $module && \ - git submodule update --init --recursive $module" - continue - fi - retry sh -c "git submodule deinit -f $module && \ - git submodule update --init --recursive --reference $cache_src_dir/$module $module" done - -travis_fold end update_submodules -travis_time_finish - +retry sh -c "git submodule deinit -f $use_git && \ + git submodule sync && \ + git submodule update -j 16 --init --recursive $use_git" +wait travis_fold end init_repo +travis_time_finish diff --git a/src/ci/run.sh b/src/ci/run.sh index dab385c09649..09a0cf3541d8 100755 --- a/src/ci/run.sh +++ b/src/ci/run.sh @@ -11,6 +11,10 @@ set -e +if [ -n "$CI_JOB_NAME" ]; then + echo "[CI_JOB_NAME=$CI_JOB_NAME]" +fi + if [ "$NO_CHANGE_USER" = "" ]; then if [ "$LOCAL_USER_ID" != "" ]; then useradd --shell /bin/bash -u $LOCAL_USER_ID -o -c "" -m user @@ -20,11 +24,16 @@ if [ "$NO_CHANGE_USER" = "" ]; then fi fi +# only enable core dump on Linux +if [ -f /proc/sys/kernel/core_pattern ]; then + ulimit -c unlimited +fi + ci_dir=`cd $(dirname $0) && pwd` source "$ci_dir/shared.sh" -if [ "$TRAVIS" == "true" ] && [ "$TRAVIS_BRANCH" != "auto" ]; then - RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-quiet-tests" +if [ "$TRAVIS" != "true" ] || [ "$TRAVIS_BRANCH" == "auto" ]; then + RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --set build.print-step-timings --enable-verbose-tests" fi RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-sccache" @@ -66,6 +75,19 @@ else fi fi +# We've had problems in the past of shell scripts leaking fds into the sccache +# server (#48192) which causes Cargo to erroneously think that a build script +# hasn't finished yet. Try to solve that problem by starting a very long-lived +# sccache server at the start of the build, but no need to worry if this fails. 
+SCCACHE_IDLE_TIMEOUT=10800 sccache --start-server || true + +if [ "$RUN_CHECK_WITH_PARALLEL_QUERIES" != "" ]; then + $SRC/configure --enable-experimental-parallel-queries + CARGO_INCREMENTAL=0 python2.7 ../x.py check + rm -f config.toml + rm -rf build +fi + travis_fold start configure travis_time_start $SRC/configure $RUST_CONFIGURE_ARGS @@ -84,11 +106,19 @@ make check-bootstrap travis_fold end check-bootstrap travis_time_finish +# Display the CPU and memory information. This helps us know why the CI timing +# is fluctuating. +travis_fold start log-system-info if [ "$TRAVIS_OS_NAME" = "osx" ]; then + system_profiler SPHardwareDataType || true + sysctl hw || true ncpus=$(sysctl -n hw.ncpu) else + cat /proc/cpuinfo || true + cat /proc/meminfo || true ncpus=$(grep processor /proc/cpuinfo | wc -l) fi +travis_fold end log-system-info if [ ! -z "$SCRIPT" ]; then sh -x -c "$SCRIPT" @@ -97,7 +127,7 @@ else travis_fold start "make-$1" travis_time_start echo "make -j $ncpus $1" - make -j $ncpus "$1" + make -j $ncpus $1 local retval=$? travis_fold end "make-$1" travis_time_finish diff --git a/src/ci/shared.sh b/src/ci/shared.sh index 4a08683e3ee8..bb6945f0fd6b 100644 --- a/src/ci/shared.sh +++ b/src/ci/shared.sh @@ -21,11 +21,12 @@ function retry { while true; do "$@" && break || { if [[ $n -lt $max ]]; then + sleep $n # don't retry immediately ((n++)) echo "Command failed. Attempt $n/$max:" else echo "The command has failed after $n attempts." - exit 1 + return 1 fi } done diff --git a/src/dlmalloc b/src/dlmalloc index d3812c3accae..c99638dc2ecf 160000 --- a/src/dlmalloc +++ b/src/dlmalloc @@ -1 +1 @@ -Subproject commit d3812c3accaee7ad23068ed4fc089cc05c7a538f +Subproject commit c99638dc2ecfc750cc1656f6edb2bd062c1e0981 diff --git a/src/doc/README.md b/src/doc/README.md index e1d95732b467..5f25894afd76 100644 --- a/src/doc/README.md +++ b/src/doc/README.md @@ -29,4 +29,4 @@ rustdoc reference.md An overview of how to use the `rustdoc` command is available [in the docs][1]. 
Further details are available from the command line by with `rustdoc --help`. -[1]: https://github.com/rust-lang/rust/blob/master/src/doc/book/documentation.md +[1]: https://github.com/rust-lang/rust/blob/master/src/doc/rustdoc/src/what-is-rustdoc.md diff --git a/src/doc/book b/src/doc/book index 194eb8d5f175..88cdde350fd3 160000 --- a/src/doc/book +++ b/src/doc/book @@ -1 +1 @@ -Subproject commit 194eb8d5f1753fb5f4501011cebdc1b585712474 +Subproject commit 88cdde350fd3a90c93f3bac8b4f168f105d28060 diff --git a/src/doc/grammar.md b/src/doc/grammar.md index 78432b6a9659..ee9135b6578f 100644 --- a/src/doc/grammar.md +++ b/src/doc/grammar.md @@ -101,29 +101,24 @@ properties: `ident`, `non_null`, `non_eol`, `non_single_quote` and ### Identifiers -The `ident` production is any nonempty Unicode[^non_ascii_idents] string of +The `ident` production is any nonempty Unicode string of the following form: -[^non_ascii_idents]: Non-ASCII characters in identifiers are currently feature - gated. This is expected to improve soon. +- The first character is in one of the following ranges `U+0041` to `U+005A` +("A" to "Z"), `U+0061` to `U+007A` ("a" to "z"), or `U+005F` ("\_"). +- The remaining characters are in the range `U+0030` to `U+0039` ("0" to "9"), +or any of the prior valid initial characters. -- The first character has property `XID_start` -- The remaining characters have property `XID_continue` - -that does _not_ occur in the set of [keywords](#keywords). - -> **Note**: `XID_start` and `XID_continue` as character properties cover the -> character ranges used to form the more familiar C and Java language-family -> identifiers. +as long as the identifier does _not_ occur in the set of [keywords](#keywords). 
### Delimiter-restricted productions Some productions are defined by exclusion of particular Unicode characters: - `non_null` is any single Unicode character aside from `U+0000` (null) -- `non_eol` is `non_null` restricted to exclude `U+000A` (`'\n'`) -- `non_single_quote` is `non_null` restricted to exclude `U+0027` (`'`) -- `non_double_quote` is `non_null` restricted to exclude `U+0022` (`"`) +- `non_eol` is any single Unicode character aside from `U+000A` (`'\n'`) +- `non_single_quote` is any single Unicode character aside from `U+0027` (`'`) +- `non_double_quote` is any single Unicode character aside from `U+0022` (`"`) ## Comments diff --git a/src/doc/index.md b/src/doc/index.md index 3784cc3c4b49..3a4f51069fc3 100644 --- a/src/doc/index.md +++ b/src/doc/index.md @@ -6,53 +6,76 @@ nav { } -This page is an overview of the documentation included with your Rust install. -Other unofficial documentation may exist elsewhere; for example, the [Rust -Learning] project collects documentation from the community, and [Docs.rs] -builds documentation for individual Rust packages. +Welcome to an overview of the documentation provided by the Rust project. +All of these projects are managed by the Docs Team; there are other +unofficial documentation resources as well! -# API Documentation +Many of these resources take the form of "books"; we collectively call these +"The Rust Bookshelf." Some are large, some are small. -Rust provides a standard library with a number of features; [we host its -documentation here][api]. +# Learn Rust -# Extended Error Documentation +If you'd like to learn Rust, this is the spot for you! All of these resources +assume that you have programmed before, but not in any specific language: + +## The Rust Programming Language + +Affectionately nicknamed "the book," [The Rust Programming +Language](book/index.html) will give you an overview of the language from +first principles. 
You'll build a few projects along the way, and by the end, +you'll have a solid grasp of the language. + +## Rust By Example + +If reading multiple hundreds of pages about a language isn't your style, then +[Rust By Example](rust-by-example/index.html) has you covered. While the book talks about code with +a lot of words, RBE shows off a bunch of code, and keeps the talking to a +minimum. It also includes exercises! + +# Use Rust + +Once you've gotten familliar with the language, these resources can help you +when you're actually using it day-to-day. + +## The Standard Library + +Rust's standard library has [extensive API documentation](std/index.html), +with explanations of how to use various things, as well as example code for +accomplishing various tasks. + +## The Rustc Book + +[The Rustc Book](rustc/index.html) describes the Rust compiler, `rustc`. + +## The Cargo Book + +[The Cargo Book](cargo/index.html) is a guide to Cargo, Rust's build tool and dependency manager. + +## The Rustdoc Book + +[The Rustdoc Book](rustdoc/index.html) describes our documentation tool, `rustdoc`. + +## Extended Error Listing Many of Rust's errors come with error codes, and you can request extended -diagnostics from the compiler on those errors. We also [have the text of those -extended errors on the web][err], if you prefer to read them that way. +diagnostics from the compiler on those errors. You can also [read them +here](error-index.html), if you prefer to read them that way. -# The Rust Bookshelf +# Master Rust -Rust provides a number of book-length sets of documentation, collectively -nicknamed 'The Rust Bookshelf.' +Once you're quite familiar with the language, you may find these advanced +resources useful. -* [The Rust Programming Language][book] teaches you how to program in Rust. -* [The Cargo Book][cargo-book] is a guide to Cargo, Rust's build tool and dependency manager. -* [The Unstable Book][unstable-book] has documentation for unstable features. 
-* [The Rustonomicon][nomicon] is your guidebook to the dark arts of unsafe Rust. -* [The Reference][ref] is not a formal spec, but is more detailed and comprehensive than the book. -* [The Rustdoc Book][rustdoc-book] describes our documentation tool, `rustdoc`. +## The Reference -Initially, documentation lands in the Unstable Book, and then, as part of the -stabilization process, is moved into the Book, Nomicon, or Reference. +[The Reference](reference/index.html) is not a formal spec, but is more detailed and +comprehensive than the book. -Another few words about the reference: it is guaranteed to be accurate, but not -complete. We have a policy that features must have documentation to be stabilized, -but we did not always have this policy, and so there are some stable things that -are not yet in the reference. We're working on back-filling things that landed -before this policy was put into place. That work is being tracked -[here][refchecklist]. +## The Rustonomicon -[Rust Learning]: https://github.com/ctjhoa/rust-learning -[Docs.rs]: https://docs.rs/ -[api]: std/index.html -[ref]: reference/index.html -[refchecklist]: https://github.com/rust-lang-nursery/reference/issues/9 -[err]: error-index.html -[book]: book/index.html -[nomicon]: nomicon/index.html -[unstable-book]: unstable-book/index.html -[rustdoc-book]: rustdoc/index.html -[cargo-book]: cargo/index.html +[The Rustonomicon](nomicon/index.html) is your guidebook to the dark arts of unsafe +Rust. It's also sometimes called "the 'nomicon." +## The Unstable Book + +[The Unstable Book](unstable-book/index.html) has documentation for unstable features. diff --git a/src/doc/man/rustc.1 b/src/doc/man/rustc.1 index 19f6cc9ac619..8f611063dbe5 100644 --- a/src/doc/man/rustc.1 +++ b/src/doc/man/rustc.1 @@ -55,7 +55,7 @@ Configure the output that \fBrustc\fR will produce. Each emission may also have an optional explicit output \fIPATH\fR specified for that particular emission kind. 
This path takes precedence over the \fB-o\fR option. .TP -\fB\-\-print\fR [crate\-name|file\-names|sysroot] +\fB\-\-print\fR [crate\-name|\:file\-names|\:sysroot|\:cfg|\:target\-list|\:target\-cpus|\:target\-features|\:relocation\-models|\:code\-models|\:tls\-models|\:target\-spec\-json|\:native\-static\-libs] Comma separated list of compiler information to print on stdout. .TP \fB\-g\fR @@ -125,6 +125,16 @@ Print version info and exit. \fB\-v\fR, \fB\-\-verbose\fR Use verbose output. .TP +\fB\-\-remap\-path\-prefix\fR \fIfrom\fR=\fIto\fR +Remap source path prefixes in all output, including compiler diagnostics, debug information, +macro expansions, etc. The \fIfrom\fR=\fIto\fR parameter is scanned from right to left, so \fIfrom\fR +may contain '=', but \fIto\fR may not. + +This is useful for normalizing build products, for example by removing the current directory out of +pathnames emitted into the object files. The replacement is purely textual, with no consideration of +the current system's pathname syntax. For example \fI\-\-remap\-path\-prefix foo=bar\fR will +match \fBfoo/lib.rs\fR but not \fB./foo/lib.rs\fR. +.TP \fB\-\-extern\fR \fINAME\fR=\fIPATH\fR Specify where an external rust library is located. These should match \fIextern\fR declarations in the crate's source code. diff --git a/src/doc/man/rustdoc.1 b/src/doc/man/rustdoc.1 index a878380f556b..d7f78e8f6f4d 100644 --- a/src/doc/man/rustdoc.1 +++ b/src/doc/man/rustdoc.1 @@ -119,7 +119,7 @@ See <\fBhttps://github.com/rust\-lang/rust/issues\fR> for issues. .SH "AUTHOR" -See \fIAUTHORS.txt\fR in the Rust source distribution. +See the version control history or <\fBhttps://thanks.rust\-lang.org\fR> .SH "COPYRIGHT" This work is dual\[hy]licensed under Apache\ 2.0 and MIT terms. 
diff --git a/src/doc/nomicon b/src/doc/nomicon index 2f7b05fd5939..790e96b87f4b 160000 --- a/src/doc/nomicon +++ b/src/doc/nomicon @@ -1 +1 @@ -Subproject commit 2f7b05fd5939aa49d52c4ab309b9a47776ba7bd8 +Subproject commit 790e96b87f4b5817cac310e73a524d25c3d076d8 diff --git a/src/doc/reference b/src/doc/reference index 1d791b55b23e..219e261ddb83 160000 --- a/src/doc/reference +++ b/src/doc/reference @@ -1 +1 @@ -Subproject commit 1d791b55b23ec5389fbd5b3cee80db3f8bbdd162 +Subproject commit 219e261ddb833a5683627b0a9be87a0f4486abb9 diff --git a/src/doc/rust-by-example b/src/doc/rust-by-example index 4ebb8169dfe5..e3719fc78ff4 160000 --- a/src/doc/rust-by-example +++ b/src/doc/rust-by-example @@ -1 +1 @@ -Subproject commit 4ebb8169dfe569b3dcbeab560607800bb717978a +Subproject commit e3719fc78ff4a21dfd13cfcc9e2ca42cb5de29f4 diff --git a/src/doc/rustc-ux-guidelines.md b/src/doc/rustc-ux-guidelines.md index 323d49e46912..93e94e558630 100644 --- a/src/doc/rustc-ux-guidelines.md +++ b/src/doc/rustc-ux-guidelines.md @@ -64,13 +64,12 @@ for details on how to format and write long error codes. 
[librustc](https://github.com/rust-lang/rust/blob/master/src/librustc/diagnostics.rs), [libsyntax](https://github.com/rust-lang/rust/blob/master/src/libsyntax/diagnostics.rs), [librustc_borrowck](https://github.com/rust-lang/rust/blob/master/src/librustc_borrowck/diagnostics.rs), - [librustc_const_eval](https://github.com/rust-lang/rust/blob/master/src/librustc_const_eval/diagnostics.rs), [librustc_metadata](https://github.com/rust-lang/rust/blob/master/src/librustc_metadata/diagnostics.rs), [librustc_mir](https://github.com/rust-lang/rust/blob/master/src/librustc_mir/diagnostics.rs), [librustc_passes](https://github.com/rust-lang/rust/blob/master/src/librustc_passes/diagnostics.rs), [librustc_privacy](https://github.com/rust-lang/rust/blob/master/src/librustc_privacy/diagnostics.rs), [librustc_resolve](https://github.com/rust-lang/rust/blob/master/src/librustc_resolve/diagnostics.rs), - [librustc_trans](https://github.com/rust-lang/rust/blob/master/src/librustc_trans/diagnostics.rs), + [librustc_codegen_llvm](https://github.com/rust-lang/rust/blob/master/src/librustc_codegen_llvm/diagnostics.rs), [librustc_plugin](https://github.com/rust-lang/rust/blob/master/src/librustc_plugin/diagnostics.rs), [librustc_typeck](https://github.com/rust-lang/rust/blob/master/src/librustc_typeck/diagnostics.rs). * Explanations have full markdown support. 
Use it, especially to highlight diff --git a/src/doc/rustc/.gitignore b/src/doc/rustc/.gitignore new file mode 100644 index 000000000000..7585238efedf --- /dev/null +++ b/src/doc/rustc/.gitignore @@ -0,0 +1 @@ +book diff --git a/src/doc/rustc/book.toml b/src/doc/rustc/book.toml new file mode 100644 index 000000000000..8adc05c51372 --- /dev/null +++ b/src/doc/rustc/book.toml @@ -0,0 +1,5 @@ +[book] +authors = ["The Rust Project Developers"] +multilingual = false +src = "src" +title = "The rustc book" diff --git a/src/doc/rustc/src/SUMMARY.md b/src/doc/rustc/src/SUMMARY.md new file mode 100644 index 000000000000..e4c0939fd463 --- /dev/null +++ b/src/doc/rustc/src/SUMMARY.md @@ -0,0 +1,16 @@ +# The Rustc Book + +- [What is rustc?](what-is-rustc.md) +- [Command-line arguments](command-line-arguments.md) +- [Lints](lints/index.md) + - [Lint levels](lints/levels.md) + - [Lint Groups](lints/groups.md) + - [Lint listing](lints/listing/index.md) + - [Allowed-by-default lints](lints/listing/allowed-by-default.md) + - [Warn-by-default lints](lints/listing/warn-by-default.md) + - [Deny-by-default lints](lints/listing/deny-by-default.md) +- [Codegen options](codegen-options/index.md) +- [Targets](targets/index.md) + - [Built-in Targets](targets/built-in.md) + - [Custom Targets](targets/custom.md) +- [Contributing to `rustc`](contributing.md) \ No newline at end of file diff --git a/src/doc/rustc/src/codegen-options/index.md b/src/doc/rustc/src/codegen-options/index.md new file mode 100644 index 000000000000..eff09428902e --- /dev/null +++ b/src/doc/rustc/src/codegen-options/index.md @@ -0,0 +1,209 @@ +# Codegen options + +All of these options are passed to `rustc` via the `-C` flag, short for "codegen." You can see +a version of this list for your exact compiler by running `rustc -C help`. + +## ar + +This option is deprecated and does nothing. + +## linker + +This flag lets you control which linker `rustc` invokes to link your code. 
+ +## link-arg=val + +This flag lets you append a single extra argument to the linker invocation. + +"Append" is significant; you can pass this flag multiple times to add multiple arguments. + +## link-args + +This flag lets you append multiple extra arguments to the linker invocation. The +options should be separated by spaces. + +## link-dead-code + +Normally, the linker will remove dead code. This flag disables this behavior. + +An example of when this flag might be useful is when trying to construct code coverage +metrics. + +## lto + +This flag instructs LLVM to use [link time +optimizations](https://llvm.org/docs/LinkTimeOptimization.html). + +It takes one of two values, `thin` and `fat`. 'thin' LTO [is a new feature of +LLVM](http://blog.llvm.org/2016/06/thinlto-scalable-and-incremental-lto.html), +'fat' referring to the classic version of LTO. + +## target-cpu + +This instructs `rustc` to generate code specifically for a particular processor. + +You can run `rustc --print target-cpus` to see the valid options to pass +here. Additionally, `native` can be passed to use the processor of the host +machine. + +## target-feature + +Individual targets will support different features; this flag lets you control +enabling or disabling a feature. + +To see the valid options and an example of use, run `rustc --print +target-features`. + +## passes + +This flag can be used to add extra LLVM passes to the compilation. + +The list must be separated by spaces. + +## llvm-args + +This flag can be used to pass a list of arguments directly to LLVM. + +The list must be separated by spaces. + +## save-temps + +`rustc` will generate temporary files during compilation; normally it will +delete them after it's done with its work. This option will cause them to be +preserved instead of removed. + +## rpath + +This option allows you to set the value of +[`rpath`](https://en.wikipedia.org/wiki/Rpath). 
+ +## overflow-checks + +This flag allows you to control the behavior of integer overflow. This flag +can be passed many options: + +* To turn overflow checks on: `y`, `yes`, or `on`. +* To turn overflow checks off: `n`, `no`, or `off`. + +## no-prepopulate-passes + +The pass manager comes pre-populated with a list of passes; this flag +ensures that list is empty. + +## no-vectorize-loops + +By default, `rustc` will attempt to [vectorize +loops](https://llvm.org/docs/Vectorizers.html#the-loop-vectorizer). This +flag will turn that behavior off. + +## no-vectorize-slp + +By default, `rustc` will attempt to vectorize loops using [superword-level +parallelism](https://llvm.org/docs/Vectorizers.html#the-slp-vectorizer). This +flag will turn that behavior off. + +## soft-float + +This option will make `rustc` generate code using "soft floats." By default, +a lot of hardware supports floating point instructions, and so the code generated +will take advantage of this. "soft floats" emulate floating point instructions +in software. + +## prefer-dynamic + +By default, `rustc` prefers to statically link dependencies. This option will +make it use dynamic linking instead. + +## no-integrated-as + +LLVM comes with an internal assembler; this option will let you use an +external assembler instead. + +## no-redzone + +This flag allows you to disable [the +red zone](https://en.wikipedia.org/wiki/Red_zone_\(computing\)). This flag can +be passed many options: + +* To enable the red zone: `y`, `yes`, or `on`. +* To disable it: `n`, `no`, or `off`. + +## relocation-model + +This option lets you choose which relocation model to use. + +To find the valid options for this flag, run `rustc --print relocation-models`. + +## code-model=val + +This option lets you choose which code model to use. + +To find the valid options for this flag, run `rustc --print code-models`. + +## metadata + +This option allows you to control the metadata used for symbol mangling. 
+ +## extra-filename + +This option allows you to put extra data in each output filename. + +## codegen-units + +This flag lets you control how many threads are used when doing +code generation. + +Increasing paralellism may speed up compile times, but may also +produce slower code. + +## remark + +This flag lets you print remarks for these optimization passes. + +The list of passes should be separated by spaces. + +`all` will remark on every pass. + +## no-stack-check + +This option is deprecated and does nothing. + +## debuginfo + +This flag lets you control debug information: + +* `0`: no debug info at all +* `1`: line tables only +* `2`: full debug info + +## opt-level + +This flag lets you control the optimization level. + +* `0`: no optimizations +* `1`: basic optimizations +* `2`: some optimizations +* `3`: all optimizations +* `s`: optimize for binary size +* `z`: optimize for binary size, but also turn off loop vectorization. + +## debug-assertions + +This flag lets you turn `cfg(debug_assertions)` on or off. + +## inline-threshold + +This option lets you set the threshold for inlining a function. + +The default is 225. + +## panic + +This option lets you control what happens when the code panics. + +* `abort`: terminate the process upon panic +* `unwind`: unwind the stack upon panic + +## incremental + +This flag allows you to enable incremental compilation. diff --git a/src/doc/rustc/src/command-line-arguments.md b/src/doc/rustc/src/command-line-arguments.md new file mode 100644 index 000000000000..e2b001832fe3 --- /dev/null +++ b/src/doc/rustc/src/command-line-arguments.md @@ -0,0 +1,116 @@ +# Command-line arguments + +Here's a list of command-line arguments to `rustc` and what they do. + +## `-h`/`--help`: get help + +This flag will print out help information for `rustc`. + +## `--cfg`: configure the compilation environment + +This flag can turn on or off various `#[cfg]` settings. 
+ +## `-L`: add a directory to the library search path + +When looking for external crates, a directory passed to this flag will be searched. + +## `-l`: link the generated crate to a native library + +This flag allows you to specify linking to a specific native library when building +a crate. + +## `--crate-type`: a list of types of crates for the compiler to emit + +This instructs `rustc` on which crate type to build. + +## `--crate-name`: specify the name of the crate being built + +This informs `rustc` of the name of your crate. + +## `--emit`: emit output other than a crate + +Instead of producing a crate, this flag can print out things like the assembly or LLVM-IR. + +## `--print`: print compiler information + +This flag prints out various information about the compiler. + +## `-g`: include debug information + +A synonym for `-C debug-level=2`. + +## `-O`: optimize your code + +A synonym for `-C opt-level=2`. + +## `-o`: filename of the output + +This flag controls the output filename. + +## `--out-dir`: directory to write the output in + +The outputted crate will be written to this directory. + +## `--explain`: provide a detailed explanation of an error message + +Each error of `rustc`'s comes with an error code; this will print +out a longer explanation of a given error. + +## `--test`: build a test harness + +When compiling this crate, `rustc` will ignore your `main` function +and instead produce a test harness. + +## `--target`: select a target triple to build + +This controls which [target](targets/index.html) to produce. + +## `-W`: set lint warnings + +This flag will set which lints should be set to the [warn level](lints/levels.html#warn). + +## `-A`: set lint allowed + +This flag will set which lints should be set to the [allow level](lints/levels.html#allow). + +## `-D`: set lint denied + +This flag will set which lints should be set to the [deny level](lints/levels.html#deny). 
+ +## `-F`: set lint forbidden + +This flag will set which lints should be set to the [forbid level](lints/levels.html#forbid). + +## `--cap-lints`: set the most restrictive lint level + +This flag lets you 'cap' lints, for more, [see here](lints/levels.html#capping-lints). + +## `-C`/`--codegen`: code generation options + +This flag will allow you to set [codegen options](codegen-options/index.html). + +## `-V`/`--version`: print a version + +This flag will print out `rustc`'s version. + +## `-v`/`--verbose`: use verbose output + +This flag, when combined with other flags, makes them produce extra output. + +## `--extern`: specify where an external library is located + +This flag allows you to pass the name and location of an external crate that will +be linked into the crate you're building. + +## `--sysroot`: Override the system root + +The "sysroot" is where `rustc` looks for the crates that come with the Rust +distribution; this flag allows that to be overridden. + +## `--error-format`: control how errors are produced + +This flag lets you control the format of errors. + +## `--color`: configure coloring of output + +This flag lets you control color settings of the output. diff --git a/src/doc/rustc/src/contributing.md b/src/doc/rustc/src/contributing.md new file mode 100644 index 000000000000..fcb8e6b27dbf --- /dev/null +++ b/src/doc/rustc/src/contributing.md @@ -0,0 +1,6 @@ +# Contributing to rustc + +We'd love to have your help improving `rustc`! To that end, we've written [a +whole book](https://rust-lang-nursery.github.io/rustc-guide/) on its +internals, how it works, and how to get started working on it. To learn +more, you'll want to check that out. 
\ No newline at end of file diff --git a/src/doc/rustc/src/lints/groups.md b/src/doc/rustc/src/lints/groups.md new file mode 100644 index 000000000000..46b717f3387d --- /dev/null +++ b/src/doc/rustc/src/lints/groups.md @@ -0,0 +1,29 @@ +# Lint Groups + +`rustc` has the concept of a "lint group", where you can toggle several warnings +through one name. + +For example, the `nonstandard-style` lint sets `non-camel-case-types`, +`non-snake-case`, and `non-upper-case-globals` all at once. So these are +equivalent: + +```bash +$ rustc -D nonstandard-style +$ rustc -D non-camel-case-types -D non-snake-case -D non-upper-case-globals +``` + +Here's a list of each lint group, and the lints that they are made up of: + +| group | description | lints | +|---------------------|---------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| nonstandard-style | Violation of standard naming conventions | non-camel-case-types, non-snake-case, non-upper-case-globals | +| warnings | all lints that would be issuing warnings | all lints that would be issuing warnings | +| edition-2018 | Lints that will be turned into errors in Rust 2018 | tyvar-behind-raw-pointer | +| rust-2018-idioms | Lints to nudge you toward idiomatic features of Rust 2018 | bare-trait-object, unreachable-pub | +| unused | These lints detect things being declared but not used | unused-imports, unused-variables, unused-assignments, dead-code, unused-mut, unreachable-code, unreachable-patterns, unused-must-use, unused-unsafe, path-statements, 
unused-attributes, unused-macros, unused-allocation, unused-doc-comment, unused-extern-crates, unused-features, unused-parens | +| future-incompatible | Lints that detect code that has future-compatibility problems | private-in-public, pub-use-of-private-extern-crate, patterns-in-fns-without-body, safe-extern-statics, invalid-type-param-default, legacy-directory-ownership, legacy-imports, legacy-constructor-visibility, missing-fragment-specifier, illegal-floating-point-literal-pattern, anonymous-parameters, parenthesized-params-in-types-and-modules, late-bound-lifetime-arguments, safe-packed-borrows, incoherent-fundamental-impls, tyvar-behind-raw-pointer, unstable-name-collision | + +Additionally, there's a `bad-style` lint group that's a deprecated alias for `nonstandard-style`. + +Finally, you can also see the table above by invoking `rustc -W help`. This will give you the exact values for the specific +compiler you have installed. \ No newline at end of file diff --git a/src/doc/rustc/src/lints/index.md b/src/doc/rustc/src/lints/index.md new file mode 100644 index 000000000000..bf345a24389d --- /dev/null +++ b/src/doc/rustc/src/lints/index.md @@ -0,0 +1,28 @@ +# Lints + +In software, a "lint" is a tool used to help improve your source code. The +Rust compiler contains a number of lints, and when it compiles your code, it will +also run the lints. These lints may produce a warning, an error, or nothing at all, +depending on how you've configured things. + +Here's a small example: + +```bash +$ cat main.rs +fn main() { + let x = 5; +} +> rustc main.rs +warning: unused variable: `x` + --> main.rs:2:9 + | +2 | let x = 5; + | ^ + | + = note: #[warn(unused_variables)] on by default + = note: to avoid this warning, consider using `_x` instead +``` + +This is the `unused_variables` lint, and it tells you that you've introduced +a variable that you don't use in your code. That's not *wrong*, so it's not +an error, but it might be a bug, so you get a warning. 
diff --git a/src/doc/rustc/src/lints/levels.md b/src/doc/rustc/src/lints/levels.md new file mode 100644 index 000000000000..19bb6707d228 --- /dev/null +++ b/src/doc/rustc/src/lints/levels.md @@ -0,0 +1,252 @@ +# Lint levels + +In `rustc`, lints are divided into four *levels*: + +1. allow +2. warn +3. deny +4. forbid + +Each lint has a default level (explained in the lint listing later in this +chapter), and the compiler has a default warning level. First, let's explain +what these levels mean, and then we'll talk about configuration. + +## allow + +These lints exist, but by default, do nothing. For example, consider this +source: + +```rust +pub fn foo() {} +``` + +Compiling this file produces no warnings: + +```bash +$ rustc lib.rs --crate-type=lib +$ +``` + +But this code violates the `missing_docs` lint. + +These lints exist mostly to be manually turned on via configuration, as we'll +talk about later in this section. + +## warn + +The 'warn' lint level will produce a warning if you violate the lint. For example, +this code runs afoul of the `unused_variable` lint: + +```rust +pub fn foo() { + let x = 5; +} +``` + +This will produce this warning: + +```console +$ rustc lib.rs --crate-type=lib +warning: unused variable: `x` + --> lib.rs:2:9 + | +2 | let x = 5; + | ^ + | + = note: #[warn(unused_variables)] on by default + = note: to avoid this warning, consider using `_x` instead +``` + +## deny + +A 'deny' lint produces an error if you violate it. For example, this code +runs into the `exceeding_bitshifts` lint. + +```rust,ignore +fn main() { + 100u8 << 10; +} +``` + +```bash +> rustc main.rs +error: bitshift exceeds the type's number of bits + --> main.rs:2:13 + | +2 | 100u8 << 10; + | ^^^^^^^^^^^ + | + = note: #[deny(exceeding_bitshifts)] on by default +``` + +What's the difference between an error from a lint and a regular old error? 
+Lints are configurable via levels, so in a similar way to 'allow' lints, +warnings that are 'deny' by default let you allow them. Similarly, you may +wish to set up a lint that is `warn` by default to produce an error instead. +This lint level gives you that. + +## forbid + +'forbid' is a special lint level that's stronger than 'deny'. It's the same +as 'deny' in that a lint at this level will produce an error, but unlike the +'deny' level, the 'forbid' level can not be overridden to be anything lower +than an error. + +## Configuring warning levels + +Remember our `missing_docs` example from the 'allow' lint level? + +```bash +$ cat lib.rs +pub fn foo() {} +$ rustc lib.rs --crate-type=lib +$ +``` + +We can configure this lint to operate at a higher level, both with +compiler flags, as well as with an attribute in the source code. + +You can also "cap" lints so that the compiler can choose to ignore +certain lint levels. We'll talk about that last. + +### Via compiler flag + +The `-A`, `-W`, `-D`, and `-F` flags let you turn one or more lints +into allowed, warning, deny, or forbid levels, like this: + +```bash +$ rustc lib.rs --crate-type=lib -W missing-docs +warning: missing documentation for crate + --> lib.rs:1:1 + | +1 | pub fn foo() {} + | ^^^^^^^^^^^^ + | + = note: requested on the command line with `-W missing-docs` + +warning: missing documentation for a function + --> lib.rs:1:1 + | +1 | pub fn foo() {} + | ^^^^^^^^^^^^ +> rustc lib.rs --crate-type=lib -D missing-docs +error: missing documentation for crate + --> lib.rs:1:1 + | +1 | pub fn foo() {} + | ^^^^^^^^^^^^ + | + = note: requested on the command line with `-D missing-docs` + +error: missing documentation for a function + --> lib.rs:1:1 + | +1 | pub fn foo() {} + | ^^^^^^^^^^^^ + +error: aborting due to 2 previous errors +``` + +You can also pass each flag more than once for changing multiple lints: + +```bash +rustc lib.rs --crate-type=lib -D missing-docs -D unused-variables +``` + +And of 
course, you can mix these four flags together: + +```bash +rustc lib.rs --crate-type=lib -D missing-docs -A unused-variables +``` + +### Via an attribute + +You can also modify the lint level with a crate-wide attribute: + +```bash +> cat lib.rs +#![warn(missing_docs)] + +pub fn foo() {} +$ rustc lib.rs --crate-type=lib +warning: missing documentation for crate + --> lib.rs:1:1 + | +1 | / #![warn(missing_docs)] +2 | | +3 | | pub fn foo() {} + | |_______________^ + | +note: lint level defined here + --> lib.rs:1:9 + | +1 | #![warn(missing_docs)] + | ^^^^^^^^^^^^ + +warning: missing documentation for a function + --> lib.rs:3:1 + | +3 | pub fn foo() {} + | ^^^^^^^^^^^^ +``` + +All four, `warn`, `allow`, `deny`, and `forbid` all work this way. + +You can also pass in multiple lints per attribute: + +```rust +#![warn(missing_docs, unused_variables)] + +pub fn foo() {} +``` + +And use multiple attributes together: + +```rust +#![warn(missing_docs)] +#![deny(unused_variables)] + +pub fn foo() {} +``` + +### Capping lints + +`rustc` supports a flag, `--cap-lints LEVEL` that sets the "lint cap level." +This is the maximum level for all lints. So for example, if we take our +code sample from the "deny" lint level above: + +```rust,ignore +fn main() { + 100u8 << 10; +} +``` + +And we compile it, capping lints to warn: + +```bash +$ rustc lib.rs --cap-lints warn +warning: bitshift exceeds the type's number of bits + --> lib.rs:2:5 + | +2 | 100u8 << 10; + | ^^^^^^^^^^^ + | + = note: #[warn(exceeding_bitshifts)] on by default + +warning: this expression will panic at run-time + --> lib.rs:2:5 + | +2 | 100u8 << 10; + | ^^^^^^^^^^^ attempt to shift left with overflow +``` + +It now only warns, rather than errors. 
We can go further and allow all lints: + +```bash +$ rustc lib.rs --cap-lints allow +$ +``` + +This feature is used heavily by Cargo; it will pass `--cap-lints allow` when +compiling your dependencies, so that if they have any warnings, they do not +pollute the output of your build. diff --git a/src/doc/rustc/src/lints/listing/allowed-by-default.md b/src/doc/rustc/src/lints/listing/allowed-by-default.md new file mode 100644 index 000000000000..7768b41f85ee --- /dev/null +++ b/src/doc/rustc/src/lints/listing/allowed-by-default.md @@ -0,0 +1,453 @@ +# Allowed-by-default lints + +These lints are all set to the 'allow' level by default. As such, they won't show up +unless you set them to a higher lint level with a flag or attribute. + +## anonymous-parameters + +This lint detects anonymous parameters. Some example code that triggers this lint: + +```rust +trait Foo { + fn foo(usize); +} +``` + +When set to 'deny', this will produce: + +```text +error: use of deprecated anonymous parameter + --> src/lib.rs:5:11 + | +5 | fn foo(usize); + | ^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #41686 +``` + +This syntax is mostly a historical accident, and can be worked around quite +easily: + +```rust +trait Foo { + fn foo(_: usize); +} +``` + +## bare-trait-object + +This lint suggests using `dyn Trait` for trait objects. 
Some example code +that triggers this lint: + +```rust +#![feature(dyn_trait)] + +trait Trait { } + +fn takes_trait_object(_: Box) { +} +``` + +When set to 'deny', this will produce: + +```text +error: trait objects without an explicit `dyn` are deprecated + --> src/lib.rs:7:30 + | +7 | fn takes_trait_object(_: Box) { + | ^^^^^ help: use `dyn`: `dyn Trait` + | +``` + +To fix it, do as the help message suggests: + +```rust +#![feature(dyn_trait)] +#![deny(bare_trait_objects)] + +trait Trait { } + +fn takes_trait_object(_: Box) { +} +``` + +## box-pointers + +This lints use of the Box type. Some example code that triggers this lint: + +```rust +struct Foo { + x: Box, +} +``` + +When set to 'deny', this will produce: + +```text +error: type uses owned (Box type) pointers: std::boxed::Box + --> src/lib.rs:6:5 + | +6 | x: Box //~ ERROR type uses owned + | ^^^^^^^^^^^^^ + | +``` + +This lint is mostly historical, and not particularly useful. `Box` used to +be built into the language, and the only way to do heap allocation. Today's +Rust can call into other allocators, etc. + +## elided-lifetime-in-path + +This lint detects the use of hidden lifetime parameters. Some example code +that triggers this lint: + +```rust +struct Foo<'a> { + x: &'a u32 +} + +fn foo(x: &Foo) { +} +``` + +When set to 'deny', this will produce: + +```text +error: hidden lifetime parameters are deprecated, try `Foo<'_>` + --> src/lib.rs:5:12 + | +5 | fn foo(x: &Foo) { + | ^^^ + | +``` + +Lifetime elision elides this lifetime, but that is being deprecated. + +## missing-copy-implementations + +This lint detects potentially-forgotten implementations of `Copy`. 
Some +example code that triggers this lint: + +```rust +pub struct Foo { + pub field: i32 +} +``` + +When set to 'deny', this will produce: + +```text +error: type could implement `Copy`; consider adding `impl Copy` + --> src/main.rs:3:1 + | +3 | / pub struct Foo { //~ ERROR type could implement `Copy`; consider adding `impl Copy` +4 | | pub field: i32 +5 | | } + | |_^ + | +``` + +You can fix the lint by deriving `Copy`. + +This lint is set to 'allow' because this code isn't bad; it's common to write +newtypes like this specifically so that a `Copy` type is no longer `Copy`. + +## missing-debug-implementations + +This lint detects missing implementations of `fmt::Debug`. Some example code +that triggers this lint: + +```rust +pub struct Foo; +``` + +When set to 'deny', this will produce: + +```text +error: type does not implement `fmt::Debug`; consider adding #[derive(Debug)] or a manual implementation + --> src/main.rs:3:1 + | +3 | pub struct Foo; + | ^^^^^^^^^^^^^^^ + | +``` + +You can fix the lint by deriving `Debug`. + +## missing-docs + +This lint detects missing documentation for public items. Some example code +that triggers this lint: + +```rust +pub fn foo() {} +``` + +When set to 'deny', this will produce: + +```text +error: missing documentation for crate + --> src/main.rs:1:1 + | +1 | / #![deny(missing_docs)] +2 | | +3 | | pub fn foo() {} +4 | | +5 | | fn main() {} + | |____________^ + | + +error: missing documentation for a function + --> src/main.rs:3:1 + | +3 | pub fn foo() {} + | ^^^^^^^^^^^^ + +``` + +To fix the lint, add documentation to all items. + +## single-use-lifetime + +This lint detects lifetimes that are only used once. 
Some example code that +triggers this lint: + +```rust +struct Foo<'x> { + x: &'x u32 +} +``` + +When set to 'deny', this will produce: + +```text +error: lifetime name `'x` only used once + --> src/main.rs:3:12 + | +3 | struct Foo<'x> { + | ^^ + | +``` + +## trivial-casts + +This lint detects trivial casts which could be removed. Some example code +that triggers this lint: + +```rust +let x: &u32 = &42; +let _ = x as *const u32; +``` + +When set to 'deny', this will produce: + +```text +error: trivial cast: `&u32` as `*const u32`. Cast can be replaced by coercion, this might require type ascription or a temporary variable + --> src/main.rs:5:13 + | +5 | let _ = x as *const u32; + | ^^^^^^^^^^^^^^^ + | +note: lint level defined here + --> src/main.rs:1:9 + | +1 | #![deny(trivial_casts)] + | ^^^^^^^^^^^^^ +``` + +## trivial-numeric-casts + +This lint detects trivial casts of numeric types which could be removed. Some +example code that triggers this lint: + +```rust +let x = 42i32 as i32; +``` + +When set to 'deny', this will produce: + +```text +error: trivial numeric cast: `i32` as `i32`. Cast can be replaced by coercion, this might require type ascription or a temporary variable + --> src/main.rs:4:13 + | +4 | let x = 42i32 as i32; + | ^^^^^^^^^^^^ + | +``` + +## unreachable-pub + +This lint triggers for `pub` items not reachable from the crate root. Some +example code that triggers this lint: + +```rust +mod foo { + pub mod bar { + + } +} +``` + +When set to 'deny', this will produce: + +```text +error: unreachable `pub` item + --> src/main.rs:4:5 + | +4 | pub mod bar { + | ---^^^^^^^^ + | | + | help: consider restricting its visibility: `pub(crate)` + | +``` + +## unsafe-code + +This lint catches usage of `unsafe` code. 
Some example code that triggers this lint: + +```rust +fn main() { + unsafe { + + } +} +``` + +When set to 'deny', this will produce: + +```text +error: usage of an `unsafe` block + --> src/main.rs:4:5 + | +4 | / unsafe { +5 | | +6 | | } + | |_____^ + | +``` + +## unstable-features + +This lint is deprecated and no longer used. + +## unused-extern-crates + +This lint guards against `extern crate` items that are never used. Some +example code that triggers this lint: + +```rust,ignore +extern crate semver; +``` + +When set to 'deny', this will produce: + +```text +error: unused extern crate + --> src/main.rs:3:1 + | +3 | extern crate semver; + | ^^^^^^^^^^^^^^^^^^^^ + | +``` + +## unused-import-braces + +This lint catches unnecessary braces around an imported item. Some example +code that triggers this lint: + +```rust +use test::{A}; + +pub mod test { + pub struct A; +} +# fn main() {} +``` + +When set to 'deny', this will produce: + +```text +error: braces around A is unnecessary + --> src/main.rs:3:1 + | +3 | use test::{A}; + | ^^^^^^^^^^^^^^ + | +``` + +To fix it, `use test::A;` + +## unused-qualifications + +This lint detects unnecessarily qualified names. Some example code that triggers this lint: + +```rust +mod foo { + pub fn bar() {} +} + +fn main() { + use foo::bar; + foo::bar(); +} +``` + +When set to 'deny', this will produce: + +```text +error: unnecessary qualification + --> src/main.rs:9:5 + | +9 | foo::bar(); + | ^^^^^^^^ + | +``` + +You can call `bar()` directly, without the `foo::`. + +## unused-results + +This lint checks for the unused result of an expression in a statement. Some +example code that triggers this lint: + +```rust,no_run +fn foo() -> T { panic!() } + +fn main() { + foo::(); +} +``` + +When set to 'deny', this will produce: + +```text +error: unused result + --> src/main.rs:6:5 + | +6 | foo::(); + | ^^^^^^^^^^^^^^^ + | +``` + +## variant-size-differences + +This lint detects enums with widely varying variant sizes. 
Some example code that triggers this lint: + +```rust +enum En { + V0(u8), + VBig([u8; 1024]), +} +``` + +When set to 'deny', this will produce: + +```text +error: enum variant is more than three times larger (1024 bytes) than the next largest + --> src/main.rs:5:5 + | +5 | VBig([u8; 1024]), //~ ERROR variant is more than three times larger + | ^^^^^^^^^^^^^^^^ + | +``` diff --git a/src/doc/rustc/src/lints/listing/deny-by-default.md b/src/doc/rustc/src/lints/listing/deny-by-default.md new file mode 100644 index 000000000000..3a85a40fd1fd --- /dev/null +++ b/src/doc/rustc/src/lints/listing/deny-by-default.md @@ -0,0 +1,245 @@ +# Deny-by-default lints + +These lints are all set to the 'deny' level by default. + +## exceeding-bitshifts + +This lint detects that a shift exceeds the type's number of bits. Some +example code that triggers this lint: + +```rust,ignore +1_i32 << 32; +``` + +This will produce: + +```text +error: bitshift exceeds the type's number of bits + --> src/main.rs:2:5 + | +2 | 1_i32 << 32; + | ^^^^^^^^^^^ + | +``` + +## invalid-type-param-default + +This lint detects type parameter default erroneously allowed in invalid location. Some +example code that triggers this lint: + +```rust,ignore +fn foo(t: T) {} +``` + +This will produce: + +```text +error: defaults for type parameters are only allowed in `struct`, `enum`, `type`, or `trait` definitions. + --> src/main.rs:4:8 + | +4 | fn foo(t: T) {} + | ^ + | + = note: #[deny(invalid_type_param_default)] on by default + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #36887 +``` + +## legacy-constructor-visibility + +[RFC 1506](https://github.com/rust-lang/rfcs/blob/master/text/1506-adt-kinds.md) modified some +visibility rules, and changed the visibility of struct constructors. 
Some +example code that triggers this lint: + +```rust,ignore +mod m { + pub struct S(u8); + + fn f() { + // this is trying to use S from the 'use' line, but because the `u8` is + // not pub, it is private + ::S; + } +} + +use m::S; +``` + +This will produce: + +```text +error: private struct constructors are not usable through re-exports in outer modules + --> src/main.rs:5:9 + | +5 | ::S; + | ^^^ + | + = note: #[deny(legacy_constructor_visibility)] on by default + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #39207 +``` + + +## legacy-directory-ownership + +The legacy_directory_ownership warning is issued when + +* There is a non-inline module with a #[path] attribute (e.g. #[path = "foo.rs"] mod bar;), +* The module's file ("foo.rs" in the above example) is not named "mod.rs", and +* The module's file contains a non-inline child module without a #[path] attribute. + +The warning can be fixed by renaming the parent module to "mod.rs" and moving +it into its own directory if appropriate. + + +## missing-fragment-specifier + +The missing_fragment_specifier warning is issued when an unused pattern in a +`macro_rules!` macro definition has a meta-variable (e.g. `$e`) that is not +followed by a fragment specifier (e.g. `:expr`). + +This warning can always be fixed by removing the unused pattern in the +`macro_rules!` macro definition. + +## mutable-transmutes + +This lint catches transmuting from `&T` to `&mut T` because it is undefined +behavior. 
Some example code that triggers this lint: + +```rust,ignore +unsafe { + let y = std::mem::transmute::<&i32, &mut i32>(&5); +} +``` + +This will produce: + +```text +error: mutating transmuted &mut T from &T may cause undefined behavior, consider instead using an UnsafeCell + --> src/main.rs:3:17 + | +3 | let y = std::mem::transmute::<&i32, &mut i32>(&5); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | +``` + + +## no-mangle-const-items + +This lint detects any `const` items with the `#[no_mangle]` attribute. +Constants do not have their symbols exported, and therefore, this probably +means you meant to use a `static`, not a `const`. Some example code that +triggers this lint: + +```rust,ignore +#[no_mangle] +const FOO: i32 = 5; +``` + +This will produce: + +```text +error: const items should never be #[no_mangle] + --> src/main.rs:3:1 + | +3 | const FOO: i32 = 5; + | -----^^^^^^^^^^^^^^ + | | + | help: try a static value: `pub static` + | +``` + +## parenthesized-params-in-types-and-modules + +This lint detects incorrect parentheses. Some example code that triggers this +lint: + +```rust,ignore +let x = 5 as usize(); +``` + +This will produce: + +```text +error: parenthesized parameters may only be used with a trait + --> src/main.rs:2:21 + | +2 | let x = 5 as usize(); + | ^^ + | + = note: #[deny(parenthesized_params_in_types_and_modules)] on by default + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #42238 +``` + +To fix it, remove the `()`s. + +## pub-use-of-private-extern-crate + +This lint detects a specific situation of re-exporting a private `extern crate`; + +## safe-extern-statics + +In older versions of Rust, there was a soundness issue where `extern static`s were allowed +to be accessed in safe code. This lint now catches and denies this kind of code. 
+ +## unknown-crate-types + +This lint detects an unknown crate type found in a `#[crate_type]` directive. Some +example code that triggers this lint: + +```rust,ignore +#![crate_type="lol"] +``` + +This will produce: + +```text +error: invalid `crate_type` value + --> src/lib.rs:1:1 + | +1 | #![crate_type="lol"] + | ^^^^^^^^^^^^^^^^^^^^ + | +``` + +## incoherent-fundamental-impls + +This lint detects potentially-conflicting impls that were erroneously allowed. Some +example code that triggers this lint: + +```rust,ignore +pub trait Trait1 { + type Output; +} + +pub trait Trait2 {} + +pub struct A; + +impl Trait1 for T where T: Trait2 { + type Output = (); +} + +impl Trait1> for A { + type Output = i32; +} +``` + +This will produce: + +```text +error: conflicting implementations of trait `Trait1>` for type `A`: (E0119) + --> src/main.rs:13:1 + | +9 | impl Trait1 for T where T: Trait2 { + | --------------------------------------------- first implementation here +... +13 | impl Trait1> for A { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ conflicting implementation for `A` + | + = note: #[deny(incoherent_fundamental_impls)] on by default + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #46205 + = note: downstream crates may implement trait `Trait2>` for type `A` +``` diff --git a/src/doc/rustc/src/lints/listing/index.md b/src/doc/rustc/src/lints/listing/index.md new file mode 100644 index 000000000000..18cd2fe32a3c --- /dev/null +++ b/src/doc/rustc/src/lints/listing/index.md @@ -0,0 +1,5 @@ +# Lint listing + +This section lists out all of the lints, grouped by their default lint levels. + +You can also see this list by running `rustc -W help`. 
\ No newline at end of file diff --git a/src/doc/rustc/src/lints/listing/warn-by-default.md b/src/doc/rustc/src/lints/listing/warn-by-default.md new file mode 100644 index 000000000000..b49708ff6adc --- /dev/null +++ b/src/doc/rustc/src/lints/listing/warn-by-default.md @@ -0,0 +1,998 @@ +# Warn-by-default lints + +These lints are all set to the 'warn' level by default. + +## const-err + +This lint detects an erroneous expression while doing constant evaluation. Some +example code that triggers this lint: + +```rust,ignore +let b = 200u8 + 200u8; +``` + +This will produce: + +```text +warning: attempt to add with overflow + --> src/main.rs:2:9 + | +2 | let b = 200u8 + 200u8; + | ^^^^^^^^^^^^^ + | +``` + +## dead-code + +This lint detects unused, unexported items. Some +example code that triggers this lint: + +```rust +fn foo() {} +``` + +This will produce: + +```text +warning: function is never used: `foo` + --> src/lib.rs:2:1 + | +2 | fn foo() {} + | ^^^^^^^^ + | +``` + +## deprecated + +This lint detects use of deprecated items. Some +example code that triggers this lint: + +```rust +#[deprecated] +fn foo() {} + +fn bar() { + foo(); +} +``` + +This will produce: + +```text +warning: use of deprecated item 'foo' + --> src/lib.rs:7:5 + | +7 | foo(); + | ^^^ + | +``` + +## illegal-floating-point-literal-pattern + +This lint detects floating-point literals used in patterns. Some example code +that triggers this lint: + +```rust +let x = 42.0; + +match x { + 5.0 => {}, + _ => {}, +} +``` + +This will produce: + +```text +warning: floating-point literals cannot be used in patterns + --> src/main.rs:4:9 + | +4 | 5.0 => {}, + | ^^^ + | + = note: #[warn(illegal_floating_point_literal_pattern)] on by default + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
+ = note: for more information, see issue #41620 +``` + +## improper-ctypes + +This lint detects improper use of libc types in foreign modules. Some +example code that triggers this lint: + +```rust +extern "C" { + static STATIC: String; +} +``` + +This will produce: + +```text +warning: found struct without foreign-function-safe representation annotation in foreign module, consider adding a #[repr(C)] attribute to the type + --> src/main.rs:2:20 + | +2 | static STATIC: String; + | ^^^^^^ + | +``` + +## late-bound-lifetime-arguments + +This lint detects generic lifetime arguments in path segments with +late bound lifetime parameters. Some example code that triggers this lint: + +```rust +struct S; + +impl S { + fn late<'a, 'b>(self, _: &'a u8, _: &'b u8) {} +} + +fn main() { + S.late::<'static>(&0, &0); +} +``` + +This will produce: + +```text +warning: cannot specify lifetime arguments explicitly if late bound lifetime parameters are present + --> src/main.rs:8:14 + | +4 | fn late<'a, 'b>(self, _: &'a u8, _: &'b u8) {} + | -- the late bound lifetime parameter is introduced here +... +8 | S.late::<'static>(&0, &0); + | ^^^^^^^ + | + = note: #[warn(late_bound_lifetime_arguments)] on by default + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #42868 +``` + +## non-camel-case-types + +This lint detects types, variants, traits and type parameters that don't have +camel case names. Some example code that triggers this lint: + +```rust +struct s; +``` + +This will produce: + +```text +warning: type `s` should have a camel case name such as `S` + --> src/main.rs:1:1 + | +1 | struct s; + | ^^^^^^^^^ + | +``` + +## non-shorthand-field-patterns + +This lint detects using `Struct { x: x }` instead of `Struct { x }` in a pattern. 
Some +example code that triggers this lint: + +```rust +struct Point { + x: i32, + y: i32, +} + + +fn main() { + let p = Point { + x: 5, + y: 5, + }; + + match p { + Point { x: x, y: y } => (), + } +} +``` + +This will produce: + +```text +warning: the `x:` in this pattern is redundant + --> src/main.rs:14:17 + | +14 | Point { x: x, y: y } => (), + | --^^ + | | + | help: remove this + | + +warning: the `y:` in this pattern is redundant + --> src/main.rs:14:23 + | +14 | Point { x: x, y: y } => (), + | --^^ + | | + | help: remove this + +``` + +## non-snake-case + +This lint detects variables, methods, functions, lifetime parameters and +modules that don't have snake case names. Some example code that triggers +this lint: + +```rust +let X = 5; +``` + +This will produce: + +```text +warning: variable `X` should have a snake case name such as `x` + --> src/main.rs:2:9 + | +2 | let X = 5; + | ^ + | +``` + +## non-upper-case-globals + +This lint detects static constants that don't have uppercase identifiers. +Some example code that triggers this lint: + +```rust +static x: i32 = 5; +``` + +This will produce: + +```text +warning: static variable `x` should have an upper case name such as `X` + --> src/main.rs:1:1 + | +1 | static x: i32 = 5; + | ^^^^^^^^^^^^^^^^^^ + | +``` + +## no-mangle-generic-items + +This lint detects generic items must be mangled. Some +example code that triggers this lint: + +```rust +#[no_mangle] +fn foo(t: T) { + +} +``` + +This will produce: + +```text +warning: functions generic over types must be mangled + --> src/main.rs:2:1 + | +1 | #[no_mangle] + | ------------ help: remove this attribute +2 | / fn foo(t: T) { +3 | | +4 | | } + | |_^ + | +``` + +## overflowing-literals + +This lint detects literal out of range for its type. 
Some
+example code that triggers this lint:
+
+```rust
+let x: u8 = 1000;
+```
+
+This will produce:
+
+```text
+warning: literal out of range for u8
+ --> src/main.rs:2:17
+  |
+2 |     let x: u8 = 1000;
+  |                 ^^^^
+  |
+```
+
+## path-statements
+
+This lint detects path statements with no effect. Some example code that
+triggers this lint:
+
+```rust
+let x = 42;
+
+x;
+```
+
+This will produce:
+
+```text
+warning: path statement with no effect
+ --> src/main.rs:3:5
+  |
+3 |     x;
+  |     ^^
+  |
+```
+
+## patterns-in-fns-without-body
+
+This lint detects patterns in functions without bodies that were
+previously erroneously allowed. Some example code that triggers this lint:
+
+```rust
+trait Trait {
+    fn foo(mut arg: u8);
+}
+```
+
+This will produce:
+
+```text
+warning: patterns aren't allowed in methods without bodies
+ --> src/main.rs:2:12
+  |
+2 |     fn foo(mut arg: u8);
+  |            ^^^^^^^
+  |
+  = note: #[warn(patterns_in_fns_without_body)] on by default
+  = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+  = note: for more information, see issue #35203
+```
+
+To fix this, remove the pattern; it can be used in the implementation without
+being used in the definition. That is:
+
+```rust
+trait Trait {
+    fn foo(arg: u8);
+}
+
+impl Trait for i32 {
+    fn foo(mut arg: u8) {
+
+    }
+}
+```
+
+## plugin-as-library
+
+This lint detects when compiler plugins are used as an ordinary library in
+a non-plugin crate. Some example code that triggers this lint:
+
+```rust,ignore
+#![feature(plugin)]
+#![plugin(macro_crate_test)]
+
+extern crate macro_crate_test;
+```
+
+## private-in-public
+
+This lint detects private items in public interfaces not caught by the old implementation.
Some +example code that triggers this lint: + +```rust,ignore +pub trait Trait { + type A; +} + +pub struct S; + +mod foo { + struct Z; + + impl ::Trait for ::S { + type A = Z; + } +} +# fn main() {} +``` + +This will produce: + +```text +error[E0446]: private type `foo::Z` in public interface + --> src/main.rs:11:9 + | +11 | type A = Z; + | ^^^^^^^^^^^ can't leak private type +``` + +## private-no-mangle-fns + +This lint detects functions marked `#[no_mangle]` that are also private. +Given that private functions aren't exposed publicly, and `#[no_mangle]` +controls the public symbol, this combination is erroneous. Some example code +that triggers this lint: + +```rust +#[no_mangle] +fn foo() {} +``` + +This will produce: + +```text +warning: function is marked #[no_mangle], but not exported + --> src/main.rs:2:1 + | +2 | fn foo() {} + | -^^^^^^^^^^ + | | + | help: try making it public: `pub` + | +``` + +To fix this, either make it public or remove the `#[no_mangle]`. + +## private-no-mangle-statics + +This lint detects any statics marked `#[no_mangle]` that are private. +Given that private statics aren't exposed publicly, and `#[no_mangle]` +controls the public symbol, this combination is erroneous. Some example code +that triggers this lint: + +```rust +#[no_mangle] +static X: i32 = 4; +``` + +This will produce: + +```text +warning: static is marked #[no_mangle], but not exported + --> src/main.rs:2:1 + | +2 | static X: i32 = 4; + | -^^^^^^^^^^^^^^^^^ + | | + | help: try making it public: `pub` + | +``` + +To fix this, either make it public or remove the `#[no_mangle]`. + +## renamed-and-removed-lints + +This lint detects lints that have been renamed or removed. 
Some
+example code that triggers this lint:
+
+```rust
+#![deny(raw_pointer_derive)]
+```
+
+This will produce:
+
+```text
+warning: lint raw_pointer_derive has been removed: using derive with raw pointers is ok
+ --> src/main.rs:1:9
+  |
+1 | #![deny(raw_pointer_derive)]
+  |         ^^^^^^^^^^^^^^^^^^
+  |
+```
+
+To fix this, either remove the lint or use the new name.
+
+## safe-packed-borrows
+
+This lint detects borrowing a field in the interior of a packed structure
+with alignment other than 1. Some example code that triggers this lint:
+
+```rust
+#[repr(packed)]
+pub struct Unaligned<T>(pub T);
+
+pub struct Foo {
+    start: u8,
+    data: Unaligned<u32>,
+}
+
+fn main() {
+    let x = Foo { start: 0, data: Unaligned(1) };
+    let y = &x.data.0;
+}
+```
+
+This will produce:
+
+```text
+warning: borrow of packed field requires unsafe function or block (error E0133)
+  --> src/main.rs:11:13
+   |
+11 |     let y = &x.data.0;
+   |             ^^^^^^^^^
+   |
+   = note: #[warn(safe_packed_borrows)] on by default
+   = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+   = note: for more information, see issue #46043
+```
+
+## stable-features
+
+This lint detects a `#[feature]` attribute that's since been made stable. Some
+example code that triggers this lint:
+
+```rust
+#![feature(test_accepted_feature)]
+```
+
+This will produce:
+
+```text
+warning: this feature has been stable since 1.0.0. Attribute no longer needed
+ --> src/main.rs:1:12
+  |
+1 | #![feature(test_accepted_feature)]
+  |            ^^^^^^^^^^^^^^^^^^^^^
+  |
+```
+
+To fix, simply remove the `#![feature]` attribute, as it's no longer needed.
+
+## type-alias-bounds
+
+This lint detects bounds in type aliases. These are not currently enforced.
+Some example code that triggers this lint:
+
+```rust
+type SendVec<T: Send> = Vec<T>;
+```
+
+This will produce:
+
+```text
+warning: type alias is never used: `SendVec`
+ --> src/main.rs:1:1
+  |
+1 | type SendVec<T: Send> = Vec<T>;
+  | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  |
+```
+
+## tyvar-behind-raw-pointer
+
+This lint detects a raw pointer to an inference variable. Some
+example code that triggers this lint:
+
+```rust
+let data = std::ptr::null();
+let _ = &data as *const *const ();
+
+if data.is_null() {}
+```
+
+This will produce:
+
+```text
+warning: type annotations needed
+ --> src/main.rs:4:13
+  |
+4 |     if data.is_null() {}
+  |             ^^^^^^^
+  |
+  = note: #[warn(tyvar_behind_raw_pointer)] on by default
+  = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in the 2018 edition!
+  = note: for more information, see issue #46906
+```
+
+## unconditional-recursion
+
+This lint detects functions that cannot return without calling themselves.
+Some example code that triggers this lint:
+
+```rust
+fn foo() {
+    foo();
+}
+```
+
+This will produce:
+
+```text
+warning: function cannot return without recurring
+ --> src/main.rs:1:1
+  |
+1 | fn foo() {
+  | ^^^^^^^^ cannot return without recurring
+2 |     foo();
+  |     ----- recursive call site
+  |
+```
+
+## unions-with-drop-fields
+
+This lint detects use of unions that contain fields with possibly non-trivial drop code. Some
+example code that triggers this lint:
+
+```rust
+#![feature(untagged_unions)]
+
+union U {
+    s: String,
+}
+```
+
+This will produce:
+
+```text
+warning: union contains a field with possibly non-trivial drop code, drop code of union fields is ignored when dropping the union
+ --> src/main.rs:4:5
+  |
+4 |     s: String,
+  |     ^^^^^^^^^
+  |
+```
+
+## unknown-lints
+
+This lint detects an unrecognized lint attribute.
Some
+example code that triggers this lint:
+
+```rust,ignore
+#[allow(not_a_real_lint)]
+```
+
+This will produce:
+
+```text
+warning: unknown lint: `not_a_real_lint`
+ --> src/main.rs:1:10
+  |
+1 | #![allow(not_a_real_lint)]
+  |          ^^^^^^^^^^^^^^^
+  |
+```
+
+## unreachable-code
+
+This lint detects unreachable code paths. Some example code that
+triggers this lint:
+
+```rust,no_run
+panic!("we never go past here!");
+
+let x = 5;
+```
+
+This will produce:
+
+```text
+warning: unreachable statement
+ --> src/main.rs:4:5
+  |
+4 |     let x = 5;
+  |     ^^^^^^^^^^
+  |
+```
+
+## unreachable-patterns
+
+This lint detects unreachable patterns. Some
+example code that triggers this lint:
+
+```rust
+let x = 5;
+match x {
+    y => (),
+    5 => (),
+}
+```
+
+This will produce:
+
+```text
+warning: unreachable pattern
+ --> src/main.rs:5:5
+  |
+5 |     5 => (),
+  |     ^
+  |
+```
+
+The `y` pattern will always match, so the five is impossible to reach.
+Remember, match arms match in order, you probably wanted to put the `5` case
+above the `y` case.
+
+## unstable-name-collision
+
+This lint detects that you've used a name that the standard library plans to
+add in the future, which means that your code may fail to compile without
+additional type annotations in the future. Either rename, or add those
+annotations now.
+
+## unused-allocation
+
+This lint detects unnecessary allocations that can be eliminated.
+
+## unused-assignments
+
+This lint detects assignments that will never be read. Some
+example code that triggers this lint:
+
+```rust
+let mut x = 5;
+x = 6;
+```
+
+This will produce:
+
+```text
+warning: value assigned to `x` is never read
+ --> src/main.rs:4:5
+  |
+4 |     x = 6;
+  |     ^
+  |
+```
+
+## unused-attributes
+
+This lint detects attributes that were not used by the compiler.
Some
+example code that triggers this lint:
+
+```rust
+#![feature(custom_attribute)]
+
+#![mutable_doc]
+```
+
+This will produce:
+
+```text
+warning: unused attribute
+ --> src/main.rs:4:1
+  |
+4 | #![mutable_doc]
+  | ^^^^^^^^^^^^^^^
+  |
+```
+
+## unused-comparisons
+
+This lint detects comparisons made useless by limits of the types involved. Some
+example code that triggers this lint:
+
+```rust
+fn foo(x: u8) {
+    x >= 0;
+}
+```
+
+This will produce:
+
+```text
+warning: comparison is useless due to type limits
+ --> src/main.rs:6:5
+  |
+6 |     x >= 0;
+  |     ^^^^^^
+  |
+```
+
+## unused-doc-comment
+
+This lint detects doc comments that aren't used by rustdoc. Some
+example code that triggers this lint:
+
+```rust
+/// docs for x
+let x = 12;
+```
+
+This will produce:
+
+```text
+warning: doc comment not used by rustdoc
+ --> src/main.rs:2:5
+  |
+2 |     /// docs for x
+  |     ^^^^^^^^^^^^^^
+  |
+```
+
+## unused-features
+
+This lint detects unused or unknown features found in crate-level #[feature] directives.
+To fix this, simply remove the feature flag.
+
+## unused-imports
+
+This lint detects imports that are never used. Some
+example code that triggers this lint:
+
+```rust
+use std::collections::HashMap;
+```
+
+This will produce:
+
+```text
+warning: unused import: `std::collections::HashMap`
+ --> src/main.rs:1:5
+  |
+1 | use std::collections::HashMap;
+  |     ^^^^^^^^^^^^^^^^^^^^^^^^^
+  |
+```
+
+## unused-macros
+
+This lint detects macros that were not used. Some example code that
+triggers this lint:
+
+```rust
+macro_rules! unused {
+    () => {};
+}
+
+fn main() {
+}
+```
+
+This will produce:
+
+```text
+warning: unused macro definition
+ --> src/main.rs:1:1
+  |
+1 | / macro_rules! unused {
+2 | |     () => {};
+3 | | }
+  | |_^
+  |
+```
+
+## unused-must-use
+
+This lint detects an unused result of a type flagged as #[must_use].
Some
+example code that triggers this lint:
+
+```rust
+fn returns_result() -> Result<(), ()> {
+    Ok(())
+}
+
+fn main() {
+    returns_result();
+}
+```
+
+This will produce:
+
+```text
+warning: unused `std::result::Result` which must be used
+ --> src/main.rs:6:5
+  |
+6 |     returns_result();
+  |     ^^^^^^^^^^^^^^^^^
+  |
+```
+
+## unused-mut
+
+This lint detects mut variables which don't need to be mutable. Some
+example code that triggers this lint:
+
+```rust
+let mut x = 5;
+```
+
+This will produce:
+
+```text
+warning: variable does not need to be mutable
+ --> src/main.rs:2:9
+  |
+2 |     let mut x = 5;
+  |         ----^
+  |         |
+  |         help: remove this `mut`
+  |
+```
+
+## unused-parens
+
+This lint detects `if`, `match`, `while` and `return` with parentheses; they
+do not need them. Some example code that triggers this lint:
+
+```rust
+if(true) {}
+```
+
+This will produce:
+
+```text
+warning: unnecessary parentheses around `if` condition
+ --> src/main.rs:2:7
+  |
+2 |     if(true) {}
+  |       ^^^^^^ help: remove these parentheses
+  |
+```
+
+## unused-unsafe
+
+This lint detects unnecessary use of an `unsafe` block. Some
+example code that triggers this lint:
+
+```rust
+unsafe {}
+```
+
+This will produce:
+
+```text
+warning: unnecessary `unsafe` block
+ --> src/main.rs:2:5
+  |
+2 |     unsafe {}
+  |     ^^^^^^ unnecessary `unsafe` block
+  |
+```
+
+## unused-variables
+
+This lint detects variables which are not used in any way. Some
+example code that triggers this lint:
+
+```rust
+let x = 5;
+```
+
+This will produce:
+
+```text
+warning: unused variable: `x`
+ --> src/main.rs:2:9
+  |
+2 |     let x = 5;
+  |         ^ help: consider using `_x` instead
+  |
+```
+
+## warnings
+
+This lint is a bit special; by changing its level, you change every other warning
+that would produce a warning to whatever value you'd like:
+
+```rust
+#![deny(warnings)]
+```
+
+As such, you won't ever trigger this lint in your code directly.
+
+## while-true
+
+This lint detects `while true { }`.
Some example code that triggers this +lint: + +```rust,no_run +while true { + +} +``` + +This will produce: + +```text +warning: denote infinite loops with `loop { ... }` + --> src/main.rs:2:5 + | +2 | while true { + | ^^^^^^^^^^ help: use `loop` + | +``` diff --git a/src/doc/rustc/src/targets/built-in.md b/src/doc/rustc/src/targets/built-in.md new file mode 100644 index 000000000000..8620346e5b74 --- /dev/null +++ b/src/doc/rustc/src/targets/built-in.md @@ -0,0 +1,10 @@ +# Built-in Targets + +`rustc` ships with the ability to compile to many targets automatically, we +call these "built-in" targets, and they generally correspond to targets that +the team is supporting directly. + +To see the list of built-in targets, you can run `rustc --print target-list`, +or look at [the API +docs](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_back/target/#modules). +Each module there defines a builder for a particular target. \ No newline at end of file diff --git a/src/doc/rustc/src/targets/custom.md b/src/doc/rustc/src/targets/custom.md new file mode 100644 index 000000000000..0691afc60eae --- /dev/null +++ b/src/doc/rustc/src/targets/custom.md @@ -0,0 +1,17 @@ +# Custom Targets + +If you'd like to build for a target that is not yet supported by `rustc`, you can use a +"custom target specification" to define a target. These target specification files +are JSON. To see the JSON for the host target, you can run: + +```bash +$ rustc +nightly -Z unstable-options --print target-spec-json +``` + +To see it for a different target, add the `--target` flag: + +```bash +$ rustc +nightly -Z unstable-options --target=wasm32-unknown-unknown --print target-spec-json +``` + +To use a custom target, see [`xargo`](https://github.com/japaric/xargo). 
\ No newline at end of file diff --git a/src/doc/rustc/src/targets/index.md b/src/doc/rustc/src/targets/index.md new file mode 100644 index 000000000000..07e3a79471f5 --- /dev/null +++ b/src/doc/rustc/src/targets/index.md @@ -0,0 +1,13 @@ +# Targets + +`rustc` is a cross-compiler by default. This means that you can use any compiler to build for any +architecture. The list of *targets* are the possible architectures that you can build for. + +To see all the options that you can set with a target, see the docs +[here](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_back/target/struct.Target.html). + +To compile to a particular target, use the `--target` flag: + +```bash +$ rustc src/main.rs --target=wasm32-unknown-unknown +``` diff --git a/src/doc/rustc/src/what-is-rustc.md b/src/doc/rustc/src/what-is-rustc.md new file mode 100644 index 000000000000..bed1b71c24e0 --- /dev/null +++ b/src/doc/rustc/src/what-is-rustc.md @@ -0,0 +1,68 @@ +# What is rustc? + +Welcome to "The rustc book"! `rustc` is the compiler for the Rust programming +language, provided by the project itself. Compilers take your source code and +produce binary code, either as a library or executable. + +Most Rust programmers don't invoke `rustc` directly, but instead do it through +[Cargo](../cargo/index.html). It's all in service of `rustc` though! If you +want to see how Cargo calls `rustc`, you can + +```bash +$ cargo build --verbose +``` + +And it will print out each `rustc` invocation. This book can help you +understand what each of these options does. Additionally, while most +Rustaceans use Cargo, not all do: sometimes they integrate `rustc` into other +build systems. This book should provide a guide to all of the options you'd +need to do so. 
+ +## Basic usage + +Let's say you've got a little hello world program in a file `hello.rs`: + +```rust +fn main() { + println!("Hello, world!"); +} +``` + +To turn this source code into an executable, you can use `rustc`: + +```bash +$ rustc hello.rs +$ ./hello # on a *NIX +$ .\hello.exe # on Windows +``` + +Note that we only ever pass `rustc` the *crate root*, not every file we wish +to compile. For example, if we had a `main.rs` that looked like this: + +```rust,ignore +mod foo; + +fn main() { + foo::hello(); +} +``` + +And a `foo.rs` that had this: + +```rust,ignore +fn hello() { + println!("Hello, world!"); +} +``` + +To compile this, we'd run this command: + +```bash +$ rustc main.rs +``` + +No need to tell `rustc` about `foo.rs`; the `mod` statements give it +everything that it needs. This is different than how you would use a C +compiler, where you invoke the compiler on each file, and then link +everything together. In other words, the *crate* is a translation unit, not a +particular module. \ No newline at end of file diff --git a/src/doc/rustdoc/src/SUMMARY.md b/src/doc/rustdoc/src/SUMMARY.md index 6315cb81a849..46528187c117 100644 --- a/src/doc/rustdoc/src/SUMMARY.md +++ b/src/doc/rustdoc/src/SUMMARY.md @@ -5,3 +5,4 @@ - [The `#[doc]` attribute](the-doc-attribute.md) - [Documentation tests](documentation-tests.md) - [Passes](passes.md) +- [Unstable features](unstable-features.md) diff --git a/src/doc/rustdoc/src/command-line-arguments.md b/src/doc/rustdoc/src/command-line-arguments.md index e51c63cf0089..417608cc5ca0 100644 --- a/src/doc/rustdoc/src/command-line-arguments.md +++ b/src/doc/rustdoc/src/command-line-arguments.md @@ -141,6 +141,31 @@ Similar to `--library-path`, `--extern` is about specifying the location of a dependency. `--library-path` provides directories to search in, `--extern` instead lets you specify exactly which dependency is located where. 
+## `-C`/`--codegen`: pass codegen options to rustc + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -C target_feature=+avx +$ rustdoc src/lib.rs --codegen target_feature=+avx + +$ rustdoc --test src/lib.rs -C target_feature=+avx +$ rustdoc --test src/lib.rs --codegen target_feature=+avx + +$ rustdoc --test README.md -C target_feature=+avx +$ rustdoc --test README.md --codegen target_feature=+avx +``` + +When rustdoc generates documentation, looks for documentation tests, or executes documentation +tests, it needs to compile some rust code, at least part-way. This flag allows you to tell rustdoc +to provide some extra codegen options to rustc when it runs these compilations. Most of the time, +these options won't affect a regular documentation run, but if something depends on target features +to be enabled, or documentation tests need to use some additional options, this flag allows you to +affect that. + +The arguments to this flag are the same as those for the `-C` flag on rustc. Run `rustc -C help` to +get the full list. + ## `--passes`: add more rustdoc passes Using this flag looks like this: diff --git a/src/doc/rustdoc/src/documentation-tests.md b/src/doc/rustdoc/src/documentation-tests.md index e5a603a3709f..e4af122d0cb9 100644 --- a/src/doc/rustdoc/src/documentation-tests.md +++ b/src/doc/rustdoc/src/documentation-tests.md @@ -19,18 +19,38 @@ running `rustdoc --test foo.rs` will extract this example, and then run it as a Please note that by default, if no language is set for the block code, `rustdoc` assumes it is `Rust` code. So the following: +``````markdown ```rust let x = 5; ``` +`````` is strictly equivalent to: +``````markdown ``` let x = 5; ``` +`````` There's some subtlety though! Read on for more details. +## Passing or failing a doctest + +Like regular unit tests, regular doctests are considered to "pass" +if they compile and run without panicking. 
+So if you want to demonstrate that some computation gives a certain result, +the `assert!` family of macros works the same as other Rust code: + +```rust +let foo = "foo"; + +assert_eq!(foo, "foo"); +``` + +This way, if the computation ever returns something different, +the code panics and the doctest fails. + ## Pre-processing examples In the example above, you'll note something strange: there's no `main` @@ -59,8 +79,9 @@ from your example, but are important to make the tests work. Consider an example block that looks like this: ```text -/// Some documentation. -# fn foo() {} +/// /// Some documentation. +/// # fn foo() {} // this function will be hidden +/// println!("Hello, World!"); ``` It will render like this: @@ -68,6 +89,7 @@ It will render like this: ```rust /// Some documentation. # fn foo() {} +println!("Hello, World!"); ``` Yes, that's right: you can add lines that start with `# `, and they will @@ -118,67 +140,103 @@ To keep each code block testable, we want the whole program in each block, but we don't want the reader to see every line every time. Here's what we put in our source code: -```text - First, we set `x` to five: +``````markdown +First, we set `x` to five: - ``` - let x = 5; - # let y = 6; - # println!("{}", x + y); - ``` - - Next, we set `y` to six: - - ``` - # let x = 5; - let y = 6; - # println!("{}", x + y); - ``` - - Finally, we print the sum of `x` and `y`: - - ``` - # let x = 5; - # let y = 6; - println!("{}", x + y); - ``` ``` +let x = 5; +# let y = 6; +# println!("{}", x + y); +``` + +Next, we set `y` to six: + +``` +# let x = 5; +let y = 6; +# println!("{}", x + y); +``` + +Finally, we print the sum of `x` and `y`: + +``` +# let x = 5; +# let y = 6; +println!("{}", x + y); +``` +`````` By repeating all parts of the example, you can ensure that your example still compiles, while only showing the parts that are relevant to that part of your explanation. 
-Another case where the use of `#` is handy is when you want to ignore -error handling. Lets say you want the following, +The `#`-hiding of lines can be prevented by using two consecutive hashes +`##`. This only needs to be done with with the first `#` which would've +otherwise caused hiding. If we have a string literal like the following, +which has a line that starts with a `#`: + +```rust +let s = "foo +## bar # baz"; +``` + +We can document it by escaping the initial `#`: + +```text +/// let s = "foo +/// ## bar # baz"; +``` + + +## Using `?` in doc tests + +When writing an example, it is rarely useful to include a complete error +handling, as it would add significant amounts of boilerplate code. Instead, you +may want the following: ```ignore +/// ``` /// use std::io; /// let mut input = String::new(); /// io::stdin().read_line(&mut input)?; +/// ``` ``` -The problem is that `?` returns a `Result` and test functions -don't return anything so this will give a mismatched types error. +The problem is that `?` returns a `Result` and test functions don't +return anything, so this will give a mismatched types error. + +You can get around this limitation by manually adding a `main` that returns +`Result`, because `Result` implements the `Termination` trait: ```ignore /// A doc test using ? /// /// ``` /// use std::io; -/// # fn foo() -> io::Result<()> { +/// +/// fn main() -> io::Result<()> { +/// let mut input = String::new(); +/// io::stdin().read_line(&mut input)?; +/// Ok(()) +/// } +/// ``` +``` + +Together with the `# ` from the section above, you arrive at a solution that +appears to the reader as the initial idea but works with doc tests: + +```ignore +/// ``` +/// use std::io; +/// # fn main() -> io::Result<()> { /// let mut input = String::new(); /// io::stdin().read_line(&mut input)?; /// # Ok(()) /// # } /// ``` -# fn foo() {} ``` -You can get around this by wrapping the code in a function. 
This catches -and swallows the `Result` when running tests on the docs. This -pattern appears regularly in the standard library. - -### Documenting macros +## Documenting macros Here’s an example of documenting a macro: @@ -248,10 +306,10 @@ not actually pass as a test. # fn foo() {} ``` -`compile_fail` tells `rustdoc` that the compilation should fail. If it -compiles, then the test will fail. However please note that code failing -with the current Rust release may work in a future release, as new features -are added. +The `no_run` attribute will compile your code, but not run it. This is +important for examples such as "Here's how to retrieve a web page," +which you would want to ensure compiles, but might be run in a test +environment that has no network access. ```text /// ```compile_fail @@ -260,7 +318,31 @@ are added. /// ``` ``` -The `no_run` attribute will compile your code, but not run it. This is -important for examples such as "Here's how to retrieve a web page," -which you would want to ensure compiles, but might be run in a test -environment that has no network access. +`compile_fail` tells `rustdoc` that the compilation should fail. If it +compiles, then the test will fail. However please note that code failing +with the current Rust release may work in a future release, as new features +are added. + +## Syntax reference + +The *exact* syntax for code blocks, including the edge cases, can be found +in the [Fenced Code Blocks](https://spec.commonmark.org/0.28/#fenced-code-blocks) +section of the CommonMark specification. + +Rustdoc also accepts *indented* code blocks as an alternative to fenced +code blocks: instead of surrounding your code with three backticks, you +can indent each line by four or more spaces. + +``````markdown + let foo = "foo"; + assert_eq!(foo, "foo"); +`````` + +These, too, are documented in the CommonMark specification, in the +[Indented Code Blocks](https://spec.commonmark.org/0.28/#indented-code-blocks) +section. 
+ +However, it's preferable to use fenced code blocks over indented code blocks. +Not only are fenced code blocks considered more idiomatic for Rust code, +but there is no way to use directives such as `ignore` or `should_panic` with +indented code blocks. diff --git a/src/doc/rustdoc/src/passes.md b/src/doc/rustdoc/src/passes.md index de7c10292680..615b3dca199f 100644 --- a/src/doc/rustdoc/src/passes.md +++ b/src/doc/rustdoc/src/passes.md @@ -5,8 +5,8 @@ Rustdoc has a concept called "passes". These are transformations that In addition to the passes below, check out the docs for these flags: -* [`--passes`](command-line-arguments.html#--passes-add-more-rustdoc-passes) -* [`--no-defaults`](command-line-arguments.html#--no-defaults-dont-run-default-passes) +* [`--passes`](command-line-arguments.html#a--passes-add-more-rustdoc-passes) +* [`--no-defaults`](command-line-arguments.html#a--no-defaults-dont-run-default-passes) ## Default passes diff --git a/src/doc/rustdoc/src/unstable-features.md b/src/doc/rustdoc/src/unstable-features.md new file mode 100644 index 000000000000..7f110d6a3d22 --- /dev/null +++ b/src/doc/rustdoc/src/unstable-features.md @@ -0,0 +1,391 @@ +# Unstable features + +Rustdoc is under active developement, and like the Rust compiler, some features are only available +on the nightly releases. Some of these are new and need some more testing before they're able to get +released to the world at large, and some of them are tied to features in the Rust compiler that are +themselves unstable. Several features here require a matching `#![feature(...)]` attribute to +enable, and thus are more fully documented in the [Unstable Book]. Those sections will link over +there as necessary. + +[Unstable Book]: ../unstable-book/index.html + +## Nightly-gated functionality + +These features just require a nightly build to operate. 
Unlike the other features on this page, +these don't need to be "turned on" with a command-line flag or a `#![feature(...)]` attribute in +your crate. This can give them some subtle fallback modes when used on a stable release, so be +careful! + +### Error numbers for `compile-fail` doctests + +As detailed in [the chapter on documentation tests][doctest-attributes], you can add a +`compile_fail` attribute to a doctest to state that the test should fail to compile. However, on +nightly, you can optionally add an error number to state that a doctest should emit a specific error +number: + +[doctest-attributes]: documentation-tests.html#attributes + +``````markdown +```compile_fail,E0044 +extern { fn some_func(x: T); } +``` +`````` + +This is used by the error index to ensure that the samples that correspond to a given error number +properly emit that error code. However, these error codes aren't guaranteed to be the only thing +that a piece of code emits from version to version, so this is unlikely to be stabilized in the +future. + +Attempting to use these error numbers on stable will result in the code sample being interpreted as +plain text. + +### Linking to items by type + +As designed in [RFC 1946], Rustdoc can parse paths to items when you use them as links. To resolve +these type names, it uses the items currently in-scope, either by declaration or by `use` statement. +For modules, the "active scope" depends on whether the documentation is written outside the module +(as `///` comments on the `mod` statement) or inside the module (at `//!` comments inside the file +or block). For all other items, it uses the enclosing module's scope. + +[RFC 1946]: https://github.com/rust-lang/rfcs/pull/1946 + +For example, in the following code: + +```rust +/// Does the thing. +pub fn do_the_thing(_: SomeType) { + println!("Let's do the thing!"); +} + +/// Token you use to [`do_the_thing`]. 
+pub struct SomeType; +``` + +The link to ``[`do_the_thing`]`` in `SomeType`'s docs will properly link to the page for `fn +do_the_thing`. Note that here, rustdoc will insert the link target for you, but manually writing the +target out also works: + +```rust +pub mod some_module { + /// Token you use to do the thing. + pub struct SomeStruct; +} + +/// Does the thing. Requires one [`SomeStruct`] for the thing to work. +/// +/// [`SomeStruct`]: some_module::SomeStruct +pub fn do_the_thing(_: some_module::SomeStruct) { + println!("Let's do the thing!"); +} +``` + +For more details, check out [the RFC][RFC 1946], and see [the tracking issue][43466] for more +information about what parts of the feature are available. + +[43466]: https://github.com/rust-lang/rust/issues/43466 + +## Extensions to the `#[doc]` attribute + +These features operate by extending the `#[doc]` attribute, and thus can be caught by the compiler +and enabled with a `#![feature(...)]` attribute in your crate. + +### Documenting platform-/feature-specific information + +Because of the way Rustdoc documents a crate, the documentation it creates is specific to the target +rustc compiles for. Anything that's specific to any other target is dropped via `#[cfg]` attribute +processing early in the compilation process. However, Rustdoc has a trick up its sleeve to handle +platform-specific code if it *does* receive it. + +Because Rustdoc doesn't need to fully compile a crate to binary, it replaces function bodies with +`loop {}` to prevent having to process more than necessary. This means that any code within a +function that requires platform-specific pieces is ignored. Combined with a special attribute, +`#[doc(cfg(...))]`, you can tell Rustdoc exactly which platform something is supposed to run on, +ensuring that doctests are only run on the appropriate platforms. 
+ +The `#[doc(cfg(...))]` attribute has another effect: When Rustdoc renders documentation for that +item, it will be accompanied by a banner explaining that the item is only available on certain +platforms. + +As mentioned earlier, getting the items to Rustdoc requires some extra preparation. The standard +library adds a `--cfg dox` flag to every Rustdoc command, but the same thing can be accomplished by +adding a feature to your Cargo.toml and adding `--feature dox` (or whatever you choose to name the +feature) to your `cargo doc` calls. + +Either way, once you create an environment for the documentation, you can start to augment your +`#[cfg]` attributes to allow both the target platform *and* the documentation configuration to leave +the item in. For example, `#[cfg(any(windows, feature = "dox"))]` will preserve the item either on +Windows or during the documentation process. Then, adding a new attribute `#[doc(cfg(windows))]` +will tell Rustdoc that the item is supposed to be used on Windows. For example: + +```rust +#![feature(doc_cfg)] + +/// Token struct that can only be used on Windows. +#[cfg(any(windows, feature = "dox"))] +#[doc(cfg(windows))] +pub struct WindowsToken; + +/// Token struct that can only be used on Unix. +#[cfg(any(unix, feature = "dox"))] +#[doc(cfg(unix))] +pub struct UnixToken; +``` + +In this sample, the tokens will only appear on their respective platforms, but they will both appear +in documentation. + +`#[doc(cfg(...))]` was introduced to be used by the standard library and currently requires the +`#![feature(doc_cfg)]` feature gate. For more information, see [its chapter in the Unstable +Book][unstable-doc-cfg] and [its tracking issue][issue-doc-cfg]. 
+ +[unstable-doc-cfg]: ../unstable-book/language-features/doc-cfg.html +[issue-doc-cfg]: https://github.com/rust-lang/rust/issues/43781 + +### Adding your trait to the "Important Traits" dialog + +Rustdoc keeps a list of a few traits that are believed to be "fundamental" to a given type when +implemented on it. These traits are intended to be the primary interface for their types, and are +often the only thing available to be documented on their types. For this reason, Rustdoc will track +when a given type implements one of these traits and call special attention to it when a function +returns one of these types. This is the "Important Traits" dialog, visible as a circle-i button next +to the function, which, when clicked, shows the dialog. + +In the standard library, the traits that qualify for inclusion are `Iterator`, `io::Read`, and +`io::Write`. However, rather than being implemented as a hard-coded list, these traits have a +special marker attribute on them: `#[doc(spotlight)]`. This means that you could apply this +attribute to your own trait to include it in the "Important Traits" dialog in documentation. + +The `#[doc(spotlight)]` attribute currently requires the `#![feature(doc_spotlight)]` feature gate. +For more information, see [its chapter in the Unstable Book][unstable-spotlight] and [its tracking +issue][issue-spotlight]. + +[unstable-spotlight]: ../unstable-book/language-features/doc-spotlight.html +[issue-spotlight]: https://github.com/rust-lang/rust/issues/45040 + +### Exclude certain dependencies from documentation + +The standard library uses several dependencies which, in turn, use several types and traits from the +standard library. In addition, there are several compiler-internal crates that are not considered to +be part of the official standard library, and thus would be a distraction to include in +documentation. 
It's not enough to exclude their crate documentation, since information about trait +implementations appears on the pages for both the type and the trait, which can be in different +crates! + +To prevent internal types from being included in documentation, the standard library adds an +attribute to their `extern crate` declarations: `#[doc(masked)]`. This causes Rustdoc to "mask out" +types from these crates when building lists of trait implementations. + +The `#[doc(masked)]` attribute is intended to be used internally, and requires the +`#![feature(doc_masked)]` feature gate. For more information, see [its chapter in the Unstable +Book][unstable-masked] and [its tracking issue][issue-masked]. + +[unstable-masked]: ../unstable-book/language-features/doc-masked.html +[issue-masked]: https://github.com/rust-lang/rust/issues/44027 + +### Include external files as API documentation + +As designed in [RFC 1990], Rustdoc can read an external file to use as a type's documentation. This +is useful if certain documentation is so long that it would break the flow of reading the source. +Instead of writing it all inline, writing `#[doc(include = "sometype.md")]` (where `sometype.md` is +a file adjacent to the `lib.rs` for the crate) will ask Rustdoc to instead read that file and use it +as if it were written inline. + +[RFC 1990]: https://github.com/rust-lang/rfcs/pull/1990 + +`#[doc(include = "...")]` currently requires the `#![feature(external_doc)]` feature gate. For more +information, see [its chapter in the Unstable Book][unstable-include] and [its tracking +issue][issue-include]. + +[unstable-include]: ../unstable-book/language-features/external-doc.html +[issue-include]: https://github.com/rust-lang/rust/issues/44732 + +## Unstable command-line arguments + +These features are enabled by passing a command-line flag to Rustdoc, but the flags in question are +themselves marked as unstable. 
To use any of these options, pass `-Z unstable-options` as well as
+the flag in question to Rustdoc on the command-line. To do this from Cargo, you can either use the
+`RUSTDOCFLAGS` environment variable or the `cargo rustdoc` command.
+
+### `--markdown-before-content`: include rendered Markdown before the content
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs -Z unstable-options --markdown-before-content extra.md
+$ rustdoc README.md -Z unstable-options --markdown-before-content extra.md
+```
+
+Just like `--html-before-content`, this allows you to insert extra content inside the `<body>` tag
+but before the other content `rustdoc` would normally produce in the rendered documentation.
+However, instead of directly inserting the file verbatim, `rustdoc` will pass the files through a
+Markdown renderer before inserting the result into the file.
+
+### `--markdown-after-content`: include rendered Markdown after the content
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs -Z unstable-options --markdown-after-content extra.md
+$ rustdoc README.md -Z unstable-options --markdown-after-content extra.md
+```
+
+Just like `--html-after-content`, this allows you to insert extra content before the `</body>` tag
+but after the other content `rustdoc` would normally produce in the rendered documentation.
+However, instead of directly inserting the file verbatim, `rustdoc` will pass the files through a
+Markdown renderer before inserting the result into the file.
+
+### `--playground-url`: control the location of the playground
+
+Using this flag looks like this:
+
+```bash
+$ rustdoc src/lib.rs -Z unstable-options --playground-url https://play.rust-lang.org/
+```
+
+When rendering a crate's docs, this flag gives the base URL of the Rust Playground, to use for
+generating `Run` buttons. Unlike `--markdown-playground-url`, this argument works for standalone
+Markdown files *and* Rust crates.
This works the same way as adding `#![doc(html_playground_url = +"url")]` to your crate root, as mentioned in [the chapter about the `#[doc]` +attribute][doc-playground]. Please be aware that the official Rust Playground at +https://play.rust-lang.org does not have every crate available, so if your examples require your +crate, make sure the playground you provide has your crate available. + +[doc-playground]: the-doc-attribute.html#html_playground_url + +If both `--playground-url` and `--markdown-playground-url` are present when rendering a standalone +Markdown file, the URL given to `--markdown-playground-url` will take precedence. If both +`--playground-url` and `#![doc(html_playground_url = "url")]` are present when rendering crate docs, +the attribute will take precedence. + +### `--crate-version`: control the crate version + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --crate-version 1.3.37 +``` + +When `rustdoc` receives this flag, it will print an extra "Version (version)" into the sidebar of +the crate root's docs. You can use this flag to differentiate between different versions of your +library's documentation. + +### `--linker`: control the linker used for documentation tests + +Using this flag looks like this: + +```bash +$ rustdoc --test src/lib.rs -Z unstable-options --linker foo +$ rustdoc --test README.md -Z unstable-options --linker foo +``` + +When `rustdoc` runs your documentation tests, it needs to compile and link the tests as executables +before running them. This flag can be used to change the linker used on these executables. It's +equivalent to passing `-C linker=foo` to `rustc`. 
+ +### `--sort-modules-by-appearance`: control how items on module pages are sorted + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --sort-modules-by-appearance +``` + +Ordinarily, when `rustdoc` prints items in module pages, it will sort them alphabetically (taking +some consideration for their stability, and names that end in a number). Giving this flag to +`rustdoc` will disable this sorting and instead make it print the items in the order they appear in +the source. + +### `--themes`: provide additional themes + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --themes theme.css +``` + +Giving this flag to `rustdoc` will make it copy your theme into the generated crate docs and enable +it in the theme selector. Note that `rustdoc` will reject your theme file if it doesn't style +everything the "light" theme does. See `--theme-checker` below for details. + +### `--theme-checker`: verify theme CSS for validity + +Using this flag looks like this: + +```bash +$ rustdoc -Z unstable-options --theme-checker theme.css +``` + +Before including your theme in crate docs, `rustdoc` will compare all the CSS rules it contains +against the "light" theme included by default. Using this flag will allow you to see which rules are +missing if `rustdoc` rejects your theme. + +### `--resource-suffix`: modifying the name of CSS/JavaScript in crate docs + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --resource-suffix suf +``` + +When rendering docs, `rustdoc` creates several CSS and JavaScript files as part of the output. Since +all these files are linked from every page, changing where they are can be cumbersome if you need to +specially cache them. This flag will rename all these files in the output to include the suffix in +the filename. For example, `light.css` would become `light-suf.css` with the above command. 
+ +### `--display-warnings`: display warnings when documenting or running documentation tests + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --display-warnings +$ rustdoc --test src/lib.rs -Z unstable-options --display-warnings +``` + +The intent behind this flag is to allow the user to see warnings that occur within their library or +their documentation tests, which are usually suppressed. However, [due to a +bug][issue-display-warnings], this flag doesn't 100% work as intended. See the linked issue for +details. + +[issue-display-warnings]: https://github.com/rust-lang/rust/issues/41574 + +### `--edition`: control the edition of docs and doctests + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --edition 2018 +$ rustdoc --test src/lib.rs -Z unstable-options --edition 2018 +``` + +This flag allows rustdoc to treat your rust code as the given edition. It will compile doctests with +the given edition as well. As with `rustc`, the default edition that `rustdoc` will use is `2015` +(the first edition). + +### `-Z force-unstable-if-unmarked` + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z force-unstable-if-unmarked +``` + +This is an internal flag intended for the standard library and compiler that applies an +`#[unstable]` attribute to any dependent crate that doesn't have another stability attribute. This +allows `rustdoc` to be able to generate documentation for the compiler crates and the standard +library, as an equivalent command-line argument is provided to `rustc` when building those crates. + +### `doc_alias` feature + +This feature allows you to add alias(es) to an item when using the `rustdoc` search through the +`doc(alias)` attribute. 
Example: + +```rust,no_run +#![feature(doc_alias)] + +#[doc(alias = "x")] +#[doc(alias = "big")] +pub struct BigX; +``` + +Then, when looking for it through the `rustdoc` search, if you enter "x" or +"big", search will show the `BigX` struct first. diff --git a/src/doc/tutorial.md b/src/doc/tutorial.md index 87f3a0c765c5..320283f31b51 100644 --- a/src/doc/tutorial.md +++ b/src/doc/tutorial.md @@ -1,3 +1,3 @@ % The Rust Tutorial -This tutorial has been deprecated in favor of [the Book](book/index.html). Go check that out instead! +This tutorial has been deprecated in favor of [the Book](book/index.html), which is available free online and in dead tree form. Go check that out instead! diff --git a/src/doc/unstable-book/src/compiler-flags/remap-path-prefix.md b/src/doc/unstable-book/src/compiler-flags/remap-path-prefix.md deleted file mode 100644 index 8ca04d253259..000000000000 --- a/src/doc/unstable-book/src/compiler-flags/remap-path-prefix.md +++ /dev/null @@ -1,37 +0,0 @@ -# `remap-path-prefix` - -The tracking issue for this feature is: [#41555](https://github.com/rust-lang/rust/issues/41555) - ------------------------- - -The `-Z remap-path-prefix-from`, `-Z remap-path-prefix-to` commandline option -pair allows to replace prefixes of any file paths the compiler emits in various -places. This is useful for bringing debuginfo paths into a well-known form and -for achieving reproducible builds independent of the directory the compiler was -executed in. All paths emitted by the compiler are affected, including those in -error messages. 
- -In order to map all paths starting with `/home/foo/my-project/src` to -`/sources/my-project`, one would invoke the compiler as follows: - -```text -rustc -Zremap-path-prefix-from="/home/foo/my-project/src" -Zremap-path-prefix-to="/sources/my-project" -``` - -Debuginfo for code from the file `/home/foo/my-project/src/foo/mod.rs`, -for example, would then point debuggers to `/sources/my-project/foo/mod.rs` -instead of the original file. - -The options can be specified multiple times when multiple prefixes should be -mapped: - -```text -rustc -Zremap-path-prefix-from="/home/foo/my-project/src" \ - -Zremap-path-prefix-to="/sources/my-project" \ - -Zremap-path-prefix-from="/home/foo/my-project/build-dir" \ - -Zremap-path-prefix-to="/stable-build-dir" -``` - -When the options are given multiple times, the nth `-from` will be matched up -with the nth `-to` and they can appear anywhere on the commandline. Mappings -specified later on the line will take precedence over earlier ones. diff --git a/src/doc/unstable-book/src/language-features/advanced-slice-patterns.md b/src/doc/unstable-book/src/language-features/advanced-slice-patterns.md deleted file mode 100644 index e8256469b145..000000000000 --- a/src/doc/unstable-book/src/language-features/advanced-slice-patterns.md +++ /dev/null @@ -1,35 +0,0 @@ -# `advanced_slice_patterns` - -The tracking issue for this feature is: [#23121] - -[#23121]: https://github.com/rust-lang/rust/issues/23121 - -See also [`slice_patterns`](language-features/slice-patterns.html). - ------------------------- - - -The `advanced_slice_patterns` gate lets you use `..` to indicate any number of -elements inside a pattern matching a slice. This wildcard can only be used once -for a given array. If there's an identifier before the `..`, the result of the -slice will be bound to that name. 
For example: - -```rust -#![feature(advanced_slice_patterns, slice_patterns)] - -fn is_symmetric(list: &[u32]) -> bool { - match list { - &[] | &[_] => true, - &[x, ref inside.., y] if x == y => is_symmetric(inside), - _ => false - } -} - -fn main() { - let sym = &[0, 1, 4, 2, 4, 1, 0]; - assert!(is_symmetric(sym)); - - let not_sym = &[0, 1, 7, 2, 4, 1, 0]; - assert!(!is_symmetric(not_sym)); -} -``` diff --git a/src/doc/unstable-book/src/language-features/box-syntax.md b/src/doc/unstable-book/src/language-features/box-syntax.md index 50e59231a4df..414dc48e557d 100644 --- a/src/doc/unstable-book/src/language-features/box-syntax.md +++ b/src/doc/unstable-book/src/language-features/box-syntax.md @@ -1,8 +1,8 @@ # `box_syntax` -The tracking issue for this feature is: [#27779] +The tracking issue for this feature is: [#49733] -[#27779]: https://github.com/rust-lang/rust/issues/27779 +[#49733]: https://github.com/rust-lang/rust/issues/49733 See also [`box_patterns`](language-features/box-patterns.html) diff --git a/src/doc/unstable-book/src/language-features/catch-expr.md b/src/doc/unstable-book/src/language-features/catch-expr.md index fbd213dca569..247333d841ad 100644 --- a/src/doc/unstable-book/src/language-features/catch-expr.md +++ b/src/doc/unstable-book/src/language-features/catch-expr.md @@ -15,16 +15,16 @@ expression creates a new scope one can use the `?` operator in. use std::num::ParseIntError; let result: Result = do catch { - Ok("1".parse::()? + "1".parse::()? + "2".parse::()? - + "3".parse::()?) + + "3".parse::()? }; assert_eq!(result, Ok(6)); let result: Result = do catch { - Ok("1".parse::()? + "1".parse::()? + "foo".parse::()? - + "3".parse::()?) + + "3".parse::()? 
}; assert!(result.is_err()); ``` diff --git a/src/doc/unstable-book/src/language-features/conservative-impl-trait.md b/src/doc/unstable-book/src/language-features/conservative-impl-trait.md deleted file mode 100644 index 0be6a321103f..000000000000 --- a/src/doc/unstable-book/src/language-features/conservative-impl-trait.md +++ /dev/null @@ -1,66 +0,0 @@ -# `conservative_impl_trait` - -The tracking issue for this feature is: [#34511] - -[#34511]: https://github.com/rust-lang/rust/issues/34511 - ------------------------- - -The `conservative_impl_trait` feature allows a conservative form of abstract -return types. - -Abstract return types allow a function to hide a concrete return type behind a -trait interface similar to trait objects, while still generating the same -statically dispatched code as with concrete types. - -## Examples - -```rust -#![feature(conservative_impl_trait)] - -fn even_iter() -> impl Iterator { - (0..).map(|n| n * 2) -} - -fn main() { - let first_four_even_numbers = even_iter().take(4).collect::>(); - assert_eq!(first_four_even_numbers, vec![0, 2, 4, 6]); -} -``` - -## Background - -In today's Rust, you can write function signatures like: - -````rust,ignore -fn consume_iter_static>(iter: I) { } - -fn consume_iter_dynamic(iter: Box>) { } -```` - -In both cases, the function does not depend on the exact type of the argument. -The type held is "abstract", and is assumed only to satisfy a trait bound. - -* In the `_static` version using generics, each use of the function is - specialized to a concrete, statically-known type, giving static dispatch, - inline layout, and other performance wins. -* In the `_dynamic` version using trait objects, the concrete argument type is - only known at runtime using a vtable. 
- -On the other hand, while you can write: - -````rust,ignore -fn produce_iter_dynamic() -> Box> { } -```` - -...but you _cannot_ write something like: - -````rust,ignore -fn produce_iter_static() -> Iterator { } -```` - -That is, in today's Rust, abstract return types can only be written using trait -objects, which can be a significant performance penalty. This RFC proposes -"unboxed abstract types" as a way of achieving signatures like -`produce_iter_static`. Like generics, unboxed abstract types guarantee static -dispatch and inline data layout. diff --git a/src/doc/unstable-book/src/language-features/const-indexing.md b/src/doc/unstable-book/src/language-features/const-indexing.md deleted file mode 100644 index 42d46ce15f67..000000000000 --- a/src/doc/unstable-book/src/language-features/const-indexing.md +++ /dev/null @@ -1,19 +0,0 @@ -# `const_indexing` - -The tracking issue for this feature is: [#29947] - -[#29947]: https://github.com/rust-lang/rust/issues/29947 - ------------------------- - -The `const_indexing` feature allows the constant evaluation of index operations -on constant arrays and repeat expressions. - -## Examples - -```rust -#![feature(const_indexing)] - -const ARR: [usize; 5] = [1, 2, 3, 4, 5]; -const ARR2: [usize; ARR[1]] = [42, 99]; -``` \ No newline at end of file diff --git a/src/doc/unstable-book/src/language-features/crate-in-paths.md b/src/doc/unstable-book/src/language-features/crate-in-paths.md index f1656993e87a..9901dc1ebe30 100644 --- a/src/doc/unstable-book/src/language-features/crate-in-paths.md +++ b/src/doc/unstable-book/src/language-features/crate-in-paths.md @@ -9,10 +9,6 @@ The tracking issue for this feature is: [#44660] The `crate_in_paths` feature allows to explicitly refer to the crate root in absolute paths using keyword `crate`. -`crate` can be used *only* in absolute paths, i.e. either in `::crate::a::b::c` form or in `use` -items where the starting `::` is added implicitly. 
-Paths like `crate::a::b::c` are not accepted currently. - This feature is required in `feature(extern_absolute_paths)` mode to refer to any absolute path in the local crate (absolute paths refer to extern crates by default in that mode), but can be used without `feature(extern_absolute_paths)` as well. @@ -39,15 +35,14 @@ mod n use crate as root; pub fn check() { assert_eq!(f(), 1); - // `::` is required in non-import paths - assert_eq!(::crate::m::g(), 2); + assert_eq!(crate::m::g(), 2); assert_eq!(root::m::h(), 3); } } fn main() { assert_eq!(f(), 1); - assert_eq!(::crate::m::g(), 2); + assert_eq!(crate::m::g(), 2); assert_eq!(root::m::h(), 3); n::check(); } diff --git a/src/doc/unstable-book/src/language-features/doc-alias.md b/src/doc/unstable-book/src/language-features/doc-alias.md new file mode 100644 index 000000000000..647ac0cf663f --- /dev/null +++ b/src/doc/unstable-book/src/language-features/doc-alias.md @@ -0,0 +1,23 @@ +# `doc_alias` + +The tracking issue for this feature is: [#50146] + +[#50146]: https://github.com/rust-lang/rust/issues/50146 + +------------------------ + +You can add alias(es) to an item when using the `rustdoc` search through the +`doc(alias)` attribute. Example: + +```rust,no_run +#![feature(doc_alias)] + +#[doc(alias = "x")] +#[doc(alias = "big")] +pub struct BigX; +``` + +Then, when looking for it through the `rustdoc` search, if you enter "x" or +"big", search will show the `BigX` struct first. + +Note that this feature is currently hidden behind the `feature(doc_alias)` gate. 
diff --git a/src/doc/unstable-book/src/language-features/extern-absolute-paths.md b/src/doc/unstable-book/src/language-features/extern-absolute-paths.md index f45c5053e8db..6a22e7eba646 100644 --- a/src/doc/unstable-book/src/language-features/extern-absolute-paths.md +++ b/src/doc/unstable-book/src/language-features/extern-absolute-paths.md @@ -12,7 +12,7 @@ The `extern_absolute_paths` feature enables mode allowing to refer to names from `::my_crate::a::b` will resolve to path `a::b` in crate `my_crate`. `feature(crate_in_paths)` can be used in `feature(extern_absolute_paths)` mode for referring -to absolute paths in the local crate (`::crate::a::b`). +to absolute paths in the local crate (`crate::a::b`). `feature(extern_in_paths)` provides the same effect by using keyword `extern` to refer to paths from other crates (`extern::my_crate::a::b`). diff --git a/src/doc/unstable-book/src/language-features/fn-must-use.md b/src/doc/unstable-book/src/language-features/fn-must-use.md deleted file mode 100644 index 71b6cd663a08..000000000000 --- a/src/doc/unstable-book/src/language-features/fn-must-use.md +++ /dev/null @@ -1,30 +0,0 @@ -# `fn_must_use` - -The tracking issue for this feature is [#43302]. - -[#43302]: https://github.com/rust-lang/rust/issues/43302 - ------------------------- - -The `fn_must_use` feature allows functions and methods to be annotated with -`#[must_use]`, indicating that the `unused_must_use` lint should require their -return values to be used (similarly to how types annotated with `must_use`, -most notably `Result`, are linted if not used). 
- -## Examples - -```rust -#![feature(fn_must_use)] - -#[must_use] -fn double(x: i32) -> i32 { - 2 * x -} - -fn main() { - double(4); // warning: unused return value of `double` which must be used - - let _ = double(4); // (no warning) -} - -``` diff --git a/src/doc/unstable-book/src/language-features/generators.md b/src/doc/unstable-book/src/language-features/generators.md index 7a559a7bec86..8e888de90a95 100644 --- a/src/doc/unstable-book/src/language-features/generators.md +++ b/src/doc/unstable-book/src/language-features/generators.md @@ -36,11 +36,11 @@ fn main() { return "foo" }; - match generator.resume() { + match unsafe { generator.resume() } { GeneratorState::Yielded(1) => {} _ => panic!("unexpected value from resume"), } - match generator.resume() { + match unsafe { generator.resume() } { GeneratorState::Complete("foo") => {} _ => panic!("unexpected value from resume"), } @@ -69,9 +69,9 @@ fn main() { }; println!("1"); - generator.resume(); + unsafe { generator.resume() }; println!("3"); - generator.resume(); + unsafe { generator.resume() }; println!("5"); } ``` @@ -92,7 +92,7 @@ The `Generator` trait in `std::ops` currently looks like: pub trait Generator { type Yield; type Return; - fn resume(&mut self) -> GeneratorState; + unsafe fn resume(&mut self) -> GeneratorState; } ``` @@ -139,11 +139,11 @@ closure-like semantics. Namely: types and such. * Traits like `Send` and `Sync` are automatically implemented for a `Generator` - depending on the captured variables of the environment. Unlike closures though + depending on the captured variables of the environment. Unlike closures, generators also depend on variables live across suspension points. This means that although the ambient environment may be `Send` or `Sync`, the generator itself may not be due to internal variables live across `yield` points being - not-`Send` or not-`Sync`. Note, though, that generators, like closures, do + not-`Send` or not-`Sync`. 
Note that generators, like closures, do not implement traits like `Copy` or `Clone` automatically. * Whenever a generator is dropped it will drop all captured environment @@ -155,7 +155,7 @@ lifted at a future date, the design is ongoing! ### Generators as state machines -In the compiler generators are currently compiled as state machines. Each +In the compiler, generators are currently compiled as state machines. Each `yield` expression will correspond to a different state that stores all live variables over that suspension point. Resumption of a generator will dispatch on the current state and then execute internally until a `yield` is reached, at @@ -175,8 +175,8 @@ fn main() { return ret }; - generator.resume(); - generator.resume(); + unsafe { generator.resume() }; + unsafe { generator.resume() }; } ``` @@ -200,7 +200,7 @@ fn main() { type Yield = i32; type Return = &'static str; - fn resume(&mut self) -> GeneratorState { + unsafe fn resume(&mut self) -> GeneratorState { use std::mem; match mem::replace(self, __Generator::Done) { __Generator::Start(s) => { @@ -223,8 +223,8 @@ fn main() { __Generator::Start(ret) }; - generator.resume(); - generator.resume(); + unsafe { generator.resume() }; + unsafe { generator.resume() }; } ``` diff --git a/src/doc/unstable-book/src/language-features/global-allocator.md b/src/doc/unstable-book/src/language-features/global-allocator.md deleted file mode 100644 index b3e6925b666b..000000000000 --- a/src/doc/unstable-book/src/language-features/global-allocator.md +++ /dev/null @@ -1,71 +0,0 @@ -# `global_allocator` - -The tracking issue for this feature is: [#27389] - -[#27389]: https://github.com/rust-lang/rust/issues/27389 - ------------------------- - -Rust programs may need to change the allocator that they're running with from -time to time. This use case is distinct from an allocator-per-collection (e.g. a -`Vec` with a custom allocator) and instead is more related to changing the -global default allocator, e.g. 
what `Vec` uses by default. - -Currently Rust programs don't have a specified global allocator. The compiler -may link to a version of [jemalloc] on some platforms, but this is not -guaranteed. Libraries, however, like cdylibs and staticlibs are guaranteed -to use the "system allocator" which means something like `malloc` on Unixes and -`HeapAlloc` on Windows. - -[jemalloc]: https://github.com/jemalloc/jemalloc - -The `#[global_allocator]` attribute, however, allows configuring this choice. -You can use this to implement a completely custom global allocator to route all -default allocation requests to a custom object. Defined in [RFC 1974] usage -looks like: - -[RFC 1974]: https://github.com/rust-lang/rfcs/pull/1974 - -```rust -#![feature(global_allocator, allocator_api, heap_api)] - -use std::heap::{Alloc, System, Layout, AllocErr}; - -struct MyAllocator; - -unsafe impl<'a> Alloc for &'a MyAllocator { - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - System.alloc(layout) - } - - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - System.dealloc(ptr, layout) - } -} - -#[global_allocator] -static GLOBAL: MyAllocator = MyAllocator; - -fn main() { - // This `Vec` will allocate memory through `GLOBAL` above - let mut v = Vec::new(); - v.push(1); -} -``` - -And that's it! The `#[global_allocator]` attribute is applied to a `static` -which implements the `Alloc` trait in the `std::heap` module. Note, though, -that the implementation is defined for `&MyAllocator`, not just `MyAllocator`. -You may wish, however, to also provide `Alloc for MyAllocator` for other use -cases. - -A crate can only have one instance of `#[global_allocator]` and this instance -may be loaded through a dependency. For example `#[global_allocator]` above -could have been placed in one of the dependencies loaded through `extern crate`. - -Note that `Alloc` itself is an `unsafe` trait, with much documentation on the -trait itself about usage and for implementors. 
Extra care should be taken when -implementing a global allocator as well as the allocator may be called from many -portions of the standard library, such as the panicking routine. As a result it -is highly recommended to not panic during allocation and work in as many -situations with as few dependencies as possible as well. diff --git a/src/doc/unstable-book/src/language-features/i128-type.md b/src/doc/unstable-book/src/language-features/i128-type.md deleted file mode 100644 index a850b7644c3a..000000000000 --- a/src/doc/unstable-book/src/language-features/i128-type.md +++ /dev/null @@ -1,25 +0,0 @@ -# `i128_type` - -The tracking issue for this feature is: [#35118] - -[#35118]: https://github.com/rust-lang/rust/issues/35118 - ------------------------- - -The `i128_type` feature adds support for 128 bit signed and unsigned integer -types. - -```rust -#![feature(i128_type)] - -fn main() { - assert_eq!(1u128 + 1u128, 2u128); - assert_eq!(u128::min_value(), 0); - assert_eq!(u128::max_value(), 340282366920938463463374607431768211455); - - assert_eq!(1i128 - 2i128, -1i128); - assert_eq!(i128::min_value(), -170141183460469231731687303715884105728); - assert_eq!(i128::max_value(), 170141183460469231731687303715884105727); -} -``` - diff --git a/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md b/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md deleted file mode 100644 index 56f58803150c..000000000000 --- a/src/doc/unstable-book/src/language-features/inclusive-range-syntax.md +++ /dev/null @@ -1,20 +0,0 @@ -# `inclusive_range_syntax` - -The tracking issue for this feature is: [#28237] - -[#28237]: https://github.com/rust-lang/rust/issues/28237 - ------------------------- - -To get a range that goes from 0 to 10 and includes the value 10, you -can write `0..=10`: - -```rust -#![feature(inclusive_range_syntax)] - -fn main() { - for i in 0..=10 { - println!("{}", i); - } -} -``` diff --git 
a/src/doc/unstable-book/src/language-features/infer-outlives-requirements.md b/src/doc/unstable-book/src/language-features/infer-outlives-requirements.md new file mode 100644 index 000000000000..73c7eafdb98d --- /dev/null +++ b/src/doc/unstable-book/src/language-features/infer-outlives-requirements.md @@ -0,0 +1,68 @@ +# `infer_outlives_requirements`
+
+The tracking issue for this feature is: [#44493]
+
+[#44493]: https://github.com/rust-lang/rust/issues/44493
+
+------------------------
+The `infer_outlives_requirements` feature indicates that certain
+outlives requirements can be inferred by the compiler rather than
+stating them explicitly.
+
+For example, currently generic struct definitions that contain
+references require where-clauses of the form T: 'a. By using
+this feature the outlives predicates will be inferred, although
+they may still be written explicitly.
+
+```rust,ignore (pseudo-Rust)
+struct Foo<'a, T>
+    where T: 'a // <-- currently required
+{
+    bar: &'a T,
+}
+```
+
+
+## Examples:
+
+
+```rust,ignore (pseudo-Rust)
+#![feature(infer_outlives_requirements)]
+
+// Implicitly infer T: 'a
+struct Foo<'a, T> {
+    bar: &'a T,
+}
+```
+
+```rust,ignore (pseudo-Rust)
+#![feature(infer_outlives_requirements)]
+
+// Implicitly infer `U: 'b`
+struct Foo<'b, U> {
+    bar: Bar<'b, U>
+}
+
+struct Bar<'a, T> where T: 'a {
+    x: &'a (),
+    y: T,
+}
+```
+
+```rust,ignore (pseudo-Rust)
+#![feature(infer_outlives_requirements)]
+
+// Implicitly infer `'b: 'a`
+struct Foo<'a, 'b, T> {
+    x: &'a &'b T
+}
+```
+
+```rust,ignore (pseudo-Rust)
+#![feature(infer_outlives_requirements)]
+
+// Implicitly infer `<T as Iterator>::Item: 'a`
+struct Foo<'a, T: Iterator> {
+    bar: &'a T::Item
+}
+``` diff --git a/src/doc/unstable-book/src/language-features/infer-static-outlives-requirements.md b/src/doc/unstable-book/src/language-features/infer-static-outlives-requirements.md new file mode 100644 index 000000000000..f50472fb41e3 --- /dev/null +++
b/src/doc/unstable-book/src/language-features/infer-static-outlives-requirements.md @@ -0,0 +1,45 @@ +# `infer_static_outlives_requirements` + +The tracking issue for this feature is: [#44493] + +[#44493]: https://github.com/rust-lang/rust/issues/44493 + +------------------------ +The `infer_static_outlives_requirements` feature indicates that certain +`'static` outlives requirements can be inferred by the compiler rather than +stating them explicitly. + +Note: It is an accompanying feature to `infer_outlives_requirements`, +which must be enabled to infer outlives requirements. + +For example, currently generic struct definitions that contain +references, require where-clauses of the form T: 'static. By using +this feature the outlives predicates will be inferred, although +they may still be written explicitly. + +```rust,ignore (pseudo-Rust) +struct Foo where U: 'static { // <-- currently required + bar: Bar +} +struct Bar { + x: T, +} +``` + + +## Examples: + +```rust,ignore (pseudo-Rust) +#![feature(infer_outlives_requirements)] +#![feature(infer_static_outlives_requirements)] + +#[rustc_outlives] +// Implicitly infer U: 'static +struct Foo { + bar: Bar +} +struct Bar { + x: T, +} +``` + diff --git a/src/doc/unstable-book/src/language-features/irrefutable-let-patterns.md b/src/doc/unstable-book/src/language-features/irrefutable-let-patterns.md new file mode 100644 index 000000000000..46b843778e81 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/irrefutable-let-patterns.md @@ -0,0 +1,28 @@ +# `irrefutable_let_patterns` + +The tracking issue for this feature is: [#44495] + +[#44495]: https://github.com/rust-lang/rust/issues/44495 + +------------------------ + +This feature changes the way that "irrefutable patterns" are handled +in the `if let` and `while let` forms. An *irrefutable pattern* is one +that cannot fail to match -- for example, the `_` pattern matches any +value, and hence it is "irrefutable".
Without this feature, using an +irrefutable pattern in an `if let` gives a hard error (since often +this indicates programmer error). But when the feature is enabled, the +error becomes a lint (since in some cases irrefutable patterns are +expected). This means you can use `#[allow]` to silence the lint: + +```rust +#![feature(irrefutable_let_patterns)] + +#[allow(irrefutable_let_patterns)] +fn main() { + // These two examples used to be errors, but now they + // trigger a lint (that is allowed): + if let _ = 5 {} + while let _ = 5 { break; } +} +``` diff --git a/src/doc/unstable-book/src/language-features/lang-items.md b/src/doc/unstable-book/src/language-features/lang-items.md index 0137a052a62d..bac619fd4a30 100644 --- a/src/doc/unstable-book/src/language-features/lang-items.md +++ b/src/doc/unstable-book/src/language-features/lang-items.md @@ -19,6 +19,7 @@ sugar for dynamic allocations via `malloc` and `free`: #![feature(lang_items, box_syntax, start, libc, core_intrinsics)] #![no_std] use core::intrinsics; +use core::panic::PanicInfo; extern crate libc; @@ -37,28 +38,23 @@ unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { p } -#[lang = "exchange_free"] -unsafe fn deallocate(ptr: *mut u8, _size: usize, _align: usize) { +#[lang = "box_free"] +unsafe fn box_free(ptr: *mut T) { libc::free(ptr as *mut libc::c_void) } -#[lang = "box_free"] -unsafe fn box_free(ptr: *mut T) { - deallocate(ptr as *mut u8, ::core::mem::size_of_val(&*ptr), ::core::mem::align_of_val(&*ptr)); -} - #[start] -fn main(argc: isize, argv: *const *const u8) -> isize { - let x = box 1; +fn main(_argc: isize, _argv: *const *const u8) -> isize { + let _x = box 1; 0 } #[lang = "eh_personality"] extern fn rust_eh_personality() {} -#[lang = "panic_fmt"] extern fn rust_begin_panic() -> ! 
{ unsafe { intrinsics::abort() } } -# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {} -# #[no_mangle] pub extern fn rust_eh_register_frames () {} -# #[no_mangle] pub extern fn rust_eh_unregister_frames () {} +#[lang = "panic_impl"] extern fn rust_begin_panic(info: &PanicInfo) -> ! { unsafe { intrinsics::abort() } } +#[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {} +#[no_mangle] pub extern fn rust_eh_register_frames () {} +#[no_mangle] pub extern fn rust_eh_unregister_frames () {} ``` Note the use of `abort`: the `exchange_malloc` lang item is assumed to @@ -80,7 +76,7 @@ Other features provided by lang items include: Lang items are loaded lazily by the compiler; e.g. if one never uses `Box` then there is no need to define functions for `exchange_malloc` -and `exchange_free`. `rustc` will emit an error when an item is needed +and `box_free`. `rustc` will emit an error when an item is needed but not found in the current crate or any that it depends on. Most lang items are defined by `libcore`, but if you're trying to build @@ -115,6 +111,7 @@ in the same format as C: #![feature(start)] #![no_std] use core::intrinsics; +use core::panic::PanicInfo; // Pull in the system libc library for what crt0.o likely requires. extern crate libc; @@ -139,12 +136,9 @@ pub extern fn rust_eh_personality() { pub extern fn rust_eh_unwind_resume() { } -#[lang = "panic_fmt"] +#[lang = "panic_impl"] #[no_mangle] -pub extern fn rust_begin_panic(_msg: core::fmt::Arguments, - _file: &'static str, - _line: u32, - _column: u32) -> ! { +pub extern fn rust_begin_panic(info: &PanicInfo) -> ! { unsafe { intrinsics::abort() } } ``` @@ -160,6 +154,7 @@ compiler's name mangling too: #![no_std] #![no_main] use core::intrinsics; +use core::panic::PanicInfo; // Pull in the system libc library for what crt0.o likely requires. 
extern crate libc; @@ -184,12 +179,9 @@ pub extern fn rust_eh_personality() { pub extern fn rust_eh_unwind_resume() { } -#[lang = "panic_fmt"] +#[lang = "panic_impl"] #[no_mangle] -pub extern fn rust_begin_panic(_msg: core::fmt::Arguments, - _file: &'static str, - _line: u32, - _column: u32) -> ! { +pub extern fn rust_begin_panic(info: &PanicInfo) -> ! { unsafe { intrinsics::abort() } } ``` @@ -220,7 +212,7 @@ called. The language item's name is `eh_personality`. The second function, `rust_begin_panic`, is also used by the failure mechanisms of the compiler. When a panic happens, this controls the message that's displayed on -the screen. While the language item's name is `panic_fmt`, the symbol name is +the screen. While the language item's name is `panic_impl`, the symbol name is `rust_begin_panic`. A third function, `rust_eh_unwind_resume`, is also needed if the `custom_unwind_resume` @@ -248,7 +240,7 @@ the source code. - `usize`: `libcore/num/mod.rs` - `f32`: `libstd/f32.rs` - `f64`: `libstd/f64.rs` - - `char`: `libstd_unicode/char.rs` + - `char`: `libcore/char.rs` - `slice`: `liballoc/slice.rs` - `str`: `liballoc/str.rs` - `const_ptr`: `libcore/ptr.rs` @@ -264,8 +256,8 @@ the source code. - `msvc_try_filter`: `libpanic_unwind/seh.rs` (SEH) - `panic`: `libcore/panicking.rs` - `panic_bounds_check`: `libcore/panicking.rs` - - `panic_fmt`: `libcore/panicking.rs` - - `panic_fmt`: `libstd/panicking.rs` + - `panic_impl`: `libcore/panicking.rs` + - `panic_impl`: `libstd/panicking.rs` - Allocations - `owned_box`: `liballoc/boxed.rs` - `exchange_malloc`: `liballoc/heap.rs` @@ -318,4 +310,4 @@ the source code. 
- `phantom_data`: `libcore/marker.rs` - `freeze`: `libcore/marker.rs` - `debug_trait`: `libcore/fmt/mod.rs` - - `non_zero`: `libcore/nonzero.rs` \ No newline at end of file + - `non_zero`: `libcore/nonzero.rs` diff --git a/src/doc/unstable-book/src/language-features/macro-at-most-once-rep.md b/src/doc/unstable-book/src/language-features/macro-at-most-once-rep.md new file mode 100644 index 000000000000..251fc7209122 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/macro-at-most-once-rep.md @@ -0,0 +1,22 @@ +# `macro_at_most_once_rep` + +NOTE: This feature is only available in the 2018 Edition. + +The tracking issue for this feature is: #48075 + +With this feature gate enabled, one can use `?` as a Kleene operator meaning "0 +or 1 repetitions" in a macro definition. Previously only `+` and `*` were allowed. + +For example: + +```rust,ignore +#![feature(macro_at_most_once_rep)] + +macro_rules! foo { + (something $(,)?) // `?` indicates `,` is "optional"... + => {} +} +``` + +------------------------ + diff --git a/src/doc/unstable-book/src/language-features/macro-lifetime-matcher.md b/src/doc/unstable-book/src/language-features/macro-lifetime-matcher.md deleted file mode 100644 index 5b585d7f041d..000000000000 --- a/src/doc/unstable-book/src/language-features/macro-lifetime-matcher.md +++ /dev/null @@ -1,14 +0,0 @@ -# `macro_lifetime_matcher` - -The tracking issue for this feature is: [#46895] - -With this feature gate enabled, the [list of fragment specifiers][frags] gains one more entry: - -* `lifetime`: a lifetime. Examples: 'static, 'a. - -A `lifetime` variable may be followed by anything. 
- -[#46895]: https://github.com/rust-lang/rust/issues/46895 -[frags]: ../book/first-edition/macros.html#syntactic-requirements - ------------------------- diff --git a/src/doc/unstable-book/src/language-features/macro-literal-matcher.md b/src/doc/unstable-book/src/language-features/macro-literal-matcher.md new file mode 100644 index 000000000000..7e3638fd1cf4 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/macro-literal-matcher.md @@ -0,0 +1,17 @@ +# `macro_literal_matcher` + +The tracking issue for this feature is: [#35625] + +The RFC is: [rfc#1576]. + +With this feature gate enabled, the [list of fragment specifiers][frags] gains one more entry: + +* `literal`: a literal. Examples: 2, "string", 'c' + +A `literal` may be followed by anything, similarly to the `ident` specifier. + +[rfc#1576]: http://rust-lang.github.io/rfcs/1576-macros-literal-matcher.html +[#35625]: https://github.com/rust-lang/rust/issues/35625 +[frags]: ../book/first-edition/macros.html#syntactic-requirements + +------------------------ diff --git a/src/doc/unstable-book/src/language-features/match-beginning-vert.md b/src/doc/unstable-book/src/language-features/match-beginning-vert.md deleted file mode 100644 index f0a51af7fd1c..000000000000 --- a/src/doc/unstable-book/src/language-features/match-beginning-vert.md +++ /dev/null @@ -1,23 +0,0 @@ -# `match_beginning_vert` - -The tracking issue for this feature is [#44101]. 
- -With this feature enabled, you are allowed to add a '|' to the beginning of a -match arm: - -```rust -#![feature(match_beginning_vert)] - -enum Foo { A, B, C } - -fn main() { - let x = Foo::A; - match x { - | Foo::A - | Foo::B => println!("AB"), - | Foo::C => println!("C"), - } -} -``` - -[#44101]: https://github.com/rust-lang/rust/issues/44101 \ No newline at end of file diff --git a/src/doc/unstable-book/src/language-features/match-default-bindings.md b/src/doc/unstable-book/src/language-features/match-default-bindings.md deleted file mode 100644 index cc542931cbe1..000000000000 --- a/src/doc/unstable-book/src/language-features/match-default-bindings.md +++ /dev/null @@ -1,58 +0,0 @@ -# `match_default_bindings` - -The tracking issue for this feature is: [#42640] - -[#42640]: https://github.com/rust-lang/rust/issues/42640 - ------------------------- - -Match default bindings (also called "default binding modes in match") improves ergonomics for -pattern-matching on references by introducing automatic dereferencing (and a corresponding shift -in binding modes) for large classes of patterns that would otherwise not compile. 
- -For example, under match default bindings, - -```rust -#![feature(match_default_bindings)] - -fn main() { - let x: &Option<_> = &Some(0); - - match x { - Some(y) => { - println!("y={}", *y); - }, - None => {}, - } -} -``` - -compiles and is equivalent to either of the below: - -```rust -fn main() { - let x: &Option<_> = &Some(0); - - match *x { - Some(ref y) => { - println!("y={}", *y); - }, - None => {}, - } -} -``` - -or - -```rust -fn main() { - let x: &Option<_> = &Some(0); - - match x { - &Some(ref y) => { - println!("y={}", *y); - }, - &None => {}, - } -} -``` diff --git a/src/doc/unstable-book/src/language-features/on-unimplemented.md b/src/doc/unstable-book/src/language-features/on-unimplemented.md index 70c7c110b786..f787f629756f 100644 --- a/src/doc/unstable-book/src/language-features/on-unimplemented.md +++ b/src/doc/unstable-book/src/language-features/on-unimplemented.md @@ -8,7 +8,9 @@ The tracking issue for this feature is: [#29628] The `on_unimplemented` feature provides the `#[rustc_on_unimplemented]` attribute, which allows trait definitions to add specialized notes to error -messages when an implementation was expected but not found. +messages when an implementation was expected but not found. You can refer +to the trait's generic arguments by name and to the resolved type using +`Self`. For example: @@ -41,7 +43,98 @@ error[E0277]: the trait bound `&[{integer}]: MyIterator` is not satisfied | = help: the trait `MyIterator` is not implemented for `&[{integer}]` = note: required by `iterate_chars` - -error: aborting due to previous error ``` +`on_unimplemented` also supports advanced filtering for better targeting +of messages, as well as modifying specific parts of the error message. 
You +target the text of: + + - the main error message (`message`) + - the label (`label`) + - an extra note (`note`) + +For example, the following attribute + +```rust,compile_fail +#[rustc_on_unimplemented( + message="message", + label="label", + note="note" +)] +trait MyIterator { + fn next(&mut self) -> A; +} +``` + +Would generate the following output: + +```text +error[E0277]: message + --> :14:5 + | +14 | iterate_chars(&[1, 2, 3][..]); + | ^^^^^^^^^^^^^ label + | + = note: note + = help: the trait `MyIterator` is not implemented for `&[{integer}]` + = note: required by `iterate_chars` +``` + +To allow more targeted error messages, it is possible to filter the +application of these fields based on a variety of attributes when using +`on`: + + - `crate_local`: whether the code causing the trait bound to not be + fulfilled is part of the user's crate. This is used to avoid suggesting + code changes that would require modifying a dependency. + - Any of the generic arguments that can be substituted in the text can be + referred by name as well for filtering, like `Rhs="i32"`, except for + `Self`. + - `_Self`: to filter only on a particular calculated trait resolution, like + `Self="std::iter::Iterator"`. This is needed because `Self` is a + keyword which cannot appear in attributes. + - `direct`: user-specified rather than derived obligation. + - `from_method`: usable both as boolean (whether the flag is present, like + `crate_local`) or matching against a particular method. Currently used + for `try`. + - `from_desugaring`: usable both as boolean (whether the flag is present) + or matching against a particular desugaring. 
+ +For example, the `Iterator` trait can be annotated in the following way: + +```rust,compile_fail +#[rustc_on_unimplemented( + on( + _Self="&str", + note="call `.chars()` or `.as_bytes()` on `{Self}" + ), + message="`{Self}` is not an iterator", + label="`{Self}` is not an iterator", + note="maybe try calling `.iter()` or a similar method" +)] +pub trait Iterator {} +``` + +Which would produce the following outputs: + +```text +error[E0277]: `Foo` is not an iterator + --> src/main.rs:4:16 + | +4 | for foo in Foo {} + | ^^^ `Foo` is not an iterator + | + = note: maybe try calling `.iter()` or a similar method + = help: the trait `std::iter::Iterator` is not implemented for `Foo` + = note: required by `std::iter::IntoIterator::into_iter` + +error[E0277]: `&str` is not an iterator + --> src/main.rs:5:16 + | +5 | for foo in "" {} + | ^^ `&str` is not an iterator + | + = note: call `.chars()` or `.bytes() on `&str` + = help: the trait `std::iter::Iterator` is not implemented for `&str` + = note: required by `std::iter::IntoIterator::into_iter` +``` diff --git a/src/doc/unstable-book/src/language-features/plugin.md b/src/doc/unstable-book/src/language-features/plugin.md index 1cece930eeaa..19ece0950907 100644 --- a/src/doc/unstable-book/src/language-features/plugin.md +++ b/src/doc/unstable-book/src/language-features/plugin.md @@ -183,6 +183,8 @@ that warns about any item named `lintme`. 
```rust,ignore #![feature(plugin_registrar)] #![feature(box_syntax, rustc_private)] +#![feature(macro_vis_matcher)] +#![feature(macro_at_most_once_rep)] extern crate syntax; @@ -208,7 +210,7 @@ impl LintPass for Pass { impl EarlyLintPass for Pass { fn check_item(&mut self, cx: &EarlyContext, it: &ast::Item) { - if it.ident.name.as_str() == "lintme" { + if it.ident.as_str() == "lintme" { cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'"); } } diff --git a/src/doc/unstable-book/src/language-features/proc-macro.md b/src/doc/unstable-book/src/language-features/proc-macro.md deleted file mode 100644 index 1bd8c41629ee..000000000000 --- a/src/doc/unstable-book/src/language-features/proc-macro.md +++ /dev/null @@ -1,241 +0,0 @@ -# `proc_macro` - -The tracking issue for this feature is: [#38356] - -[#38356]: https://github.com/rust-lang/rust/issues/38356 - ------------------------- - -This feature flag guards the new procedural macro features as laid out by [RFC 1566], which alongside the now-stable -[custom derives], provide stabilizable alternatives to the compiler plugin API (which requires the use of -perma-unstable internal APIs) for programmatically modifying Rust code at compile-time. - -The two new procedural macro kinds are: - -* Function-like procedural macros which are invoked like regular declarative macros, and: - -* Attribute-like procedural macros which can be applied to any item which built-in attributes can -be applied to, and which can take arguments in their invocation as well. - -Additionally, this feature flag implicitly enables the [`use_extern_macros`](language-features/use-extern-macros.html) feature, -which allows macros to be imported like any other item with `use` statements, as compared to -applying `#[macro_use]` to an `extern crate` declaration. It is important to note that procedural macros may -**only** be imported in this manner, and will throw an error otherwise. 
- -You **must** declare the `proc_macro` feature in both the crate declaring these new procedural macro kinds as well as -in any crates that use them. - -### Common Concepts - -As with custom derives, procedural macros may only be declared in crates of the `proc-macro` type, and must be public -functions. No other public items may be declared in `proc-macro` crates, but private items are fine. - -To declare your crate as a `proc-macro` crate, simply add: - -```toml -[lib] -proc-macro = true -``` - -to your `Cargo.toml`. - -Unlike custom derives, however, the name of the function implementing the procedural macro is used directly as the -procedural macro's name, so choose carefully. - -Additionally, both new kinds of procedural macros return a `TokenStream` which *wholly* replaces the original -invocation and its input. - -#### Importing - -As referenced above, the new procedural macros are not meant to be imported via `#[macro_use]` and will throw an -error if they are. Instead, they are meant to be imported like any other item in Rust, with `use` statements: - -```rust,ignore -#![feature(proc_macro)] - -// Where `my_proc_macros` is some crate of type `proc_macro` -extern crate my_proc_macros; - -// And declares a `#[proc_macro] pub fn my_bang_macro()` at its root. -use my_proc_macros::my_bang_macro; - -fn main() { - println!("{}", my_bang_macro!()); -} -``` - -#### Error Reporting - -Any panics in a procedural macro implementation will be caught by the compiler and turned into an error message pointing -to the problematic invocation. Thus, it is important to make your panic messages as informative as possible: use -`Option::expect` instead of `Option::unwrap` and `Result::expect` instead of `Result::unwrap`, and inform the user of -the error condition as unambiguously as you can. - -#### `TokenStream` - -The `proc_macro::TokenStream` type is hardcoded into the signatures of procedural macro functions for both input and -output. 
It is a wrapper around the compiler's internal representation for a given chunk of Rust code. - -### Function-like Procedural Macros - -These are procedural macros that are invoked like regular declarative macros. They are declared as public functions in -crates of the `proc_macro` type and using the `#[proc_macro]` attribute. The name of the declared function becomes the -name of the macro as it is to be imported and used. The function must be of the kind `fn(TokenStream) -> TokenStream` -where the sole argument is the input to the macro and the return type is the macro's output. - -This kind of macro can expand to anything that is valid for the context it is invoked in, including expressions and -statements, as well as items. - -**Note**: invocations of this kind of macro require a wrapping `[]`, `{}` or `()` like regular macros, but these do not -appear in the input, only the tokens between them. The tokens between the braces do not need to be valid Rust syntax. - -my_macro_crate/src/lib.rs - -```rust,ignore -#![feature(proc_macro)] - -// This is always necessary to get the `TokenStream` typedef. -extern crate proc_macro; - -use proc_macro::TokenStream; - -#[proc_macro] -pub fn say_hello(_input: TokenStream) -> TokenStream { - // This macro will accept any input because it ignores it. - // To enforce correctness in macros which don't take input, - // you may want to add `assert!(_input.to_string().is_empty());`. - "println!(\"Hello, world!\")".parse().unwrap() -} -``` - -my_macro_user/Cargo.toml - -```toml -[dependencies] -my_macro_crate = { path = "" } -``` - -my_macro_user/src/lib.rs - -```rust,ignore -#![feature(proc_macro)] - -extern crate my_macro_crate; - -use my_macro_crate::say_hello; - -fn main() { - say_hello!(); -} -``` - -As expected, this prints `Hello, world!`. - -### Attribute-like Procedural Macros - -These are arguably the most powerful flavor of procedural macro as they can be applied anywhere attributes are allowed. 
- -They are declared as public functions in crates of the `proc-macro` type, using the `#[proc_macro_attribute]` attribute. -The name of the function becomes the name of the attribute as it is to be imported and used. The function must be of the -kind `fn(TokenStream, TokenStream) -> TokenStream` where: - -The first argument represents any metadata for the attribute (see [the reference chapter on attributes][refr-attr]). -Only the metadata itself will appear in this argument, for example: - - * `#[my_macro]` will get an empty string. - * `#[my_macro = "string"]` will get `= "string"`. - * `#[my_macro(ident)]` will get `(ident)`. - * etc. - -The second argument is the item that the attribute is applied to. It can be a function, a type definition, -an impl block, an `extern` block, or a module—attribute invocations can take the inner form (`#![my_attr]`) -or outer form (`#[my_attr]`). - -The return type is the output of the macro which *wholly* replaces the item it was applied to. Thus, if your intention -is to merely modify an item, it *must* be copied to the output. The output must be an item; expressions, statements -and bare blocks are not allowed. - -There is no restriction on how many items an attribute-like procedural macro can emit as long as they are valid in -the given context. - -my_macro_crate/src/lib.rs - -```rust,ignore -#![feature(proc_macro)] - -extern crate proc_macro; - -use proc_macro::TokenStream; - -/// Adds a `/// ### Panics` docstring to the end of the input's documentation -/// -/// Does not assert that its receiver is a function or method. 
-#[proc_macro_attribute] -pub fn panics_note(args: TokenStream, input: TokenStream) -> TokenStream { - let args = args.to_string(); - let mut input = input.to_string(); - - assert!(args.starts_with("= \""), "`#[panics_note]` requires an argument of the form \ - `#[panics_note = \"panic note here\"]`"); - - // Get just the bare note string - let panics_note = args.trim_matches(&['=', ' ', '"'][..]); - - // The input will include all docstrings regardless of where the attribute is placed, - // so we need to find the last index before the start of the item - let insert_idx = idx_after_last_docstring(&input); - - // And insert our `### Panics` note there so it always appears at the end of an item's docs - input.insert_str(insert_idx, &format!("/// # Panics \n/// {}\n", panics_note)); - - input.parse().unwrap() -} - -// `proc-macro` crates can contain any kind of private item still -fn idx_after_last_docstring(input: &str) -> usize { - // Skip docstring lines to find the start of the item proper - input.lines().skip_while(|line| line.trim_left().starts_with("///")).next() - // Find the index of the first non-docstring line in the input - // Note: assumes this exact line is unique in the input - .and_then(|line_after| input.find(line_after)) - // No docstrings in the input - .unwrap_or(0) -} -``` - -my_macro_user/Cargo.toml - -```toml -[dependencies] -my_macro_crate = { path = "" } -``` - -my_macro_user/src/lib.rs - -```rust,ignore -#![feature(proc_macro)] - -extern crate my_macro_crate; - -use my_macro_crate::panics_note; - -/// Do the `foo` thing. -#[panics_note = "Always."] -pub fn foo() { - panic!() -} -``` - -Then the rendered documentation for `pub fn foo` will look like this: - -> `pub fn foo()` -> -> ---- -> Do the `foo` thing. -> # Panics -> Always. 
- -[RFC 1566]: https://github.com/rust-lang/rfcs/blob/master/text/1566-proc-macros.md -[custom derives]: https://doc.rust-lang.org/book/procedural-macros.html -[rust-lang/rust#41430]: https://github.com/rust-lang/rust/issues/41430 -[refr-attr]: https://doc.rust-lang.org/reference/attributes.html diff --git a/src/doc/unstable-book/src/language-features/repr-packed.md b/src/doc/unstable-book/src/language-features/repr-packed.md new file mode 100644 index 000000000000..2dd763d04b0a --- /dev/null +++ b/src/doc/unstable-book/src/language-features/repr-packed.md @@ -0,0 +1,8 @@ +# `repr_packed` + +The tracking issue for this feature is [#33158] + +[#33158]: https://github.com/rust-lang/rust/issues/33158 + +------------------------ + diff --git a/src/doc/unstable-book/src/language-features/repr128.md b/src/doc/unstable-book/src/language-features/repr128.md new file mode 100644 index 000000000000..0858988952c1 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/repr128.md @@ -0,0 +1,18 @@ +# `repr128` + +The tracking issue for this feature is: [#35118] + +[#35118]: https://github.com/rust-lang/rust/issues/35118 + +------------------------ + +The `repr128` feature adds support for `#[repr(u128)]` on `enum`s. + +```rust +#![feature(repr128)] + +#[repr(u128)] +enum Foo { + Bar(u64), +} +``` diff --git a/src/doc/unstable-book/src/language-features/self-in-typedefs.md b/src/doc/unstable-book/src/language-features/self-in-typedefs.md new file mode 100644 index 000000000000..2416e85c17d1 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/self-in-typedefs.md @@ -0,0 +1,24 @@ +# `self_in_typedefs` + +The tracking issue for this feature is: [#49303] + +[#49303]: https://github.com/rust-lang/rust/issues/49303 + +------------------------ + +The `self_in_typedefs` feature gate lets you use the special `Self` identifier +in `struct`, `enum`, and `union` type definitions. 
+ +A simple example is: + +```rust +#![feature(self_in_typedefs)] + +enum List +where + Self: PartialOrd // can write `Self` instead of `List` +{ + Nil, + Cons(T, Box) // likewise here +} +``` diff --git a/src/doc/unstable-book/src/language-features/slice-patterns.md b/src/doc/unstable-book/src/language-features/slice-patterns.md index 69857297582d..133174268ef9 100644 --- a/src/doc/unstable-book/src/language-features/slice-patterns.md +++ b/src/doc/unstable-book/src/language-features/slice-patterns.md @@ -4,25 +4,29 @@ The tracking issue for this feature is: [#23121] [#23121]: https://github.com/rust-lang/rust/issues/23121 -See also -[`advanced_slice_patterns`](language-features/advanced-slice-patterns.html). - ------------------------ - -If you want to match against a slice or array, you can use `&` with the -`slice_patterns` feature: +The `slice_patterns` feature gate lets you use `..` to indicate any number of +elements inside a pattern matching a slice. This wildcard can only be used once +for a given array. If there's an pattern before the `..`, the subslice will be +matched against that pattern. For example: ```rust #![feature(slice_patterns)] -fn main() { - let v = vec!["match_this", "1"]; - - match &v[..] { - &["match_this", second] => println!("The second element is {}", second), - _ => {}, +fn is_symmetric(list: &[u32]) -> bool { + match list { + &[] | &[_] => true, + &[x, ref inside.., y] if x == y => is_symmetric(inside), + &[..] 
=> false, } } -``` +fn main() { + let sym = &[0, 1, 4, 2, 4, 1, 0]; + assert!(is_symmetric(sym)); + + let not_sym = &[0, 1, 7, 2, 4, 1, 0]; + assert!(!is_symmetric(not_sym)); +} +``` diff --git a/src/doc/unstable-book/src/language-features/tool-attributes.md b/src/doc/unstable-book/src/language-features/tool-attributes.md new file mode 100644 index 000000000000..15fc84a3e2a3 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/tool-attributes.md @@ -0,0 +1,26 @@ +# `tool_attributes` + +The tracking issue for this feature is: [#44690] + +[#44690]: https://github.com/rust-lang/rust/issues/44690 + +------------------------ + +Tool attributes let you use scoped attributes to control the behavior +of certain tools. + +Currently tool names which can appear in scoped attributes are restricted to +`clippy` and `rustfmt`. + +## An example + +```rust +#![feature(tool_attributes)] + +#[rustfmt::skip] +fn foo() { println!("hello, world"); } + +fn main() { + foo(); +} +``` diff --git a/src/doc/unstable-book/src/language-features/tool-lints.md b/src/doc/unstable-book/src/language-features/tool-lints.md new file mode 100644 index 000000000000..5c0d33b5ab0c --- /dev/null +++ b/src/doc/unstable-book/src/language-features/tool-lints.md @@ -0,0 +1,35 @@ +# `tool_lints` + +The tracking issue for this feature is: [#44690] + +[#44690]: https://github.com/rust-lang/rust/issues/44690 + +------------------------ + +Tool lints let you use scoped lints, to `allow`, `warn`, `deny` or `forbid` lints of +certain tools. + +Currently `clippy` is the only available lint tool.
+ +It is recommended for lint tools to implement the scoped lints like this: + +- `#[_(TOOL_NAME::lintname)]`: for lint names +- `#[_(TOOL_NAME::lintgroup)]`: for groups of lints +- `#[_(TOOL_NAME::all)]`: for (almost[^1]) all lints + +## An example + +```rust +#![feature(tool_lints)] + +#![warn(clippy::pedantic)] + +#[allow(clippy::filter_map)] +fn main() { + let v = vec![0; 10]; + let _ = v.into_iter().filter(|&x| x < 1).map(|x| x + 1).collect::>(); + println!("No filter_map()!"); +} +``` + +[^1]: Some defined lint groups can be excluded here. diff --git a/src/doc/unstable-book/src/language-features/universal-impl-trait.md b/src/doc/unstable-book/src/language-features/universal-impl-trait.md deleted file mode 100644 index 6b3c5e92720d..000000000000 --- a/src/doc/unstable-book/src/language-features/universal-impl-trait.md +++ /dev/null @@ -1,32 +0,0 @@ -# `universal_impl_trait` - -The tracking issue for this feature is: [#34511]. - -[#34511]: https://github.com/rust-lang/rust/issues/34511 - --------------------- - -The `universal_impl_trait` feature extends the [`conservative_impl_trait`] -feature allowing the `impl Trait` syntax in arguments (universal -quantification). 
- -[`conservative_impl_trait`]: ./language-features/conservative-impl-trait.html - -## Examples - -```rust -#![feature(universal_impl_trait)] -use std::ops::Not; - -fn any_zero(values: impl IntoIterator) -> bool { - for val in values { if val == 0 { return true; } } - false -} - -fn main() { - let test1 = -5..; - let test2 = vec![1, 8, 42, -87, 60]; - assert!(any_zero(test1)); - assert!(bool::not(any_zero(test2))); -} -``` diff --git a/src/doc/unstable-book/src/language-features/unsized-locals.md b/src/doc/unstable-book/src/language-features/unsized-locals.md new file mode 100644 index 000000000000..7a5fe5b7f28c --- /dev/null +++ b/src/doc/unstable-book/src/language-features/unsized-locals.md @@ -0,0 +1,180 @@ +# `unsized_locals` + +The tracking issue for this feature is: [#48055] + +[#48055]: https://github.com/rust-lang/rust/issues/48055 + +------------------------ + +This implements [RFC1909]. When turned on, you can have unsized arguments and locals: + +[RFC1909]: https://github.com/rust-lang/rfcs/blob/master/text/1909-coercions.md + +```rust +#![feature(unsized_locals)] + +use std::any::Any; + +fn main() { + let x: Box = Box::new(42); + let x: dyn Any = *x; + // ^ unsized local variable + // ^^ unsized temporary + foo(x); +} + +fn foo(_: dyn Any) {} +// ^^^^^^ unsized argument +``` + +The RFC still forbids the following unsized expressions: + +```rust,ignore +#![feature(unsized_locals)] + +use std::any::Any; + +struct MyStruct { + content: T, +} + +struct MyTupleStruct(T); + +fn answer() -> Box { + Box::new(42) +} + +fn main() { + // You CANNOT have unsized statics. + static X: dyn Any = *answer(); // ERROR + const Y: dyn Any = *answer(); // ERROR + + // You CANNOT have struct initialized unsized. + MyStruct { content: *answer() }; // ERROR + MyTupleStruct(*answer()); // ERROR + (42, *answer()); // ERROR + + // You CANNOT have unsized return types. + fn my_function() -> dyn Any { *answer() } // ERROR + + // You CAN have unsized local variables... 
+ let mut x: dyn Any = *answer(); // OK + // ...but you CANNOT reassign to them. + x = *answer(); // ERROR + + // You CANNOT even initialize them separately. + let y: dyn Any; // OK + y = *answer(); // ERROR + + // Not mentioned in the RFC, but by-move captured variables are also Sized. + let x: dyn Any = *answer(); + (move || { // ERROR + let y = x; + })(); + + // You CAN create a closure with unsized arguments, + // but you CANNOT call it. + // This is an implementation detail and may be changed in the future. + let f = |x: dyn Any| {}; + f(*answer()); // ERROR +} +``` + +However, the current implementation allows `MyTupleStruct(..)` to be unsized. This will be fixed in the future. + +## By-value trait objects + +With this feature, you can have by-value `self` arguments without `Self: Sized` bounds. + +```rust +#![feature(unsized_locals)] + +trait Foo { + fn foo(self) {} +} + +impl Foo for T {} + +fn main() { + let slice: Box<[i32]> = Box::new([1, 2, 3]); + <[i32] as Foo>::foo(*slice); +} +``` + +And `Foo` will also be object-safe. However, this object-safety is not yet implemented. + +```rust,ignore +#![feature(unsized_locals)] + +trait Foo { + fn foo(self) {} +} + +impl Foo for T {} + +fn main () { + let slice: Box = Box::new([1, 2, 3]); + // doesn't compile yet + ::foo(*slice); +} +``` + +Unfortunately, this is not implemented yet. + +One of the objectives of this feature is to allow `Box`, instead of `Box` in the future. See [#28796] for details. + +[#28796]: https://github.com/rust-lang/rust/issues/28796 + +## Variable length arrays + +The RFC also describes an extension to the array literal syntax: `[e; dyn n]`. In the syntax, `n` isn't necessarily a constant expression. The array is dynamically allocated on the stack and has the type of `[T]`, instead of `[T; n]`. + +```rust,ignore +#![feature(unsized_locals)] + +fn mergesort(a: &mut [T]) { + let mut tmp = [T; dyn a.len()]; + // ... 
+} + +fn main() { + let mut a = [3, 1, 5, 6]; + mergesort(&mut a); + assert_eq!(a, [1, 3, 5, 6]); +} +``` + +VLAs are not implemented yet. The syntax isn't final, either. We may need an alternative syntax for Rust 2015 because, in Rust 2015, expressions like `[e; dyn(1)]` would be ambiguous. One possible alternative proposed in the RFC is `[e; n]`: if `n` captures one or more local variables, then it is considered as `[e; dyn n]`. + +## Advisory on stack usage + +It's advised not to casually use the `#![feature(unsized_locals)]` feature. Typical use-cases are: + +- When you need a by-value trait objects. +- When you really need a fast allocation of small temporary arrays. + +Another pitfall is repetitive allocation and temporaries. Currently the compiler simply extends the stack frame every time it encounters an unsized assignment. So for example, the code + +```rust +#![feature(unsized_locals)] + +fn main() { + let x: Box<[i32]> = Box::new([1, 2, 3, 4, 5]); + let _x = {{{{{{{{{{*x}}}}}}}}}}; +} +``` + +and the code + +```rust +#![feature(unsized_locals)] + +fn main() { + for _ in 0..10 { + let x: Box<[i32]> = Box::new([1, 2, 3, 4, 5]); + let _x = *x; + } +} +``` + +will unnecessarily extend the stack frame. diff --git a/src/doc/unstable-book/src/language-features/use-nested-groups.md b/src/doc/unstable-book/src/language-features/use-nested-groups.md deleted file mode 100644 index 47b635bad736..000000000000 --- a/src/doc/unstable-book/src/language-features/use-nested-groups.md +++ /dev/null @@ -1,90 +0,0 @@ -# `use_nested_groups` - -The tracking issue for this feature is: [#44494] - -[#44494]: https://github.com/rust-lang/rust/issues/44494 - ------------------------- - -The `use_nested_groups` feature allows you to import multiple items from a -complex module tree easily, by nesting different imports in the same -declaration. 
For example: - -```rust -#![feature(use_nested_groups)] -# #![allow(unused_imports, dead_code)] -# -# mod foo { -# pub mod bar { -# pub type Foo = (); -# } -# pub mod baz { -# pub mod quux { -# pub type Bar = (); -# } -# } -# } - -use foo::{ - bar::{self, Foo}, - baz::{*, quux::Bar}, -}; -# -# fn main() {} -``` - -## Snippet for the book's new features appendix - -When stabilizing, add this to -`src/doc/book/second-edition/src/appendix-07-newest-features.md`: - -### Nested groups in `use` declarations - -If you have a complex module tree with many different submodules and you need -to import a few items from each one, it might be useful to group all the -imports in the same declaration to keep your code clean and avoid repeating the -base modules' name. - -The `use` declaration supports nesting to help you in those cases, both with -simple imports and glob ones. For example this snippets imports `bar`, `Foo`, -all the items in `baz` and `Bar`: - -```rust -# #![feature(use_nested_groups)] -# #![allow(unused_imports, dead_code)] -# -# mod foo { -# pub mod bar { -# pub type Foo = (); -# } -# pub mod baz { -# pub mod quux { -# pub type Bar = (); -# } -# } -# } -# -use foo::{ - bar::{self, Foo}, - baz::{*, quux::Bar}, -}; -# -# fn main() {} -``` - -## Updated reference - -When stabilizing, replace the shortcut list in -`src/doc/reference/src/items/use-declarations.md` with this updated one: - -* Simultaneously binding a list of paths with a common prefix, using the - glob-like brace syntax `use a::b::{c, d, e::f, g::h::i};` -* Simultaneously binding a list of paths with a common prefix and their common - parent module, using the `self` keyword, such as `use a::b::{self, c, d::e};` -* Rebinding the target name as a new local name, using the syntax `use p::q::r - as x;`. This can also be used with the last two features: - `use a::b::{self as ab, c as abc}`. -* Binding all paths matching a given prefix, using the asterisk wildcard syntax - `use a::b::*;`. 
-* Nesting groups of the previous features multiple times, such as - `use a::b::{self as ab, c d::{*, e::f}};` diff --git a/src/doc/unstable-book/src/language-features/used.md b/src/doc/unstable-book/src/language-features/used.md index 75a8b2774f42..c3b7f2e41e15 100644 --- a/src/doc/unstable-book/src/language-features/used.md +++ b/src/doc/unstable-book/src/language-features/used.md @@ -87,11 +87,13 @@ This condition can be met using `#[used]` and `#[link_section]` plus a linker script. ``` rust,ignore -#![feature(lang_items)] +#![feature(panic_implementation)] #![feature(used)] #![no_main] #![no_std] +use core::panic::PanicInfo; + extern "C" fn reset_handler() -> ! { loop {} } @@ -100,8 +102,10 @@ extern "C" fn reset_handler() -> ! { #[used] static RESET_HANDLER: extern "C" fn() -> ! = reset_handler; -#[lang = "panic_fmt"] -fn panic_fmt() {} +#[panic_implementation] +fn panic_impl(info: &PanicInfo) -> ! { + loop {} +} ``` ``` text diff --git a/src/doc/unstable-book/src/library-features/alloc-jemalloc.md b/src/doc/unstable-book/src/library-features/alloc-jemalloc.md deleted file mode 100644 index 425d4cb79b2d..000000000000 --- a/src/doc/unstable-book/src/library-features/alloc-jemalloc.md +++ /dev/null @@ -1,13 +0,0 @@ -# `alloc_jemalloc` - -The tracking issue for this feature is: [#33082] - -[#33082]: https://github.com/rust-lang/rust/issues/33082 - -See also [`alloc_system`](library-features/alloc-system.html). - ------------------------- - -This feature has been replaced by [the `jemallocator` crate on crates.io.][jemallocator]. 
- -[jemallocator]: https://crates.io/crates/jemallocator diff --git a/src/doc/unstable-book/src/library-features/alloc-system.md b/src/doc/unstable-book/src/library-features/alloc-system.md deleted file mode 100644 index 9effab202cab..000000000000 --- a/src/doc/unstable-book/src/library-features/alloc-system.md +++ /dev/null @@ -1,77 +0,0 @@ -# `alloc_system` - -The tracking issue for this feature is: [#32838] - -[#32838]: https://github.com/rust-lang/rust/issues/32838 - -See also [`global_allocator`](language-features/global-allocator.html). - ------------------------- - -The compiler currently ships two default allocators: `alloc_system` and -`alloc_jemalloc` (some targets don't have jemalloc, however). These allocators -are normal Rust crates and contain an implementation of the routines to -allocate and deallocate memory. The standard library is not compiled assuming -either one, and the compiler will decide which allocator is in use at -compile-time depending on the type of output artifact being produced. - -Binaries generated by the compiler will use `alloc_jemalloc` by default (where -available). In this situation the compiler "controls the world" in the sense of -it has power over the final link. Primarily this means that the allocator -decision can be left up the compiler. - -Dynamic and static libraries, however, will use `alloc_system` by default. Here -Rust is typically a 'guest' in another application or another world where it -cannot authoritatively decide what allocator is in use. As a result it resorts -back to the standard APIs (e.g. `malloc` and `free`) for acquiring and releasing -memory. - -# Switching Allocators - -Although the compiler's default choices may work most of the time, it's often -necessary to tweak certain aspects. 
Overriding the compiler's decision about -which allocator is in use is done through the `#[global_allocator]` attribute: - -```rust,no_run -#![feature(alloc_system, global_allocator, allocator_api)] - -extern crate alloc_system; - -use alloc_system::System; - -#[global_allocator] -static A: System = System; - -fn main() { - let a = Box::new(4); // Allocates from the system allocator. - println!("{}", a); -} -``` - -In this example the binary generated will not link to jemalloc by default but -instead use the system allocator. Conversely to generate a dynamic library which -uses jemalloc by default one would write: - -(The `alloc_jemalloc` crate cannot be used to control the global allocator, -crate.io’s `jemallocator` crate provides equivalent functionality.) - -```toml -# Cargo.toml -[dependencies] -jemallocator = "0.1" -``` -```rust,ignore -#![feature(global_allocator)] -#![crate_type = "dylib"] - -extern crate jemallocator; - -#[global_allocator] -static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; - -pub fn foo() { - let a = Box::new(4); // Allocates from jemalloc. - println!("{}", a); -} -# fn main() {} -``` diff --git a/src/doc/unstable-book/src/library-features/entry-and-modify.md b/src/doc/unstable-book/src/library-features/entry-and-modify.md deleted file mode 100644 index 1280c71e83c9..000000000000 --- a/src/doc/unstable-book/src/library-features/entry-and-modify.md +++ /dev/null @@ -1,77 +0,0 @@ -# `entry_and_modify` - -The tracking issue for this feature is: [#44733] - -[#44733]: https://github.com/rust-lang/rust/issues/44733 - ------------------------- - -This introduces a new method for the Entry API of maps -(`std::collections::HashMap` and `std::collections::BTreeMap`), so that -occupied entries can be modified before any potential inserts into the -map. 
- -For example: - -```rust -#![feature(entry_and_modify)] -# fn main() { -use std::collections::HashMap; - -struct Foo { - new: bool, -} - -let mut map: HashMap<&str, Foo> = HashMap::new(); - -map.entry("quux") - .and_modify(|e| e.new = false) - .or_insert(Foo { new: true }); -# } -``` - -This is not possible with the stable API alone since inserting a default -_before_ modifying the `new` field would mean we would lose the default state: - -```rust -# fn main() { -use std::collections::HashMap; - -struct Foo { - new: bool, -} - -let mut map: HashMap<&str, Foo> = HashMap::new(); - -map.entry("quux").or_insert(Foo { new: true }).new = false; -# } -``` - -In the above code the `new` field will never be `true`, even though we only -intended to update that field to `false` for previously extant entries. - -To achieve the same effect as `and_modify` we would have to manually match -against the `Occupied` and `Vacant` variants of the `Entry` enum, which is -a little less user-friendly, and much more verbose: - -```rust -# fn main() { -use std::collections::HashMap; -use std::collections::hash_map::Entry; - -struct Foo { - new: bool, -} - -let mut map: HashMap<&str, Foo> = HashMap::new(); - -match map.entry("quux") { - Entry::Occupied(entry) => { - entry.into_mut().new = false; - }, - Entry::Vacant(entry) => { - entry.insert(Foo { new: true }); - }, -}; -# } -``` diff --git a/src/doc/unstable-book/src/library-features/entry-or-default.md b/src/doc/unstable-book/src/library-features/entry-or-default.md deleted file mode 100644 index f8c8a2a7a718..000000000000 --- a/src/doc/unstable-book/src/library-features/entry-or-default.md +++ /dev/null @@ -1,13 +0,0 @@ -# `entry_or_default` - -The tracking issue for this feature is: [#44324] - -[#44324]: https://github.com/rust-lang/rust/issues/44324 - ------------------------- - -The `entry_or_default` feature adds a new method to `hash_map::Entry` -and `btree_map::Entry`, `or_default`, when `V: Default`. 
This method is -semantically identical to `or_insert_with(Default::default)`, and will -insert the default value for the type if no entry exists for the current -key. diff --git a/src/doc/unstable-book/src/library-features/future-atomic-orderings.md b/src/doc/unstable-book/src/library-features/future-atomic-orderings.md deleted file mode 100644 index 40c2ef2db055..000000000000 --- a/src/doc/unstable-book/src/library-features/future-atomic-orderings.md +++ /dev/null @@ -1,5 +0,0 @@ -# `future_atomic_orderings` - -This feature is internal to the Rust compiler and is not intended for general use. - ------------------------- diff --git a/src/doc/unstable-book/src/library-features/io-error-internals.md b/src/doc/unstable-book/src/library-features/io-error-internals.md deleted file mode 100644 index 5bee18d33d61..000000000000 --- a/src/doc/unstable-book/src/library-features/io-error-internals.md +++ /dev/null @@ -1,5 +0,0 @@ -# `io_error_internals` - -This feature is internal to the Rust compiler and is not intended for general use. - ------------------------- diff --git a/src/doc/unstable-book/src/library-features/slice-rsplit.md b/src/doc/unstable-book/src/library-features/slice-rsplit.md deleted file mode 100644 index 8c2954f7294e..000000000000 --- a/src/doc/unstable-book/src/library-features/slice-rsplit.md +++ /dev/null @@ -1,10 +0,0 @@ -# `slice_rsplit` - -The tracking issue for this feature is: [#41020] - -[#41020]: https://github.com/rust-lang/rust/issues/41020 - ------------------------- - -The `slice_rsplit` feature enables two methods on slices: -`slice.rsplit(predicate)` and `slice.rsplit_mut(predicate)`. 
diff --git a/src/doc/unstable-book/src/library-features/splice.md b/src/doc/unstable-book/src/library-features/splice.md deleted file mode 100644 index 2e4bb1a5257c..000000000000 --- a/src/doc/unstable-book/src/library-features/splice.md +++ /dev/null @@ -1,22 +0,0 @@ -# `splice` - -The tracking issue for this feature is: [#44643] - -[#44643]: https://github.com/rust-lang/rust/issues/44643 - ------------------------- - -The `splice()` method on `String` allows you to replace a range -of values in a string with another range of values. - -A simple example: - -```rust -#![feature(splice)] -let mut s = String::from("α is alpha, β is beta"); -let beta_offset = s.find('β').unwrap_or(s.len()); - -// Replace the range up until the β from the string -s.splice(..beta_offset, "Α is capital alpha; "); -assert_eq!(s, "Α is capital alpha; β is beta"); -``` diff --git a/src/doc/unstable-book/src/library-features/string-retain.md b/src/doc/unstable-book/src/library-features/string-retain.md deleted file mode 100644 index 049444aa49bd..000000000000 --- a/src/doc/unstable-book/src/library-features/string-retain.md +++ /dev/null @@ -1,23 +0,0 @@ -# `string_retain` - -The tracking issue for this feature is: [#43874] - -[#43874]: https://github.com/rust-lang/rust/issues/43874 - ------------------------- - -Retains only the characters specified by the predicate. - -In other words, remove all characters `c` such that `f(c)` returns `false`. -This method operates in place and preserves the order of the retained -characters. 
- -```rust -#![feature(string_retain)] - -let mut s = String::from("f_o_ob_ar"); - -s.retain(|c| c != '_'); - -assert_eq!(s, "foobar"); -``` diff --git a/src/etc/cat-and-grep.sh b/src/etc/cat-and-grep.sh index ef9884d2e980..361e8d8e60ee 100755 --- a/src/etc/cat-and-grep.sh +++ b/src/etc/cat-and-grep.sh @@ -63,6 +63,11 @@ done shift $((OPTIND - 1)) +# use gnu version of tool if available (for bsd) +if command -v "g${GREPPER}"; then + GREPPER="g${GREPPER}" +fi + LOG=$(mktemp -t cgrep.XXXXXX) trap "rm -f $LOG" EXIT diff --git a/src/etc/char_private.py b/src/etc/char_private.py deleted file mode 100644 index cfe5b01e934e..000000000000 --- a/src/etc/char_private.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011-2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -# This script uses the following Unicode tables: -# - UnicodeData.txt - - -from collections import namedtuple -import csv -import os -import subprocess - -NUM_CODEPOINTS=0x110000 - -def to_ranges(iter): - current = None - for i in iter: - if current is None or i != current[1] or i in (0x10000, 0x20000): - if current is not None: - yield tuple(current) - current = [i, i + 1] - else: - current[1] += 1 - if current is not None: - yield tuple(current) - -def get_escaped(codepoints): - for c in codepoints: - if (c.class_ or "Cn") in "Cc Cf Cs Co Cn Zl Zp Zs".split() and c.value != ord(' '): - yield c.value - -def get_file(f): - try: - return open(os.path.basename(f)) - except FileNotFoundError: - subprocess.run(["curl", "-O", f], check=True) - return open(os.path.basename(f)) - -Codepoint = namedtuple('Codepoint', 'value class_') - -def get_codepoints(f): - r = csv.reader(f, delimiter=";") - prev_codepoint = 0 - class_first = None - for row in r: - codepoint = int(row[0], 16) - name = row[1] - class_ = row[2] - - if class_first is not None: - if not name.endswith("Last>"): - raise ValueError("Missing Last after First") - - for c in range(prev_codepoint + 1, codepoint): - yield Codepoint(c, class_first) - - class_first = None - if name.endswith("First>"): - class_first = class_ - - yield Codepoint(codepoint, class_) - prev_codepoint = codepoint - - if class_first != None: - raise ValueError("Missing Last after First") - - for c in range(prev_codepoint + 1, NUM_CODEPOINTS): - yield Codepoint(c, None) - -def compress_singletons(singletons): - uppers = [] # (upper, # items in lowers) - lowers = [] - - for i in singletons: - upper = i >> 8 - lower = i & 0xff - if len(uppers) == 0 or uppers[-1][0] != upper: - uppers.append((upper, 1)) - else: - upper, count = uppers[-1] - uppers[-1] = upper, count + 1 - lowers.append(lower) - - return uppers, lowers - -def compress_normal(normal): - # lengths 0x00..0x7f are encoded as 00, 01, ..., 7e, 7f - # lengths 0x80..0x7fff are 
encoded as 80 80, 80 81, ..., ff fe, ff ff - compressed = [] # [truelen, (truelenaux), falselen, (falselenaux)] - - prev_start = 0 - for start, count in normal: - truelen = start - prev_start - falselen = count - prev_start = start + count - - assert truelen < 0x8000 and falselen < 0x8000 - entry = [] - if truelen > 0x7f: - entry.append(0x80 | (truelen >> 8)) - entry.append(truelen & 0xff) - else: - entry.append(truelen & 0x7f) - if falselen > 0x7f: - entry.append(0x80 | (falselen >> 8)) - entry.append(falselen & 0xff) - else: - entry.append(falselen & 0x7f) - - compressed.append(entry) - - return compressed - -def print_singletons(uppers, lowers, uppersname, lowersname): - print("const {}: &'static [(u8, u8)] = &[".format(uppersname)) - for u, c in uppers: - print(" ({:#04x}, {}),".format(u, c)) - print("];") - print("const {}: &'static [u8] = &[".format(lowersname)) - for i in range(0, len(lowers), 8): - print(" {}".format(" ".join("{:#04x},".format(l) for l in lowers[i:i+8]))) - print("];") - -def print_normal(normal, normalname): - print("const {}: &'static [u8] = &[".format(normalname)) - for v in normal: - print(" {}".format(" ".join("{:#04x},".format(i) for i in v))) - print("];") - -def main(): - file = get_file("http://www.unicode.org/Public/UNIDATA/UnicodeData.txt") - - codepoints = get_codepoints(file) - - CUTOFF=0x10000 - singletons0 = [] - singletons1 = [] - normal0 = [] - normal1 = [] - extra = [] - - for a, b in to_ranges(get_escaped(codepoints)): - if a > 2 * CUTOFF: - extra.append((a, b - a)) - elif a == b - 1: - if a & CUTOFF: - singletons1.append(a & ~CUTOFF) - else: - singletons0.append(a) - elif a == b - 2: - if a & CUTOFF: - singletons1.append(a & ~CUTOFF) - singletons1.append((a + 1) & ~CUTOFF) - else: - singletons0.append(a) - singletons0.append(a + 1) - else: - if a >= 2 * CUTOFF: - extra.append((a, b - a)) - elif a & CUTOFF: - normal1.append((a & ~CUTOFF, b - a)) - else: - normal0.append((a, b - a)) - - singletons0u, singletons0l = 
compress_singletons(singletons0) - singletons1u, singletons1l = compress_singletons(singletons1) - normal0 = compress_normal(normal0) - normal1 = compress_normal(normal1) - - print("""\ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// NOTE: The following code was generated by "src/etc/char_private.py", -// do not edit directly! - -fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], - normal: &[u8]) -> bool { - let xupper = (x >> 8) as u8; - let mut lowerstart = 0; - for &(upper, lowercount) in singletonuppers { - let lowerend = lowerstart + lowercount as usize; - if xupper == upper { - for &lower in &singletonlowers[lowerstart..lowerend] { - if lower == x as u8 { - return false; - } - } - } else if xupper < upper { - break; - } - lowerstart = lowerend; - } - - let mut x = x as i32; - let mut normal = normal.iter().cloned(); - let mut current = true; - while let Some(v) = normal.next() { - let len = if v & 0x80 != 0 { - ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32 - } else { - v as i32 - }; - x -= len; - if x < 0 { - break; - } - current = !current; - } - current -} - -pub(crate) fn is_printable(x: char) -> bool { - let x = x as u32; - let lower = x as u16; - if x < 0x10000 { - check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0) - } else if x < 0x20000 { - check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1) - } else {\ -""") - for a, b in extra: - print(" if 0x{:x} <= x && x < 0x{:x} {{".format(a, a + b)) - print(" return false;") - print(" }") - print("""\ - true - } -}\ -""") - print() - print_singletons(singletons0u, singletons0l, 'SINGLETONS0U', 'SINGLETONS0L') - print_singletons(singletons1u, singletons1l, 'SINGLETONS1U', 
'SINGLETONS1L') - print_normal(normal0, 'NORMAL0') - print_normal(normal1, 'NORMAL1') - -if __name__ == '__main__': - main() diff --git a/src/etc/debugger_pretty_printers_common.py b/src/etc/debugger_pretty_printers_common.py index 4a38d4be083f..1797f6708ac5 100644 --- a/src/etc/debugger_pretty_printers_common.py +++ b/src/etc/debugger_pretty_printers_common.py @@ -47,6 +47,9 @@ TYPE_KIND_PTR = 15 TYPE_KIND_FIXED_SIZE_VEC = 16 TYPE_KIND_REGULAR_UNION = 17 TYPE_KIND_OS_STRING = 18 +TYPE_KIND_STD_VECDEQUE = 19 +TYPE_KIND_STD_BTREESET = 20 +TYPE_KIND_STD_BTREEMAP = 21 ENCODED_ENUM_PREFIX = "RUST$ENCODED$ENUM$" ENUM_DISR_FIELD_NAME = "RUST$ENUM$DISR" @@ -62,6 +65,20 @@ STD_VEC_FIELD_NAME_BUF = "buf" STD_VEC_FIELD_NAMES = [STD_VEC_FIELD_NAME_BUF, STD_VEC_FIELD_NAME_LENGTH] +# std::collections::VecDeque<> related constants +STD_VECDEQUE_FIELD_NAME_TAIL = "tail" +STD_VECDEQUE_FIELD_NAME_HEAD = "head" +STD_VECDEQUE_FIELD_NAME_BUF = "buf" +STD_VECDEQUE_FIELD_NAMES = [STD_VECDEQUE_FIELD_NAME_TAIL, + STD_VECDEQUE_FIELD_NAME_HEAD, + STD_VECDEQUE_FIELD_NAME_BUF] + +# std::collections::BTreeSet<> related constants +STD_BTREESET_FIELD_NAMES = ["map"] + +# std::collections::BTreeMap<> related constants +STD_BTREEMAP_FIELD_NAMES = ["root", "length"] + # std::String related constants STD_STRING_FIELD_NAMES = ["vec"] @@ -161,6 +178,21 @@ class Type(object): self.__conforms_to_field_layout(STD_VEC_FIELD_NAMES)): return TYPE_KIND_STD_VEC + # STD COLLECTION VECDEQUE + if (unqualified_type_name.startswith("VecDeque<") and + self.__conforms_to_field_layout(STD_VECDEQUE_FIELD_NAMES)): + return TYPE_KIND_STD_VECDEQUE + + # STD COLLECTION BTREESET + if (unqualified_type_name.startswith("BTreeSet<") and + self.__conforms_to_field_layout(STD_BTREESET_FIELD_NAMES)): + return TYPE_KIND_STD_BTREESET + + # STD COLLECTION BTREEMAP + if (unqualified_type_name.startswith("BTreeMap<") and + self.__conforms_to_field_layout(STD_BTREEMAP_FIELD_NAMES)): + return TYPE_KIND_STD_BTREEMAP + # STD STRING if 
(unqualified_type_name.startswith("String") and self.__conforms_to_field_layout(STD_STRING_FIELD_NAMES)): @@ -325,6 +357,50 @@ def extract_length_ptr_and_cap_from_std_vec(vec_val): assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR return (length, data_ptr, capacity) + +def extract_tail_head_ptr_and_cap_from_std_vecdeque(vec_val): + assert vec_val.type.get_type_kind() == TYPE_KIND_STD_VECDEQUE + tail_field_index = STD_VECDEQUE_FIELD_NAMES.index(STD_VECDEQUE_FIELD_NAME_TAIL) + head_field_index = STD_VECDEQUE_FIELD_NAMES.index(STD_VECDEQUE_FIELD_NAME_HEAD) + buf_field_index = STD_VECDEQUE_FIELD_NAMES.index(STD_VECDEQUE_FIELD_NAME_BUF) + + tail = vec_val.get_child_at_index(tail_field_index).as_integer() + head = vec_val.get_child_at_index(head_field_index).as_integer() + buf = vec_val.get_child_at_index(buf_field_index) + + vec_ptr_val = buf.get_child_at_index(0) + capacity = buf.get_child_at_index(1).as_integer() + unique_ptr_val = vec_ptr_val.get_child_at_index(0) + data_ptr = unique_ptr_val.get_child_at_index(0) + assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR + return (tail, head, data_ptr, capacity) + + +def extract_length_and_ptr_from_std_btreeset(vec_val): + assert vec_val.type.get_type_kind() == TYPE_KIND_STD_BTREESET + map = vec_val.get_child_at_index(0) + root = map.get_child_at_index(0) + length = map.get_child_at_index(1).as_integer() + node = root.get_child_at_index(0) + ptr = node.get_child_at_index(0) + unique_ptr_val = ptr.get_child_at_index(0) + data_ptr = unique_ptr_val.get_child_at_index(0) + assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR + return (length, data_ptr) + + +def extract_length_and_ptr_from_std_btreemap(vec_val): + assert vec_val.type.get_type_kind() == TYPE_KIND_STD_BTREEMAP + root = vec_val.get_child_at_index(0) + length = vec_val.get_child_at_index(1).as_integer() + node = root.get_child_at_index(0) + ptr = node.get_child_at_index(0) + unique_ptr_val = ptr.get_child_at_index(0) + 
data_ptr = unique_ptr_val.get_child_at_index(0) + assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR + return (length, data_ptr) + + def extract_length_and_ptr_from_slice(slice_val): assert (slice_val.type.get_type_kind() == TYPE_KIND_SLICE or slice_val.type.get_type_kind() == TYPE_KIND_STR_SLICE) diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index 0612873e2815..216915dba5fe 100755 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -124,6 +124,15 @@ def rust_pretty_printer_lookup_function(gdb_val): if type_kind == rustpp.TYPE_KIND_STD_VEC: return RustStdVecPrinter(val) + if type_kind == rustpp.TYPE_KIND_STD_VECDEQUE: + return RustStdVecDequePrinter(val) + + if type_kind == rustpp.TYPE_KIND_STD_BTREESET: + return RustStdBTreeSetPrinter(val) + + if type_kind == rustpp.TYPE_KIND_STD_BTREEMAP: + return RustStdBTreeMapPrinter(val) + if type_kind == rustpp.TYPE_KIND_STD_STRING: return RustStdStringPrinter(val) @@ -274,6 +283,77 @@ class RustStdVecPrinter(object): yield (str(index), (gdb_ptr + index).dereference()) +class RustStdVecDequePrinter(object): + def __init__(self, val): + self.__val = val + + @staticmethod + def display_hint(): + return "array" + + def to_string(self): + (tail, head, data_ptr, cap) = \ + rustpp.extract_tail_head_ptr_and_cap_from_std_vecdeque(self.__val) + return (self.__val.type.get_unqualified_type_name() + + ("(len: %i, cap: %i)" % (head - tail, cap))) + + def children(self): + (tail, head, data_ptr, cap) = \ + rustpp.extract_tail_head_ptr_and_cap_from_std_vecdeque(self.__val) + gdb_ptr = data_ptr.get_wrapped_value() + for index in xrange(tail, head): + yield (str(index), (gdb_ptr + index).dereference()) + + +class RustStdBTreeSetPrinter(object): + def __init__(self, val): + self.__val = val + + @staticmethod + def display_hint(): + return "array" + + def to_string(self): + (length, data_ptr) = \ + 
rustpp.extract_length_and_ptr_from_std_btreeset(self.__val) + return (self.__val.type.get_unqualified_type_name() + + ("(len: %i)" % length)) + + def children(self): + (length, data_ptr) = \ + rustpp.extract_length_and_ptr_from_std_btreeset(self.__val) + val = GdbValue(data_ptr.get_wrapped_value().dereference()).get_child_at_index(3) + gdb_ptr = val.get_wrapped_value() + for index in xrange(length): + yield (str(index), gdb_ptr[index]) + + +class RustStdBTreeMapPrinter(object): + def __init__(self, val): + self.__val = val + + @staticmethod + def display_hint(): + return "map" + + def to_string(self): + (length, data_ptr) = \ + rustpp.extract_length_and_ptr_from_std_btreemap(self.__val) + return (self.__val.type.get_unqualified_type_name() + + ("(len: %i)" % length)) + + def children(self): + (length, data_ptr) = \ + rustpp.extract_length_and_ptr_from_std_btreemap(self.__val) + keys = GdbValue(data_ptr.get_wrapped_value().dereference()).get_child_at_index(3) + keys_ptr = keys.get_wrapped_value() + vals = GdbValue(data_ptr.get_wrapped_value().dereference()).get_child_at_index(4) + vals_ptr = vals.get_wrapped_value() + for index in xrange(length): + yield (str(index), keys_ptr[index]) + yield (str(index), vals_ptr[index]) + + class RustStdStringPrinter(object): def __init__(self, val): self.__val = val @@ -287,6 +367,7 @@ class RustStdStringPrinter(object): def display_hint(self): return "string" + class RustOsStringPrinter(object): def __init__(self, val): self.__val = val diff --git a/src/etc/generate-deriving-span-tests.py b/src/etc/generate-deriving-span-tests.py index 15c9fc2e504a..2e9169ce5b94 100755 --- a/src/etc/generate-deriving-span-tests.py +++ b/src/etc/generate-deriving-span-tests.py @@ -18,7 +18,7 @@ derives have spans that point to the fields, rather than the sample usage: src/etc/generate-deriving-span-tests.py """ -import sys, os, datetime, stat +import sys, os, datetime, stat, re TEST_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), 
'../test/compile-fail')) @@ -87,16 +87,25 @@ def create_test_case(type, trait, super_traits, error_count): def write_file(name, string): test_file = os.path.join(TEST_DIR, 'derives-span-%s.rs' % name) + with open(test_file) as f: + old_str = f.read() + old_str_ignoring_date = re.sub(r'^// Copyright \d+', + '// Copyright {year}'.format(year = YEAR), old_str) + if old_str_ignoring_date == string: + # if all we're doing is updating the copyright year, ignore it + return 0 + # set write permission if file exists, so it can be changed if os.path.exists(test_file): os.chmod(test_file, stat.S_IWUSR) - with open(test_file, 'wt') as f: + with open(test_file, 'w') as f: f.write(string) # mark file read-only os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) + return 1 ENUM = 1 @@ -113,18 +122,22 @@ traits = { for (trait, supers, errs) in [('Clone', [], 1), ('PartialEq', [], 2), - ('PartialOrd', ['PartialEq'], 3), + ('PartialOrd', ['PartialEq'], 1), ('Eq', ['PartialEq'], 1), ('Ord', ['Eq', 'PartialOrd', 'PartialEq'], 1), ('Debug', [], 1), ('Hash', [], 1)]: traits[trait] = (ALL, supers, errs) +files = 0 + for (trait, (types, super_traits, error_count)) in traits.items(): mk = lambda ty: create_test_case(ty, trait, super_traits, error_count) if types & ENUM: - write_file(trait + '-enum', mk(ENUM_TUPLE)) - write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT)) + files += write_file(trait + '-enum', mk(ENUM_TUPLE)) + files += write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT)) if types & STRUCT: - write_file(trait + '-struct', mk(STRUCT_FIELDS)) - write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE)) + files += write_file(trait + '-struct', mk(STRUCT_FIELDS)) + files += write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE)) + +print('Generated {files} deriving span test{}.'.format('s' if files != 1 else '', files = files)) diff --git a/src/etc/htmldocck.py b/src/etc/htmldocck.py index 8a11c6f7cfc4..569788fe9c08 100644 --- a/src/etc/htmldocck.py +++ 
b/src/etc/htmldocck.py @@ -346,15 +346,19 @@ def check_tree_attr(tree, path, attr, pat, regexp): def check_tree_text(tree, path, pat, regexp): path = normalize_xpath(path) ret = False - for e in tree.findall(path): - try: - value = flatten(e) - except KeyError: - continue - else: - ret = check_string(value, pat, regexp) - if ret: - break + try: + for e in tree.findall(path): + try: + value = flatten(e) + except KeyError: + continue + else: + ret = check_string(value, pat, regexp) + if ret: + break + except Exception as e: + print('Failed to get path "{}"'.format(path)) + raise e return ret diff --git a/src/etc/installer/msi/rust.wxs b/src/etc/installer/msi/rust.wxs index d95b096d732f..a471ccc6f5b4 100644 --- a/src/etc/installer/msi/rust.wxs +++ b/src/etc/installer/msi/rust.wxs @@ -18,7 +18,7 @@ - + @@ -129,7 +129,7 @@ - + diff --git a/src/etc/platform-intrinsics/generator.py b/src/etc/platform-intrinsics/generator.py index e9cf71c32fe9..046ea48638ba 100644 --- a/src/etc/platform-intrinsics/generator.py +++ b/src/etc/platform-intrinsics/generator.py @@ -591,7 +591,7 @@ def parse_args(): The X86 architecture is specified as multiple files (for the different instruction sets that x86 supports). To generate the compiler definitions one needs to pass the script a "platform information file" - (with the -i flag) next to the files of the different intruction sets. + (with the -i flag) next to the files of the different instruction sets. For example, to generate the X86 compiler-definitions for SSE4.2, just: python generator.py --format compiler-defs -i x86/info.json sse42.json @@ -806,9 +806,6 @@ class CompilerDefs(object): use {{Intrinsic, Type}}; use IntrinsicDef::Named; -// The default inlining settings trigger a pathological behaviour in -// LLVM, which causes makes compilation very slow. See #28273. -#[inline(never)] pub fn find(name: &str) -> Option {{ if !name.starts_with("{0}") {{ return None }} Some(match &name["{0}".len()..] 
{{'''.format(platform.platform_prefix()) diff --git a/src/etc/rust-gdb b/src/etc/rust-gdb index 52601cd96f80..6835d6aa9087 100755 --- a/src/etc/rust-gdb +++ b/src/etc/rust-gdb @@ -21,6 +21,6 @@ GDB_PYTHON_MODULE_DIRECTORY="$RUSTC_SYSROOT/lib/rustlib/etc" # different/specific command (defaults to `gdb`). RUST_GDB="${RUST_GDB:-gdb}" PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" ${RUST_GDB} \ - -d "$GDB_PYTHON_MODULE_DIRECTORY" \ + --directory="$GDB_PYTHON_MODULE_DIRECTORY" \ -iex "add-auto-load-safe-path $GDB_PYTHON_MODULE_DIRECTORY" \ "$@" diff --git a/src/etc/sugarise-doc-comments.py b/src/etc/sugarise-doc-comments.py deleted file mode 100755 index ac2223f4acef..000000000000 --- a/src/etc/sugarise-doc-comments.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -# -# this script attempts to turn doc comment attributes (#[doc = "..."]) -# into sugared-doc-comments (/** ... */ and /// ...) -# -# it sugarises all .rs/.rc files underneath the working directory -# - -import sys -import os -import fnmatch -import re - - -DOC_PATTERN = '^(?P[\\t ]*)#\\[(\\s*)doc(\\s*)=' + \ - '(\\s*)"(?P(\\"|[^"])*?)"(\\s*)\\]' + \ - '(?P;)?' 
- -ESCAPES = [("\\'", "'"), - ('\\"', '"'), - ("\\n", "\n"), - ("\\r", "\r"), - ("\\t", "\t")] - - -def unescape(s): - for (find, repl) in ESCAPES: - s = s.replace(find, repl) - return s - - -def block_trim(s): - lns = s.splitlines() - - # remove leading/trailing whitespace-lines - while lns and not lns[0].strip(): - lns = lns[1:] - while lns and not lns[-1].strip(): - lns = lns[:-1] - - # remove leading horizontal whitespace - n = sys.maxsize - for ln in lns: - if ln.strip(): - n = min(n, len(re.search('^\s*', ln).group())) - if n != sys.maxsize: - lns = [ln[n:] for ln in lns] - - # strip trailing whitespace - lns = [ln.rstrip() for ln in lns] - - return lns - - -def replace_doc(m): - indent = m.group('indent') - text = block_trim(unescape(m.group('text'))) - - if len(text) > 1: - inner = '!' if m.group('semi') else '*' - starify = lambda s: indent + ' *' + (' ' + s if s else '') - text = '\n'.join(map(starify, text)) - repl = indent + '/*' + inner + '\n' + text + '\n' + indent + ' */' - else: - inner = '!' if m.group('semi') else '/' - repl = indent + '//' + inner + ' ' + text[0] - - return repl - - -def sugarise_file(path): - s = open(path).read() - - r = re.compile(DOC_PATTERN, re.MULTILINE | re.DOTALL) - ns = re.sub(r, replace_doc, s) - - if s != ns: - open(path, 'w').write(ns) - -for (dirpath, dirnames, filenames) in os.walk('.'): - for name in fnmatch.filter(filenames, '*.r[sc]'): - sugarise_file(os.path.join(dirpath, name)) diff --git a/src/etc/test-float-parse/many-digits.rs b/src/etc/test-float-parse/many-digits.rs index 674c30ad84ed..469a38da2b83 100644 --- a/src/etc/test-float-parse/many-digits.rs +++ b/src/etc/test-float-parse/many-digits.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![feature(rand)] - extern crate rand; mod _common; diff --git a/src/etc/test-float-parse/rand-f64.rs b/src/etc/test-float-parse/rand-f64.rs index 1d82912054e2..2994dd9d5e64 100644 --- a/src/etc/test-float-parse/rand-f64.rs +++ b/src/etc/test-float-parse/rand-f64.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rand)] - extern crate rand; mod _common; diff --git a/src/etc/test-float-parse/runtests.py b/src/etc/test-float-parse/runtests.py index d520c9bd5c30..e9f5bba2312d 100644 --- a/src/etc/test-float-parse/runtests.py +++ b/src/etc/test-float-parse/runtests.py @@ -41,7 +41,7 @@ Instead, we take the input and compute the true value with bignum arithmetic (as a fraction, using the ``fractions`` module). Given an input string and the corresponding float computed via Rust, simply -decode the float into f * 2^k (for intergers f, k) and the ULP. +decode the float into f * 2^k (for integers f, k) and the ULP. We can now easily compute the error and check if it is within 0.5 ULP as it should be. 
Zero and infinites are handled similarly: diff --git a/src/etc/wasm32-shim.js b/src/etc/wasm32-shim.js index d55083e0f8e0..378aae597331 100644 --- a/src/etc/wasm32-shim.js +++ b/src/etc/wasm32-shim.js @@ -28,14 +28,76 @@ let m = new WebAssembly.Module(buffer); let memory = null; +function viewstruct(data, fields) { + return new Uint32Array(memory.buffer).subarray(data/4, data/4 + fields); +} + function copystr(a, b) { - if (memory === null) { - return null - } - let view = new Uint8Array(memory.buffer).slice(a, a + b); + let view = new Uint8Array(memory.buffer).subarray(a, a + b); return String.fromCharCode.apply(null, view); } +function syscall_write([fd, ptr, len]) { + let s = copystr(ptr, len); + switch (fd) { + case 1: process.stdout.write(s); break; + case 2: process.stderr.write(s); break; + } +} + +function syscall_exit([code]) { + process.exit(code); +} + +function syscall_args(params) { + let [ptr, len] = params; + + // Calculate total required buffer size + let totalLen = -1; + for (let i = 2; i < process.argv.length; ++i) { + totalLen += Buffer.byteLength(process.argv[i]) + 1; + } + if (totalLen < 0) { totalLen = 0; } + params[2] = totalLen; + + // If buffer is large enough, copy data + if (len >= totalLen) { + let view = new Uint8Array(memory.buffer); + for (let i = 2; i < process.argv.length; ++i) { + let value = process.argv[i]; + Buffer.from(value).copy(view, ptr); + ptr += Buffer.byteLength(process.argv[i]) + 1; + } + } +} + +function syscall_getenv(params) { + let [keyPtr, keyLen, valuePtr, valueLen] = params; + + let key = copystr(keyPtr, keyLen); + let value = process.env[key]; + + if (value == null) { + params[4] = 0xFFFFFFFF; + } else { + let view = new Uint8Array(memory.buffer); + let totalLen = Buffer.byteLength(value); + params[4] = totalLen; + if (valueLen >= totalLen) { + Buffer.from(value).copy(view, valuePtr); + } + } +} + +function syscall_time(params) { + let t = Date.now(); + let secs = Math.floor(t / 1000); + let millis = t % 1000; + 
params[1] = Math.floor(secs / 0x100000000); + params[2] = secs % 0x100000000; + params[3] = Math.floor(millis * 1000000); +} + let imports = {}; imports.env = { // These are generated by LLVM itself for various intrinsic calls. Hopefully @@ -45,71 +107,34 @@ imports.env = { exp2f: function(x) { return Math.pow(2, x); }, ldexp: function(x, y) { return x * Math.pow(2, y); }, ldexpf: function(x, y) { return x * Math.pow(2, y); }, + sin: Math.sin, + sinf: Math.sin, + cos: Math.cos, + cosf: Math.cos, + log: Math.log, + log2: Math.log2, log10: Math.log10, log10f: Math.log10, - // These are called in src/libstd/sys/wasm/stdio.rs and are used when - // debugging is enabled. - rust_wasm_write_stdout: function(a, b) { - let s = copystr(a, b); - if (s !== null) { - process.stdout.write(s); + rust_wasm_syscall: function(index, data) { + switch (index) { + case 1: syscall_write(viewstruct(data, 3)); return true; + case 2: syscall_exit(viewstruct(data, 1)); return true; + case 3: syscall_args(viewstruct(data, 3)); return true; + case 4: syscall_getenv(viewstruct(data, 5)); return true; + case 6: syscall_time(viewstruct(data, 4)); return true; + default: + console.log("Unsupported syscall: " + index); + return false; } - }, - rust_wasm_write_stderr: function(a, b) { - let s = copystr(a, b); - if (s !== null) { - process.stderr.write(s); - } - }, - - // These are called in src/libstd/sys/wasm/args.rs and are used when - // debugging is enabled. - rust_wasm_args_count: function() { - if (memory === null) - return 0; - return process.argv.length - 2; - }, - rust_wasm_args_arg_size: function(i) { - return Buffer.byteLength(process.argv[i + 2]); - }, - rust_wasm_args_arg_fill: function(idx, ptr) { - let arg = process.argv[idx + 2]; - let view = new Uint8Array(memory.buffer); - Buffer.from(arg).copy(view, ptr); - }, - - // These are called in src/libstd/sys/wasm/os.rs and are used when - // debugging is enabled. 
- rust_wasm_getenv_len: function(a, b) { - let key = copystr(a, b); - if (key === null) { - return -1; - } - if (!(key in process.env)) { - return -1; - } - return Buffer.byteLength(process.env[key]); - }, - rust_wasm_getenv_data: function(a, b, ptr) { - let key = copystr(a, b); - let value = process.env[key]; - let view = new Uint8Array(memory.buffer); - Buffer.from(value).copy(view, ptr); - }, + } }; -let module_imports = WebAssembly.Module.imports(m); - -for (var i = 0; i < module_imports.length; i++) { - let imp = module_imports[i]; - if (imp.module != 'env') { - continue - } - if (imp.name == 'memory' && imp.kind == 'memory') { - memory = new WebAssembly.Memory({initial: 20}); - imports.env.memory = memory; - } -} - let instance = new WebAssembly.Instance(m, imports); +memory = instance.exports.memory; +try { + instance.exports.main(); +} catch (e) { + console.error(e); + process.exit(101); +} diff --git a/src/etc/ziggurat_tables.py b/src/etc/ziggurat_tables.py deleted file mode 100755 index 762f9565b780..000000000000 --- a/src/etc/ziggurat_tables.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -# This creates the tables used for distributions implemented using the -# ziggurat algorithm in `rand::distributions;`. They are -# (basically) the tables as used in the ZIGNOR variant (Doornik 2005). -# They are changed rarely, so the generated file should be checked in -# to git. -# -# It creates 3 tables: X as in the paper, F which is f(x_i), and -# F_DIFF which is f(x_i) - f(x_{i-1}). The latter two are just cached -# values which is not done in that paper (but is done in other -# variants). 
Note that the adZigR table is unnecessary because of -# algebra. -# -# It is designed to be compatible with Python 2 and 3. - -from math import exp, sqrt, log, floor -import random - -# The order should match the return value of `tables` -TABLE_NAMES = ['X', 'F'] - -# The actual length of the table is 1 more, to stop -# index-out-of-bounds errors. This should match the bitwise operation -# to find `i` in `zigurrat` in `libstd/rand/mod.rs`. Also the *_R and -# *_V constants below depend on this value. -TABLE_LEN = 256 - -# equivalent to `zigNorInit` in Doornik2005, but generalised to any -# distribution. r = dR, v = dV, f = probability density function, -# f_inv = inverse of f -def tables(r, v, f, f_inv): - # compute the x_i - xvec = [0]*(TABLE_LEN+1) - - xvec[0] = v / f(r) - xvec[1] = r - - for i in range(2, TABLE_LEN): - last = xvec[i-1] - xvec[i] = f_inv(v / last + f(last)) - - # cache the f's - fvec = [0]*(TABLE_LEN+1) - for i in range(TABLE_LEN+1): - fvec[i] = f(xvec[i]) - - return xvec, fvec - -# Distributions -# N(0, 1) -def norm_f(x): - return exp(-x*x/2.0) -def norm_f_inv(y): - return sqrt(-2.0*log(y)) - -NORM_R = 3.6541528853610088 -NORM_V = 0.00492867323399 - -NORM = tables(NORM_R, NORM_V, - norm_f, norm_f_inv) - -# Exp(1) -def exp_f(x): - return exp(-x) -def exp_f_inv(y): - return -log(y) - -EXP_R = 7.69711747013104972 -EXP_V = 0.0039496598225815571993 - -EXP = tables(EXP_R, EXP_V, - exp_f, exp_f_inv) - - -# Output the tables/constants/types - -def render_static(name, type, value): - # no space or - return 'pub static %s: %s =%s;\n' % (name, type, value) - -# static `name`: [`type`, .. `len(values)`] = -# [values[0], ..., values[3], -# values[4], ..., values[7], -# ... ]; -def render_table(name, values): - rows = [] - # 4 values on each row - for i in range(0, len(values), 4): - row = values[i:i+4] - rows.append(', '.join('%.18f' % f for f in row)) - - rendered = '\n [%s]' % ',\n '.join(rows) - return render_static(name, '[f64, .. 
%d]' % len(values), rendered) - - -with open('ziggurat_tables.rs', 'w') as f: - f.write('''// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Tables for distributions which are sampled using the ziggurat -// algorithm. Autogenerated by `ziggurat_tables.py`. - -pub type ZigTable = &\'static [f64, .. %d]; -''' % (TABLE_LEN + 1)) - for name, tables, r in [('NORM', NORM, NORM_R), - ('EXP', EXP, EXP_R)]: - f.write(render_static('ZIG_%s_R' % name, 'f64', ' %.18f' % r)) - for (tabname, table) in zip(TABLE_NAMES, tables): - f.write(render_table('ZIG_%s_%s' % (name, tabname), table)) diff --git a/src/grammar/parser-lalr.y b/src/grammar/parser-lalr.y index de1f96aac504..a7da69f65faf 100644 --- a/src/grammar/parser-lalr.y +++ b/src/grammar/parser-lalr.y @@ -1400,7 +1400,6 @@ nonblock_expr | BREAK lifetime { $$ = mk_node("ExprBreak", 1, $2); } | YIELD { $$ = mk_node("ExprYield", 0); } | YIELD expr { $$ = mk_node("ExprYield", 1, $2); } -| nonblock_expr LARROW expr { $$ = mk_node("ExprInPlace", 2, $1, $3); } | nonblock_expr '=' expr { $$ = mk_node("ExprAssign", 2, $1, $3); } | nonblock_expr SHLEQ expr { $$ = mk_node("ExprAssignShl", 2, $1, $3); } | nonblock_expr SHREQ expr { $$ = mk_node("ExprAssignShr", 2, $1, $3); } @@ -1463,7 +1462,6 @@ expr | BREAK ident { $$ = mk_node("ExprBreak", 1, $2); } | YIELD { $$ = mk_node("ExprYield", 0); } | YIELD expr { $$ = mk_node("ExprYield", 1, $2); } -| expr LARROW expr { $$ = mk_node("ExprInPlace", 2, $1, $3); } | expr '=' expr { $$ = mk_node("ExprAssign", 2, $1, $3); } | expr SHLEQ expr { $$ = mk_node("ExprAssignShl", 2, $1, $3); } | expr SHREQ expr { $$ = mk_node("ExprAssignShr", 2, $1, $3); } @@ -1527,7 +1525,6 @@ expr_nostruct | 
BREAK ident { $$ = mk_node("ExprBreak", 1, $2); } | YIELD { $$ = mk_node("ExprYield", 0); } | YIELD expr { $$ = mk_node("ExprYield", 1, $2); } -| expr_nostruct LARROW expr_nostruct { $$ = mk_node("ExprInPlace", 2, $1, $3); } | expr_nostruct '=' expr_nostruct { $$ = mk_node("ExprAssign", 2, $1, $3); } | expr_nostruct SHLEQ expr_nostruct { $$ = mk_node("ExprAssignShl", 2, $1, $3); } | expr_nostruct SHREQ expr_nostruct { $$ = mk_node("ExprAssignShr", 2, $1, $3); } diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml index 0a265ee1376a..1dad323769a0 100644 --- a/src/liballoc/Cargo.toml +++ b/src/liballoc/Cargo.toml @@ -2,6 +2,8 @@ authors = ["The Rust Project Developers"] name = "alloc" version = "0.0.0" +autotests = false +autobenches = false [lib] name = "alloc" @@ -9,10 +11,10 @@ path = "lib.rs" [dependencies] core = { path = "../libcore" } -std_unicode = { path = "../libstd_unicode" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } [dev-dependencies] -rand = "0.3" +rand = "0.4" [[test]] name = "collectionstests" @@ -21,3 +23,8 @@ path = "../liballoc/tests/lib.rs" [[bench]] name = "collectionsbenches" path = "../liballoc/benches/lib.rs" + +[[bench]] +name = "vec_deque_append_bench" +path = "../liballoc/benches/vec_deque_append.rs" +harness = false diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs new file mode 100644 index 000000000000..c69b2fb5e1c2 --- /dev/null +++ b/src/liballoc/alloc.rs @@ -0,0 +1,263 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
Memory allocation APIs + +#![stable(feature = "alloc_module", since = "1.28.0")] + +use core::intrinsics::{min_align_of_val, size_of_val}; +use core::ptr::{NonNull, Unique}; +use core::usize; + +#[stable(feature = "alloc_module", since = "1.28.0")] +#[doc(inline)] +pub use core::alloc::*; + +extern "Rust" { + #[allocator] + #[rustc_allocator_nounwind] + fn __rust_alloc(size: usize, align: usize) -> *mut u8; + #[rustc_allocator_nounwind] + fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize); + #[rustc_allocator_nounwind] + fn __rust_realloc(ptr: *mut u8, + old_size: usize, + align: usize, + new_size: usize) -> *mut u8; + #[rustc_allocator_nounwind] + fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8; +} + +/// The global memory allocator. +/// +/// This type implements the [`Alloc`] trait by forwarding calls +/// to the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Copy, Clone, Default, Debug)] +pub struct Global; + +/// Allocate memory with the global allocator. +/// +/// This function forwards calls to the [`GlobalAlloc::alloc`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `alloc` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::alloc`]. 
+/// +/// # Examples +/// +/// ``` +/// use std::alloc::{alloc, dealloc, Layout}; +/// +/// unsafe { +/// let layout = Layout::new::(); +/// let ptr = alloc(layout); +/// +/// *(ptr as *mut u16) = 42; +/// assert_eq!(*(ptr as *mut u16), 42); +/// +/// dealloc(ptr, layout); +/// } +/// ``` +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn alloc(layout: Layout) -> *mut u8 { + __rust_alloc(layout.size(), layout.align()) +} + +/// Deallocate memory with the global allocator. +/// +/// This function forwards calls to the [`GlobalAlloc::dealloc`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `dealloc` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::dealloc`]. +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) { + __rust_dealloc(ptr, layout.size(), layout.align()) +} + +/// Reallocate memory with the global allocator. +/// +/// This function forwards calls to the [`GlobalAlloc::realloc`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `realloc` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::realloc`]. +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + __rust_realloc(ptr, layout.size(), layout.align(), new_size) +} + +/// Allocate zero-initialized memory with the global allocator. 
+/// +/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method +/// of the allocator registered with the `#[global_allocator]` attribute +/// if there is one, or the `std` crate’s default. +/// +/// This function is expected to be deprecated in favor of the `alloc_zeroed` method +/// of the [`Global`] type when it and the [`Alloc`] trait become stable. +/// +/// # Safety +/// +/// See [`GlobalAlloc::alloc_zeroed`]. +/// +/// # Examples +/// +/// ``` +/// use std::alloc::{alloc_zeroed, dealloc, Layout}; +/// +/// unsafe { +/// let layout = Layout::new::(); +/// let ptr = alloc_zeroed(layout); +/// +/// assert_eq!(*(ptr as *mut u16), 0); +/// +/// dealloc(ptr, layout); +/// } +/// ``` +#[stable(feature = "global_alloc", since = "1.28.0")] +#[inline] +pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 { + __rust_alloc_zeroed(layout.size(), layout.align()) +} + +#[unstable(feature = "allocator_api", issue = "32838")] +unsafe impl Alloc for Global { + #[inline] + unsafe fn alloc(&mut self, layout: Layout) -> Result, AllocErr> { + NonNull::new(alloc(layout)).ok_or(AllocErr) + } + + #[inline] + unsafe fn dealloc(&mut self, ptr: NonNull, layout: Layout) { + dealloc(ptr.as_ptr(), layout) + } + + #[inline] + unsafe fn realloc(&mut self, + ptr: NonNull, + layout: Layout, + new_size: usize) + -> Result, AllocErr> + { + NonNull::new(realloc(ptr.as_ptr(), layout, new_size)).ok_or(AllocErr) + } + + #[inline] + unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result, AllocErr> { + NonNull::new(alloc_zeroed(layout)).ok_or(AllocErr) + } +} + +/// The allocator for unique pointers. +// This function must not unwind. If it does, MIR codegen will fail. 
+#[cfg(not(test))] +#[lang = "exchange_malloc"] +#[inline] +unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { + if size == 0 { + align as *mut u8 + } else { + let layout = Layout::from_size_align_unchecked(size, align); + let ptr = alloc(layout); + if !ptr.is_null() { + ptr + } else { + handle_alloc_error(layout) + } + } +} + +#[cfg_attr(not(test), lang = "box_free")] +#[inline] +pub(crate) unsafe fn box_free(ptr: Unique) { + let ptr = ptr.as_ptr(); + let size = size_of_val(&*ptr); + let align = min_align_of_val(&*ptr); + // We do not allocate for Box when T is ZST, so deallocation is also not necessary. + if size != 0 { + let layout = Layout::from_size_align_unchecked(size, align); + dealloc(ptr as *mut u8, layout); + } +} + +/// Abort on memory allocation error or failure. +/// +/// Callers of memory allocation APIs wishing to abort computation +/// in response to an allocation error are encouraged to call this function, +/// rather than directly invoking `panic!` or similar. +/// +/// The default behavior of this function is to print a message to standard error +/// and abort the process. +/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`]. +/// +/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html +/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html +#[stable(feature = "global_alloc", since = "1.28.0")] +#[rustc_allocator_nounwind] +pub fn handle_alloc_error(layout: Layout) -> ! 
{ + #[allow(improper_ctypes)] + extern "Rust" { + #[lang = "oom"] + fn oom_impl(layout: Layout) -> !; + } + unsafe { oom_impl(layout) } +} + +#[cfg(test)] +mod tests { + extern crate test; + use self::test::Bencher; + use boxed::Box; + use alloc::{Global, Alloc, Layout, handle_alloc_error}; + + #[test] + fn allocate_zeroed() { + unsafe { + let layout = Layout::from_size_align(1024, 1).unwrap(); + let ptr = Global.alloc_zeroed(layout.clone()) + .unwrap_or_else(|_| handle_alloc_error(layout)); + + let mut i = ptr.cast::().as_ptr(); + let end = i.offset(layout.size() as isize); + while i < end { + assert_eq!(*i, 0); + i = i.offset(1); + } + Global.dealloc(ptr, layout); + } + } + + #[bench] + fn alloc_owned_small(b: &mut Bencher) { + b.iter(|| { + let _: Box<_> = box 10; + }) + } +} diff --git a/src/liballoc/allocator.rs b/src/liballoc/allocator.rs deleted file mode 100644 index c2a8f5f8ff95..000000000000 --- a/src/liballoc/allocator.rs +++ /dev/null @@ -1,1064 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![unstable(feature = "allocator_api", - reason = "the precise API and guarantees it provides may be tweaked \ - slightly, especially to possibly take into account the \ - types being stored to make room for a future \ - tracing garbage collector", - issue = "32838")] - -use core::cmp; -use core::fmt; -use core::mem; -use core::usize; -use core::ptr::{self, Unique}; - -/// Represents the combination of a starting address and -/// a total capacity of the returned block. -#[derive(Debug)] -pub struct Excess(pub *mut u8, pub usize); - -fn size_align() -> (usize, usize) { - (mem::size_of::(), mem::align_of::()) -} - -/// Layout of a block of memory. 
-/// -/// An instance of `Layout` describes a particular layout of memory. -/// You build a `Layout` up as an input to give to an allocator. -/// -/// All layouts have an associated non-negative size and a -/// power-of-two alignment. -/// -/// (Note however that layouts are *not* required to have positive -/// size, even though many allocators require that all memory -/// requests have positive size. A caller to the `Alloc::alloc` -/// method must either ensure that conditions like this are met, or -/// use specific allocators with looser requirements.) -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Layout { - // size of the requested block of memory, measured in bytes. - size: usize, - - // alignment of the requested block of memory, measured in bytes. - // we ensure that this is always a power-of-two, because API's - // like `posix_memalign` require it and it is a reasonable - // constraint to impose on Layout constructors. - // - // (However, we do not analogously require `align >= sizeof(void*)`, - // even though that is *also* a requirement of `posix_memalign`.) - align: usize, -} - - -// FIXME: audit default implementations for overflow errors, -// (potentially switching to overflowing_add and -// overflowing_mul as necessary). - -impl Layout { - /// Constructs a `Layout` from a given `size` and `align`, - /// or returns `None` if any of the following conditions - /// are not met: - /// - /// * `align` must be a power of two, - /// - /// * `align` must not exceed 231 (i.e. `1 << 31`), - /// - /// * `size`, when rounded up to the nearest multiple of `align`, - /// must not overflow (i.e. the rounded value must be less than - /// `usize::MAX`). - #[inline] - pub fn from_size_align(size: usize, align: usize) -> Option { - if !align.is_power_of_two() { - return None; - } - - if align > (1 << 31) { - return None; - } - - // (power-of-two implies align != 0.) 
- - // Rounded up size is: - // size_rounded_up = (size + align - 1) & !(align - 1); - // - // We know from above that align != 0. If adding (align - 1) - // does not overflow, then rounding up will be fine. - // - // Conversely, &-masking with !(align - 1) will subtract off - // only low-order-bits. Thus if overflow occurs with the sum, - // the &-mask cannot subtract enough to undo that overflow. - // - // Above implies that checking for summation overflow is both - // necessary and sufficient. - if size > usize::MAX - (align - 1) { - return None; - } - - unsafe { - Some(Layout::from_size_align_unchecked(size, align)) - } - } - - /// Creates a layout, bypassing all checks. - /// - /// # Safety - /// - /// This function is unsafe as it does not verify that `align` is - /// a power-of-two that is also less than or equal to 231, nor - /// that `size` aligned to `align` fits within the address space - /// (i.e. the `Layout::from_size_align` preconditions). - #[inline] - pub unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Layout { - Layout { size: size, align: align } - } - - /// The minimum size in bytes for a memory block of this layout. - #[inline] - pub fn size(&self) -> usize { self.size } - - /// The minimum byte alignment for a memory block of this layout. - #[inline] - pub fn align(&self) -> usize { self.align } - - /// Constructs a `Layout` suitable for holding a value of type `T`. - pub fn new() -> Self { - let (size, align) = size_align::(); - Layout::from_size_align(size, align).unwrap() - } - - /// Produces layout describing a record that could be used to - /// allocate backing structure for `T` (which could be a trait - /// or other unsized type like a slice). 
- pub fn for_value(t: &T) -> Self { - let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); - Layout::from_size_align(size, align).unwrap() - } - - /// Creates a layout describing the record that can hold a value - /// of the same layout as `self`, but that also is aligned to - /// alignment `align` (measured in bytes). - /// - /// If `self` already meets the prescribed alignment, then returns - /// `self`. - /// - /// Note that this method does not add any padding to the overall - /// size, regardless of whether the returned layout has a different - /// alignment. In other words, if `K` has size 16, `K.align_to(32)` - /// will *still* have size 16. - /// - /// # Panics - /// - /// Panics if the combination of `self.size` and the given `align` - /// violates the conditions listed in `from_size_align`. - #[inline] - pub fn align_to(&self, align: usize) -> Self { - Layout::from_size_align(self.size, cmp::max(self.align, align)).unwrap() - } - - /// Returns the amount of padding we must insert after `self` - /// to ensure that the following address will satisfy `align` - /// (measured in bytes). - /// - /// E.g. if `self.size` is 9, then `self.padding_needed_for(4)` - /// returns 3, because that is the minimum number of bytes of - /// padding required to get a 4-aligned address (assuming that the - /// corresponding memory block starts at a 4-aligned address). - /// - /// The return value of this function has no meaning if `align` is - /// not a power-of-two. - /// - /// Note that the utility of the returned value requires `align` - /// to be less than or equal to the alignment of the starting - /// address for the whole allocated block of memory. One way to - /// satisfy this constraint is to ensure `align <= self.align`. 
- #[inline] - pub fn padding_needed_for(&self, align: usize) -> usize { - let len = self.size(); - - // Rounded up value is: - // len_rounded_up = (len + align - 1) & !(align - 1); - // and then we return the padding difference: `len_rounded_up - len`. - // - // We use modular arithmetic throughout: - // - // 1. align is guaranteed to be > 0, so align - 1 is always - // valid. - // - // 2. `len + align - 1` can overflow by at most `align - 1`, - // so the &-mask wth `!(align - 1)` will ensure that in the - // case of overflow, `len_rounded_up` will itself be 0. - // Thus the returned padding, when added to `len`, yields 0, - // which trivially satisfies the alignment `align`. - // - // (Of course, attempts to allocate blocks of memory whose - // size and padding overflow in the above manner should cause - // the allocator to yield an error anyway.) - - let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1); - return len_rounded_up.wrapping_sub(len); - } - - /// Creates a layout describing the record for `n` instances of - /// `self`, with a suitable amount of padding between each to - /// ensure that each instance is given its requested size and - /// alignment. On success, returns `(k, offs)` where `k` is the - /// layout of the array and `offs` is the distance between the start - /// of each element in the array. - /// - /// On arithmetic overflow, returns `None`. - #[inline] - pub fn repeat(&self, n: usize) -> Option<(Self, usize)> { - let padded_size = self.size.checked_add(self.padding_needed_for(self.align))?; - let alloc_size = padded_size.checked_mul(n)?; - - // We can assume that `self.align` is a power-of-two that does - // not exceed 231. Furthermore, `alloc_size` has already been - // rounded up to a multiple of `self.align`; therefore, the - // call to `Layout::from_size_align` below should never panic. 
- Some((Layout::from_size_align(alloc_size, self.align).unwrap(), padded_size)) - } - - /// Creates a layout describing the record for `self` followed by - /// `next`, including any necessary padding to ensure that `next` - /// will be properly aligned. Note that the result layout will - /// satisfy the alignment properties of both `self` and `next`. - /// - /// Returns `Some((k, offset))`, where `k` is layout of the concatenated - /// record and `offset` is the relative location, in bytes, of the - /// start of the `next` embedded within the concatenated record - /// (assuming that the record itself starts at offset 0). - /// - /// On arithmetic overflow, returns `None`. - pub fn extend(&self, next: Self) -> Option<(Self, usize)> { - let new_align = cmp::max(self.align, next.align); - let realigned = Layout::from_size_align(self.size, new_align)?; - - let pad = realigned.padding_needed_for(next.align); - - let offset = self.size.checked_add(pad)?; - let new_size = offset.checked_add(next.size)?; - - let layout = Layout::from_size_align(new_size, new_align)?; - Some((layout, offset)) - } - - /// Creates a layout describing the record for `n` instances of - /// `self`, with no padding between each instance. - /// - /// Note that, unlike `repeat`, `repeat_packed` does not guarantee - /// that the repeated instances of `self` will be properly - /// aligned, even if a given instance of `self` is properly - /// aligned. In other words, if the layout returned by - /// `repeat_packed` is used to allocate an array, it is not - /// guaranteed that all elements in the array will be properly - /// aligned. - /// - /// On arithmetic overflow, returns `None`. - pub fn repeat_packed(&self, n: usize) -> Option { - let size = self.size().checked_mul(n)?; - Layout::from_size_align(size, self.align) - } - - /// Creates a layout describing the record for `self` followed by - /// `next` with no additional padding between the two. 
Since no - /// padding is inserted, the alignment of `next` is irrelevant, - /// and is not incorporated *at all* into the resulting layout. - /// - /// Returns `(k, offset)`, where `k` is layout of the concatenated - /// record and `offset` is the relative location, in bytes, of the - /// start of the `next` embedded within the concatenated record - /// (assuming that the record itself starts at offset 0). - /// - /// (The `offset` is always the same as `self.size()`; we use this - /// signature out of convenience in matching the signature of - /// `extend`.) - /// - /// On arithmetic overflow, returns `None`. - pub fn extend_packed(&self, next: Self) -> Option<(Self, usize)> { - let new_size = self.size().checked_add(next.size())?; - let layout = Layout::from_size_align(new_size, self.align)?; - Some((layout, self.size())) - } - - /// Creates a layout describing the record for a `[T; n]`. - /// - /// On arithmetic overflow, returns `None`. - pub fn array(n: usize) -> Option { - Layout::new::() - .repeat(n) - .map(|(k, offs)| { - debug_assert!(offs == mem::size_of::()); - k - }) - } -} - -/// The `AllocErr` error specifies whether an allocation failure is -/// specifically due to resource exhaustion or if it is due to -/// something wrong when combining the given input arguments with this -/// allocator. -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum AllocErr { - /// Error due to hitting some resource limit or otherwise running - /// out of memory. This condition strongly implies that *some* - /// series of deallocations would allow a subsequent reissuing of - /// the original allocation request to succeed. - Exhausted { request: Layout }, - - /// Error due to allocator being fundamentally incapable of - /// satisfying the original request. This condition implies that - /// such an allocation request will never succeed on the given - /// allocator, regardless of environment, memory pressure, or - /// other contextual conditions. 
- /// - /// For example, an allocator that does not support requests for - /// large memory blocks might return this error variant. - Unsupported { details: &'static str }, -} - -impl AllocErr { - #[inline] - pub fn invalid_input(details: &'static str) -> Self { - AllocErr::Unsupported { details: details } - } - #[inline] - pub fn is_memory_exhausted(&self) -> bool { - if let AllocErr::Exhausted { .. } = *self { true } else { false } - } - #[inline] - pub fn is_request_unsupported(&self) -> bool { - if let AllocErr::Unsupported { .. } = *self { true } else { false } - } - #[inline] - pub fn description(&self) -> &str { - match *self { - AllocErr::Exhausted { .. } => "allocator memory exhausted", - AllocErr::Unsupported { .. } => "unsupported allocator request", - } - } -} - -// (we need this for downstream impl of trait Error) -impl fmt::Display for AllocErr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} - -/// The `CannotReallocInPlace` error is used when `grow_in_place` or -/// `shrink_in_place` were unable to reuse the given memory block for -/// a requested layout. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct CannotReallocInPlace; - -impl CannotReallocInPlace { - pub fn description(&self) -> &str { - "cannot reallocate allocator's memory in place" - } -} - -// (we need this for downstream impl of trait Error) -impl fmt::Display for CannotReallocInPlace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} - -/// An implementation of `Alloc` can allocate, reallocate, and -/// deallocate arbitrary blocks of data described via `Layout`. -/// -/// Some of the methods require that a memory block be *currently -/// allocated* via an allocator. 
This means that: -/// -/// * the starting address for that memory block was previously -/// returned by a previous call to an allocation method (`alloc`, -/// `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or -/// reallocation method (`realloc`, `realloc_excess`, or -/// `realloc_array`), and -/// -/// * the memory block has not been subsequently deallocated, where -/// blocks are deallocated either by being passed to a deallocation -/// method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being -/// passed to a reallocation method (see above) that returns `Ok`. -/// -/// A note regarding zero-sized types and zero-sized layouts: many -/// methods in the `Alloc` trait state that allocation requests -/// must be non-zero size, or else undefined behavior can result. -/// -/// * However, some higher-level allocation methods (`alloc_one`, -/// `alloc_array`) are well-defined on zero-sized types and can -/// optionally support them: it is left up to the implementor -/// whether to return `Err`, or to return `Ok` with some pointer. -/// -/// * If an `Alloc` implementation chooses to return `Ok` in this -/// case (i.e. the pointer denotes a zero-sized inaccessible block) -/// then that returned pointer must be considered "currently -/// allocated". On such an allocator, *all* methods that take -/// currently-allocated pointers as inputs must accept these -/// zero-sized pointers, *without* causing undefined behavior. -/// -/// * In other words, if a zero-sized pointer can flow out of an -/// allocator, then that allocator must likewise accept that pointer -/// flowing back into its deallocation and reallocation methods. -/// -/// Some of the methods require that a layout *fit* a memory block. -/// What it means for a layout to "fit" a memory block means (or -/// equivalently, for a memory block to "fit" a layout) is that the -/// following two conditions must hold: -/// -/// 1. The block's starting address must be aligned to `layout.align()`. -/// -/// 2. 
The block's size must fall in the range `[use_min, use_max]`, where: -/// -/// * `use_min` is `self.usable_size(layout).0`, and -/// -/// * `use_max` is the capacity that was (or would have been) -/// returned when (if) the block was allocated via a call to -/// `alloc_excess` or `realloc_excess`. -/// -/// Note that: -/// -/// * the size of the layout most recently used to allocate the block -/// is guaranteed to be in the range `[use_min, use_max]`, and -/// -/// * a lower-bound on `use_max` can be safely approximated by a call to -/// `usable_size`. -/// -/// * if a layout `k` fits a memory block (denoted by `ptr`) -/// currently allocated via an allocator `a`, then it is legal to -/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`. -/// -/// # Unsafety -/// -/// The `Alloc` trait is an `unsafe` trait for a number of reasons, and -/// implementors must ensure that they adhere to these contracts: -/// -/// * Pointers returned from allocation functions must point to valid memory and -/// retain their validity until at least the instance of `Alloc` is dropped -/// itself. -/// -/// * It's undefined behavior if global allocators unwind. This restriction may -/// be lifted in the future, but currently a panic from any of these -/// functions may lead to memory unsafety. Note that as of the time of this -/// writing allocators *not* intending to be global allocators can still panic -/// in their implementation without violating memory safety. -/// -/// * `Layout` queries and calculations in general must be correct. Callers of -/// this trait are allowed to rely on the contracts defined on each method, -/// and implementors must ensure such contracts remain true. -/// -/// Note that this list may get tweaked over time as clarifications are made in -/// the future. Additionally global allocators may gain unique requirements for -/// how to safely implement one in the future as well. 
-pub unsafe trait Alloc { - - // (Note: existing allocators have unspecified but well-defined - // behavior in response to a zero size allocation request ; - // e.g. in C, `malloc` of 0 will either return a null pointer or a - // unique pointer, but will not have arbitrary undefined - // behavior. Rust should consider revising the alloc::heap crate - // to reflect this reality.) - - /// Returns a pointer meeting the size and alignment guarantees of - /// `layout`. - /// - /// If this method returns an `Ok(addr)`, then the `addr` returned - /// will be non-null address pointing to a block of storage - /// suitable for holding an instance of `layout`. - /// - /// The returned block of storage may or may not have its contents - /// initialized. (Extension subtraits might restrict this - /// behavior, e.g. to ensure initialization to particular sets of - /// bit patterns.) - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure that `layout` has non-zero size. - /// - /// (Extension subtraits might provide more specific bounds on - /// behavior, e.g. guarantee a sentinel address or a null pointer - /// in response to a zero-size allocation request.) - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints. - /// - /// Implementations are encouraged to return `Err` on memory - /// exhaustion rather than panicking or aborting, but this is not - /// a strict requirement. (Specifically: it is *legal* to - /// implement this trait atop an underlying native allocation - /// library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. 
- unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>; - - /// Deallocate the memory referenced by `ptr`. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must denote a block of memory currently allocated via - /// this allocator, - /// - /// * `layout` must *fit* that block of memory, - /// - /// * In addition to fitting the block of memory `layout`, the - /// alignment of the `layout` must match the alignment used - /// to allocate that block of memory. - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout); - - /// Allocator-specific method for signaling an out-of-memory - /// condition. - /// - /// `oom` aborts the thread or process, optionally performing - /// cleanup or logging diagnostic information before panicking or - /// aborting. - /// - /// `oom` is meant to be used by clients unable to cope with an - /// unsatisfied allocation request (signaled by an error such as - /// `AllocErr::Exhausted`), and wish to abandon computation rather - /// than attempt to recover locally. Such clients should pass the - /// signaling error value back into `oom`, where the allocator - /// may incorporate that error value into its diagnostic report - /// before aborting. - /// - /// Implementations of the `oom` method are discouraged from - /// infinitely regressing in nested calls to `oom`. In - /// practice this means implementors should eschew allocating, - /// especially from `self` (directly or indirectly). - /// - /// Implementations of the allocation and reallocation methods - /// (e.g. `alloc`, `alloc_one`, `realloc`) are discouraged from - /// panicking (or aborting) in the event of memory exhaustion; - /// instead they should return an appropriate error from the - /// invoked method, and let the client decide whether to invoke - /// this `oom` method in response. - fn oom(&mut self, _: AllocErr) -> ! 
{ - unsafe { ::core::intrinsics::abort() } - } - - // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS == - // usable_size - - /// Returns bounds on the guaranteed usable size of a successful - /// allocation created with the specified `layout`. - /// - /// In particular, if one has a memory block allocated via a given - /// allocator `a` and layout `k` where `a.usable_size(k)` returns - /// `(l, u)`, then one can pass that block to `a.dealloc()` with a - /// layout in the size range [l, u]. - /// - /// (All implementors of `usable_size` must ensure that - /// `l <= k.size() <= u`) - /// - /// Both the lower- and upper-bounds (`l` and `u` respectively) - /// are provided, because an allocator based on size classes could - /// misbehave if one attempts to deallocate a block without - /// providing a correct value for its size (i.e., one within the - /// range `[l, u]`). - /// - /// Clients who wish to make use of excess capacity are encouraged - /// to use the `alloc_excess` and `realloc_excess` instead, as - /// this method is constrained to report conservative values that - /// serve as valid bounds for *all possible* allocation method - /// calls. - /// - /// However, for clients that do not wish to track the capacity - /// returned by `alloc_excess` locally, this method is likely to - /// produce useful results. - #[inline] - fn usable_size(&self, layout: &Layout) -> (usize, usize) { - (layout.size(), layout.size()) - } - - // == METHODS FOR MEMORY REUSE == - // realloc. alloc_excess, realloc_excess - - /// Returns a pointer suitable for holding data described by - /// `new_layout`, meeting its size and alignment guarantees. To - /// accomplish this, this may extend or shrink the allocation - /// referenced by `ptr` to fit `new_layout`. - /// - /// If this returns `Ok`, then ownership of the memory block - /// referenced by `ptr` has been transferred to this - /// allocator. 
The memory may or may not have been freed, and - /// should be considered unusable (unless of course it was - /// transferred back to the caller again via the return value of - /// this method). - /// - /// If this method returns `Err`, then ownership of the memory - /// block has not been transferred to this allocator, and the - /// contents of the memory block are unaltered. - /// - /// For best results, `new_layout` should not impose a different - /// alignment constraint than `layout`. (In other words, - /// `new_layout.align()` should equal `layout.align()`.) However, - /// behavior is well-defined (though underspecified) when this - /// constraint is violated; further discussion below. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * `layout` must *fit* the `ptr` (see above). (The `new_layout` - /// argument need not fit it.) - /// - /// * `new_layout` must have size greater than zero. - /// - /// * the alignment of `new_layout` is non-zero. - /// - /// (Extension subtraits might provide more specific bounds on - /// behavior, e.g. guarantee a sentinel address or a null pointer - /// in response to a zero-size allocation request.) - /// - /// # Errors - /// - /// Returns `Err` only if `new_layout` does not match the - /// alignment of `layout`, or does not meet the allocator's size - /// and alignment constraints of the allocator, or if reallocation - /// otherwise fails. - /// - /// (Note the previous sentence did not say "if and only if" -- in - /// particular, an implementation of this method *can* return `Ok` - /// if `new_layout.align() != old_layout.align()`; or it can - /// return `Err` in that scenario, depending on whether this - /// allocator can dynamically adjust the alignment constraint for - /// the block.) 
- /// - /// Implementations are encouraged to return `Err` on memory - /// exhaustion rather than panicking or aborting, but this is not - /// a strict requirement. (Specifically: it is *legal* to - /// implement this trait atop an underlying native allocation - /// library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an - /// reallocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn realloc(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<*mut u8, AllocErr> { - let new_size = new_layout.size(); - let old_size = layout.size(); - let aligns_match = layout.align == new_layout.align; - - if new_size >= old_size && aligns_match { - if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_layout.clone()) { - return Ok(ptr); - } - } else if new_size < old_size && aligns_match { - if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_layout.clone()) { - return Ok(ptr); - } - } - - // otherwise, fall back on alloc + copy + dealloc. - let result = self.alloc(new_layout); - if let Ok(new_ptr) = result { - ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size)); - self.dealloc(ptr, layout); - } - result - } - - /// Behaves like `alloc`, but also ensures that the contents - /// are set to zero before being returned. - /// - /// # Safety - /// - /// This function is unsafe for the same reasons that `alloc` is. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints, just as in `alloc`. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. 
- unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let size = layout.size(); - let p = self.alloc(layout); - if let Ok(p) = p { - ptr::write_bytes(p, 0, size); - } - p - } - - /// Behaves like `alloc`, but also returns the whole size of - /// the returned block. For some `layout` inputs, like arrays, this - /// may include extra storage usable for additional data. - /// - /// # Safety - /// - /// This function is unsafe for the same reasons that `alloc` is. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints, just as in `alloc`. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn alloc_excess(&mut self, layout: Layout) -> Result { - let usable_size = self.usable_size(&layout); - self.alloc(layout).map(|p| Excess(p, usable_size.1)) - } - - /// Behaves like `realloc`, but also returns the whole size of - /// the returned block. For some `layout` inputs, like arrays, this - /// may include extra storage usable for additional data. - /// - /// # Safety - /// - /// This function is unsafe for the same reasons that `realloc` is. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `layout` does not meet allocator's size or alignment - /// constraints, just as in `realloc`. - /// - /// Clients wishing to abort computation in response to an - /// reallocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. 
- unsafe fn realloc_excess(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result { - let usable_size = self.usable_size(&new_layout); - self.realloc(ptr, layout, new_layout) - .map(|p| Excess(p, usable_size.1)) - } - - /// Attempts to extend the allocation referenced by `ptr` to fit `new_layout`. - /// - /// If this returns `Ok`, then the allocator has asserted that the - /// memory block referenced by `ptr` now fits `new_layout`, and thus can - /// be used to carry data of that layout. (The allocator is allowed to - /// expend effort to accomplish this, such as extending the memory block to - /// include successor blocks, or virtual memory tricks.) - /// - /// Regardless of what this method returns, ownership of the - /// memory block referenced by `ptr` has not been transferred, and - /// the contents of the memory block are unaltered. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * `layout` must *fit* the `ptr` (see above); note the - /// `new_layout` argument need not fit it, - /// - /// * `new_layout.size()` must not be less than `layout.size()`, - /// - /// * `new_layout.align()` must equal `layout.align()`. - /// - /// # Errors - /// - /// Returns `Err(CannotReallocInPlace)` when the allocator is - /// unable to assert that the memory block referenced by `ptr` - /// could fit `layout`. - /// - /// Note that one cannot pass `CannotReallocInPlace` to the `oom` - /// method; clients are expected either to be able to recover from - /// `grow_in_place` failures without aborting, or to fall back on - /// another reallocation method before resorting to an abort. 
- unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - let _ = ptr; // this default implementation doesn't care about the actual address. - debug_assert!(new_layout.size >= layout.size); - debug_assert!(new_layout.align == layout.align); - let (_l, u) = self.usable_size(&layout); - // _l <= layout.size() [guaranteed by usable_size()] - // layout.size() <= new_layout.size() [required by this method] - if new_layout.size <= u { - return Ok(()); - } else { - return Err(CannotReallocInPlace); - } - } - - /// Attempts to shrink the allocation referenced by `ptr` to fit `new_layout`. - /// - /// If this returns `Ok`, then the allocator has asserted that the - /// memory block referenced by `ptr` now fits `new_layout`, and - /// thus can only be used to carry data of that smaller - /// layout. (The allocator is allowed to take advantage of this, - /// carving off portions of the block for reuse elsewhere.) The - /// truncated contents of the block within the smaller layout are - /// unaltered, and ownership of block has not been transferred. - /// - /// If this returns `Err`, then the memory block is considered to - /// still represent the original (larger) `layout`. None of the - /// block has been carved off for reuse elsewhere, ownership of - /// the memory block has not been transferred, and the contents of - /// the memory block are unaltered. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * `layout` must *fit* the `ptr` (see above); note the - /// `new_layout` argument need not fit it, - /// - /// * `new_layout.size()` must not be greater than `layout.size()` - /// (and must be greater than zero), - /// - /// * `new_layout.align()` must equal `layout.align()`. 
- /// - /// # Errors - /// - /// Returns `Err(CannotReallocInPlace)` when the allocator is - /// unable to assert that the memory block referenced by `ptr` - /// could fit `layout`. - /// - /// Note that one cannot pass `CannotReallocInPlace` to the `oom` - /// method; clients are expected either to be able to recover from - /// `shrink_in_place` failures without aborting, or to fall back - /// on another reallocation method before resorting to an abort. - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - let _ = ptr; // this default implementation doesn't care about the actual address. - debug_assert!(new_layout.size <= layout.size); - debug_assert!(new_layout.align == layout.align); - let (l, _u) = self.usable_size(&layout); - // layout.size() <= _u [guaranteed by usable_size()] - // new_layout.size() <= layout.size() [required by this method] - if l <= new_layout.size { - return Ok(()); - } else { - return Err(CannotReallocInPlace); - } - } - - - // == COMMON USAGE PATTERNS == - // alloc_one, dealloc_one, alloc_array, realloc_array. dealloc_array - - /// Allocates a block suitable for holding an instance of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// The returned block is suitable for passing to the - /// `alloc`/`realloc` methods of this allocator. - /// - /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` - /// must be considered "currently allocated" and must be - /// acceptable input to methods such as `realloc` or `dealloc`, - /// *even if* `T` is a zero-sized type. In other words, if your - /// `Alloc` implementation overrides this method in a manner - /// that can return a zero-sized `ptr`, then all reallocation and - /// deallocation methods need to be similarly overridden to accept - /// such values as input. 
- /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `T` does not meet allocator's size or alignment constraints. - /// - /// For zero-sized `T`, may return either of `Ok` or `Err`, but - /// will *not* yield undefined behavior. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - fn alloc_one(&mut self) -> Result, AllocErr> - where Self: Sized - { - let k = Layout::new::(); - if k.size() > 0 { - unsafe { self.alloc(k).map(|p| Unique::new_unchecked(p as *mut T)) } - } else { - Err(AllocErr::invalid_input("zero-sized type invalid for alloc_one")) - } - } - - /// Deallocates a block suitable for holding an instance of `T`. - /// - /// The given block must have been produced by this allocator, - /// and must be suitable for storing a `T` (in terms of alignment - /// as well as minimum and maximum size); otherwise yields - /// undefined behavior. - /// - /// Captures a common usage pattern for allocators. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure both: - /// - /// * `ptr` must denote a block of memory currently allocated via this allocator - /// - /// * the layout of `T` must *fit* that block of memory. - unsafe fn dealloc_one(&mut self, ptr: Unique) - where Self: Sized - { - let raw_ptr = ptr.as_ptr() as *mut u8; - let k = Layout::new::(); - if k.size() > 0 { - self.dealloc(raw_ptr, k); - } - } - - /// Allocates a block suitable for holding `n` instances of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// The returned block is suitable for passing to the - /// `alloc`/`realloc` methods of this allocator. 
- /// - /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` - /// must be considered "currently allocated" and must be - /// acceptable input to methods such as `realloc` or `dealloc`, - /// *even if* `T` is a zero-sized type. In other words, if your - /// `Alloc` implementation overrides this method in a manner - /// that can return a zero-sized `ptr`, then all reallocation and - /// deallocation methods need to be similarly overridden to accept - /// such values as input. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `[T; n]` does not meet allocator's size or alignment - /// constraints. - /// - /// For zero-sized `T` or `n == 0`, may return either of `Ok` or - /// `Err`, but will *not* yield undefined behavior. - /// - /// Always returns `Err` on arithmetic overflow. - /// - /// Clients wishing to abort computation in response to an - /// allocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - fn alloc_array(&mut self, n: usize) -> Result, AllocErr> - where Self: Sized - { - match Layout::array::(n) { - Some(ref layout) if layout.size() > 0 => { - unsafe { - self.alloc(layout.clone()) - .map(|p| { - Unique::new_unchecked(p as *mut T) - }) - } - } - _ => Err(AllocErr::invalid_input("invalid layout for alloc_array")), - } - } - - /// Reallocates a block previously suitable for holding `n_old` - /// instances of `T`, returning a block suitable for holding - /// `n_new` instances of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// The returned block is suitable for passing to the - /// `alloc`/`realloc` methods of this allocator. 
- /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure all of the following: - /// - /// * `ptr` must be currently allocated via this allocator, - /// - /// * the layout of `[T; n_old]` must *fit* that block of memory. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or - /// `[T; n_new]` does not meet allocator's size or alignment - /// constraints. - /// - /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or - /// `Err`, but will *not* yield undefined behavior. - /// - /// Always returns `Err` on arithmetic overflow. - /// - /// Clients wishing to abort computation in response to an - /// reallocation error are encouraged to call the allocator's `oom` - /// method, rather than directly invoking `panic!` or similar. - unsafe fn realloc_array(&mut self, - ptr: Unique, - n_old: usize, - n_new: usize) -> Result, AllocErr> - where Self: Sized - { - match (Layout::array::(n_old), Layout::array::(n_new), ptr.as_ptr()) { - (Some(ref k_old), Some(ref k_new), ptr) if k_old.size() > 0 && k_new.size() > 0 => { - self.realloc(ptr as *mut u8, k_old.clone(), k_new.clone()) - .map(|p|Unique::new_unchecked(p as *mut T)) - } - _ => { - Err(AllocErr::invalid_input("invalid layout for realloc_array")) - } - } - } - - /// Deallocates a block suitable for holding `n` instances of `T`. - /// - /// Captures a common usage pattern for allocators. - /// - /// # Safety - /// - /// This function is unsafe because undefined behavior can result - /// if the caller does not ensure both: - /// - /// * `ptr` must denote a block of memory currently allocated via this allocator - /// - /// * the layout of `[T; n]` must *fit* that block of memory. - /// - /// # Errors - /// - /// Returning `Err` indicates that either `[T; n]` or the given - /// memory block does not meet allocator's size or alignment - /// constraints. 
- /// - /// Always returns `Err` on arithmetic overflow. - unsafe fn dealloc_array(&mut self, ptr: Unique, n: usize) -> Result<(), AllocErr> - where Self: Sized - { - let raw_ptr = ptr.as_ptr() as *mut u8; - match Layout::array::(n) { - Some(ref k) if k.size() > 0 => { - Ok(self.dealloc(raw_ptr, k.clone())) - } - _ => { - Err(AllocErr::invalid_input("invalid layout for dealloc_array")) - } - } - } -} diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs deleted file mode 100644 index 185af8835d1e..000000000000 --- a/src/liballoc/arc.rs +++ /dev/null @@ -1,1864 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![stable(feature = "rust1", since = "1.0.0")] - -//! Thread-safe reference-counting pointers. -//! -//! See the [`Arc`][arc] documentation for more details. -//! -//! [arc]: struct.Arc.html - -use core::sync::atomic; -use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; -use core::borrow; -use core::fmt; -use core::cmp::Ordering; -use core::intrinsics::abort; -use core::mem::{self, align_of_val, size_of_val, uninitialized}; -use core::ops::Deref; -use core::ops::CoerceUnsized; -use core::ptr::{self, Shared}; -use core::marker::{Unsize, PhantomData}; -use core::hash::{Hash, Hasher}; -use core::{isize, usize}; -use core::convert::From; - -use heap::{Heap, Alloc, Layout, box_free}; -use boxed::Box; -use string::String; -use vec::Vec; - -/// A soft limit on the amount of references that may be made to an `Arc`. -/// -/// Going above this limit will abort your program (although not -/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. 
-const MAX_REFCOUNT: usize = (isize::MAX) as usize; - -/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically -/// Reference Counted'. -/// -/// The type `Arc` provides shared ownership of a value of type `T`, -/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces -/// a new pointer to the same value in the heap. When the last `Arc` -/// pointer to a given value is destroyed, the pointed-to value is -/// also destroyed. -/// -/// Shared references in Rust disallow mutation by default, and `Arc` is no -/// exception: you cannot generally obtain a mutable reference to something -/// inside an `Arc`. If you need to mutate through an `Arc`, use -/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic] -/// types. -/// -/// ## Thread Safety -/// -/// Unlike [`Rc`], `Arc` uses atomic operations for its reference -/// counting This means that it is thread-safe. The disadvantage is that -/// atomic operations are more expensive than ordinary memory accesses. If you -/// are not sharing reference-counted values between threads, consider using -/// [`Rc`] for lower overhead. [`Rc`] is a safe default, because the -/// compiler will catch any attempt to send an [`Rc`] between threads. -/// However, a library might choose `Arc` in order to give library consumers -/// more flexibility. -/// -/// `Arc` will implement [`Send`] and [`Sync`] as long as the `T` implements -/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an -/// `Arc` to make it thread-safe? This may be a bit counter-intuitive at -/// first: after all, isn't the point of `Arc` thread safety? The key is -/// this: `Arc` makes it thread safe to have multiple ownership of the same -/// data, but it doesn't add thread safety to its data. Consider -/// `Arc<`[`RefCell`]`>`. [`RefCell`] isn't [`Sync`], and if `Arc` was always -/// [`Send`], `Arc<`[`RefCell`]`>` would be as well. 
But then we'd have a problem: -/// [`RefCell`] is not thread safe; it keeps track of the borrowing count using -/// non-atomic operations. -/// -/// In the end, this means that you may need to pair `Arc` with some sort of -/// [`std::sync`] type, usually [`Mutex`][mutex]. -/// -/// ## Breaking cycles with `Weak` -/// -/// The [`downgrade`][downgrade] method can be used to create a non-owning -/// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d -/// to an `Arc`, but this will return [`None`] if the value has already been -/// dropped. -/// -/// A cycle between `Arc` pointers will never be deallocated. For this reason, -/// [`Weak`][weak] is used to break cycles. For example, a tree could have -/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak] -/// pointers from children back to their parents. -/// -/// # Cloning references -/// -/// Creating a new reference from an existing reference counted pointer is done using the -/// `Clone` trait implemented for [`Arc`][arc] and [`Weak`][weak]. -/// -/// ``` -/// use std::sync::Arc; -/// let foo = Arc::new(vec![1.0, 2.0, 3.0]); -/// // The two syntaxes below are equivalent. -/// let a = foo.clone(); -/// let b = Arc::clone(&foo); -/// // a and b both point to the same memory location as foo. -/// ``` -/// -/// The [`Arc::clone(&from)`] syntax is the most idiomatic because it conveys more explicitly -/// the meaning of the code. In the example above, this syntax makes it easier to see that -/// this code is creating a new reference rather than copying the whole content of foo. -/// -/// ## `Deref` behavior -/// -/// `Arc` automatically dereferences to `T` (via the [`Deref`][deref] trait), -/// so you can call `T`'s methods on a value of type `Arc`. 
To avoid name -/// clashes with `T`'s methods, the methods of `Arc` itself are [associated -/// functions][assoc], called using function-like syntax: -/// -/// ``` -/// use std::sync::Arc; -/// let my_arc = Arc::new(()); -/// -/// Arc::downgrade(&my_arc); -/// ``` -/// -/// [`Weak`][weak] does not auto-dereference to `T`, because the value may have -/// already been destroyed. -/// -/// [arc]: struct.Arc.html -/// [weak]: struct.Weak.html -/// [`Rc`]: ../../std/rc/struct.Rc.html -/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone -/// [mutex]: ../../std/sync/struct.Mutex.html -/// [rwlock]: ../../std/sync/struct.RwLock.html -/// [atomic]: ../../std/sync/atomic/index.html -/// [`Send`]: ../../std/marker/trait.Send.html -/// [`Sync`]: ../../std/marker/trait.Sync.html -/// [deref]: ../../std/ops/trait.Deref.html -/// [downgrade]: struct.Arc.html#method.downgrade -/// [upgrade]: struct.Weak.html#method.upgrade -/// [`None`]: ../../std/option/enum.Option.html#variant.None -/// [assoc]: ../../book/first-edition/method-syntax.html#associated-functions -/// [`RefCell`]: ../../std/cell/struct.RefCell.html -/// [`std::sync`]: ../../std/sync/index.html -/// [`Arc::clone(&from)`]: #method.clone -/// -/// # Examples -/// -/// Sharing some immutable data between threads: -/// -// Note that we **do not** run these tests here. The windows builders get super -// unhappy if a thread outlives the main thread and then exits at the same time -// (something deadlocks) so we just avoid this entirely by not running these -// tests. 
-/// ```no_run -/// use std::sync::Arc; -/// use std::thread; -/// -/// let five = Arc::new(5); -/// -/// for _ in 0..10 { -/// let five = Arc::clone(&five); -/// -/// thread::spawn(move || { -/// println!("{:?}", five); -/// }); -/// } -/// ``` -/// -/// Sharing a mutable [`AtomicUsize`]: -/// -/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html -/// -/// ```no_run -/// use std::sync::Arc; -/// use std::sync::atomic::{AtomicUsize, Ordering}; -/// use std::thread; -/// -/// let val = Arc::new(AtomicUsize::new(5)); -/// -/// for _ in 0..10 { -/// let val = Arc::clone(&val); -/// -/// thread::spawn(move || { -/// let v = val.fetch_add(1, Ordering::SeqCst); -/// println!("{:?}", v); -/// }); -/// } -/// ``` -/// -/// See the [`rc` documentation][rc_examples] for more examples of reference -/// counting in general. -/// -/// [rc_examples]: ../../std/rc/index.html#examples -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Arc { - ptr: Shared>, - phantom: PhantomData, -} - -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Send for Arc {} -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Sync for Arc {} - -#[unstable(feature = "coerce_unsized", issue = "27732")] -impl, U: ?Sized> CoerceUnsized> for Arc {} - -/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the -/// managed value. The value is accessed by calling [`upgrade`] on the `Weak` -/// pointer, which returns an [`Option`]`<`[`Arc`]`>`. -/// -/// Since a `Weak` reference does not count towards ownership, it will not -/// prevent the inner value from being dropped, and `Weak` itself makes no -/// guarantees about the value still being present and may return [`None`] -/// when [`upgrade`]d. -/// -/// A `Weak` pointer is useful for keeping a temporary reference to the value -/// within [`Arc`] without extending its lifetime. 
It is also used to prevent -/// circular references between [`Arc`] pointers, since mutual owning references -/// would never allow either [`Arc`] to be dropped. For example, a tree could -/// have strong [`Arc`] pointers from parent nodes to children, and `Weak` -/// pointers from children back to their parents. -/// -/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`]. -/// -/// [`Arc`]: struct.Arc.html -/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade -/// [`upgrade`]: struct.Weak.html#method.upgrade -/// [`Option`]: ../../std/option/enum.Option.html -/// [`None`]: ../../std/option/enum.Option.html#variant.None -#[stable(feature = "arc_weak", since = "1.4.0")] -pub struct Weak { - ptr: Shared>, -} - -#[stable(feature = "arc_weak", since = "1.4.0")] -unsafe impl Send for Weak {} -#[stable(feature = "arc_weak", since = "1.4.0")] -unsafe impl Sync for Weak {} - -#[unstable(feature = "coerce_unsized", issue = "27732")] -impl, U: ?Sized> CoerceUnsized> for Weak {} - -#[stable(feature = "arc_weak", since = "1.4.0")] -impl fmt::Debug for Weak { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(Weak)") - } -} - -struct ArcInner { - strong: atomic::AtomicUsize, - - // the value usize::MAX acts as a sentinel for temporarily "locking" the - // ability to upgrade weak pointers or downgrade strong ones; this is used - // to avoid races in `make_mut` and `get_mut`. - weak: atomic::AtomicUsize, - - data: T, -} - -unsafe impl Send for ArcInner {} -unsafe impl Sync for ArcInner {} - -impl Arc { - /// Constructs a new `Arc`. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn new(data: T) -> Arc { - // Start the weak pointer count as 1 which is the weak pointer that's - // held by all the strong pointers (kinda), see std/rc.rs for more info - let x: Box<_> = box ArcInner { - strong: atomic::AtomicUsize::new(1), - weak: atomic::AtomicUsize::new(1), - data, - }; - Arc { ptr: Shared::from(Box::into_unique(x)), phantom: PhantomData } - } - - /// Returns the contained value, if the `Arc` has exactly one strong reference. - /// - /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was - /// passed in. - /// - /// This will succeed even if there are outstanding weak references. - /// - /// [result]: ../../std/result/enum.Result.html - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let x = Arc::new(3); - /// assert_eq!(Arc::try_unwrap(x), Ok(3)); - /// - /// let x = Arc::new(4); - /// let _y = Arc::clone(&x); - /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); - /// ``` - #[inline] - #[stable(feature = "arc_unique", since = "1.4.0")] - pub fn try_unwrap(this: Self) -> Result { - // See `drop` for why all these atomics are like this - if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() { - return Err(this); - } - - atomic::fence(Acquire); - - unsafe { - let elem = ptr::read(&this.ptr.as_ref().data); - - // Make a weak pointer to clean up the implicit strong-weak reference - let _weak = Weak { ptr: this.ptr }; - mem::forget(this); - - Ok(elem) - } - } -} - -impl Arc { - /// Consumes the `Arc`, returning the wrapped pointer. - /// - /// To avoid a memory leak the pointer must be converted back to an `Arc` using - /// [`Arc::from_raw`][from_raw]. 
- /// - /// [from_raw]: struct.Arc.html#method.from_raw - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let x = Arc::new(10); - /// let x_ptr = Arc::into_raw(x); - /// assert_eq!(unsafe { *x_ptr }, 10); - /// ``` - #[stable(feature = "rc_raw", since = "1.17.0")] - pub fn into_raw(this: Self) -> *const T { - let ptr: *const T = &*this; - mem::forget(this); - ptr - } - - /// Constructs an `Arc` from a raw pointer. - /// - /// The raw pointer must have been previously returned by a call to a - /// [`Arc::into_raw`][into_raw]. - /// - /// This function is unsafe because improper use may lead to memory problems. For example, a - /// double-free may occur if the function is called twice on the same raw pointer. - /// - /// [into_raw]: struct.Arc.html#method.into_raw - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let x = Arc::new(10); - /// let x_ptr = Arc::into_raw(x); - /// - /// unsafe { - /// // Convert back to an `Arc` to prevent leak. - /// let x = Arc::from_raw(x_ptr); - /// assert_eq!(*x, 10); - /// - /// // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe. - /// } - /// - /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! - /// ``` - #[stable(feature = "rc_raw", since = "1.17.0")] - pub unsafe fn from_raw(ptr: *const T) -> Self { - // Align the unsized value to the end of the ArcInner. - // Because it is ?Sized, it will always be the last field in memory. - let align = align_of_val(&*ptr); - let layout = Layout::new::>(); - let offset = (layout.size() + layout.padding_needed_for(align)) as isize; - - // Reverse the offset to find the original ArcInner. - let fake_ptr = ptr as *mut ArcInner; - let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); - - Arc { - ptr: Shared::new_unchecked(arc_ptr), - phantom: PhantomData, - } - } - - /// Creates a new [`Weak`][weak] pointer to this value. 
- /// - /// [weak]: struct.Weak.html - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// let weak_five = Arc::downgrade(&five); - /// ``` - #[stable(feature = "arc_weak", since = "1.4.0")] - pub fn downgrade(this: &Self) -> Weak { - // This Relaxed is OK because we're checking the value in the CAS - // below. - let mut cur = this.inner().weak.load(Relaxed); - - loop { - // check if the weak counter is currently "locked"; if so, spin. - if cur == usize::MAX { - cur = this.inner().weak.load(Relaxed); - continue; - } - - // NOTE: this code currently ignores the possibility of overflow - // into usize::MAX; in general both Rc and Arc need to be adjusted - // to deal with overflow. - - // Unlike with Clone(), we need this to be an Acquire read to - // synchronize with the write coming from `is_unique`, so that the - // events prior to that write happen before this read. - match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { - Ok(_) => return Weak { ptr: this.ptr }, - Err(old) => cur = old, - } - } - } - - /// Gets the number of [`Weak`][weak] pointers to this value. - /// - /// [weak]: struct.Weak.html - /// - /// # Safety - /// - /// This method by itself is safe, but using it correctly requires extra care. - /// Another thread can change the weak count at any time, - /// including potentially between calling this method and acting on the result. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// let _weak_five = Arc::downgrade(&five); - /// - /// // This assertion is deterministic because we haven't shared - /// // the `Arc` or `Weak` between threads. 
- /// assert_eq!(1, Arc::weak_count(&five)); - /// ``` - #[inline] - #[stable(feature = "arc_counts", since = "1.15.0")] - pub fn weak_count(this: &Self) -> usize { - let cnt = this.inner().weak.load(SeqCst); - // If the weak count is currently locked, the value of the - // count was 0 just before taking the lock. - if cnt == usize::MAX { 0 } else { cnt - 1 } - } - - /// Gets the number of strong (`Arc`) pointers to this value. - /// - /// # Safety - /// - /// This method by itself is safe, but using it correctly requires extra care. - /// Another thread can change the strong count at any time, - /// including potentially between calling this method and acting on the result. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// let _also_five = Arc::clone(&five); - /// - /// // This assertion is deterministic because we haven't shared - /// // the `Arc` between threads. - /// assert_eq!(2, Arc::strong_count(&five)); - /// ``` - #[inline] - #[stable(feature = "arc_counts", since = "1.15.0")] - pub fn strong_count(this: &Self) -> usize { - this.inner().strong.load(SeqCst) - } - - #[inline] - fn inner(&self) -> &ArcInner { - // This unsafety is ok because while this arc is alive we're guaranteed - // that the inner pointer is valid. Furthermore, we know that the - // `ArcInner` structure itself is `Sync` because the inner data is - // `Sync` as well, so we're ok loaning out an immutable pointer to these - // contents. - unsafe { self.ptr.as_ref() } - } - - // Non-inlined part of `drop`. - #[inline(never)] - unsafe fn drop_slow(&mut self) { - let ptr = self.ptr.as_ptr(); - - // Destroy the data at this time, even though we may not free the box - // allocation itself (there may still be weak pointers lying around). 
- ptr::drop_in_place(&mut self.ptr.as_mut().data); - - if self.inner().weak.fetch_sub(1, Release) == 1 { - atomic::fence(Acquire); - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)) - } - } - - #[inline] - #[stable(feature = "ptr_eq", since = "1.17.0")] - /// Returns true if the two `Arc`s point to the same value (not - /// just values that compare as equal). - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// let same_five = Arc::clone(&five); - /// let other_five = Arc::new(5); - /// - /// assert!(Arc::ptr_eq(&five, &same_five)); - /// assert!(!Arc::ptr_eq(&five, &other_five)); - /// ``` - pub fn ptr_eq(this: &Self, other: &Self) -> bool { - this.ptr.as_ptr() == other.ptr.as_ptr() - } -} - -impl Arc { - // Allocates an `ArcInner` with sufficient space for an unsized value - unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner { - // Create a fake ArcInner to find allocation size and alignment - let fake_ptr = ptr as *mut ArcInner; - - let layout = Layout::for_value(&*fake_ptr); - - let mem = Heap.alloc(layout) - .unwrap_or_else(|e| Heap.oom(e)); - - // Initialize the real ArcInner - let inner = set_data_ptr(ptr as *mut T, mem) as *mut ArcInner; - - ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); - ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); - - inner - } - - fn from_box(v: Box) -> Arc { - unsafe { - let bptr = Box::into_raw(v); - - let value_size = size_of_val(&*bptr); - let ptr = Self::allocate_for_ptr(bptr); - - // Copy value as bytes - ptr::copy_nonoverlapping( - bptr as *const T as *const u8, - &mut (*ptr).data as *mut _ as *mut u8, - value_size); - - // Free the allocation without dropping its contents - box_free(bptr); - - Arc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } - } - } -} - -// Sets the data pointer of a `?Sized` raw pointer. -// -// For a slice/trait object, this sets the `data` field and leaves the rest -// unchanged. 
For a sized raw pointer, this simply sets the pointer. -unsafe fn set_data_ptr(mut ptr: *mut T, data: *mut U) -> *mut T { - ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8); - ptr -} - -impl Arc<[T]> { - // Copy elements from slice into newly allocated Arc<[T]> - // - // Unsafe because the caller must either take ownership or bind `T: Copy` - unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> { - let v_ptr = v as *const [T]; - let ptr = Self::allocate_for_ptr(v_ptr); - - ptr::copy_nonoverlapping( - v.as_ptr(), - &mut (*ptr).data as *mut [T] as *mut T, - v.len()); - - Arc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } - } -} - -// Specialization trait used for From<&[T]> -trait ArcFromSlice { - fn from_slice(slice: &[T]) -> Self; -} - -impl ArcFromSlice for Arc<[T]> { - #[inline] - default fn from_slice(v: &[T]) -> Self { - // Panic guard while cloning T elements. - // In the event of a panic, elements that have been written - // into the new ArcInner will be dropped, then the memory freed. - struct Guard { - mem: *mut u8, - elems: *mut T, - layout: Layout, - n_elems: usize, - } - - impl Drop for Guard { - fn drop(&mut self) { - use core::slice::from_raw_parts_mut; - - unsafe { - let slice = from_raw_parts_mut(self.elems, self.n_elems); - ptr::drop_in_place(slice); - - Heap.dealloc(self.mem, self.layout.clone()); - } - } - } - - unsafe { - let v_ptr = v as *const [T]; - let ptr = Self::allocate_for_ptr(v_ptr); - - let mem = ptr as *mut _ as *mut u8; - let layout = Layout::for_value(&*ptr); - - // Pointer to first element - let elems = &mut (*ptr).data as *mut [T] as *mut T; - - let mut guard = Guard{ - mem: mem, - elems: elems, - layout: layout, - n_elems: 0, - }; - - for (i, item) in v.iter().enumerate() { - ptr::write(elems.offset(i as isize), item.clone()); - guard.n_elems += 1; - } - - // All clear. Forget the guard so it doesn't free the new ArcInner. 
- mem::forget(guard); - - Arc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } - } - } -} - -impl ArcFromSlice for Arc<[T]> { - #[inline] - fn from_slice(v: &[T]) -> Self { - unsafe { Arc::copy_from_slice(v) } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Arc { - /// Makes a clone of the `Arc` pointer. - /// - /// This creates another pointer to the same inner value, increasing the - /// strong reference count. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// Arc::clone(&five); - /// ``` - #[inline] - fn clone(&self) -> Arc { - // Using a relaxed ordering is alright here, as knowledge of the - // original reference prevents other threads from erroneously deleting - // the object. - // - // As explained in the [Boost documentation][1], Increasing the - // reference counter can always be done with memory_order_relaxed: New - // references to an object can only be formed from an existing - // reference, and passing an existing reference from one thread to - // another must already provide any required synchronization. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - let old_size = self.inner().strong.fetch_add(1, Relaxed); - - // However we need to guard against massive refcounts in case someone - // is `mem::forget`ing Arcs. If we don't do this the count can overflow - // and users will use-after free. We racily saturate to `isize::MAX` on - // the assumption that there aren't ~2 billion threads incrementing - // the reference count at once. This branch will never be taken in - // any realistic program. - // - // We abort because such a program is incredibly degenerate, and we - // don't care to support it. 
- if old_size > MAX_REFCOUNT { - unsafe { - abort(); - } - } - - Arc { ptr: self.ptr, phantom: PhantomData } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Deref for Arc { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - &self.inner().data - } -} - -impl Arc { - /// Makes a mutable reference into the given `Arc`. - /// - /// If there are other `Arc` or [`Weak`][weak] pointers to the same value, - /// then `make_mut` will invoke [`clone`][clone] on the inner value to - /// ensure unique ownership. This is also referred to as clone-on-write. - /// - /// See also [`get_mut`][get_mut], which will fail rather than cloning. - /// - /// [weak]: struct.Weak.html - /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone - /// [get_mut]: struct.Arc.html#method.get_mut - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let mut data = Arc::new(5); - /// - /// *Arc::make_mut(&mut data) += 1; // Won't clone anything - /// let mut other_data = Arc::clone(&data); // Won't clone inner data - /// *Arc::make_mut(&mut data) += 1; // Clones inner data - /// *Arc::make_mut(&mut data) += 1; // Won't clone anything - /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything - /// - /// // Now `data` and `other_data` point to different values. - /// assert_eq!(*data, 8); - /// assert_eq!(*other_data, 12); - /// ``` - #[inline] - #[stable(feature = "arc_unique", since = "1.4.0")] - pub fn make_mut(this: &mut Self) -> &mut T { - // Note that we hold both a strong reference and a weak reference. - // Thus, releasing our strong reference only will not, by itself, cause - // the memory to be deallocated. - // - // Use Acquire to ensure that we see any writes to `weak` that happen - // before release writes (i.e., decrements) to `strong`. Since we hold a - // weak count, there's no chance the ArcInner itself could be - // deallocated. 
- if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { - // Another strong pointer exists; clone - *this = Arc::new((**this).clone()); - } else if this.inner().weak.load(Relaxed) != 1 { - // Relaxed suffices in the above because this is fundamentally an - // optimization: we are always racing with weak pointers being - // dropped. Worst case, we end up allocated a new Arc unnecessarily. - - // We removed the last strong ref, but there are additional weak - // refs remaining. We'll move the contents to a new Arc, and - // invalidate the other weak refs. - - // Note that it is not possible for the read of `weak` to yield - // usize::MAX (i.e., locked), since the weak count can only be - // locked by a thread with a strong reference. - - // Materialize our own implicit weak pointer, so that it can clean - // up the ArcInner as needed. - let weak = Weak { ptr: this.ptr }; - - // mark the data itself as already deallocated - unsafe { - // there is no data race in the implicit write caused by `read` - // here (due to zeroing) because data is no longer accessed by - // other threads (due to there being no more strong refs at this - // point). - let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data)); - mem::swap(this, &mut swap); - mem::forget(swap); - } - } else { - // We were the sole reference of either kind; bump back up the - // strong ref count. - this.inner().strong.store(1, Release); - } - - // As with `get_mut()`, the unsafety is ok because our reference was - // either unique to begin with, or became one upon cloning the contents. - unsafe { - &mut this.ptr.as_mut().data - } - } -} - -impl Arc { - /// Returns a mutable reference to the inner value, if there are - /// no other `Arc` or [`Weak`][weak] pointers to the same value. - /// - /// Returns [`None`][option] otherwise, because it is not safe to - /// mutate a shared value. 
- /// - /// See also [`make_mut`][make_mut], which will [`clone`][clone] - /// the inner value when it's shared. - /// - /// [weak]: struct.Weak.html - /// [option]: ../../std/option/enum.Option.html - /// [make_mut]: struct.Arc.html#method.make_mut - /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let mut x = Arc::new(3); - /// *Arc::get_mut(&mut x).unwrap() = 4; - /// assert_eq!(*x, 4); - /// - /// let _y = Arc::clone(&x); - /// assert!(Arc::get_mut(&mut x).is_none()); - /// ``` - #[inline] - #[stable(feature = "arc_unique", since = "1.4.0")] - pub fn get_mut(this: &mut Self) -> Option<&mut T> { - if this.is_unique() { - // This unsafety is ok because we're guaranteed that the pointer - // returned is the *only* pointer that will ever be returned to T. Our - // reference count is guaranteed to be 1 at this point, and we required - // the Arc itself to be `mut`, so we're returning the only possible - // reference to the inner data. - unsafe { - Some(&mut this.ptr.as_mut().data) - } - } else { - None - } - } - - /// Determine whether this is the unique reference (including weak refs) to - /// the underlying data. - /// - /// Note that this requires locking the weak ref count. - fn is_unique(&mut self) -> bool { - // lock the weak pointer count if we appear to be the sole weak pointer - // holder. - // - // The acquire label here ensures a happens-before relationship with any - // writes to `strong` prior to decrements of the `weak` count (via drop, - // which uses Release). - if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { - // Due to the previous acquire read, this will observe any writes to - // `strong` that were due to upgrading weak pointers; only strong - // clones remain, which require that the strong count is > 1 anyway. 
- let unique = self.inner().strong.load(Relaxed) == 1; - - // The release write here synchronizes with a read in `downgrade`, - // effectively preventing the above read of `strong` from happening - // after the write. - self.inner().weak.store(1, Release); // release the lock - unique - } else { - false - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc { - /// Drops the `Arc`. - /// - /// This will decrement the strong reference count. If the strong reference - /// count reaches zero then the only other references (if any) are - /// [`Weak`][weak], so we `drop` the inner value. - /// - /// [weak]: struct.Weak.html - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// struct Foo; - /// - /// impl Drop for Foo { - /// fn drop(&mut self) { - /// println!("dropped!"); - /// } - /// } - /// - /// let foo = Arc::new(Foo); - /// let foo2 = Arc::clone(&foo); - /// - /// drop(foo); // Doesn't print anything - /// drop(foo2); // Prints "dropped!" - /// ``` - #[inline] - fn drop(&mut self) { - // Because `fetch_sub` is already atomic, we do not need to synchronize - // with other threads unless we are going to delete the object. This - // same logic applies to the below `fetch_sub` to the `weak` count. - if self.inner().strong.fetch_sub(1, Release) != 1 { - return; - } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. 
- // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. - // - // In particular, while the contents of an Arc are usually immutable, it's - // possible to have interior writes to something like a Mutex. Since a - // Mutex is not acquired when it is deleted, we can't rely on its - // synchronization logic to make writes in thread A visible to a destructor - // running in thread B. - // - // Also note that the Acquire fence here could probably be replaced with an - // Acquire load, which could improve performance in highly-contended - // situations. See [2]. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - // [2]: (https://github.com/rust-lang/rust/pull/41714) - atomic::fence(Acquire); - - unsafe { - self.drop_slow(); - } - } -} - -impl Weak { - /// Constructs a new `Weak`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`] on the return value always gives [`None`]. 
- /// - /// [`upgrade`]: struct.Weak.html#method.upgrade - /// [`None`]: ../../std/option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// use std::sync::Weak; - /// - /// let empty: Weak = Weak::new(); - /// assert!(empty.upgrade().is_none()); - /// ``` - #[stable(feature = "downgraded_weak", since = "1.10.0")] - pub fn new() -> Weak { - unsafe { - Weak { - ptr: Shared::from(Box::into_unique(box ArcInner { - strong: atomic::AtomicUsize::new(0), - weak: atomic::AtomicUsize::new(1), - data: uninitialized(), - })), - } - } - } -} - -impl Weak { - /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending - /// the lifetime of the value if successful. - /// - /// Returns [`None`] if the value has since been dropped. - /// - /// [`Arc`]: struct.Arc.html - /// [`None`]: ../../std/option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// let weak_five = Arc::downgrade(&five); - /// - /// let strong_five: Option> = weak_five.upgrade(); - /// assert!(strong_five.is_some()); - /// - /// // Destroy all strong pointers. - /// drop(strong_five); - /// drop(five); - /// - /// assert!(weak_five.upgrade().is_none()); - /// ``` - #[stable(feature = "arc_weak", since = "1.4.0")] - pub fn upgrade(&self) -> Option> { - // We use a CAS loop to increment the strong count instead of a - // fetch_add because once the count hits 0 it must never be above 0. - let inner = self.inner(); - - // Relaxed load because any write of 0 that we can observe - // leaves the field in a permanently zero state (so a - // "stale" read of 0 is fine), and any other value is - // confirmed via the CAS below. - let mut n = inner.strong.load(Relaxed); - - loop { - if n == 0 { - return None; - } - - // See comments in `Arc::clone` for why we do this (for `mem::forget`). 
- if n > MAX_REFCOUNT { - unsafe { - abort(); - } - } - - // Relaxed is valid for the same reason it is on Arc's Clone impl - match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) { - Ok(_) => return Some(Arc { ptr: self.ptr, phantom: PhantomData }), - Err(old) => n = old, - } - } - } - - #[inline] - fn inner(&self) -> &ArcInner { - // See comments above for why this is "safe" - unsafe { self.ptr.as_ref() } - } -} - -#[stable(feature = "arc_weak", since = "1.4.0")] -impl Clone for Weak { - /// Makes a clone of the `Weak` pointer that points to the same value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::{Arc, Weak}; - /// - /// let weak_five = Arc::downgrade(&Arc::new(5)); - /// - /// Weak::clone(&weak_five); - /// ``` - #[inline] - fn clone(&self) -> Weak { - // See comments in Arc::clone() for why this is relaxed. This can use a - // fetch_add (ignoring the lock) because the weak count is only locked - // where are *no other* weak pointers in existence. (So we can't be - // running this code in that case). - let old_size = self.inner().weak.fetch_add(1, Relaxed); - - // See comments in Arc::clone() for why we do this (for mem::forget). - if old_size > MAX_REFCOUNT { - unsafe { - abort(); - } - } - - return Weak { ptr: self.ptr }; - } -} - -#[stable(feature = "downgraded_weak", since = "1.10.0")] -impl Default for Weak { - /// Constructs a new `Weak`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`] on the return value always gives [`None`]. - /// - /// [`upgrade`]: struct.Weak.html#method.upgrade - /// [`None`]: ../../std/option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// use std::sync::Weak; - /// - /// let empty: Weak = Default::default(); - /// assert!(empty.upgrade().is_none()); - /// ``` - fn default() -> Weak { - Weak::new() - } -} - -#[stable(feature = "arc_weak", since = "1.4.0")] -impl Drop for Weak { - /// Drops the `Weak` pointer. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::{Arc, Weak}; - /// - /// struct Foo; - /// - /// impl Drop for Foo { - /// fn drop(&mut self) { - /// println!("dropped!"); - /// } - /// } - /// - /// let foo = Arc::new(Foo); - /// let weak_foo = Arc::downgrade(&foo); - /// let other_weak_foo = Weak::clone(&weak_foo); - /// - /// drop(weak_foo); // Doesn't print anything - /// drop(foo); // Prints "dropped!" - /// - /// assert!(other_weak_foo.upgrade().is_none()); - /// ``` - fn drop(&mut self) { - let ptr = self.ptr.as_ptr(); - - // If we find out that we were the last weak pointer, then its time to - // deallocate the data entirely. See the discussion in Arc::drop() about - // the memory orderings - // - // It's not necessary to check for the locked state here, because the - // weak count can only be locked if there was precisely one weak ref, - // meaning that drop could only subsequently run ON that remaining weak - // ref, which can only happen after the lock is released. - if self.inner().weak.fetch_sub(1, Release) == 1 { - atomic::fence(Acquire); - unsafe { - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)) - } - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for Arc { - /// Equality for two `Arc`s. - /// - /// Two `Arc`s are equal if their inner values are equal. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// assert!(five == Arc::new(5)); - /// ``` - fn eq(&self, other: &Arc) -> bool { - *(*self) == *(*other) - } - - /// Inequality for two `Arc`s. - /// - /// Two `Arc`s are unequal if their inner values are unequal. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// assert!(five != Arc::new(6)); - /// ``` - fn ne(&self, other: &Arc) -> bool { - *(*self) != *(*other) - } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialOrd for Arc { - /// Partial comparison for two `Arc`s. 
- /// - /// The two are compared by calling `partial_cmp()` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use std::cmp::Ordering; - /// - /// let five = Arc::new(5); - /// - /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6))); - /// ``` - fn partial_cmp(&self, other: &Arc) -> Option { - (**self).partial_cmp(&**other) - } - - /// Less-than comparison for two `Arc`s. - /// - /// The two are compared by calling `<` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// assert!(five < Arc::new(6)); - /// ``` - fn lt(&self, other: &Arc) -> bool { - *(*self) < *(*other) - } - - /// 'Less than or equal to' comparison for two `Arc`s. - /// - /// The two are compared by calling `<=` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// assert!(five <= Arc::new(5)); - /// ``` - fn le(&self, other: &Arc) -> bool { - *(*self) <= *(*other) - } - - /// Greater-than comparison for two `Arc`s. - /// - /// The two are compared by calling `>` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// assert!(five > Arc::new(4)); - /// ``` - fn gt(&self, other: &Arc) -> bool { - *(*self) > *(*other) - } - - /// 'Greater than or equal to' comparison for two `Arc`s. - /// - /// The two are compared by calling `>=` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// assert!(five >= Arc::new(5)); - /// ``` - fn ge(&self, other: &Arc) -> bool { - *(*self) >= *(*other) - } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl Ord for Arc { - /// Comparison for two `Arc`s. - /// - /// The two are compared by calling `cmp()` on their inner values. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// use std::cmp::Ordering; - /// - /// let five = Arc::new(5); - /// - /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6))); - /// ``` - fn cmp(&self, other: &Arc) -> Ordering { - (**self).cmp(&**other) - } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl Eq for Arc {} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Pointer for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Pointer::fmt(&(&**self as *const T), f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Default for Arc { - /// Creates a new `Arc`, with the `Default` value for `T`. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let x: Arc = Default::default(); - /// assert_eq!(*x, 0); - /// ``` - fn default() -> Arc { - Arc::new(Default::default()) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Hash for Arc { - fn hash(&self, state: &mut H) { - (**self).hash(state) - } -} - -#[stable(feature = "from_for_ptrs", since = "1.6.0")] -impl From for Arc { - fn from(t: T) -> Self { - Arc::new(t) - } -} - -#[stable(feature = "shared_from_slice", since = "1.21.0")] -impl<'a, T: Clone> From<&'a [T]> for Arc<[T]> { - #[inline] - fn from(v: &[T]) -> Arc<[T]> { - >::from_slice(v) - } -} - -#[stable(feature = "shared_from_slice", since = "1.21.0")] -impl<'a> From<&'a str> for Arc { - #[inline] - fn from(v: &str) -> Arc { - let arc = Arc::<[u8]>::from(v.as_bytes()); - unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) } - } -} - -#[stable(feature = "shared_from_slice", since = "1.21.0")] -impl From for Arc { - #[inline] - fn from(v: String) -> Arc { - Arc::from(&v[..]) - } -} - -#[stable(feature = "shared_from_slice", since = "1.21.0")] -impl From> for Arc { - #[inline] - fn from(v: Box) -> Arc { - Arc::from_box(v) - } -} - -#[stable(feature = "shared_from_slice", since = "1.21.0")] -impl From> for Arc<[T]> { - #[inline] - fn from(mut v: Vec) -> Arc<[T]> { - unsafe { - let arc = Arc::copy_from_slice(&v); - - // Allow the Vec to free its memory, but not destroy its contents - v.set_len(0); - - arc - } - } -} - -#[cfg(test)] -mod tests { - use std::boxed::Box; - use std::clone::Clone; - use std::sync::mpsc::channel; - use std::mem::drop; - use std::ops::Drop; - use std::option::Option; - use std::option::Option::{None, Some}; - use std::sync::atomic; - use std::sync::atomic::Ordering::{Acquire, SeqCst}; - use std::thread; - use std::sync::Mutex; - use std::convert::From; - - use super::{Arc, Weak}; - use vec::Vec; - - struct Canary(*mut atomic::AtomicUsize); - - impl Drop for Canary { - fn drop(&mut self) { - 
unsafe { - match *self { - Canary(c) => { - (*c).fetch_add(1, SeqCst); - } - } - } - } - } - - #[test] - #[cfg_attr(target_os = "emscripten", ignore)] - fn manually_share_arc() { - let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; - let arc_v = Arc::new(v); - - let (tx, rx) = channel(); - - let _t = thread::spawn(move || { - let arc_v: Arc> = rx.recv().unwrap(); - assert_eq!((*arc_v)[3], 4); - }); - - tx.send(arc_v.clone()).unwrap(); - - assert_eq!((*arc_v)[2], 3); - assert_eq!((*arc_v)[4], 5); - } - - #[test] - fn test_arc_get_mut() { - let mut x = Arc::new(3); - *Arc::get_mut(&mut x).unwrap() = 4; - assert_eq!(*x, 4); - let y = x.clone(); - assert!(Arc::get_mut(&mut x).is_none()); - drop(y); - assert!(Arc::get_mut(&mut x).is_some()); - let _w = Arc::downgrade(&x); - assert!(Arc::get_mut(&mut x).is_none()); - } - - #[test] - fn try_unwrap() { - let x = Arc::new(3); - assert_eq!(Arc::try_unwrap(x), Ok(3)); - let x = Arc::new(4); - let _y = x.clone(); - assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4))); - let x = Arc::new(5); - let _w = Arc::downgrade(&x); - assert_eq!(Arc::try_unwrap(x), Ok(5)); - } - - #[test] - fn into_from_raw() { - let x = Arc::new(box "hello"); - let y = x.clone(); - - let x_ptr = Arc::into_raw(x); - drop(y); - unsafe { - assert_eq!(**x_ptr, "hello"); - - let x = Arc::from_raw(x_ptr); - assert_eq!(**x, "hello"); - - assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello")); - } - } - - #[test] - fn test_into_from_raw_unsized() { - use std::fmt::Display; - use std::string::ToString; - - let arc: Arc = Arc::from("foo"); - - let ptr = Arc::into_raw(arc.clone()); - let arc2 = unsafe { Arc::from_raw(ptr) }; - - assert_eq!(unsafe { &*ptr }, "foo"); - assert_eq!(arc, arc2); - - let arc: Arc = Arc::new(123); - - let ptr = Arc::into_raw(arc.clone()); - let arc2 = unsafe { Arc::from_raw(ptr) }; - - assert_eq!(unsafe { &*ptr }.to_string(), "123"); - assert_eq!(arc2.to_string(), "123"); - } - - #[test] - fn test_cowarc_clone_make_mut() { - let mut cow0 = 
Arc::new(75); - let mut cow1 = cow0.clone(); - let mut cow2 = cow1.clone(); - - assert!(75 == *Arc::make_mut(&mut cow0)); - assert!(75 == *Arc::make_mut(&mut cow1)); - assert!(75 == *Arc::make_mut(&mut cow2)); - - *Arc::make_mut(&mut cow0) += 1; - *Arc::make_mut(&mut cow1) += 2; - *Arc::make_mut(&mut cow2) += 3; - - assert!(76 == *cow0); - assert!(77 == *cow1); - assert!(78 == *cow2); - - // none should point to the same backing memory - assert!(*cow0 != *cow1); - assert!(*cow0 != *cow2); - assert!(*cow1 != *cow2); - } - - #[test] - fn test_cowarc_clone_unique2() { - let mut cow0 = Arc::new(75); - let cow1 = cow0.clone(); - let cow2 = cow1.clone(); - - assert!(75 == *cow0); - assert!(75 == *cow1); - assert!(75 == *cow2); - - *Arc::make_mut(&mut cow0) += 1; - assert!(76 == *cow0); - assert!(75 == *cow1); - assert!(75 == *cow2); - - // cow1 and cow2 should share the same contents - // cow0 should have a unique reference - assert!(*cow0 != *cow1); - assert!(*cow0 != *cow2); - assert!(*cow1 == *cow2); - } - - #[test] - fn test_cowarc_clone_weak() { - let mut cow0 = Arc::new(75); - let cow1_weak = Arc::downgrade(&cow0); - - assert!(75 == *cow0); - assert!(75 == *cow1_weak.upgrade().unwrap()); - - *Arc::make_mut(&mut cow0) += 1; - - assert!(76 == *cow0); - assert!(cow1_weak.upgrade().is_none()); - } - - #[test] - fn test_live() { - let x = Arc::new(5); - let y = Arc::downgrade(&x); - assert!(y.upgrade().is_some()); - } - - #[test] - fn test_dead() { - let x = Arc::new(5); - let y = Arc::downgrade(&x); - drop(x); - assert!(y.upgrade().is_none()); - } - - #[test] - fn weak_self_cyclic() { - struct Cycle { - x: Mutex>>, - } - - let a = Arc::new(Cycle { x: Mutex::new(None) }); - let b = Arc::downgrade(&a.clone()); - *a.x.lock().unwrap() = Some(b); - - // hopefully we don't double-free (or leak)... 
- } - - #[test] - fn drop_arc() { - let mut canary = atomic::AtomicUsize::new(0); - let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); - drop(x); - assert!(canary.load(Acquire) == 1); - } - - #[test] - fn drop_arc_weak() { - let mut canary = atomic::AtomicUsize::new(0); - let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); - let arc_weak = Arc::downgrade(&arc); - assert!(canary.load(Acquire) == 0); - drop(arc); - assert!(canary.load(Acquire) == 1); - drop(arc_weak); - } - - #[test] - fn test_strong_count() { - let a = Arc::new(0); - assert!(Arc::strong_count(&a) == 1); - let w = Arc::downgrade(&a); - assert!(Arc::strong_count(&a) == 1); - let b = w.upgrade().expect(""); - assert!(Arc::strong_count(&b) == 2); - assert!(Arc::strong_count(&a) == 2); - drop(w); - drop(a); - assert!(Arc::strong_count(&b) == 1); - let c = b.clone(); - assert!(Arc::strong_count(&b) == 2); - assert!(Arc::strong_count(&c) == 2); - } - - #[test] - fn test_weak_count() { - let a = Arc::new(0); - assert!(Arc::strong_count(&a) == 1); - assert!(Arc::weak_count(&a) == 0); - let w = Arc::downgrade(&a); - assert!(Arc::strong_count(&a) == 1); - assert!(Arc::weak_count(&a) == 1); - let x = w.clone(); - assert!(Arc::weak_count(&a) == 2); - drop(w); - drop(x); - assert!(Arc::strong_count(&a) == 1); - assert!(Arc::weak_count(&a) == 0); - let c = a.clone(); - assert!(Arc::strong_count(&a) == 2); - assert!(Arc::weak_count(&a) == 0); - let d = Arc::downgrade(&c); - assert!(Arc::weak_count(&c) == 1); - assert!(Arc::strong_count(&c) == 2); - - drop(a); - drop(c); - drop(d); - } - - #[test] - fn show_arc() { - let a = Arc::new(5); - assert_eq!(format!("{:?}", a), "5"); - } - - // Make sure deriving works with Arc - #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)] - struct Foo { - inner: Arc, - } - - #[test] - fn test_unsized() { - let x: Arc<[i32]> = Arc::new([1, 2, 3]); - assert_eq!(format!("{:?}", x), "[1, 2, 3]"); - let y = Arc::downgrade(&x.clone()); - 
drop(x); - assert!(y.upgrade().is_none()); - } - - #[test] - fn test_from_owned() { - let foo = 123; - let foo_arc = Arc::from(foo); - assert!(123 == *foo_arc); - } - - #[test] - fn test_new_weak() { - let foo: Weak = Weak::new(); - assert!(foo.upgrade().is_none()); - } - - #[test] - fn test_ptr_eq() { - let five = Arc::new(5); - let same_five = five.clone(); - let other_five = Arc::new(5); - - assert!(Arc::ptr_eq(&five, &same_five)); - assert!(!Arc::ptr_eq(&five, &other_five)); - } - - #[test] - #[cfg_attr(target_os = "emscripten", ignore)] - fn test_weak_count_locked() { - let mut a = Arc::new(atomic::AtomicBool::new(false)); - let a2 = a.clone(); - let t = thread::spawn(move || { - for _i in 0..1000000 { - Arc::get_mut(&mut a); - } - a.store(true, SeqCst); - }); - - while !a2.load(SeqCst) { - let n = Arc::weak_count(&a2); - assert!(n < 2, "bad weak count: {}", n); - } - t.join().unwrap(); - } - - #[test] - fn test_from_str() { - let r: Arc = Arc::from("foo"); - - assert_eq!(&r[..], "foo"); - } - - #[test] - fn test_copy_from_slice() { - let s: &[u32] = &[1, 2, 3]; - let r: Arc<[u32]> = Arc::from(s); - - assert_eq!(&r[..], [1, 2, 3]); - } - - #[test] - fn test_clone_from_slice() { - #[derive(Clone, Debug, Eq, PartialEq)] - struct X(u32); - - let s: &[X] = &[X(1), X(2), X(3)]; - let r: Arc<[X]> = Arc::from(s); - - assert_eq!(&r[..], s); - } - - #[test] - #[should_panic] - fn test_clone_from_slice_panic() { - use std::string::{String, ToString}; - - struct Fail(u32, String); - - impl Clone for Fail { - fn clone(&self) -> Fail { - if self.0 == 2 { - panic!(); - } - Fail(self.0, self.1.clone()) - } - } - - let s: &[Fail] = &[ - Fail(0, "foo".to_string()), - Fail(1, "bar".to_string()), - Fail(2, "baz".to_string()), - ]; - - // Should panic, but not cause memory corruption - let _r: Arc<[Fail]> = Arc::from(s); - } - - #[test] - fn test_from_box() { - let b: Box = box 123; - let r: Arc = Arc::from(b); - - assert_eq!(*r, 123); - } - - #[test] - fn test_from_box_str() { - 
use std::string::String; - - let s = String::from("foo").into_boxed_str(); - let r: Arc = Arc::from(s); - - assert_eq!(&r[..], "foo"); - } - - #[test] - fn test_from_box_slice() { - let s = vec![1, 2, 3].into_boxed_slice(); - let r: Arc<[u32]> = Arc::from(s); - - assert_eq!(&r[..], [1, 2, 3]); - } - - #[test] - fn test_from_box_trait() { - use std::fmt::Display; - use std::string::ToString; - - let b: Box = box 123; - let r: Arc = Arc::from(b); - - assert_eq!(r.to_string(), "123"); - } - - #[test] - fn test_from_box_trait_zero_sized() { - use std::fmt::Debug; - - let b: Box = box (); - let r: Arc = Arc::from(b); - - assert_eq!(format!("{:?}", r), "()"); - } - - #[test] - fn test_from_vec() { - let v = vec![1, 2, 3]; - let r: Arc<[u32]> = Arc::from(v); - - assert_eq!(&r[..], [1, 2, 3]); - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl borrow::Borrow for Arc { - fn borrow(&self) -> &T { - &**self - } -} - -#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] -impl AsRef for Arc { - fn as_ref(&self) -> &T { - &**self - } -} diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs index 174628ccd078..b4f4fd74f3a3 100644 --- a/src/liballoc/benches/lib.rs +++ b/src/liballoc/benches/lib.rs @@ -8,12 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(warnings)] - -#![feature(i128_type)] -#![feature(rand)] #![feature(repr_simd)] -#![feature(slice_rotate)] +#![feature(slice_sort_by_cached_key)] #![feature(test)] extern crate rand; diff --git a/src/liballoc/benches/slice.rs b/src/liballoc/benches/slice.rs index ee5182a1d466..a699ff9c0a76 100644 --- a/src/liballoc/benches/slice.rs +++ b/src/liballoc/benches/slice.rs @@ -284,6 +284,17 @@ macro_rules! sort_expensive { } } +macro_rules! 
sort_lexicographic { + ($f:ident, $name:ident, $gen:expr, $len:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let v = $gen($len); + b.iter(|| v.clone().$f(|x| x.to_string())); + b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64; + } + } +} + sort!(sort, sort_small_ascending, gen_ascending, 10); sort!(sort, sort_small_descending, gen_descending, 10); sort!(sort, sort_small_random, gen_random, 10); @@ -312,6 +323,10 @@ sort!(sort_unstable, sort_unstable_large_big, gen_big_random, 10000); sort_strings!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000); sort_expensive!(sort_unstable_by, sort_unstable_large_expensive, gen_random, 10000); +sort_lexicographic!(sort_by_key, sort_by_key_lexicographic, gen_random, 10000); +sort_lexicographic!(sort_unstable_by_key, sort_unstable_by_key_lexicographic, gen_random, 10000); +sort_lexicographic!(sort_by_cached_key, sort_by_cached_key_lexicographic, gen_random, 10000); + macro_rules! reverse { ($name:ident, $ty:ty, $f:expr) => { #[bench] diff --git a/src/liballoc/benches/vec_deque_append.rs b/src/liballoc/benches/vec_deque_append.rs new file mode 100644 index 000000000000..bd3356511375 --- /dev/null +++ b/src/liballoc/benches/vec_deque_append.rs @@ -0,0 +1,48 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(duration_as_u128)] +use std::{collections::VecDeque, time::Instant}; + +const VECDEQUE_LEN: i32 = 100000; +const WARMUP_N: usize = 100; +const BENCH_N: usize = 1000; + +fn main() { + let a: VecDeque = (0..VECDEQUE_LEN).collect(); + let b: VecDeque = (0..VECDEQUE_LEN).collect(); + + for _ in 0..WARMUP_N { + let mut c = a.clone(); + let mut d = b.clone(); + c.append(&mut d); + } + + let mut durations = Vec::with_capacity(BENCH_N); + + for _ in 0..BENCH_N { + let mut c = a.clone(); + let mut d = b.clone(); + let before = Instant::now(); + c.append(&mut d); + let after = Instant::now(); + durations.push(after.duration_since(before)); + } + + let l = durations.len(); + durations.sort(); + + assert!(BENCH_N % 2 == 0); + let median = (durations[(l / 2) - 1] + durations[l / 2]) / 2; + println!( + "\ncustom-bench vec_deque_append {:?} ns/iter\n", + median.as_nanos() + ); +} diff --git a/src/liballoc/borrow.rs b/src/liballoc/borrow.rs index acae0daa86b6..c6741ddb822d 100644 --- a/src/liballoc/borrow.rs +++ b/src/liballoc/borrow.rs @@ -59,6 +59,7 @@ pub trait ToOwned { /// let vv: Vec = v.to_owned(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "cloning is often expensive and is not expected to have side effects"] fn to_owned(&self) -> Self::Owned; /// Uses borrowed data to replace owned data, usually by cloning. 
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 6f125cdba819..08db5136d040 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -55,54 +55,23 @@ #![stable(feature = "rust1", since = "1.0.0")] -use heap::{Heap, Layout, Alloc}; -use raw_vec::RawVec; - use core::any::Any; use core::borrow; use core::cmp::Ordering; -use core::fmt; -use core::hash::{self, Hash, Hasher}; -use core::iter::FusedIterator; -use core::marker::{self, Unsize}; -use core::mem; -use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState}; -use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer}; -use core::ptr::{self, Unique}; use core::convert::From; +use core::fmt; +use core::future::{Future, FutureObj, LocalFutureObj, UnsafeFutureObj}; +use core::hash::{Hash, Hasher}; +use core::iter::FusedIterator; +use core::marker::{Unpin, Unsize}; +use core::mem::{self, PinMut}; +use core::ops::{CoerceUnsized, Deref, DerefMut, Generator, GeneratorState}; +use core::ptr::{self, NonNull, Unique}; +use core::task::{Context, Poll, Spawn, SpawnErrorKind, SpawnObjError}; + +use raw_vec::RawVec; use str::from_boxed_utf8_unchecked; -/// A value that represents the heap. This is the default place that the `box` -/// keyword allocates into when no place is supplied. -/// -/// The following two examples are equivalent: -/// -/// ``` -/// #![feature(box_heap)] -/// -/// #![feature(box_syntax, placement_in_syntax)] -/// use std::boxed::HEAP; -/// -/// fn main() { -/// let foo: Box = in HEAP { 5 }; -/// let foo = box 5; -/// } -/// ``` -#[unstable(feature = "box_heap", - reason = "may be renamed; uncertain about custom allocator design", - issue = "27779")] -pub const HEAP: ExchangeHeapSingleton = ExchangeHeapSingleton { _force_singleton: () }; - -/// This the singleton type used solely for `boxed::HEAP`. 
-#[unstable(feature = "box_heap", - reason = "may be renamed; uncertain about custom allocator design", - issue = "27779")] -#[allow(missing_debug_implementations)] -#[derive(Copy, Clone)] -pub struct ExchangeHeapSingleton { - _force_singleton: (), -} - /// A pointer type for heap allocation. /// /// See the [module-level documentation](../../std/boxed/index.html) for more. @@ -111,121 +80,6 @@ pub struct ExchangeHeapSingleton { #[stable(feature = "rust1", since = "1.0.0")] pub struct Box(Unique); -/// `IntermediateBox` represents uninitialized backing storage for `Box`. -/// -/// FIXME (pnkfelix): Ideally we would just reuse `Box` instead of -/// introducing a separate `IntermediateBox`; but then you hit -/// issues when you e.g. attempt to destructure an instance of `Box`, -/// since it is a lang item and so it gets special handling by the -/// compiler. Easier just to make this parallel type for now. -/// -/// FIXME (pnkfelix): Currently the `box` protocol only supports -/// creating instances of sized types. This IntermediateBox is -/// designed to be forward-compatible with a future protocol that -/// supports creating instances of unsized types; that is why the type -/// parameter has the `?Sized` generalization marker, and is also why -/// this carries an explicit size. However, it probably does not need -/// to carry the explicit alignment; that is just a work-around for -/// the fact that the `align_of` intrinsic currently requires the -/// input type to be Sized (which I do not think is strictly -/// necessary). 
-#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -#[allow(missing_debug_implementations)] -pub struct IntermediateBox { - ptr: *mut u8, - layout: Layout, - marker: marker::PhantomData<*mut T>, -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl Place for IntermediateBox { - fn pointer(&mut self) -> *mut T { - self.ptr as *mut T - } -} - -unsafe fn finalize(b: IntermediateBox) -> Box { - let p = b.ptr as *mut T; - mem::forget(b); - Box::from_raw(p) -} - -fn make_place() -> IntermediateBox { - let layout = Layout::new::(); - - let p = if layout.size() == 0 { - mem::align_of::() as *mut u8 - } else { - unsafe { - Heap.alloc(layout.clone()).unwrap_or_else(|err| { - Heap.oom(err) - }) - } - }; - - IntermediateBox { - ptr: p, - layout, - marker: marker::PhantomData, - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl BoxPlace for IntermediateBox { - fn make_place() -> IntermediateBox { - make_place() - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl InPlace for IntermediateBox { - type Owner = Box; - unsafe fn finalize(self) -> Box { - finalize(self) - } -} - -#[unstable(feature = "placement_new_protocol", issue = "27779")] -impl Boxed for Box { - type Data = T; - type Place = IntermediateBox; - unsafe fn finalize(b: IntermediateBox) -> Box { - finalize(b) - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl Placer for ExchangeHeapSingleton { - type Place = IntermediateBox; - - fn make_place(self) -> IntermediateBox { - make_place() - } -} - -#[unstable(feature = "placement_in", - reason = "placement box design is still being worked out.", - issue = "27779")] -impl Drop for 
IntermediateBox { - fn drop(&mut self) { - if self.layout.size() > 0 { - unsafe { - Heap.dealloc(self.ptr, self.layout.clone()) - } - } - } -} - impl Box { /// Allocates memory on the heap and then places `x` into it. /// @@ -269,38 +123,7 @@ impl Box { #[stable(feature = "box_raw", since = "1.4.0")] #[inline] pub unsafe fn from_raw(raw: *mut T) -> Self { - Box::from_unique(Unique::new_unchecked(raw)) - } - - /// Constructs a `Box` from a `Unique` pointer. - /// - /// After calling this function, the memory is owned by a `Box` and `T` can - /// then be destroyed and released upon drop. - /// - /// # Safety - /// - /// A `Unique` can be safely created via [`Unique::new`] and thus doesn't - /// necessarily own the data pointed to nor is the data guaranteed to live - /// as long as the pointer. - /// - /// [`Unique::new`]: ../../core/ptr/struct.Unique.html#method.new - /// - /// # Examples - /// - /// ``` - /// #![feature(unique)] - /// - /// fn main() { - /// let x = Box::new(5); - /// let ptr = Box::into_unique(x); - /// let x = unsafe { Box::from_unique(ptr) }; - /// } - /// ``` - #[unstable(feature = "unique", reason = "needs an RFC to flesh out design", - issue = "27730")] - #[inline] - pub unsafe fn from_unique(u: Unique) -> Self { - Box(u) + Box(Unique::new_unchecked(raw)) } /// Consumes the `Box`, returning the wrapped raw pointer. @@ -326,41 +149,44 @@ impl Box { #[stable(feature = "box_raw", since = "1.4.0")] #[inline] pub fn into_raw(b: Box) -> *mut T { - Box::into_unique(b).as_ptr() + Box::into_raw_non_null(b).as_ptr() } - /// Consumes the `Box`, returning the wrapped pointer as `Unique`. + /// Consumes the `Box`, returning the wrapped pointer as `NonNull`. /// /// After calling this function, the caller is responsible for the /// memory previously managed by the `Box`. In particular, the /// caller should properly destroy `T` and release the memory. 
The - /// proper way to do so is to either convert the `Unique` pointer: - /// - /// - Into a `Box` with the [`Box::from_unique`] function. - /// - /// - Into a raw pointer and back into a `Box` with the [`Box::from_raw`] - /// function. + /// proper way to do so is to convert the `NonNull` pointer + /// into a raw pointer and back into a `Box` with the [`Box::from_raw`] + /// function. /// /// Note: this is an associated function, which means that you have - /// to call it as `Box::into_unique(b)` instead of `b.into_unique()`. This + /// to call it as `Box::into_raw_non_null(b)` + /// instead of `b.into_raw_non_null()`. This /// is so that there is no conflict with a method on the inner type. /// - /// [`Box::from_unique`]: struct.Box.html#method.from_unique /// [`Box::from_raw`]: struct.Box.html#method.from_raw /// /// # Examples /// /// ``` - /// #![feature(unique)] + /// #![feature(box_into_raw_non_null)] /// /// fn main() { /// let x = Box::new(5); - /// let ptr = Box::into_unique(x); + /// let ptr = Box::into_raw_non_null(x); /// } /// ``` - #[unstable(feature = "unique", reason = "needs an RFC to flesh out design", - issue = "27730")] + #[unstable(feature = "box_into_raw_non_null", issue = "47336")] #[inline] + pub fn into_raw_non_null(b: Box) -> NonNull { + Box::into_unique(b).into() + } + + #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")] + #[inline] + #[doc(hidden)] pub fn into_unique(b: Box) -> Unique { let unique = b.0; mem::forget(b); @@ -368,7 +194,9 @@ impl Box { } /// Consumes and leaks the `Box`, returning a mutable reference, - /// `&'a mut T`. Here, the lifetime `'a` may be chosen to be `'static`. + /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime + /// `'a`. If the type has only static references, or none at all, then this + /// may be chosen to be `'static`. /// /// This function is mainly useful for data that lives for the remainder of /// the program's life. 
Dropping the returned reference will cause a memory @@ -388,8 +216,6 @@ impl Box { /// Simple usage: /// /// ``` - /// #![feature(box_leak)] - /// /// fn main() { /// let x = Box::new(41); /// let static_ref: &'static mut usize = Box::leak(x); @@ -401,8 +227,6 @@ impl Box { /// Unsized data: /// /// ``` - /// #![feature(box_leak)] - /// /// fn main() { /// let x = vec![1, 2, 3].into_boxed_slice(); /// let static_ref = Box::leak(x); @@ -410,8 +234,7 @@ impl Box { /// assert_eq!(*static_ref, [4, 2, 3]); /// } /// ``` - #[unstable(feature = "box_leak", reason = "needs an FCP to stabilize", - issue = "46179")] + #[stable(feature = "box_leak", since = "1.26.0")] #[inline] pub fn leak<'a>(b: Box) -> &'a mut T where @@ -542,7 +365,7 @@ impl Eq for Box {} #[stable(feature = "rust1", since = "1.0.0")] impl Hash for Box { - fn hash(&self, state: &mut H) { + fn hash(&self, state: &mut H) { (**self).hash(state); } } @@ -611,6 +434,7 @@ impl<'a, T: Copy> From<&'a [T]> for Box<[T]> { #[stable(feature = "box_from_slice", since = "1.17.0")] impl<'a> From<&'a str> for Box { + #[inline] fn from(s: &'a str) -> Box { unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) } } @@ -618,12 +442,13 @@ impl<'a> From<&'a str> for Box { #[stable(feature = "boxed_str_conv", since = "1.19.0")] impl From> for Box<[u8]> { + #[inline] fn from(s: Box) -> Self { unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) } } } -impl Box { +impl Box { #[inline] #[stable(feature = "rust1", since = "1.0.0")] /// Attempt to downcast the box to a concrete type. 
@@ -645,10 +470,10 @@ impl Box { /// print_if_string(Box::new(0i8)); /// } /// ``` - pub fn downcast(self) -> Result, Box> { + pub fn downcast(self) -> Result, Box> { if self.is::() { unsafe { - let raw: *mut Any = Box::into_raw(self); + let raw: *mut dyn Any = Box::into_raw(self); Ok(Box::from_raw(raw as *mut T)) } } else { @@ -657,7 +482,7 @@ impl Box { } } -impl Box { +impl Box { #[inline] #[stable(feature = "rust1", since = "1.0.0")] /// Attempt to downcast the box to a concrete type. @@ -679,10 +504,10 @@ impl Box { /// print_if_string(Box::new(0i8)); /// } /// ``` - pub fn downcast(self) -> Result, Box> { - >::downcast(self).map_err(|s| unsafe { + pub fn downcast(self) -> Result, Box> { + >::downcast(self).map_err(|s| unsafe { // reapply the Send marker - Box::from_raw(Box::into_raw(s) as *mut (Any + Send)) + Box::from_raw(Box::into_raw(s) as *mut (dyn Any + Send)) }) } } @@ -756,7 +581,7 @@ impl ExactSizeIterator for Box { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Box {} @@ -820,7 +645,7 @@ impl FnBox for F #[unstable(feature = "fnbox", reason = "will be deprecated if and when `Box` becomes usable", issue = "28796")] -impl<'a, A, R> FnOnce for Box + 'a> { +impl<'a, A, R> FnOnce for Box + 'a> { type Output = R; extern "rust-call" fn call_once(self, args: A) -> R { @@ -830,7 +655,7 @@ impl<'a, A, R> FnOnce for Box + 'a> { #[unstable(feature = "fnbox", reason = "will be deprecated if and when `Box` becomes usable", issue = "28796")] -impl<'a, A, R> FnOnce for Box + Send + 'a> { +impl<'a, A, R> FnOnce for Box + Send + 'a> { type Output = R; extern "rust-call" fn call_once(self, args: A) -> R { @@ -926,7 +751,267 @@ impl Generator for Box { type Yield = T::Yield; type Return = T::Return; - fn resume(&mut self) -> GeneratorState { + unsafe fn resume(&mut self) -> GeneratorState { (**self).resume() } } + +/// A pinned, heap allocated reference. 
+#[unstable(feature = "pin", issue = "49150")] +#[fundamental] +#[repr(transparent)] +pub struct PinBox { + inner: Box, +} + +#[unstable(feature = "pin", issue = "49150")] +impl PinBox { + /// Allocate memory on the heap, move the data into it and pin it. + #[unstable(feature = "pin", issue = "49150")] + pub fn new(data: T) -> PinBox { + PinBox { inner: Box::new(data) } + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl PinBox { + /// Get a pinned reference to the data in this PinBox. + #[inline] + pub fn as_pin_mut<'a>(&'a mut self) -> PinMut<'a, T> { + unsafe { PinMut::new_unchecked(&mut *self.inner) } + } + + /// Constructs a `PinBox` from a raw pointer. + /// + /// After calling this function, the raw pointer is owned by the + /// resulting `PinBox`. Specifically, the `PinBox` destructor will call + /// the destructor of `T` and free the allocated memory. Since the + /// way `PinBox` allocates and releases memory is unspecified, the + /// only valid pointer to pass to this function is the one taken + /// from another `PinBox` via the [`PinBox::into_raw`] function. + /// + /// This function is unsafe because improper use may lead to + /// memory problems. For example, a double-free may occur if the + /// function is called twice on the same raw pointer. + /// + /// [`PinBox::into_raw`]: struct.PinBox.html#method.into_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(pin)] + /// use std::boxed::PinBox; + /// let x = PinBox::new(5); + /// let ptr = PinBox::into_raw(x); + /// let x = unsafe { PinBox::from_raw(ptr) }; + /// ``` + #[inline] + pub unsafe fn from_raw(raw: *mut T) -> Self { + PinBox { inner: Box::from_raw(raw) } + } + + /// Consumes the `PinBox`, returning the wrapped raw pointer. + /// + /// After calling this function, the caller is responsible for the + /// memory previously managed by the `PinBox`. In particular, the + /// caller should properly destroy `T` and release the memory. 
The + /// proper way to do so is to convert the raw pointer back into a + /// `PinBox` with the [`PinBox::from_raw`] function. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `PinBox::into_raw(b)` instead of `b.into_raw()`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// [`PinBox::from_raw`]: struct.PinBox.html#method.from_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(pin)] + /// use std::boxed::PinBox; + /// let x = PinBox::new(5); + /// let ptr = PinBox::into_raw(x); + /// ``` + #[inline] + pub fn into_raw(b: PinBox) -> *mut T { + Box::into_raw(b.inner) + } + + /// Get a mutable reference to the data inside this PinBox. + /// + /// This function is unsafe. Users must guarantee that the data is never + /// moved out of this reference. + #[inline] + pub unsafe fn get_mut<'a>(this: &'a mut PinBox) -> &'a mut T { + &mut *this.inner + } + + /// Convert this PinBox into an unpinned Box. + /// + /// This function is unsafe. Users must guarantee that the data is never + /// moved out of the box. 
+ #[inline] + pub unsafe fn unpin(this: PinBox) -> Box { + this.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl From> for PinBox { + fn from(boxed: Box) -> PinBox { + PinBox { inner: boxed } + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl From> for Box { + fn from(pinned: PinBox) -> Box { + pinned.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl Deref for PinBox { + type Target = T; + + fn deref(&self) -> &T { + &*self.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl DerefMut for PinBox { + fn deref_mut(&mut self) -> &mut T { + &mut *self.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl fmt::Display for PinBox { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&*self.inner, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl fmt::Debug for PinBox { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&*self.inner, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl fmt::Pointer for PinBox { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // It's not possible to extract the inner Uniq directly from the Box, + // instead we cast it to a *const which aliases the Unique + let ptr: *const T = &*self.inner; + fmt::Pointer::fmt(&ptr, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl, U: ?Sized> CoerceUnsized> for PinBox {} + +#[unstable(feature = "pin", issue = "49150")] +impl Unpin for PinBox {} + +#[unstable(feature = "futures_api", issue = "50547")] +impl Future for Box { + type Output = F::Output; + + fn poll(mut self: PinMut, cx: &mut Context) -> Poll { + PinMut::new(&mut **self).poll(cx) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl Future for PinBox { + type Output = F::Output; + + fn poll(mut self: PinMut, cx: &mut Context) -> Poll { + self.as_pin_mut().poll(cx) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +unsafe impl<'a, T, F> 
UnsafeFutureObj<'a, T> for Box + where F: Future + 'a +{ + fn into_raw(self) -> *mut () { + Box::into_raw(self) as *mut () + } + + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll { + let ptr = ptr as *mut F; + let pin: PinMut = PinMut::new_unchecked(&mut *ptr); + pin.poll(cx) + } + + unsafe fn drop(ptr: *mut ()) { + drop(Box::from_raw(ptr as *mut F)) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for PinBox + where F: Future + 'a +{ + fn into_raw(self) -> *mut () { + PinBox::into_raw(self) as *mut () + } + + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll { + let ptr = ptr as *mut F; + let pin: PinMut = PinMut::new_unchecked(&mut *ptr); + pin.poll(cx) + } + + unsafe fn drop(ptr: *mut ()) { + drop(PinBox::from_raw(ptr as *mut F)) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl Spawn for Box + where Sp: Spawn + ?Sized +{ + fn spawn_obj( + &mut self, + future: FutureObj<'static, ()>, + ) -> Result<(), SpawnObjError> { + (**self).spawn_obj(future) + } + + fn status(&self) -> Result<(), SpawnErrorKind> { + (**self).status() + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future + Send + 'a> From> for FutureObj<'a, ()> { + fn from(boxed: PinBox) -> Self { + FutureObj::new(boxed) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future + Send + 'a> From> for FutureObj<'a, ()> { + fn from(boxed: Box) -> Self { + FutureObj::new(boxed) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future + 'a> From> for LocalFutureObj<'a, ()> { + fn from(boxed: PinBox) -> Self { + LocalFutureObj::new(boxed) + } +} + +#[unstable(feature = "futures_api", issue = "50547")] +impl<'a, F: Future + 'a> From> for LocalFutureObj<'a, ()> { + fn from(boxed: Box) -> Self { + LocalFutureObj::new(boxed) + } +} diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/boxed_test.rs index 837f8dfaca13..55995742a4a7 100644 --- 
a/src/liballoc/boxed_test.rs +++ b/src/liballoc/boxed_test.rs @@ -31,8 +31,8 @@ struct Test; #[test] fn any_move() { - let a = Box::new(8) as Box; - let b = Box::new(Test) as Box; + let a = Box::new(8) as Box; + let b = Box::new(Test) as Box; match a.downcast::() { Ok(a) => { @@ -47,8 +47,8 @@ fn any_move() { Err(..) => panic!(), } - let a = Box::new(8) as Box; - let b = Box::new(Test) as Box; + let a = Box::new(8) as Box; + let b = Box::new(Test) as Box; assert!(a.downcast::>().is_err()); assert!(b.downcast::>().is_err()); @@ -56,8 +56,8 @@ fn any_move() { #[test] fn test_show() { - let a = Box::new(8) as Box; - let b = Box::new(Test) as Box; + let a = Box::new(8) as Box; + let b = Box::new(Test) as Box; let a_str = format!("{:?}", a); let b_str = format!("{:?}", b); assert_eq!(a_str, "Any"); @@ -65,8 +65,8 @@ fn test_show() { static EIGHT: usize = 8; static TEST: Test = Test; - let a = &EIGHT as &Any; - let b = &TEST as &Any; + let a = &EIGHT as &dyn Any; + let b = &TEST as &dyn Any; let s = format!("{:?}", a); assert_eq!(s, "Any"); let s = format!("{:?}", b); @@ -110,12 +110,12 @@ fn raw_trait() { } } - let x: Box = Box::new(Bar(17)); + let x: Box = Box::new(Bar(17)); let p = Box::into_raw(x); unsafe { assert_eq!(17, (*p).get()); (*p).set(19); - let y: Box = Box::from_raw(p); + let y: Box = Box::from_raw(p); assert_eq!(19, y.get()); } } diff --git a/src/liballoc/binary_heap.rs b/src/liballoc/collections/binary_heap.rs similarity index 93% rename from src/liballoc/binary_heap.rs rename to src/liballoc/collections/binary_heap.rs index 94bbaf92ce9b..fcadcb544c43 100644 --- a/src/liballoc/binary_heap.rs +++ b/src/liballoc/collections/binary_heap.rs @@ -155,9 +155,9 @@ #![allow(missing_docs)] #![stable(feature = "rust1", since = "1.0.0")] -use core::ops::{Deref, DerefMut, Place, Placer, InPlace}; +use core::ops::{Deref, DerefMut}; use core::iter::{FromIterator, FusedIterator}; -use core::mem::{swap, size_of}; +use core::mem::{swap, size_of, ManuallyDrop}; use 
core::ptr; use core::fmt; @@ -509,6 +509,31 @@ impl BinaryHeap { self.data.shrink_to_fit(); } + /// Discards capacity with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// use std::collections::BinaryHeap; + /// let mut heap: BinaryHeap = BinaryHeap::with_capacity(100); + /// + /// assert!(heap.capacity() >= 100); + /// heap.shrink_to(10); + /// assert!(heap.capacity() >= 10); + /// ``` + #[inline] + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.data.shrink_to(min_capacity) + } + /// Removes the greatest item from the binary heap and returns it, or `None` if it /// is empty. /// @@ -839,8 +864,7 @@ impl BinaryHeap { /// position with the value that was originally removed. struct Hole<'a, T: 'a> { data: &'a mut [T], - /// `elt` is always `Some` from new until drop. - elt: Option, + elt: ManuallyDrop, pos: usize, } @@ -854,7 +878,7 @@ impl<'a, T> Hole<'a, T> { let elt = ptr::read(&data[pos]); Hole { data, - elt: Some(elt), + elt: ManuallyDrop::new(elt), pos, } } @@ -867,7 +891,7 @@ impl<'a, T> Hole<'a, T> { /// Returns a reference to the element removed. #[inline] fn element(&self) -> &T { - self.elt.as_ref().unwrap() + &self.elt } /// Returns a reference to the element at `index`. 
@@ -900,7 +924,7 @@ impl<'a, T> Drop for Hole<'a, T> { // fill the hole again unsafe { let pos = self.pos; - ptr::write(self.data.get_unchecked_mut(pos), self.elt.take().unwrap()); + ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1); } } } @@ -964,7 +988,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} /// An owning iterator over the elements of a `BinaryHeap`. @@ -1019,7 +1043,7 @@ impl ExactSizeIterator for IntoIter { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} /// A draining iterator over the elements of a `BinaryHeap`. @@ -1065,7 +1089,7 @@ impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} #[stable(feature = "binary_heap_extras_15", since = "1.5.0")] @@ -1170,67 +1194,3 @@ impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap { self.extend(iter.into_iter().cloned()); } } - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -pub struct BinaryHeapPlace<'a, T: 'a> -where T: Clone + Ord { - heap: *mut BinaryHeap, - place: vec::PlaceBack<'a, T>, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T: Clone + Ord + fmt::Debug> fmt::Debug for BinaryHeapPlace<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("BinaryHeapPlace") - .field(&self.place) - .finish() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T: 'a> Placer for &'a mut BinaryHeap -where T: Clone + Ord { - type Place = 
BinaryHeapPlace<'a, T>; - - fn make_place(self) -> Self::Place { - let ptr = self as *mut BinaryHeap; - let place = Placer::make_place(self.data.place_back()); - BinaryHeapPlace { - heap: ptr, - place, - } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place for BinaryHeapPlace<'a, T> -where T: Clone + Ord { - fn pointer(&mut self) -> *mut T { - self.place.pointer() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace for BinaryHeapPlace<'a, T> -where T: Clone + Ord { - type Owner = &'a T; - - unsafe fn finalize(self) -> &'a T { - self.place.finalize(); - - let heap: &mut BinaryHeap = &mut *self.heap; - let len = heap.len(); - let i = heap.sift_up(0, len - 1); - heap.data.get_unchecked(i) - } -} diff --git a/src/liballoc/btree/map.rs b/src/liballoc/collections/btree/map.rs similarity index 95% rename from src/liballoc/btree/map.rs rename to src/liballoc/collections/btree/map.rs index b114dc640fba..8c950cd06d9e 100644 --- a/src/liballoc/btree/map.rs +++ b/src/liballoc/collections/btree/map.rs @@ -13,12 +13,12 @@ use core::fmt::Debug; use core::hash::{Hash, Hasher}; use core::iter::{FromIterator, Peekable, FusedIterator}; use core::marker::PhantomData; +use core::ops::Bound::{Excluded, Included, Unbounded}; use core::ops::Index; +use core::ops::RangeBounds; use core::{fmt, intrinsics, mem, ptr}; use borrow::Borrow; -use Bound::{Excluded, Included, Unbounded}; -use range::RangeArgument; use super::node::{self, Handle, NodeRef, marker}; use super::search; @@ -149,12 +149,11 @@ unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for BTreeMap { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for BTreeMap { fn clone(&self) -> BTreeMap { - fn clone_subtree(node: node::NodeRef) - -> BTreeMap { - + fn clone_subtree<'a, K: Clone, V: Clone>( + node: node::NodeRef, K, V, 
marker::LeafOrInternal> + ) -> BTreeMap + where K: 'a, V: 'a, + { match node.force() { Leaf(leaf) => { let mut out_tree = BTreeMap { @@ -213,7 +212,16 @@ impl Clone for BTreeMap { } } - clone_subtree(self.root.as_ref()) + if self.len() == 0 { + // Ideally we'd call `BTreeMap::new` here, but that has the `K: + // Ord` constraint, which this method lacks. + BTreeMap { + root: node::Root::shared_empty_root(), + length: 0, + } + } else { + clone_subtree(self.root.as_ref()) + } } } @@ -246,6 +254,7 @@ impl super::Recover for BTreeMap } fn replace(&mut self, key: K) -> Option { + self.ensure_root_is_owned(); match search::search_tree::(self.root.as_mut(), &key) { Found(handle) => Some(mem::replace(handle.into_kv_mut().0, key)), GoDown(handle) => { @@ -523,7 +532,7 @@ impl BTreeMap { #[stable(feature = "rust1", since = "1.0.0")] pub fn new() -> BTreeMap { BTreeMap { - root: node::Root::new_leaf(), + root: node::Root::shared_empty_root(), length: 0, } } @@ -544,7 +553,6 @@ impl BTreeMap { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { - // FIXME(gereeter) .clear() allocates *self = BTreeMap::new(); } @@ -576,6 +584,33 @@ impl BTreeMap { } } + /// Returns the key-value pair corresponding to the supplied key. + /// + /// The supplied key may be any borrowed form of the map's key type, but the ordering + /// on the borrowed form *must* match the ordering on the key type. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(map_get_key_value)] + /// use std::collections::BTreeMap; + /// + /// let mut map = BTreeMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); + /// assert_eq!(map.get_key_value(&2), None); + /// ``` + #[unstable(feature = "map_get_key_value", issue = "49347")] + pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> + where K: Borrow, + Q: Ord + { + match search::search_tree(self.root.as_ref(), k) { + Found(handle) => Some(handle.into_kv()), + GoDown(_) => None, + } + } + /// Returns `true` if the map contains a value for the specified key. /// /// The key may be any borrowed form of the map's key type, but the ordering @@ -777,7 +812,7 @@ impl BTreeMap { /// /// ``` /// use std::collections::BTreeMap; - /// use std::collections::Bound::Included; + /// use std::ops::Bound::Included; /// /// let mut map = BTreeMap::new(); /// map.insert(3, "a"); @@ -790,7 +825,7 @@ impl BTreeMap { /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range(&self, range: R) -> Range - where T: Ord, K: Borrow, R: RangeArgument + where T: Ord, K: Borrow, R: RangeBounds { let root1 = self.root.as_ref(); let root2 = self.root.as_ref(); @@ -830,7 +865,7 @@ impl BTreeMap { /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range_mut(&mut self, range: R) -> RangeMut - where T: Ord, K: Borrow, R: RangeArgument + where T: Ord, K: Borrow, R: RangeBounds { let root1 = self.root.as_mut(); let root2 = unsafe { ptr::read(&root1) }; @@ -863,6 +898,8 @@ impl BTreeMap { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn entry(&mut self, key: K) -> Entry { + // FIXME(@porglezomp) Avoid allocating if we don't insert + self.ensure_root_is_owned(); match search::search_tree(self.root.as_mut(), &key) { Found(handle) => { Occupied(OccupiedEntry { @@ -883,6 +920,7 @@ impl BTreeMap { } fn from_sorted_iter>(&mut self, iter: I) { + self.ensure_root_is_owned(); let mut 
cur_node = last_leaf_edge(self.root.as_mut()).into_node(); // Iterate through all key-value pairs, pushing them into nodes at the right level. for (key, value) in iter { @@ -992,6 +1030,7 @@ impl BTreeMap { let total_num = self.len(); let mut right = Self::new(); + right.root = node::Root::new_leaf(); for _ in 0..(self.root.as_ref().height()) { right.root.push_level(); } @@ -1040,7 +1079,11 @@ impl BTreeMap { /// Calculates the number of elements if it is incorrect. fn recalc_length(&mut self) { - fn dfs(node: NodeRef) -> usize { + fn dfs<'a, K, V>( + node: NodeRef, K, V, marker::LeafOrInternal> + ) -> usize + where K: 'a, V: 'a + { let mut res = node.len(); if let Internal(node) = node.force() { @@ -1126,6 +1169,13 @@ impl BTreeMap { self.fix_top(); } + + /// If the root node is the shared root node, allocate our own node. + fn ensure_root_is_owned(&mut self) { + if self.root.is_shared_root() { + self.root = node::Root::new_leaf(); + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1156,7 +1206,7 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Iter<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1235,7 +1285,7 @@ impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1260,10 +1310,13 @@ impl IntoIterator for BTreeMap { #[stable(feature = "btree_drop", since = "1.7.0")] impl Drop for IntoIter { fn drop(&mut self) { - for _ in &mut *self { - } + self.for_each(drop); unsafe { let leaf_node = ptr::read(&self.front).into_node(); + if leaf_node.is_shared_root() { + return; + } + if let Some(first_parent) = leaf_node.deallocate_and_ascend() { let mut cur_node = first_parent.into_node(); while let 
Some(parent) = cur_node.deallocate_and_ascend() { @@ -1365,7 +1418,7 @@ impl ExactSizeIterator for IntoIter { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1395,7 +1448,7 @@ impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Keys<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1432,7 +1485,7 @@ impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Values<'a, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1482,7 +1535,7 @@ impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {} @@ -1561,7 +1614,7 @@ impl<'a, K, V> Range<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for Range<'a, K, V> {} #[stable(feature = "btree_range", since = "1.17.0")] @@ -1630,7 +1683,7 @@ impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {} impl<'a, K, V> RangeMut<'a, K, V> { @@ -1748,6 +1801,11 @@ impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap { type Output = V; + /// Returns a reference to the value corresponding to the supplied key. + /// + /// # Panics + /// + /// Panics if the key is not present in the `BTreeMap`. 
#[inline] fn index(&self, key: &Q) -> &V { self.get(key).expect("no entry found for key") @@ -1780,7 +1838,7 @@ fn last_leaf_edge } } -fn range_search>( +fn range_search>( root1: NodeRef, root2: NodeRef, range: R @@ -1788,7 +1846,7 @@ fn range_search>( Handle, marker::Edge>) where Q: Ord, K: Borrow { - match (range.start(), range.end()) { + match (range.start_bound(), range.end_bound()) { (Excluded(s), Excluded(e)) if s==e => panic!("range start and end are equal and excluded in BTreeMap"), (Included(s), Included(e)) | @@ -1806,7 +1864,7 @@ fn range_search>( let mut diverged = false; loop { - let min_edge = match (min_found, range.start()) { + let min_edge = match (min_found, range.start_bound()) { (false, Included(key)) => match search::search_linear(&min_node, key) { (i, true) => { min_found = true; i }, (i, false) => i, @@ -1820,7 +1878,7 @@ fn range_search>( (true, Excluded(_)) => 0, }; - let max_edge = match (max_found, range.end()) { + let max_edge = match (max_found, range.end_bound()) { (false, Included(key)) => match search::search_linear(&max_node, key) { (i, true) => { max_found = true; i+1 }, (i, false) => i, @@ -2109,7 +2167,6 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// # Examples /// /// ``` - /// #![feature(entry_and_modify)] /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); @@ -2124,9 +2181,9 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// .or_insert(42); /// assert_eq!(map["poneyland"], 43); /// ``` - #[unstable(feature = "entry_and_modify", issue = "44733")] - pub fn and_modify(self, mut f: F) -> Self - where F: FnMut(&mut V) + #[stable(feature = "entry_and_modify", since = "1.26.0")] + pub fn and_modify(self, f: F) -> Self + where F: FnOnce(&mut V) { match self { Occupied(mut entry) => { @@ -2139,14 +2196,13 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { } impl<'a, K: Ord, V: Default> Entry<'a, K, V> { - #[unstable(feature = "entry_or_default", issue = "44324")] + #[stable(feature = "entry_or_default", 
since = "1.28.0")] /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// /// # Examples /// /// ``` - /// #![feature(entry_or_default)] /// # fn main() { /// use std::collections::BTreeMap; /// @@ -2324,6 +2380,11 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// Gets a mutable reference to the value in the entry. /// + /// If you need a reference to the `OccupiedEntry` which may outlive the + /// destruction of the `Entry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// /// # Examples /// /// ``` @@ -2335,9 +2396,13 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// /// assert_eq!(map["poneyland"], 12); /// if let Entry::Occupied(mut o) = map.entry("poneyland") { - /// *o.get_mut() += 10; + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same Entry multiple times. + /// *o.get_mut() += 2; /// } - /// assert_eq!(map["poneyland"], 22); + /// assert_eq!(map["poneyland"], 24); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut(&mut self) -> &mut V { @@ -2346,6 +2411,10 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// Converts the entry into a mutable reference to its value. /// + /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. 
+ /// + /// [`get_mut`]: #method.get_mut + /// /// # Examples /// /// ``` diff --git a/src/liballoc/btree/mod.rs b/src/liballoc/collections/btree/mod.rs similarity index 100% rename from src/liballoc/btree/mod.rs rename to src/liballoc/collections/btree/mod.rs diff --git a/src/liballoc/btree/node.rs b/src/liballoc/collections/btree/node.rs similarity index 91% rename from src/liballoc/btree/node.rs rename to src/liballoc/collections/btree/node.rs index c1618043ce66..0ae45b312325 100644 --- a/src/liballoc/btree/node.rs +++ b/src/liballoc/collections/btree/node.rs @@ -43,12 +43,11 @@ use core::marker::PhantomData; use core::mem; -use core::nonzero::NonZero; -use core::ptr::{self, Unique}; +use core::ptr::{self, Unique, NonNull}; use core::slice; +use alloc::{Global, Alloc, Layout}; use boxed::Box; -use heap::{Heap, Alloc, Layout}; const B: usize = 6; pub const MIN_LEN: usize = B - 1; @@ -61,12 +60,12 @@ pub const CAPACITY: usize = 2 * B - 1; /// /// See also rust-lang/rfcs#197, which would make this structure significantly more safe by /// avoiding accidentally dropping unused and uninitialized keys and values. +/// +/// We put the metadata first so that its position is the same for every `K` and `V`, in order +/// to statically allocate a single dummy node to avoid allocations. This struct is `repr(C)` to +/// prevent them from being reordered. +#[repr(C)] struct LeafNode { - /// The arrays storing the actual data of the node. Only the first `len` elements of each - /// array are initialized and valid. - keys: [K; CAPACITY], - vals: [V; CAPACITY], - /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`. /// This either points to an actual node or is null. parent: *const InternalNode, @@ -78,10 +77,14 @@ struct LeafNode { /// The number of keys and values this node stores. 
/// - /// This is at the end of the node's representation and next to `parent_idx` to encourage - /// the compiler to join `len` and `parent_idx` into the same 32-bit word, reducing space - /// overhead. + /// This next to `parent_idx` to encourage the compiler to join `len` and + /// `parent_idx` into the same 32-bit word, reducing space overhead. len: u16, + + /// The arrays storing the actual data of the node. Only the first `len` elements of each + /// array are initialized and valid. + keys: [K; CAPACITY], + vals: [V; CAPACITY], } impl LeafNode { @@ -98,8 +101,26 @@ impl LeafNode { len: 0 } } + + fn is_shared_root(&self) -> bool { + ptr::eq(self, &EMPTY_ROOT_NODE as *const _ as *const _) + } } +// We need to implement Sync here in order to make a static instance. +unsafe impl Sync for LeafNode<(), ()> {} + +// An empty node used as a placeholder for the root node, to avoid allocations. +// We use () in order to save space, since no operation on an empty tree will +// ever take a pointer past the first key. +static EMPTY_ROOT_NODE: LeafNode<(), ()> = LeafNode { + parent: ptr::null(), + parent_idx: 0, + len: 0, + keys: [(); CAPACITY], + vals: [(); CAPACITY], +}; + /// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden /// behind `BoxedNode`s to prevent dropping uninitialized keys and values. 
Any pointer to an /// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the @@ -149,14 +170,12 @@ impl BoxedNode { } } - unsafe fn from_ptr(ptr: NonZero<*const LeafNode>) -> Self { - BoxedNode { ptr: Unique::new_unchecked(ptr.get() as *mut LeafNode) } + unsafe fn from_ptr(ptr: NonNull>) -> Self { + BoxedNode { ptr: Unique::from(ptr) } } - fn as_ptr(&self) -> NonZero<*const LeafNode> { - unsafe { - NonZero::from(self.ptr.as_ref()) - } + fn as_ptr(&self) -> NonNull> { + NonNull::from(self.ptr) } } @@ -171,6 +190,21 @@ unsafe impl Sync for Root { } unsafe impl Send for Root { } impl Root { + pub fn is_shared_root(&self) -> bool { + self.as_ref().is_shared_root() + } + + pub fn shared_empty_root() -> Self { + Root { + node: unsafe { + BoxedNode::from_ptr(NonNull::new_unchecked( + &EMPTY_ROOT_NODE as *const _ as *const LeafNode as *mut _ + )) + }, + height: 0, + } + } + pub fn new_leaf() -> Self { Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), @@ -212,6 +246,7 @@ impl Root { /// new node the root. This increases the height by 1 and is the opposite of `pop_level`. pub fn push_level(&mut self) -> NodeRef { + debug_assert!(!self.is_shared_root()); let mut new_node = Box::new(unsafe { InternalNode::new() }); new_node.edges[0] = unsafe { BoxedNode::from_ptr(self.node.as_ptr()) }; @@ -239,7 +274,7 @@ impl Root { pub fn pop_level(&mut self) { debug_assert!(self.height > 0); - let top = self.node.ptr.as_ptr() as *mut u8; + let top = self.node.ptr; self.node = unsafe { BoxedNode::from_ptr(self.as_mut() @@ -252,7 +287,7 @@ impl Root { self.as_mut().as_leaf_mut().parent = ptr::null(); unsafe { - Heap.dealloc(top, Layout::new::>()); + Global.dealloc(NonNull::from(top).cast(), Layout::new::>()); } } } @@ -276,7 +311,7 @@ impl Root { /// `NodeRef` could be pointing to either type of node. 
pub struct NodeRef { height: usize, - node: NonZero<*const LeafNode>, + node: NonNull>, // This is null unless the borrow type is `Mut` root: *const Root, _marker: PhantomData<(BorrowType, Type)> @@ -302,7 +337,7 @@ unsafe impl Send impl NodeRef { fn as_internal(&self) -> &InternalNode { unsafe { - &*(self.node.get() as *const InternalNode) + &*(self.node.as_ptr() as *mut InternalNode) } } } @@ -310,7 +345,7 @@ impl NodeRef { impl<'a, K, V> NodeRef, K, V, marker::Internal> { fn as_internal_mut(&mut self) -> &mut InternalNode { unsafe { - &mut *(self.node.get() as *mut InternalNode) + &mut *(self.node.as_ptr() as *mut InternalNode) } } } @@ -352,16 +387,20 @@ impl NodeRef { fn as_leaf(&self) -> &LeafNode { unsafe { - &*self.node.get() + self.node.as_ref() } } - pub fn keys(&self) -> &[K] { - self.reborrow().into_slices().0 + pub fn is_shared_root(&self) -> bool { + self.as_leaf().is_shared_root() } - pub fn vals(&self) -> &[V] { - self.reborrow().into_slices().1 + pub fn keys(&self) -> &[K] { + self.reborrow().into_key_slice() + } + + fn vals(&self) -> &[V] { + self.reborrow().into_val_slice() } /// Finds the parent of the current node. 
Returns `Ok(handle)` if the current @@ -382,7 +421,8 @@ impl NodeRef { >, Self > { - if let Some(non_zero) = NonZero::new(self.as_leaf().parent as *const LeafNode) { + let parent_as_leaf = self.as_leaf().parent as *const LeafNode; + if let Some(non_zero) = NonNull::new(parent_as_leaf as *mut _) { Ok(Handle { node: NodeRef { height: self.height + 1, @@ -435,9 +475,10 @@ impl NodeRef { marker::Edge > > { - let ptr = self.as_leaf() as *const LeafNode as *const u8 as *mut u8; + debug_assert!(!self.is_shared_root()); + let node = self.node; let ret = self.ascend().ok(); - Heap.dealloc(ptr, Layout::new::>()); + Global.dealloc(node.cast(), Layout::new::>()); ret } } @@ -456,9 +497,9 @@ impl NodeRef { marker::Edge > > { - let ptr = self.as_internal() as *const InternalNode as *const u8 as *mut u8; + let node = self.node; let ret = self.ascend().ok(); - Heap.dealloc(ptr, Layout::new::>()); + Global.dealloc(node.cast(), Layout::new::>()); ret } } @@ -498,34 +539,55 @@ impl<'a, K, V, Type> NodeRef, K, V, Type> { fn as_leaf_mut(&mut self) -> &mut LeafNode { unsafe { - &mut *(self.node.get() as *mut LeafNode) + self.node.as_mut() } } - pub fn keys_mut(&mut self) -> &mut [K] { - unsafe { self.reborrow_mut().into_slices_mut().0 } + fn keys_mut(&mut self) -> &mut [K] { + unsafe { self.reborrow_mut().into_key_slice_mut() } } - pub fn vals_mut(&mut self) -> &mut [V] { - unsafe { self.reborrow_mut().into_slices_mut().1 } + fn vals_mut(&mut self) -> &mut [V] { + unsafe { self.reborrow_mut().into_val_slice_mut() } } } impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { - pub fn into_slices(self) -> (&'a [K], &'a [V]) { - unsafe { - ( + fn into_key_slice(self) -> &'a [K] { + // When taking a pointer to the keys, if our key has a stricter + // alignment requirement than the shared root does, then the pointer + // would be out of bounds, which LLVM assumes will not happen. If the + // alignment is more strict, we need to make an empty slice that doesn't + // use an out of bounds pointer. 
+ if mem::align_of::() > mem::align_of::>() && self.is_shared_root() { + &[] + } else { + // Here either it's not the root, or the alignment is less strict, + // in which case the keys pointer will point "one-past-the-end" of + // the node, which is allowed by LLVM. + unsafe { slice::from_raw_parts( self.as_leaf().keys.as_ptr(), self.len() - ), - slice::from_raw_parts( - self.as_leaf().vals.as_ptr(), - self.len() ) + } + } + } + + fn into_val_slice(self) -> &'a [V] { + debug_assert!(!self.is_shared_root()); + unsafe { + slice::from_raw_parts( + self.as_leaf().vals.as_ptr(), + self.len() ) } } + + fn into_slices(self) -> (&'a [K], &'a [V]) { + let k = unsafe { ptr::read(&self) }; + (k.into_key_slice(), self.into_val_slice()) + } } impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { @@ -537,20 +599,33 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { } } - pub fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) { - unsafe { - ( + fn into_key_slice_mut(mut self) -> &'a mut [K] { + if mem::align_of::() > mem::align_of::>() && self.is_shared_root() { + &mut [] + } else { + unsafe { slice::from_raw_parts_mut( &mut self.as_leaf_mut().keys as *mut [K] as *mut K, self.len() - ), - slice::from_raw_parts_mut( - &mut self.as_leaf_mut().vals as *mut [V] as *mut V, - self.len() ) + } + } + } + + fn into_val_slice_mut(mut self) -> &'a mut [V] { + debug_assert!(!self.is_shared_root()); + unsafe { + slice::from_raw_parts_mut( + &mut self.as_leaf_mut().vals as *mut [V] as *mut V, + self.len() ) } } + + fn into_slices_mut(self) -> (&'a mut [K], &'a mut [V]) { + let k = unsafe { ptr::read(&self) }; + (k.into_key_slice_mut(), self.into_val_slice_mut()) + } } impl<'a, K, V> NodeRef, K, V, marker::Leaf> { @@ -558,6 +633,7 @@ impl<'a, K, V> NodeRef, K, V, marker::Leaf> { pub fn push(&mut self, key: K, val: V) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() < CAPACITY); + debug_assert!(!self.is_shared_root()); let idx = self.len(); @@ 
-573,6 +649,7 @@ impl<'a, K, V> NodeRef, K, V, marker::Leaf> { pub fn push_front(&mut self, key: K, val: V) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() < CAPACITY); + debug_assert!(!self.is_shared_root()); unsafe { slice_insert(self.keys_mut(), 0, key); @@ -886,6 +963,7 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::Edge fn insert_fit(&mut self, key: K, val: V) -> *mut V { // Necessary for correctness, but in a private module debug_assert!(self.node.len() < CAPACITY); + debug_assert!(!self.node.is_shared_root()); unsafe { slice_insert(self.node.keys_mut(), self.idx, key); @@ -1063,6 +1141,7 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::KV> /// allocated node. pub fn split(mut self) -> (NodeRef, K, V, marker::Leaf>, K, V, Root) { + debug_assert!(!self.node.is_shared_root()); unsafe { let mut new_node = Box::new(LeafNode::new()); @@ -1100,6 +1179,7 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::KV> /// now adjacent key/value pairs to the left and right of this handle. 
pub fn remove(mut self) -> (Handle, K, V, marker::Leaf>, marker::Edge>, K, V) { + debug_assert!(!self.node.is_shared_root()); unsafe { let k = slice_remove(self.node.keys_mut(), self.idx); let v = slice_remove(self.node.vals_mut(), self.idx); @@ -1240,13 +1320,13 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: ).correct_parent_link(); } - Heap.dealloc( - right_node.node.get() as *mut u8, + Global.dealloc( + right_node.node.cast(), Layout::new::>(), ); } else { - Heap.dealloc( - right_node.node.get() as *mut u8, + Global.dealloc( + right_node.node.cast(), Layout::new::>(), ); } diff --git a/src/liballoc/btree/search.rs b/src/liballoc/collections/btree/search.rs similarity index 100% rename from src/liballoc/btree/search.rs rename to src/liballoc/collections/btree/search.rs diff --git a/src/liballoc/btree/set.rs b/src/liballoc/collections/btree/set.rs similarity index 97% rename from src/liballoc/btree/set.rs rename to src/liballoc/collections/btree/set.rs index e094070fc3dd..af9a7074e4a4 100644 --- a/src/liballoc/btree/set.rs +++ b/src/liballoc/collections/btree/set.rs @@ -16,12 +16,11 @@ use core::cmp::{min, max}; use core::fmt::Debug; use core::fmt; use core::iter::{Peekable, FromIterator, FusedIterator}; -use core::ops::{BitOr, BitAnd, BitXor, Sub}; +use core::ops::{BitOr, BitAnd, BitXor, Sub, RangeBounds}; use borrow::Borrow; -use btree_map::{BTreeMap, Keys}; +use collections::btree_map::{self, BTreeMap, Keys}; use super::Recover; -use range::RangeArgument; // FIXME(conventions): implement bounded iterators @@ -105,7 +104,7 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct IntoIter { - iter: ::btree_map::IntoIter, + iter: btree_map::IntoIter, } /// An iterator over a sub-range of items in a `BTreeSet`. 
@@ -118,7 +117,7 @@ pub struct IntoIter { #[derive(Debug)] #[stable(feature = "btree_range", since = "1.17.0")] pub struct Range<'a, T: 'a> { - iter: ::btree_map::Range<'a, T, ()>, + iter: btree_map::Range<'a, T, ()>, } /// A lazy iterator producing elements in the difference of `BTreeSet`s. @@ -240,7 +239,7 @@ impl BTreeSet { /// /// ``` /// use std::collections::BTreeSet; - /// use std::collections::Bound::Included; + /// use std::ops::Bound::Included; /// /// let mut set = BTreeSet::new(); /// set.insert(3); @@ -253,7 +252,7 @@ impl BTreeSet { /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range(&self, range: R) -> Range - where K: Ord, T: Borrow, R: RangeArgument + where K: Ord, T: Borrow, R: RangeBounds { Range { iter: self.map.range(range) } } @@ -658,26 +657,26 @@ impl BTreeSet { /// Basic usage: /// /// ``` - /// use std::collections::BTreeMap; + /// use std::collections::BTreeSet; /// - /// let mut a = BTreeMap::new(); - /// a.insert(1, "a"); - /// a.insert(2, "b"); - /// a.insert(3, "c"); - /// a.insert(17, "d"); - /// a.insert(41, "e"); + /// let mut a = BTreeSet::new(); + /// a.insert(1); + /// a.insert(2); + /// a.insert(3); + /// a.insert(17); + /// a.insert(41); /// /// let b = a.split_off(&3); /// /// assert_eq!(a.len(), 2); /// assert_eq!(b.len(), 3); /// - /// assert_eq!(a[&1], "a"); - /// assert_eq!(a[&2], "b"); + /// assert!(a.contains(&1)); + /// assert!(a.contains(&2)); /// - /// assert_eq!(b[&3], "c"); - /// assert_eq!(b[&17], "d"); - /// assert_eq!(b[&41], "e"); + /// assert!(b.contains(&3)); + /// assert!(b.contains(&17)); + /// assert!(b.contains(&41)); /// ``` #[stable(feature = "btree_split_off", since = "1.11.0")] pub fn split_off(&mut self, key: &Q) -> Self where T: Borrow { @@ -946,7 +945,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { fn len(&self) -> usize { self.iter.len() } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for 
Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -971,7 +970,7 @@ impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.iter.len() } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} #[stable(feature = "btree_range", since = "1.17.0")] @@ -997,7 +996,7 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Range<'a, T> {} /// Compare `x` and `y`, but return `short` if x is None and `long` if y is None @@ -1044,7 +1043,7 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Difference<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1078,7 +1077,7 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1116,7 +1115,7 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1150,5 +1149,5 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: Ord> FusedIterator for Union<'a, T> {} diff --git a/src/liballoc/linked_list.rs b/src/liballoc/collections/linked_list.rs similarity index 86% rename from src/liballoc/linked_list.rs rename to src/liballoc/collections/linked_list.rs index 3ac5a85d721a..9844de9a57d7 100644 --- a/src/liballoc/linked_list.rs +++ 
b/src/liballoc/collections/linked_list.rs @@ -28,10 +28,9 @@ use core::hash::{Hasher, Hash}; use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem; -use core::ops::{BoxPlace, InPlace, Place, Placer}; -use core::ptr::{self, Shared}; +use core::ptr::NonNull; -use boxed::{Box, IntermediateBox}; +use boxed::Box; use super::SpecExtend; /// A doubly-linked list with owned nodes. @@ -44,15 +43,15 @@ use super::SpecExtend; /// more memory efficient and make better use of CPU cache. #[stable(feature = "rust1", since = "1.0.0")] pub struct LinkedList { - head: Option>>, - tail: Option>>, + head: Option>>, + tail: Option>>, len: usize, marker: PhantomData>>, } struct Node { - next: Option>>, - prev: Option>>, + next: Option>>, + prev: Option>>, element: T, } @@ -65,8 +64,8 @@ struct Node { /// [`LinkedList`]: struct.LinkedList.html #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { - head: Option>>, - tail: Option>>, + head: Option>>, + tail: Option>>, len: usize, marker: PhantomData<&'a Node>, } @@ -98,8 +97,8 @@ impl<'a, T> Clone for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { list: &'a mut LinkedList, - head: Option>>, - tail: Option>>, + head: Option>>, + tail: Option>>, len: usize, } @@ -157,7 +156,7 @@ impl LinkedList { unsafe { node.next = self.head; node.prev = None; - let node = Some(Shared::from(Box::into_unique(node))); + let node = Some(Box::into_raw_non_null(node)); match self.head { None => self.tail = node, @@ -192,7 +191,7 @@ impl LinkedList { unsafe { node.next = None; node.prev = self.tail; - let node = Some(Shared::from(Box::into_unique(node))); + let node = Some(Box::into_raw_non_null(node)); match self.tail { None => self.head = node, @@ -225,7 +224,7 @@ impl LinkedList { /// /// Warning: this will not check that the provided node belongs to the current list. 
#[inline] - unsafe fn unlink_node(&mut self, mut node: Shared>) { + unsafe fn unlink_node(&mut self, mut node: NonNull>) { let node = node.as_mut(); match node.prev { @@ -747,8 +746,8 @@ impl LinkedList { /// Creates an iterator which uses a closure to determine if an element should be removed. /// /// If the closure returns true, then the element is removed and yielded. - /// If the closure returns false, it will try again, and call the closure on the next element, - /// seeing if it passes the test. + /// If the closure returns false, the element will remain in the list and will not be yielded + /// by the iterator. /// /// Note that `drain_filter` lets you mutate every element in the filter closure, regardless of /// whether you choose to keep or remove it. @@ -786,62 +785,6 @@ impl LinkedList { old_len: old_len, } } - - /// Returns a place for insertion at the front of the list. - /// - /// Using this method with placement syntax is equivalent to - /// [`push_front`](#method.push_front), but may be more efficient. - /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::LinkedList; - /// - /// let mut list = LinkedList::new(); - /// list.front_place() <- 2; - /// list.front_place() <- 4; - /// assert!(list.iter().eq(&[4, 2])); - /// ``` - #[unstable(feature = "collection_placement", - reason = "method name and placement protocol are subject to change", - issue = "30172")] - pub fn front_place(&mut self) -> FrontPlace { - FrontPlace { - list: self, - node: IntermediateBox::make_place(), - } - } - - /// Returns a place for insertion at the back of the list. - /// - /// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back), - /// but may be more efficient. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::LinkedList; - /// - /// let mut list = LinkedList::new(); - /// list.back_place() <- 2; - /// list.back_place() <- 4; - /// assert!(list.iter().eq(&[2, 4])); - /// ``` - #[unstable(feature = "collection_placement", - reason = "method name and placement protocol are subject to change", - issue = "30172")] - pub fn back_place(&mut self) -> BackPlace { - BackPlace { - list: self, - node: IntermediateBox::make_place(), - } - } } #[stable(feature = "rust1", since = "1.0.0")] @@ -897,7 +840,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -946,7 +889,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for IterMut<'a, T> {} impl<'a, T> IterMut<'a, T> { @@ -986,11 +929,11 @@ impl<'a, T> IterMut<'a, T> { Some(prev) => prev, }; - let node = Some(Shared::from(Box::into_unique(box Node { + let node = Some(Box::into_raw_non_null(box Node { next: Some(head), prev: Some(prev), element, - }))); + })); prev.as_mut().next = node; head.as_mut().prev = node; @@ -1038,7 +981,7 @@ pub struct DrainFilter<'a, T: 'a, F: 'a> where F: FnMut(&mut T) -> bool, { list: &'a mut LinkedList, - it: Option>>, + it: Option>>, pred: F, idx: usize, old_len: usize, @@ -1076,7 +1019,7 @@ impl<'a, T, F> Drop for DrainFilter<'a, T, F> where F: FnMut(&mut T) -> bool, { fn drop(&mut self) { - for _ in self { } + self.for_each(drop); } } @@ -1117,7 
+1060,7 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1242,123 +1185,6 @@ impl Hash for LinkedList { } } -unsafe fn finalize(node: IntermediateBox>) -> Box> { - let mut node = node.finalize(); - ptr::write(&mut node.next, None); - ptr::write(&mut node.prev, None); - node -} - -/// A place for insertion at the front of a `LinkedList`. -/// -/// See [`LinkedList::front_place`](struct.LinkedList.html#method.front_place) for details. -#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -pub struct FrontPlace<'a, T: 'a> { - list: &'a mut LinkedList, - node: IntermediateBox>, -} - -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for FrontPlace<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("FrontPlace") - .field(&self.list) - .finish() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer for FrontPlace<'a, T> { - type Place = Self; - - fn make_place(self) -> Self { - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place for FrontPlace<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { &mut (*self.node.pointer()).element } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace for FrontPlace<'a, T> { - type 
Owner = (); - - unsafe fn finalize(self) { - let FrontPlace { list, node } = self; - list.push_front_node(finalize(node)); - } -} - -/// A place for insertion at the back of a `LinkedList`. -/// -/// See [`LinkedList::back_place`](struct.LinkedList.html#method.back_place) for details. -#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -pub struct BackPlace<'a, T: 'a> { - list: &'a mut LinkedList, - node: IntermediateBox>, -} - -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for BackPlace<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("BackPlace") - .field(&self.list) - .finish() - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer for BackPlace<'a, T> { - type Place = Self; - - fn make_place(self) -> Self { - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place for BackPlace<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { &mut (*self.node.pointer()).element } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace for BackPlace<'a, T> { - type Owner = (); - - unsafe fn finalize(self) { - let BackPlace { list, node } = self; - list.push_back_node(finalize(node)); - } -} - // Ensure that `LinkedList` and its read-only iterators are covariant in their type parameters. 
#[allow(dead_code)] fn assert_covariance() { diff --git a/src/liballoc/collections/mod.rs b/src/liballoc/collections/mod.rs new file mode 100644 index 000000000000..96e0eb633b2f --- /dev/null +++ b/src/liballoc/collections/mod.rs @@ -0,0 +1,88 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Collection types. + +#![stable(feature = "rust1", since = "1.0.0")] + +pub mod binary_heap; +mod btree; +pub mod linked_list; +pub mod vec_deque; + +#[stable(feature = "rust1", since = "1.0.0")] +pub mod btree_map { + //! A map based on a B-Tree. + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::btree::map::*; +} + +#[stable(feature = "rust1", since = "1.0.0")] +pub mod btree_set { + //! A set based on a B-Tree. + #[stable(feature = "rust1", since = "1.0.0")] + pub use super::btree::set::*; +} + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::binary_heap::BinaryHeap; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::btree_map::BTreeMap; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::btree_set::BTreeSet; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::linked_list::LinkedList; + +#[stable(feature = "rust1", since = "1.0.0")] +#[doc(no_inline)] +pub use self::vec_deque::VecDeque; + +use alloc::{AllocErr, LayoutErr}; + +/// Augments `AllocErr` with a CapacityOverflow variant. 
+#[derive(Clone, PartialEq, Eq, Debug)] +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +pub enum CollectionAllocErr { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + /// Error due to the allocator (see the `AllocErr` type's docs). + AllocErr, +} + +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +impl From for CollectionAllocErr { + #[inline] + fn from(AllocErr: AllocErr) -> Self { + CollectionAllocErr::AllocErr + } +} + +#[unstable(feature = "try_reserve", reason = "new API", issue="48043")] +impl From for CollectionAllocErr { + #[inline] + fn from(_: LayoutErr) -> Self { + CollectionAllocErr::CapacityOverflow + } +} + +/// An intermediate trait for specialization of `Extend`. +#[doc(hidden)] +trait SpecExtend { + /// Extends `self` with the contents of the given iterator. + fn spec_extend(&mut self, iter: I); +} diff --git a/src/liballoc/vec_deque.rs b/src/liballoc/collections/vec_deque.rs similarity index 87% rename from src/liballoc/vec_deque.rs rename to src/liballoc/collections/vec_deque.rs index f56aa23a4eb2..0f759bb8f0b4 100644 --- a/src/liballoc/vec_deque.rs +++ b/src/liballoc/collections/vec_deque.rs @@ -21,19 +21,18 @@ use core::cmp::Ordering; use core::fmt; use core::iter::{repeat, FromIterator, FusedIterator}; use core::mem; -use core::ops::{Index, IndexMut, Place, Placer, InPlace}; +use core::ops::Bound::{Excluded, Included, Unbounded}; +use core::ops::{Index, IndexMut, RangeBounds}; use core::ptr; -use core::ptr::Shared; +use core::ptr::NonNull; use core::slice; use core::hash::{Hash, Hasher}; use core::cmp; +use collections::CollectionAllocErr; use raw_vec::RawVec; - -use super::range::RangeArgument; -use Bound::{Excluded, Included, Unbounded}; -use super::vec::Vec; +use vec::Vec; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 @@ -203,6 +202,23 @@ impl VecDeque { len); } + 
/// Returns a pair of slices which contain the contents of the buffer not used by the VecDeque. + #[inline] + unsafe fn unused_as_mut_slices<'a>(&'a mut self) -> (&'a mut [T], &'a mut [T]) { + let head = self.head; + let tail = self.tail; + let buf = self.buffer_as_mut_slice(); + if head != tail { + // In buf, head..tail contains the VecDeque and tail..head is unused. + // So calling `ring_slices` with tail and head swapped returns unused slices. + RingSlices::ring_slices(buf, tail, head) + } else { + // Swapping doesn't help when head == tail. + let (before, after) = buf.split_at_mut(head); + (after, before) + } + } + /// Copies a potentially wrapping block of memory len long from src to dest. /// (abs(dst - src) + len) must be no larger than cap() (There must be at /// most one continuous overlapping region between src and dest). @@ -566,6 +582,97 @@ impl VecDeque { } } + /// Tries to reserves the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `VecDeque`. After calling `reserve_exact`, + /// capacity will be greater than or equal to `self.len() + additional`. + /// Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity can not be relied upon to be precisely + /// minimal. Prefer `reserve` if future insertions are expected. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// use std::collections::VecDeque; + /// + /// fn process_data(data: &[u32]) -> Result, CollectionAllocErr> { + /// let mut output = VecDeque::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve_exact(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.extend(data.iter().map(|&val| { + /// val * 2 + 5 // very complicated + /// })); + /// + /// Ok(output) + /// } + /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.try_reserve(additional) + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `VecDeque`. The collection may reserve more space to avoid + /// frequent reallocations. After calling `reserve`, capacity will be + /// greater than or equal to `self.len() + additional`. Does nothing if + /// capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// use std::collections::VecDeque; + /// + /// fn process_data(data: &[u32]) -> Result, CollectionAllocErr> { + /// let mut output = VecDeque::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.extend(data.iter().map(|&val| { + /// val * 2 + 5 // very complicated + /// })); + /// + /// Ok(output) + /// } + /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + let old_cap = self.cap(); + let used_cap = self.len() + 1; + let new_cap = used_cap.checked_add(additional) + .and_then(|needed_cap| needed_cap.checked_next_power_of_two()) + .ok_or(CollectionAllocErr::CapacityOverflow)?; + + if new_cap > old_cap { + self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?; + unsafe { + self.handle_cap_increase(old_cap); + } + } + Ok(()) + } + /// Shrinks the capacity of the `VecDeque` as much as possible. /// /// It will drop down as close as possible to the length but the allocator may still inform the @@ -584,9 +691,42 @@ impl VecDeque { /// ``` #[stable(feature = "deque_extras_15", since = "1.5.0")] pub fn shrink_to_fit(&mut self) { + self.shrink_to(0); + } + + /// Shrinks the capacity of the `VecDeque` with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// use std::collections::VecDeque; + /// + /// let mut buf = VecDeque::with_capacity(15); + /// buf.extend(0..4); + /// assert_eq!(buf.capacity(), 15); + /// buf.shrink_to(6); + /// assert!(buf.capacity() >= 6); + /// buf.shrink_to(0); + /// assert!(buf.capacity() >= 4); + /// ``` + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + assert!(self.capacity() >= min_capacity, "Tried to shrink to a larger capacity"); + // +1 since the ringbuffer always leaves one space empty // len + 1 can't overflow for an existing, well-formed ringbuffer. - let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); + let target_cap = cmp::max( + cmp::max(min_capacity, self.len()) + 1, + MINIMUM_CAPACITY + 1 + ).next_power_of_two(); + if target_cap < self.cap() { // There are three cases of interest: // All elements are out of desired bounds @@ -844,7 +984,7 @@ impl VecDeque { #[inline] #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self, range: R) -> Drain - where R: RangeArgument + where R: RangeBounds { // Memory safety // @@ -857,12 +997,12 @@ impl VecDeque { // and the head/tail values will be restored correctly. // let len = self.len(); - let start = match range.start() { + let start = match range.start_bound() { Included(&n) => n, Excluded(&n) => n + 1, Unbounded => 0, }; - let end = match range.end() { + let end = match range.end_bound() { Included(&n) => n + 1, Excluded(&n) => n, Unbounded => len, @@ -895,7 +1035,7 @@ impl VecDeque { self.head = drain_tail; Drain { - deque: Shared::from(&mut *self), + deque: NonNull::from(&mut *self), after_tail: drain_head, after_head: head, iter: Iter { @@ -906,7 +1046,7 @@ impl VecDeque { } } - /// Clears the buffer, removing all values. + /// Clears the `VecDeque`, removing all values. 
/// /// # Examples /// @@ -1624,10 +1764,10 @@ impl VecDeque { return elem; } - /// Splits the collection into two at the given index. + /// Splits the `VecDeque` into two at the given index. /// - /// Returns a newly allocated `Self`. `self` contains elements `[0, at)`, - /// and the returned `Self` contains elements `[at, len)`. + /// Returns a newly allocated `VecDeque`. `self` contains elements `[0, at)`, + /// and the returned `VecDeque` contains elements `[at, len)`. /// /// Note that the capacity of `self` does not change. /// @@ -1635,7 +1775,7 @@ impl VecDeque { /// /// # Panics /// - /// Panics if `at > len` + /// Panics if `at > len`. /// /// # Examples /// @@ -1711,8 +1851,148 @@ impl VecDeque { #[inline] #[stable(feature = "append", since = "1.4.0")] pub fn append(&mut self, other: &mut Self) { - // naive impl - self.extend(other.drain(..)); + // Copies all values from `src_slice` to the start of `dst_slice`. + unsafe fn copy_whole_slice(src_slice: &[T], dst_slice: &mut [T]) { + let len = src_slice.len(); + ptr::copy_nonoverlapping(src_slice.as_ptr(), dst_slice[..len].as_mut_ptr(), len); + } + + let src_total = other.len(); + + // Guarantees there is space in `self` for `other`. + self.reserve(src_total); + + self.head = { + let original_head = self.head; + + // The goal is to copy all values from `other` into `self`. To avoid any + // mismatch, all valid values in `other` are retrieved... + let (src_high, src_low) = other.as_slices(); + // and unoccupied parts of self are retrieved. + let (dst_high, dst_low) = unsafe { self.unused_as_mut_slices() }; + + // Then all that is needed is to copy all values from + // src (src_high and src_low) to dst (dst_high and dst_low). + // + // other [o o o . . . . . o o o o] + // [5 6 7] [1 2 3 4] + // src_low src_high + // + // self [. . . . . . o o o o . .] + // [3 4 5 6 7 .] [1 2] + // dst_low dst_high + // + // Values are not copied one by one but as slices in `copy_whole_slice`. 
+ // What slices are used depends on various properties of src and dst. + // There are 6 cases in total: + // 1. `src` is contiguous and fits in dst_high + // 2. `src` is contiguous and does not fit in dst_high + // 3. `src` is discontiguous and fits in dst_high + // 4. `src` is discontiguous and does not fit in dst_high + // + src_high is smaller than dst_high + // 5. `src` is discontiguous and does not fit in dst_high + // + dst_high is smaller than src_high + // 6. `src` is discontiguous and does not fit in dst_high + // + dst_high is the same size as src_high + let src_contiguous = src_low.is_empty(); + let dst_high_fits_src = dst_high.len() >= src_total; + match (src_contiguous, dst_high_fits_src) { + (true, true) => { + // 1. + // other [. . . o o o . . . . . .] + // [] [1 1 1] + // + // self [. o o o o o . . . . . .] + // [.] [1 1 1 . . .] + + unsafe { + copy_whole_slice(src_high, dst_high); + } + original_head + src_total + } + (true, false) => { + // 2. + // other [. . . o o o o o . . . .] + // [] [1 1 2 2 2] + // + // self [. . . . . . . o o o . .] + // [2 2 2 . . . .] [1 1] + + let (src_1, src_2) = src_high.split_at(dst_high.len()); + unsafe { + copy_whole_slice(src_1, dst_high); + copy_whole_slice(src_2, dst_low); + } + src_total - dst_high.len() + } + (false, true) => { + // 3. + // other [o o . . . . . . . o o o] + // [2 2] [1 1 1] + // + // self [. o o . . . . . . . . .] + // [.] [1 1 1 2 2 . . . .] + + let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len()); + unsafe { + copy_whole_slice(src_high, dst_1); + copy_whole_slice(src_low, dst_2); + } + original_head + src_total + } + (false, false) => { + if src_high.len() < dst_high.len() { + // 4. + // other [o o o . . . . . . o o o] + // [2 3 3] [1 1 1] + // + // self [. . . . . . o o . . . .] + // [3 3 . . . .] 
[1 1 1 2] + + let (dst_1, dst_2) = dst_high.split_at_mut(src_high.len()); + let (src_2, src_3) = src_low.split_at(dst_2.len()); + unsafe { + copy_whole_slice(src_high, dst_1); + copy_whole_slice(src_2, dst_2); + copy_whole_slice(src_3, dst_low); + } + src_3.len() + } else if src_high.len() > dst_high.len() { + // 5. + // other [o o o . . . . . o o o o] + // [3 3 3] [1 1 2 2] + // + // self [. . . . . . o o o o . .] + // [2 2 3 3 3 .] [1 1] + + let (src_1, src_2) = src_high.split_at(dst_high.len()); + let (dst_2, dst_3) = dst_low.split_at_mut(src_2.len()); + unsafe { + copy_whole_slice(src_1, dst_high); + copy_whole_slice(src_2, dst_2); + copy_whole_slice(src_low, dst_3); + } + dst_2.len() + src_low.len() + } else { + // 6. + // other [o o . . . . . . . o o o] + // [2 2] [1 1 1] + // + // self [. . . . . . . o o . . .] + // [2 2 . . . . .] [1 1 1] + + unsafe { + copy_whole_slice(src_high, dst_high); + copy_whole_slice(src_low, dst_low); + } + src_low.len() + } + } + } + }; + + // Some values now exist in both `other` and `self` but are made inaccessible in `other`. + other.tail = other.head; } /// Retains only the elements specified by the predicate. @@ -1761,61 +2041,12 @@ impl VecDeque { debug_assert!(!self.is_full()); } } - - /// Returns a place for insertion at the back of the `VecDeque`. - /// - /// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back), - /// but may be more efficient. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::VecDeque; - /// - /// let mut buf = VecDeque::new(); - /// buf.place_back() <- 3; - /// buf.place_back() <- 4; - /// assert_eq!(&buf, &[3, 4]); - /// ``` - #[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] - pub fn place_back(&mut self) -> PlaceBack { - PlaceBack { vec_deque: self } - } - - /// Returns a place for insertion at the front of the `VecDeque`. - /// - /// Using this method with placement syntax is equivalent to [`push_front`](#method.push_front), - /// but may be more efficient. - /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// use std::collections::VecDeque; - /// - /// let mut buf = VecDeque::new(); - /// buf.place_front() <- 3; - /// buf.place_front() <- 4; - /// assert_eq!(&buf, &[4, 3]); - /// ``` - #[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] - pub fn place_front(&mut self) -> PlaceFront { - PlaceFront { vec_deque: self } - } } impl VecDeque { /// Modifies the `VecDeque` in-place so that `len()` is equal to new_len, - /// either by removing excess elements or by appending clones of `value` to the back. + /// either by removing excess elements from the back or by appending clones of `value` + /// to the back. 
/// /// # Examples /// @@ -1990,7 +2221,7 @@ impl<'a, T> ExactSizeIterator for Iter<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} @@ -2083,7 +2314,7 @@ impl<'a, T> ExactSizeIterator for IterMut<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for IterMut<'a, T> {} /// An owning iterator over the elements of a `VecDeque`. @@ -2139,7 +2370,7 @@ impl ExactSizeIterator for IntoIter { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} /// A draining iterator over the elements of a `VecDeque`. @@ -2154,7 +2385,7 @@ pub struct Drain<'a, T: 'a> { after_tail: usize, after_head: usize, iter: Iter<'a, T>, - deque: Shared>, + deque: NonNull>, } #[stable(feature = "collection_debug", since = "1.17.0")] @@ -2176,7 +2407,7 @@ unsafe impl<'a, T: Send> Send for Drain<'a, T> {} #[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> Drop for Drain<'a, T> { fn drop(&mut self) { - for _ in self.by_ref() {} + self.for_each(drop); let source_deque = unsafe { self.deque.as_mut() }; @@ -2246,7 +2477,7 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { #[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -2296,7 +2527,7 @@ macro_rules! __impl_slice_eq1 { __impl_slice_eq1! 
{ $Lhs, $Rhs, Sized } }; ($Lhs: ty, $Rhs: ty, $Bound: ident) => { - #[stable(feature = "vec-deque-partial-eq-slice", since = "1.17.0")] + #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")] impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq { fn eq(&self, other: &$Rhs) -> bool { if self.len() != other.len() { @@ -2390,7 +2621,7 @@ impl IntoIterator for VecDeque { type Item = T; type IntoIter = IntoIter; - /// Consumes the list into a front-to-back iterator yielding elements by + /// Consumes the `VecDeque` into a front-to-back iterator yielding elements by /// value. fn into_iter(self) -> IntoIter { IntoIter { inner: self } @@ -2480,7 +2711,7 @@ impl From> for Vec { if other.is_contiguous() { ptr::copy(buf.offset(tail as isize), buf, len); } else { - if (tail - head) >= cmp::min((cap - tail), head) { + if (tail - head) >= cmp::min(cap - tail, head) { // There is enough free space in the centre for the shortest block so we can // do this in at most three copy moves. if (cap - tail) > head { @@ -2537,98 +2768,6 @@ impl From> for Vec { } } -/// A place for insertion at the back of a `VecDeque`. -/// -/// See [`VecDeque::place_back`](struct.VecDeque.html#method.place_back) for details. 
-#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -#[derive(Debug)] -pub struct PlaceBack<'a, T: 'a> { - vec_deque: &'a mut VecDeque, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer for PlaceBack<'a, T> { - type Place = PlaceBack<'a, T>; - - fn make_place(self) -> Self { - self.vec_deque.grow_if_necessary(); - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place for PlaceBack<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { self.vec_deque.ptr().offset(self.vec_deque.head as isize) } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace for PlaceBack<'a, T> { - type Owner = &'a mut T; - - unsafe fn finalize(self) -> &'a mut T { - let head = self.vec_deque.head; - self.vec_deque.head = self.vec_deque.wrap_add(head, 1); - &mut *(self.vec_deque.ptr().offset(head as isize)) - } -} - -/// A place for insertion at the front of a `VecDeque`. -/// -/// See [`VecDeque::place_front`](struct.VecDeque.html#method.place_front) for details. 
-#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -#[derive(Debug)] -pub struct PlaceFront<'a, T: 'a> { - vec_deque: &'a mut VecDeque, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer for PlaceFront<'a, T> { - type Place = PlaceFront<'a, T>; - - fn make_place(self) -> Self { - self.vec_deque.grow_if_necessary(); - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place for PlaceFront<'a, T> { - fn pointer(&mut self) -> *mut T { - let tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1); - unsafe { self.vec_deque.ptr().offset(tail as isize) } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace for PlaceFront<'a, T> { - type Owner = &'a mut T; - - unsafe fn finalize(self) -> &'a mut T { - self.vec_deque.tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1); - &mut *(self.vec_deque.ptr().offset(self.vec_deque.tail as isize)) - } -} - #[cfg(test)] mod tests { use test; @@ -2909,7 +3048,7 @@ mod tests { #[test] fn test_from_vec() { - use super::super::vec::Vec; + use vec::Vec; for cap in 0..35 { for len in 0..cap + 1 { let mut vec = Vec::with_capacity(cap); @@ -2925,7 +3064,7 @@ mod tests { #[test] fn test_vec_from_vecdeque() { - use super::super::vec::Vec; + use vec::Vec; fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) { let mut vd = VecDeque::with_capacity(cap); diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs index a092bfb3b0a8..b49ec0ae2521 100644 --- a/src/liballoc/fmt.rs +++ b/src/liballoc/fmt.rs @@ -113,6 +113,8 @@ //! //! * *nothing* ⇒ [`Display`] //! * `?` ⇒ [`Debug`] +//! 
* `x?` ⇒ [`Debug`] with lower-case hexadecimal integers +//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers //! * `o` ⇒ [`Octal`](trait.Octal.html) //! * `x` ⇒ [`LowerHex`](trait.LowerHex.html) //! * `X` ⇒ [`UpperHex`](trait.UpperHex.html) @@ -324,7 +326,7 @@ //! sign := '+' | '-' //! width := count //! precision := count | '*' -//! type := identifier | '' +//! type := identifier | '?' | '' //! count := parameter | integer //! parameter := argument '$' //! ``` @@ -338,7 +340,8 @@ //! //! ## Fill/Alignment //! -//! The fill character is provided normally in conjunction with the `width` +//! The fill character is provided normally in conjunction with the +//! [`width`](#width) //! parameter. This indicates that if the value being formatted is smaller than //! `width` some extra characters will be printed around it. The extra //! characters are specified by `fill`, and the alignment can be one of the @@ -386,7 +389,8 @@ //! padding specified by fill/alignment will be used to take up the required //! space. //! -//! The default fill/alignment for non-numerics is a space and left-aligned. The +//! The default [fill/alignment](#fillalignment) for non-numerics is a space and +//! left-aligned. The //! defaults for numeric formatters is also a space but with right-alignment. If //! the `0` flag is specified for numerics, then the implicit fill character is //! `0`. 
@@ -514,19 +518,21 @@ pub use core::fmt::rt; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{Formatter, Result, Write}; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{Octal, Binary}; +pub use core::fmt::{Binary, Octal}; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{Display, Debug}; +pub use core::fmt::{Debug, Display}; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{LowerHex, UpperHex, Pointer}; +pub use core::fmt::{LowerHex, Pointer, UpperHex}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{LowerExp, UpperExp}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::Error; #[stable(feature = "rust1", since = "1.0.0")] -pub use core::fmt::{ArgumentV1, Arguments, write}; +pub use core::fmt::{write, ArgumentV1, Arguments}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple}; +#[stable(feature = "fmt_flags_align", since = "1.28.0")] +pub use core::fmt::{Alignment}; use string; @@ -561,7 +567,8 @@ use string; pub fn format(args: Arguments) -> string::String { let capacity = args.estimated_capacity(); let mut output = string::String::with_capacity(capacity); - output.write_fmt(args) - .expect("a formatting trait implementation returned an error"); + output + .write_fmt(args) + .expect("a formatting trait implementation returned an error"); output } diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs deleted file mode 100644 index b2bd9d7d8faf..000000000000 --- a/src/liballoc/heap.rs +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![unstable(feature = "allocator_api", - reason = "the precise API and guarantees it provides may be tweaked \ - slightly, especially to possibly take into account the \ - types being stored to make room for a future \ - tracing garbage collector", - issue = "32838")] - -use core::intrinsics::{min_align_of_val, size_of_val}; -use core::mem::{self, ManuallyDrop}; -use core::usize; - -pub use allocator::*; -#[doc(hidden)] -pub mod __core { - pub use core::*; -} - -extern "Rust" { - #[allocator] - #[rustc_allocator_nounwind] - fn __rust_alloc(size: usize, align: usize, err: *mut u8) -> *mut u8; - #[cold] - #[rustc_allocator_nounwind] - fn __rust_oom(err: *const u8) -> !; - #[rustc_allocator_nounwind] - fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize); - #[rustc_allocator_nounwind] - fn __rust_usable_size(layout: *const u8, - min: *mut usize, - max: *mut usize); - #[rustc_allocator_nounwind] - fn __rust_realloc(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize, - err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_alloc_zeroed(size: usize, align: usize, err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_alloc_excess(size: usize, - align: usize, - excess: *mut usize, - err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_realloc_excess(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize, - excess: *mut usize, - err: *mut u8) -> *mut u8; - #[rustc_allocator_nounwind] - fn __rust_grow_in_place(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize) -> u8; - #[rustc_allocator_nounwind] - fn __rust_shrink_in_place(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize) -> u8; -} - -#[derive(Copy, Clone, Default, Debug)] -pub struct Heap; - -unsafe impl Alloc for Heap { - #[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let mut 
err = ManuallyDrop::new(mem::uninitialized::()); - let ptr = __rust_alloc(layout.size(), - layout.align(), - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(ptr) - } - } - - #[inline] - #[cold] - fn oom(&mut self, err: AllocErr) -> ! { - unsafe { - __rust_oom(&err as *const AllocErr as *const u8) - } - } - - #[inline] - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - __rust_dealloc(ptr, layout.size(), layout.align()) - } - - #[inline] - fn usable_size(&self, layout: &Layout) -> (usize, usize) { - let mut min = 0; - let mut max = 0; - unsafe { - __rust_usable_size(layout as *const Layout as *const u8, - &mut min, - &mut max); - } - (min, max) - } - - #[inline] - unsafe fn realloc(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) - -> Result<*mut u8, AllocErr> - { - let mut err = ManuallyDrop::new(mem::uninitialized::()); - let ptr = __rust_realloc(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align(), - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - mem::forget(err); - Ok(ptr) - } - } - - #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let mut err = ManuallyDrop::new(mem::uninitialized::()); - let ptr = __rust_alloc_zeroed(layout.size(), - layout.align(), - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(ptr) - } - } - - #[inline] - unsafe fn alloc_excess(&mut self, layout: Layout) -> Result { - let mut err = ManuallyDrop::new(mem::uninitialized::()); - let mut size = 0; - let ptr = __rust_alloc_excess(layout.size(), - layout.align(), - &mut size, - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(Excess(ptr, size)) - } - } - - #[inline] - unsafe fn realloc_excess(&mut self, - ptr: *mut u8, - layout: 
Layout, - new_layout: Layout) -> Result { - let mut err = ManuallyDrop::new(mem::uninitialized::()); - let mut size = 0; - let ptr = __rust_realloc_excess(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align(), - &mut size, - &mut *err as *mut AllocErr as *mut u8); - if ptr.is_null() { - Err(ManuallyDrop::into_inner(err)) - } else { - Ok(Excess(ptr, size)) - } - } - - #[inline] - unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) - -> Result<(), CannotReallocInPlace> - { - debug_assert!(new_layout.size() >= layout.size()); - debug_assert!(new_layout.align() == layout.align()); - let ret = __rust_grow_in_place(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align()); - if ret != 0 { - Ok(()) - } else { - Err(CannotReallocInPlace) - } - } - - #[inline] - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - debug_assert!(new_layout.size() <= layout.size()); - debug_assert!(new_layout.align() == layout.align()); - let ret = __rust_shrink_in_place(ptr, - layout.size(), - layout.align(), - new_layout.size(), - new_layout.align()); - if ret != 0 { - Ok(()) - } else { - Err(CannotReallocInPlace) - } - } -} - -/// An arbitrary non-null address to represent zero-size allocations. -/// -/// This preserves the non-null invariant for types like `Box`. The address -/// may overlap with non-zero-size memory allocations. -#[rustc_deprecated(since = "1.19", reason = "Use Unique/Shared::empty() instead")] -#[unstable(feature = "heap_api", issue = "27700")] -pub const EMPTY: *mut () = 1 as *mut (); - -/// The allocator for unique pointers. -// This function must not unwind. If it does, MIR trans will fail. 
-#[cfg(not(test))] -#[lang = "exchange_malloc"] -#[inline] -unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 { - if size == 0 { - align as *mut u8 - } else { - let layout = Layout::from_size_align_unchecked(size, align); - Heap.alloc(layout).unwrap_or_else(|err| { - Heap.oom(err) - }) - } -} - -#[cfg_attr(not(test), lang = "box_free")] -#[inline] -pub(crate) unsafe fn box_free(ptr: *mut T) { - let size = size_of_val(&*ptr); - let align = min_align_of_val(&*ptr); - // We do not allocate for Box when T is ZST, so deallocation is also not necessary. - if size != 0 { - let layout = Layout::from_size_align_unchecked(size, align); - Heap.dealloc(ptr as *mut u8, layout); - } -} - -#[cfg(test)] -mod tests { - extern crate test; - use self::test::Bencher; - use boxed::Box; - use heap::{Heap, Alloc, Layout}; - - #[test] - fn allocate_zeroed() { - unsafe { - let layout = Layout::from_size_align(1024, 1).unwrap(); - let ptr = Heap.alloc_zeroed(layout.clone()) - .unwrap_or_else(|e| Heap.oom(e)); - - let end = ptr.offset(layout.size() as isize); - let mut i = ptr; - while i < end { - assert_eq!(*i, 0); - i = i.offset(1); - } - Heap.dealloc(ptr, layout); - } - } - - #[bench] - fn alloc_owned_small(b: &mut Bencher) { - b.iter(|| { - let _: Box<_> = box 10; - }) - } -} diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 6ee4f802802a..bcdfd8c9aa5d 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -13,10 +13,10 @@ //! This library provides smart pointers and collections for managing //! heap-allocated values. //! -//! This library, like libcore, is not intended for general usage, but rather as -//! a building block of other libraries. The types and interfaces in this -//! library are re-exported through the [standard library](../std/index.html), -//! and should not be used through this library. +//! This library, like libcore, normally doesn’t need to be used directly +//! since its contents are re-exported in the [`std` crate](../std/index.html). 
+//! Crates that use the `#![no_std]` attribute however will typically +//! not depend on `std`, so they’d use this crate instead. //! //! ## Boxed values //! @@ -40,7 +40,7 @@ //! //! ## Atomically reference counted pointers //! -//! The [`Arc`](arc/index.html) type is the threadsafe equivalent of the `Rc` +//! The [`Arc`](sync/index.html) type is the threadsafe equivalent of the `Rc` //! type. It provides all the same functionality of `Rc`, except it requires //! that the contained type `T` is shareable. Additionally, `Arc` is itself //! sendable while `Rc` is not. @@ -57,7 +57,7 @@ //! //! ## Heap interfaces //! -//! The [`heap`](heap/index.html) module defines the low-level interface to the +//! The [`alloc`](alloc/index.html) module defines the low-level interface to the //! default global allocator. It is not compatible with the libc allocator API. #![allow(unused_attributes)] @@ -72,18 +72,17 @@ test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))] #![no_std] #![needs_allocator] -#![deny(warnings)] #![deny(missing_debug_implementations)] -#![cfg_attr(test, allow(deprecated))] // rand -#![cfg_attr(test, feature(placement_in))] -#![cfg_attr(not(test), feature(core_float))] -#![cfg_attr(not(test), feature(exact_size_is_empty))] -#![cfg_attr(not(test), feature(slice_rotate))] +#![cfg_attr(not(test), feature(fn_traits))] #![cfg_attr(not(test), feature(generator_trait))] -#![cfg_attr(test, feature(rand, test))] +#![cfg_attr(not(stage0), feature(nll))] +#![cfg_attr(test, feature(test))] + +#![feature(allocator_api)] #![feature(allow_internal_unstable)] -#![feature(ascii_ctype)] +#![feature(arbitrary_self_types)] +#![feature(box_into_raw_non_null)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(cfg_target_has_atomic)] @@ -94,40 +93,31 @@ #![feature(dropck_eyepatch)] #![feature(exact_size_is_empty)] #![feature(fmt_internals)] -#![feature(from_ref)] #![feature(fundamental)] -#![feature(fused)] -#![feature(generic_param_attrs)] 
-#![feature(i128_type)] -#![feature(inclusive_range)] -#![feature(iter_rfold)] +#![feature(futures_api)] #![feature(lang_items)] +#![feature(libc)] #![feature(needs_allocator)] -#![feature(nonzero)] -#![feature(offset_to)] #![feature(optin_builtin_traits)] #![feature(pattern)] -#![feature(placement_in_syntax)] -#![feature(placement_new_protocol)] +#![feature(pin)] +#![feature(ptr_internals)] +#![feature(ptr_offset_from)] #![feature(rustc_attrs)] -#![feature(shared)] -#![feature(slice_get_slice)] -#![feature(slice_patterns)] -#![feature(slice_rsplit)] #![feature(specialization)] +#![feature(split_ascii_whitespace)] #![feature(staged_api)] #![feature(str_internals)] #![feature(trusted_len)] +#![feature(try_reserve)] #![feature(unboxed_closures)] -#![feature(unicode)] -#![feature(unique)] +#![feature(unicode_internals)] #![feature(unsize)] #![feature(allocator_internals)] #![feature(on_unimplemented)] #![feature(exact_chunks)] - -#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol, swap_with_slice, i128))] -#![cfg_attr(test, feature(test, box_heap))] +#![feature(rustc_const_unstable)] +#![feature(const_vec_new)] // Allow testing this library @@ -139,140 +129,45 @@ extern crate test; #[cfg(test)] extern crate rand; -extern crate std_unicode; - // Module with internal macros used by other modules (needs to be included before other modules). #[macro_use] mod macros; -// Allocator trait and helper struct definitions - -pub mod allocator; - // Heaps provided for low-level allocation strategies -pub mod heap; +pub mod alloc; +#[unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] +pub mod task; // Primitive types using the heaps above // Need to conditionally define the mod from `boxed.rs` to avoid // duplicating the lang-items when building in test cfg; but also need -// to allow code to have `use boxed::HEAP;` -// and `use boxed::Box;` declarations. +// to allow code to have `use boxed::Box;` declarations. 
#[cfg(not(test))] pub mod boxed; #[cfg(test)] mod boxed { - pub use std::boxed::{Box, IntermediateBox, HEAP}; + pub use std::boxed::Box; } #[cfg(test)] mod boxed_test; -#[cfg(target_has_atomic = "ptr")] -pub mod arc; +pub mod collections; +#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] +pub mod sync; pub mod rc; pub mod raw_vec; - -// collections modules -pub mod binary_heap; -mod btree; +pub mod prelude; pub mod borrow; pub mod fmt; -pub mod linked_list; -pub mod range; pub mod slice; pub mod str; pub mod string; pub mod vec; -pub mod vec_deque; - -#[stable(feature = "rust1", since = "1.0.0")] -pub mod btree_map { - //! A map based on a B-Tree. - #[stable(feature = "rust1", since = "1.0.0")] - pub use btree::map::*; -} - -#[stable(feature = "rust1", since = "1.0.0")] -pub mod btree_set { - //! A set based on a B-Tree. - #[stable(feature = "rust1", since = "1.0.0")] - pub use btree::set::*; -} #[cfg(not(test))] mod std { pub use core::ops; // RangeFull } - -/// An endpoint of a range of keys. -/// -/// # Examples -/// -/// `Bound`s are range endpoints: -/// -/// ``` -/// #![feature(collections_range)] -/// -/// use std::collections::range::RangeArgument; -/// use std::collections::Bound::*; -/// -/// assert_eq!((..100).start(), Unbounded); -/// assert_eq!((1..12).start(), Included(&1)); -/// assert_eq!((1..12).end(), Excluded(&12)); -/// ``` -/// -/// Using a tuple of `Bound`s as an argument to [`BTreeMap::range`]. -/// Note that in most cases, it's better to use range syntax (`1..5`) instead. 
-/// -/// ``` -/// use std::collections::BTreeMap; -/// use std::collections::Bound::{Excluded, Included, Unbounded}; -/// -/// let mut map = BTreeMap::new(); -/// map.insert(3, "a"); -/// map.insert(5, "b"); -/// map.insert(8, "c"); -/// -/// for (key, value) in map.range((Excluded(3), Included(8))) { -/// println!("{}: {}", key, value); -/// } -/// -/// assert_eq!(Some((&3, &"a")), map.range((Unbounded, Included(5))).next()); -/// ``` -/// -/// [`BTreeMap::range`]: btree_map/struct.BTreeMap.html#method.range -#[stable(feature = "collections_bound", since = "1.17.0")] -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -pub enum Bound { - /// An inclusive bound. - #[stable(feature = "collections_bound", since = "1.17.0")] - Included(#[stable(feature = "collections_bound", since = "1.17.0")] T), - /// An exclusive bound. - #[stable(feature = "collections_bound", since = "1.17.0")] - Excluded(#[stable(feature = "collections_bound", since = "1.17.0")] T), - /// An infinite endpoint. Indicates that there is no bound in this direction. - #[stable(feature = "collections_bound", since = "1.17.0")] - Unbounded, -} - -/// An intermediate trait for specialization of `Extend`. -#[doc(hidden)] -trait SpecExtend { - /// Extends `self` with the contents of the given iterator. - fn spec_extend(&mut self, iter: I); -} - -#[doc(no_inline)] -pub use binary_heap::BinaryHeap; -#[doc(no_inline)] -pub use btree_map::BTreeMap; -#[doc(no_inline)] -pub use btree_set::BTreeSet; -#[doc(no_inline)] -pub use linked_list::LinkedList; -#[doc(no_inline)] -pub use vec_deque::VecDeque; -#[doc(no_inline)] -pub use string::String; -#[doc(no_inline)] -pub use vec::Vec; diff --git a/src/liballoc/prelude.rs b/src/liballoc/prelude.rs new file mode 100644 index 000000000000..53b5e93a66e2 --- /dev/null +++ b/src/liballoc/prelude.rs @@ -0,0 +1,29 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The alloc Prelude +//! +//! The purpose of this module is to alleviate imports of commonly-used +//! items of the `alloc` crate by adding a glob import to the top of modules: +//! +//! ``` +//! # #![allow(unused_imports)] +//! # #![feature(alloc)] +//! extern crate alloc; +//! use alloc::prelude::*; +//! ``` + +#![unstable(feature = "alloc", issue = "27783")] + +#[unstable(feature = "alloc", issue = "27783")] pub use borrow::ToOwned; +#[unstable(feature = "alloc", issue = "27783")] pub use boxed::Box; +#[unstable(feature = "alloc", issue = "27783")] pub use slice::SliceConcatExt; +#[unstable(feature = "alloc", issue = "27783")] pub use string::{String, ToString}; +#[unstable(feature = "alloc", issue = "27783")] pub use vec::Vec; diff --git a/src/liballoc/range.rs b/src/liballoc/range.rs deleted file mode 100644 index f862da0d61e0..000000000000 --- a/src/liballoc/range.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![unstable(feature = "collections_range", - reason = "waiting for dust to settle on inclusive ranges", - issue = "30877")] - -//! Range syntax. 
- -use core::ops::{RangeFull, Range, RangeTo, RangeFrom, RangeInclusive, RangeToInclusive}; -use Bound::{self, Excluded, Included, Unbounded}; - -/// `RangeArgument` is implemented by Rust's built-in range types, produced -/// by range syntax like `..`, `a..`, `..b` or `c..d`. -pub trait RangeArgument { - /// Start index bound. - /// - /// Returns the start value as a `Bound`. - /// - /// # Examples - /// - /// ``` - /// #![feature(alloc)] - /// #![feature(collections_range)] - /// - /// extern crate alloc; - /// - /// # fn main() { - /// use alloc::range::RangeArgument; - /// use alloc::Bound::*; - /// - /// assert_eq!((..10).start(), Unbounded); - /// assert_eq!((3..10).start(), Included(&3)); - /// # } - /// ``` - fn start(&self) -> Bound<&T>; - - /// End index bound. - /// - /// Returns the end value as a `Bound`. - /// - /// # Examples - /// - /// ``` - /// #![feature(alloc)] - /// #![feature(collections_range)] - /// - /// extern crate alloc; - /// - /// # fn main() { - /// use alloc::range::RangeArgument; - /// use alloc::Bound::*; - /// - /// assert_eq!((3..).end(), Unbounded); - /// assert_eq!((3..10).end(), Excluded(&10)); - /// # } - /// ``` - fn end(&self) -> Bound<&T>; -} - -// FIXME add inclusive ranges to RangeArgument - -impl RangeArgument for RangeFull { - fn start(&self) -> Bound<&T> { - Unbounded - } - fn end(&self) -> Bound<&T> { - Unbounded - } -} - -impl RangeArgument for RangeFrom { - fn start(&self) -> Bound<&T> { - Included(&self.start) - } - fn end(&self) -> Bound<&T> { - Unbounded - } -} - -impl RangeArgument for RangeTo { - fn start(&self) -> Bound<&T> { - Unbounded - } - fn end(&self) -> Bound<&T> { - Excluded(&self.end) - } -} - -impl RangeArgument for Range { - fn start(&self) -> Bound<&T> { - Included(&self.start) - } - fn end(&self) -> Bound<&T> { - Excluded(&self.end) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -impl RangeArgument for RangeInclusive { - fn start(&self) 
-> Bound<&T> { - Included(&self.start) - } - fn end(&self) -> Bound<&T> { - Included(&self.end) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -impl RangeArgument for RangeToInclusive { - fn start(&self) -> Bound<&T> { - Unbounded - } - fn end(&self) -> Bound<&T> { - Included(&self.end) - } -} - -impl RangeArgument for (Bound, Bound) { - fn start(&self) -> Bound<&T> { - match *self { - (Included(ref start), _) => Included(start), - (Excluded(ref start), _) => Excluded(start), - (Unbounded, _) => Unbounded, - } - } - - fn end(&self) -> Bound<&T> { - match *self { - (_, Included(ref end)) => Included(end), - (_, Excluded(ref end)) => Excluded(end), - (_, Unbounded) => Unbounded, - } - } -} - -impl<'a, T: ?Sized + 'a> RangeArgument for (Bound<&'a T>, Bound<&'a T>) { - fn start(&self) -> Bound<&T> { - self.0 - } - - fn end(&self) -> Bound<&T> { - self.1 - } -} diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index dbf1fb1367dd..4f2686abf451 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -8,13 +8,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![unstable(feature = "raw_vec_internals", reason = "implemention detail", issue = "0")] +#![doc(hidden)] + use core::cmp; use core::mem; use core::ops::Drop; -use core::ptr::{self, Unique}; +use core::ptr::{self, NonNull, Unique}; use core::slice; -use heap::{Alloc, Layout, Heap}; -use super::boxed::Box; + +use alloc::{Alloc, Layout, Global, handle_alloc_error}; +use collections::CollectionAllocErr; +use collections::CollectionAllocErr::*; +use boxed::Box; /// A low-level utility for more ergonomically allocating, reallocating, and deallocating /// a buffer of memory on the heap without having to worry about all the corner cases @@ -44,7 +50,7 @@ use super::boxed::Box; /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. 
#[allow(missing_debug_implementations)] -pub struct RawVec { +pub struct RawVec { ptr: Unique, cap: usize, a: A, @@ -53,14 +59,16 @@ pub struct RawVec { impl RawVec { /// Like `new` but parameterized over the choice of allocator for /// the returned RawVec. - pub fn new_in(a: A) -> Self { + pub const fn new_in(a: A) -> Self { // !0 is usize::MAX. This branch should be stripped at compile time. - let cap = if mem::size_of::() == 0 { !0 } else { 0 }; + // FIXME(mark-i-m): use this line when `if`s are allowed in `const` + //let cap = if mem::size_of::() == 0 { !0 } else { 0 }; // Unique::empty() doubles as "unallocated" and "zero-sized allocation" RawVec { ptr: Unique::empty(), - cap, + // FIXME(mark-i-m): use `cap` when ifs are allowed in const + cap: [0, !0][(mem::size_of::() == 0) as usize], a, } } @@ -83,27 +91,28 @@ impl RawVec { unsafe { let elem_size = mem::size_of::(); - let alloc_size = cap.checked_mul(elem_size).expect("capacity overflow"); - alloc_guard(alloc_size); + let alloc_size = cap.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow()); + alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow()); // handles ZSTs and `cap = 0` alike let ptr = if alloc_size == 0 { - mem::align_of::() as *mut u8 + NonNull::::dangling() } else { let align = mem::align_of::(); + let layout = Layout::from_size_align(alloc_size, align).unwrap(); let result = if zeroed { - a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap()) + a.alloc_zeroed(layout) } else { - a.alloc(Layout::from_size_align(alloc_size, align).unwrap()) + a.alloc(layout) }; match result { - Ok(ptr) => ptr, - Err(err) => a.oom(err), + Ok(ptr) => ptr.cast(), + Err(_) => handle_alloc_error(layout), } }; RawVec { - ptr: Unique::new_unchecked(ptr as *mut _), + ptr: ptr.into(), cap, a, } @@ -111,14 +120,14 @@ impl RawVec { } } -impl RawVec { +impl RawVec { /// Creates the biggest possible RawVec (on the system heap) /// without allocating. 
If T has positive size, then this makes a /// RawVec with capacity 0. If T has 0 size, then it makes a /// RawVec with capacity `usize::MAX`. Useful for implementing /// delayed allocation. - pub fn new() -> Self { - Self::new_in(Heap) + pub const fn new() -> Self { + Self::new_in(Global) } /// Creates a RawVec (on the system heap) with exactly the @@ -138,13 +147,13 @@ impl RawVec { /// Aborts on OOM #[inline] pub fn with_capacity(cap: usize) -> Self { - RawVec::allocate_in(cap, false, Heap) + RawVec::allocate_in(cap, false, Global) } /// Like `with_capacity` but guarantees the buffer is zeroed. #[inline] pub fn with_capacity_zeroed(cap: usize) -> Self { - RawVec::allocate_in(cap, true, Heap) + RawVec::allocate_in(cap, true, Global) } } @@ -165,7 +174,7 @@ impl RawVec { } } -impl RawVec { +impl RawVec { /// Reconstitutes a RawVec from a pointer, capacity. /// /// # Undefined Behavior @@ -177,7 +186,7 @@ impl RawVec { RawVec { ptr: Unique::new_unchecked(ptr), cap, - a: Heap, + a: Global, } } @@ -258,7 +267,7 @@ impl RawVec { /// # Examples /// /// ``` - /// # #![feature(alloc)] + /// # #![feature(alloc, raw_vec_internals)] /// # extern crate alloc; /// # use std::ptr; /// # use alloc::raw_vec::RawVec; @@ -307,14 +316,15 @@ impl RawVec { // `from_size_align_unchecked`. 
let new_cap = 2 * self.cap; let new_size = new_cap * elem_size; - let new_layout = Layout::from_size_align_unchecked(new_size, cur.align()); - alloc_guard(new_size); - let ptr_res = self.a.realloc(self.ptr.as_ptr() as *mut u8, + alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); + let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, - new_layout); + new_size); match ptr_res { - Ok(ptr) => (new_cap, Unique::new_unchecked(ptr as *mut T)), - Err(e) => self.a.oom(e), + Ok(ptr) => (new_cap, ptr.cast().into()), + Err(_) => handle_alloc_error( + Layout::from_size_align_unchecked(new_size, cur.align()) + ), } } None => { @@ -322,8 +332,8 @@ impl RawVec { // would cause overflow let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; match self.a.alloc_array::(new_cap) { - Ok(ptr) => (new_cap, ptr), - Err(e) => self.a.oom(e), + Ok(ptr) => (new_cap, ptr.into()), + Err(_) => handle_alloc_error(Layout::array::(new_cap).unwrap()), } } }; @@ -367,10 +377,8 @@ impl RawVec { // overflow and the alignment is sufficiently small. let new_cap = 2 * self.cap; let new_size = new_cap * elem_size; - alloc_guard(new_size); - let ptr = self.ptr() as *mut _; - let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); - match self.a.grow_in_place(ptr, old_layout, new_layout) { + alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow()); + match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) { Ok(_) => { // We can't directly divide `size`. self.cap = new_cap; @@ -383,6 +391,13 @@ impl RawVec { } } + /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. + pub fn try_reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) + -> Result<(), CollectionAllocErr> { + + self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact) + } + /// Ensures that the buffer contains at least enough space to hold /// `used_cap + needed_extra_cap` elements. 
If it doesn't already, /// will reallocate the minimum possible amount of memory necessary. @@ -404,52 +419,31 @@ impl RawVec { /// /// Aborts on OOM pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) { - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. - // Wrapping in case they gave a bad `used_cap`. - if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { - return; - } - - // Nothing we can really do about these checks :( - let new_cap = used_cap.checked_add(needed_extra_cap).expect("capacity overflow"); - let new_layout = match Layout::array::(new_cap) { - Some(layout) => layout, - None => panic!("capacity overflow"), - }; - alloc_guard(new_layout.size()); - let res = match self.current_layout() { - Some(layout) => { - let old_ptr = self.ptr.as_ptr() as *mut u8; - self.a.realloc(old_ptr, layout, new_layout) - } - None => self.a.alloc(new_layout), - }; - let uniq = match res { - Ok(ptr) => Unique::new_unchecked(ptr as *mut T), - Err(e) => self.a.oom(e), - }; - self.ptr = uniq; - self.cap = new_cap; - } - } + match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocErr) => unreachable!(), + Ok(()) => { /* yay */ } + } + } /// Calculates the buffer's new size given that it'll hold `used_cap + /// needed_extra_cap` elements. This logic is used in amortized reserve methods. /// Returns `(new_capacity, new_alloc_size)`. 
- fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) -> usize { + fn amortized_new_size(&self, used_cap: usize, needed_extra_cap: usize) + -> Result { + // Nothing we can really do about these checks :( - let required_cap = used_cap.checked_add(needed_extra_cap) - .expect("capacity overflow"); + let required_cap = used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?; // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. let double_cap = self.cap * 2; // `double_cap` guarantees exponential growth. - cmp::max(double_cap, required_cap) + Ok(cmp::max(double_cap, required_cap)) + } + + /// The same as `reserve`, but returns on errors instead of panicking or aborting. + pub fn try_reserve(&mut self, used_cap: usize, needed_extra_cap: usize) + -> Result<(), CollectionAllocErr> { + self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized) } /// Ensures that the buffer contains at least enough space to hold @@ -477,7 +471,7 @@ impl RawVec { /// # Examples /// /// ``` - /// # #![feature(alloc)] + /// # #![feature(alloc, raw_vec_internals)] /// # extern crate alloc; /// # use std::ptr; /// # use alloc::raw_vec::RawVec; @@ -505,42 +499,12 @@ impl RawVec { /// # } /// ``` pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) { - unsafe { - // NOTE: we don't early branch on ZSTs here because we want this - // to actually catch "asking for more than usize::MAX" in that case. - // If we make it past the first branch then we are guaranteed to - // panic. - - // Don't actually need any more capacity. 
- // Wrapping in case they give a bad `used_cap` - if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { - return; - } - - let new_cap = self.amortized_new_size(used_cap, needed_extra_cap); - - let new_layout = match Layout::array::(new_cap) { - Some(layout) => layout, - None => panic!("capacity overflow"), - }; - // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()); - let res = match self.current_layout() { - Some(layout) => { - let old_ptr = self.ptr.as_ptr() as *mut u8; - self.a.realloc(old_ptr, layout, new_layout) - } - None => self.a.alloc(new_layout), - }; - let uniq = match res { - Ok(ptr) => Unique::new_unchecked(ptr as *mut T), - Err(e) => self.a.oom(e), - }; - self.ptr = uniq; - self.cap = new_cap; + match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) { + Err(CapacityOverflow) => capacity_overflow(), + Err(AllocErr) => unreachable!(), + Ok(()) => { /* yay */ } } } - /// Attempts to ensure that the buffer contains at least enough space to hold /// `used_cap + needed_extra_cap` elements. If it doesn't already have /// enough capacity, will reallocate in place enough space plus comfortable slack @@ -576,17 +540,19 @@ impl RawVec { return false; } - let new_cap = self.amortized_new_size(used_cap, needed_extra_cap); + let new_cap = self.amortized_new_size(used_cap, needed_extra_cap) + .unwrap_or_else(|_| capacity_overflow()); // Here, `cap < used_cap + needed_extra_cap <= new_cap` // (regardless of whether `self.cap - used_cap` wrapped). // Therefore we can safely call grow_in_place. 
- let ptr = self.ptr() as *mut _; let new_layout = Layout::new::().repeat(new_cap).unwrap().0; // FIXME: may crash and burn on over-reserve - alloc_guard(new_layout.size()); - match self.a.grow_in_place(ptr, old_layout, new_layout) { + alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow()); + match self.a.grow_in_place( + NonNull::from(self.ptr).cast(), old_layout, new_layout.size(), + ) { Ok(_) => { self.cap = new_cap; true @@ -646,12 +612,13 @@ impl RawVec { let new_size = elem_size * amount; let align = mem::align_of::(); let old_layout = Layout::from_size_align_unchecked(old_size, align); - let new_layout = Layout::from_size_align_unchecked(new_size, align); - match self.a.realloc(self.ptr.as_ptr() as *mut u8, + match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, - new_layout) { - Ok(p) => self.ptr = Unique::new_unchecked(p as *mut T), - Err(err) => self.a.oom(err), + new_size) { + Ok(p) => self.ptr = p.cast().into(), + Err(_) => handle_alloc_error( + Layout::from_size_align_unchecked(new_size, align) + ), } } self.cap = amount; @@ -659,7 +626,74 @@ impl RawVec { } } -impl RawVec { +enum Fallibility { + Fallible, + Infallible, +} + +use self::Fallibility::*; + +enum ReserveStrategy { + Exact, + Amortized, +} + +use self::ReserveStrategy::*; + +impl RawVec { + fn reserve_internal( + &mut self, + used_cap: usize, + needed_extra_cap: usize, + fallibility: Fallibility, + strategy: ReserveStrategy, + ) -> Result<(), CollectionAllocErr> { + unsafe { + use alloc::AllocErr; + + // NOTE: we don't early branch on ZSTs here because we want this + // to actually catch "asking for more than usize::MAX" in that case. + // If we make it past the first branch then we are guaranteed to + // panic. + + // Don't actually need any more capacity. + // Wrapping in case they gave a bad `used_cap`. 
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap { + return Ok(()); + } + + // Nothing we can really do about these checks :( + let new_cap = match strategy { + Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?, + Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?, + }; + let new_layout = Layout::array::(new_cap).map_err(|_| CapacityOverflow)?; + + alloc_guard(new_layout.size())?; + + let res = match self.current_layout() { + Some(layout) => { + debug_assert!(new_layout.align() == layout.align()); + self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size()) + } + None => self.a.alloc(new_layout), + }; + + match (&res, fallibility) { + (Err(AllocErr), Infallible) => handle_alloc_error(new_layout), + _ => {} + } + + self.ptr = res?.cast().into(); + self.cap = new_cap; + + Ok(()) + } + } + +} + +impl RawVec { /// Converts the entire buffer into `Box<[T]>`. /// /// While it is not *strictly* Undefined Behavior to call @@ -683,8 +717,7 @@ impl RawVec { let elem_size = mem::size_of::(); if elem_size != 0 { if let Some(layout) = self.current_layout() { - let ptr = self.ptr() as *mut u8; - self.a.dealloc(ptr, layout); + self.a.dealloc(NonNull::from(self.ptr).cast(), layout); } } } @@ -709,13 +742,20 @@ unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec { // all 4GB in user-space. e.g. PAE or x32 #[inline] -fn alloc_guard(alloc_size: usize) { - if mem::size_of::() < 8 { - assert!(alloc_size <= ::core::isize::MAX as usize, - "capacity overflow"); +fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> { + if mem::size_of::() < 8 && alloc_size > ::core::isize::MAX as usize { + Err(CapacityOverflow) + } else { + Ok(()) } } +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +fn capacity_overflow() -> ! 
{ + panic!("capacity overflow") +} #[cfg(test)] mod tests { @@ -723,7 +763,7 @@ mod tests { #[test] fn allocator_param() { - use allocator::{Alloc, AllocErr}; + use alloc::AllocErr; // Writing a test of integration between third-party // allocators and RawVec is a little tricky because the RawVec @@ -739,18 +779,18 @@ mod tests { // before allocation attempts start failing. struct BoundedAlloc { fuel: usize } unsafe impl Alloc for BoundedAlloc { - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + unsafe fn alloc(&mut self, layout: Layout) -> Result, AllocErr> { let size = layout.size(); if size > self.fuel { - return Err(AllocErr::Unsupported { details: "fuel exhausted" }); + return Err(AllocErr); } - match Heap.alloc(layout) { + match Global.alloc(layout) { ok @ Ok(_) => { self.fuel -= size; ok } err @ Err(_) => err, } } - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - Heap.dealloc(ptr, layout) + unsafe fn dealloc(&mut self, ptr: NonNull, layout: Layout) { + Global.dealloc(ptr, layout) } } diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 59079f9ba76b..be049eb6e5ef 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -253,13 +253,14 @@ use core::hash::{Hash, Hasher}; use core::intrinsics::abort; use core::marker; use core::marker::{Unsize, PhantomData}; -use core::mem::{self, align_of_val, forget, size_of_val, uninitialized}; +use core::mem::{self, align_of_val, forget, size_of_val}; use core::ops::Deref; use core::ops::CoerceUnsized; -use core::ptr::{self, Shared}; +use core::ptr::{self, NonNull}; use core::convert::From; +use core::usize; -use heap::{Heap, Alloc, Layout, box_free}; +use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; use string::String; use vec::Vec; @@ -282,7 +283,7 @@ struct RcBox { /// [get_mut]: #method.get_mut #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc { - ptr: Shared>, + ptr: NonNull>, phantom: PhantomData, } @@ -311,11 +312,11 @@ impl Rc { // 
pointers, which ensures that the weak destructor never frees // the allocation while the strong destructor is running, even // if the weak pointer is stored inside the strong one. - ptr: Shared::from(Box::into_unique(box RcBox { + ptr: Box::into_raw_non_null(box RcBox { strong: Cell::new(1), weak: Cell::new(1), value, - })), + }), phantom: PhantomData, } } @@ -428,7 +429,7 @@ impl Rc { let rc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); Rc { - ptr: Shared::new_unchecked(rc_ptr), + ptr: NonNull::new_unchecked(rc_ptr), phantom: PhantomData, } } @@ -449,6 +450,8 @@ impl Rc { #[stable(feature = "rc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak { this.inc_weak(); + // Make sure we do not create a dangling Weak + debug_assert!(!is_dangling(this.ptr)); Weak { ptr: this.ptr } } @@ -618,15 +621,14 @@ impl Rc { } } -impl Rc { +impl Rc { #[inline] - #[unstable(feature = "rc_downcast", issue = "44608")] + #[stable(feature = "rc_downcast", since = "1.29.0")] /// Attempt to downcast the `Rc` to a concrete type. 
/// /// # Examples /// /// ``` - /// #![feature(rc_downcast)] /// use std::any::Any; /// use std::rc::Rc; /// @@ -642,17 +644,11 @@ impl Rc { /// print_if_string(Rc::new(0i8)); /// } /// ``` - pub fn downcast(self) -> Result, Rc> { + pub fn downcast(self) -> Result, Rc> { if (*self).is::() { - // avoid the pointer arithmetic in from_raw - unsafe { - let raw: *const RcBox = self.ptr.as_ptr(); - forget(self); - Ok(Rc { - ptr: Shared::new_unchecked(raw as *const RcBox as *mut _), - phantom: PhantomData, - }) - } + let ptr = self.ptr.cast::>(); + forget(self); + Ok(Rc { ptr, phantom: PhantomData }) } else { Err(self) } @@ -667,11 +663,11 @@ impl Rc { let layout = Layout::for_value(&*fake_ptr); - let mem = Heap.alloc(layout) - .unwrap_or_else(|e| Heap.oom(e)); + let mem = Global.alloc(layout) + .unwrap_or_else(|_| handle_alloc_error(layout)); // Initialize the real RcBox - let inner = set_data_ptr(ptr as *mut T, mem) as *mut RcBox; + let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox; ptr::write(&mut (*inner).strong, Cell::new(1)); ptr::write(&mut (*inner).weak, Cell::new(1)); @@ -681,7 +677,8 @@ impl Rc { fn from_box(v: Box) -> Rc { unsafe { - let bptr = Box::into_raw(v); + let box_unique = Box::into_unique(v); + let bptr = box_unique.as_ptr(); let value_size = size_of_val(&*bptr); let ptr = Self::allocate_for_ptr(bptr); @@ -693,9 +690,9 @@ impl Rc { value_size); // Free the allocation without dropping its contents - box_free(bptr); + box_free(box_unique); - Rc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } } @@ -722,7 +719,7 @@ impl Rc<[T]> { &mut (*ptr).value as *mut [T] as *mut T, v.len()); - Rc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } @@ -737,7 +734,7 @@ impl RcFromSlice for Rc<[T]> { // In the event of a panic, elements that have been written // into the new RcBox will be 
dropped, then the memory freed. struct Guard { - mem: *mut u8, + mem: NonNull, elems: *mut T, layout: Layout, n_elems: usize, @@ -751,7 +748,7 @@ impl RcFromSlice for Rc<[T]> { let slice = from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); - Heap.dealloc(self.mem, self.layout.clone()); + Global.dealloc(self.mem, self.layout.clone()); } } } @@ -767,7 +764,7 @@ impl RcFromSlice for Rc<[T]> { let elems = &mut (*ptr).value as *mut [T] as *mut T; let mut guard = Guard{ - mem: mem, + mem: NonNull::new_unchecked(mem), elems: elems, layout: layout, n_elems: 0, @@ -781,7 +778,7 @@ impl RcFromSlice for Rc<[T]> { // All clear. Forget the guard so it doesn't free the new RcBox. forget(guard); - Rc { ptr: Shared::new_unchecked(ptr), phantom: PhantomData } + Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } } } } @@ -834,8 +831,6 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc { /// ``` fn drop(&mut self) { unsafe { - let ptr = self.ptr.as_ptr(); - self.dec_strong(); if self.strong() == 0 { // destroy the contained object @@ -846,7 +841,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc { self.dec_weak(); if self.weak() == 0 { - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)); + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())); } } } @@ -1160,7 +1155,12 @@ impl From> for Rc<[T]> { /// [`None`]: ../../std/option/enum.Option.html#variant.None #[stable(feature = "rc_weak", since = "1.4.0")] pub struct Weak { - ptr: Shared>, + // This is a `NonNull` to allow optimizing the size of this type in enums, + // but it is not necessarily a valid pointer. + // `Weak::new` sets this to `usize::MAX` so that it doesn’t need + // to allocate space on the heap. That's not a value a real pointer + // will ever have because RcBox has alignment at least 2. 
+ ptr: NonNull>, } #[stable(feature = "rc_weak", since = "1.4.0")] @@ -1172,8 +1172,8 @@ impl !marker::Sync for Weak {} impl, U: ?Sized> CoerceUnsized> for Weak {} impl Weak { - /// Constructs a new `Weak`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`] on the return value always gives [`None`]. + /// Constructs a new `Weak`, without allocating any memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`upgrade`]: struct.Weak.html#method.upgrade /// [`None`]: ../../std/option/enum.Option.html @@ -1188,18 +1188,17 @@ impl Weak { /// ``` #[stable(feature = "downgraded_weak", since = "1.10.0")] pub fn new() -> Weak { - unsafe { - Weak { - ptr: Shared::from(Box::into_unique(box RcBox { - strong: Cell::new(0), - weak: Cell::new(1), - value: uninitialized(), - })), - } + Weak { + ptr: NonNull::new(usize::MAX as *mut RcBox).expect("MAX is not 0"), } } } +pub(crate) fn is_dangling(ptr: NonNull) -> bool { + let address = ptr.as_ptr() as *mut () as usize; + address == usize::MAX +} + impl Weak { /// Attempts to upgrade the `Weak` pointer to an [`Rc`], extending /// the lifetime of the value if successful. @@ -1229,13 +1228,25 @@ impl Weak { /// ``` #[stable(feature = "rc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option> { - if self.strong() == 0 { + let inner = self.inner()?; + if inner.strong() == 0 { None } else { - self.inc_strong(); + inner.inc_strong(); Some(Rc { ptr: self.ptr, phantom: PhantomData }) } } + + /// Return `None` when the pointer is dangling and there is no allocated `RcBox`, + /// i.e. 
this `Weak` was created by `Weak::new` + #[inline] + fn inner(&self) -> Option<&RcBox> { + if is_dangling(self.ptr) { + None + } else { + Some(unsafe { self.ptr.as_ref() }) + } + } } #[stable(feature = "rc_weak", since = "1.4.0")] @@ -1265,14 +1276,14 @@ impl Drop for Weak { /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { - unsafe { - let ptr = self.ptr.as_ptr(); - - self.dec_weak(); + if let Some(inner) = self.inner() { + inner.dec_weak(); // the weak count starts at 1, and will only go to zero if all // the strong pointers have disappeared. - if self.weak() == 0 { - Heap.dealloc(ptr as *mut u8, Layout::for_value(&*ptr)); + if inner.weak() == 0 { + unsafe { + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())); + } } } } @@ -1293,7 +1304,9 @@ impl Clone for Weak { /// ``` #[inline] fn clone(&self) -> Weak { - self.inc_weak(); + if let Some(inner) = self.inner() { + inner.inc_weak() + } Weak { ptr: self.ptr } } } @@ -1326,7 +1339,7 @@ impl Default for Weak { } } -// NOTE: We checked_add here to deal with mem::forget safety. In particular +// NOTE: We checked_add here to deal with mem::forget safely. In particular // if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then // you can free the allocation while outstanding Rcs (or Weaks) exist. 
// We abort because this is such a degenerate scenario that we don't care about @@ -1379,12 +1392,10 @@ impl RcBoxPtr for Rc { } } -impl RcBoxPtr for Weak { +impl RcBoxPtr for RcBox { #[inline(always)] fn inner(&self) -> &RcBox { - unsafe { - self.ptr.as_ref() - } + self } } @@ -1546,7 +1557,7 @@ mod tests { assert_eq!(unsafe { &*ptr }, "foo"); assert_eq!(rc, rc2); - let rc: Rc = Rc::new(123); + let rc: Rc = Rc::new(123); let ptr = Rc::into_raw(rc.clone()); let rc2 = unsafe { Rc::from_raw(ptr) }; @@ -1747,8 +1758,8 @@ mod tests { use std::fmt::Display; use std::string::ToString; - let b: Box = box 123; - let r: Rc = Rc::from(b); + let b: Box = box 123; + let r: Rc = Rc::from(b); assert_eq!(r.to_string(), "123"); } @@ -1757,8 +1768,8 @@ mod tests { fn test_from_box_trait_zero_sized() { use std::fmt::Debug; - let b: Box = box (); - let r: Rc = Rc::from(b); + let b: Box = box (); + let r: Rc = Rc::from(b); assert_eq!(format!("{:?}", r), "()"); } @@ -1775,8 +1786,8 @@ mod tests { fn test_downcast() { use std::any::Any; - let r1: Rc = Rc::new(i32::max_value()); - let r2: Rc = Rc::new("abc"); + let r1: Rc = Rc::new(i32::max_value()); + let r2: Rc = Rc::new("abc"); assert!(r1.clone().downcast::().is_err()); diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index 861f72bcf88e..c27c596e7975 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -10,6 +10,8 @@ //! A dynamically-sized view into a contiguous sequence, `[T]`. //! +//! *[See also the slice primitive type](../../std/primitive.slice.html).* +//! //! Slices are a view into a block of memory represented as a pointer and a //! length. //! @@ -78,8 +80,6 @@ //! * Further methods that return iterators are [`.split`], [`.splitn`], //! [`.chunks`], [`.windows`] and more. //! -//! *[See also the slice primitive type](../../std/primitive.slice.html).* -//! //! [`Clone`]: ../../std/clone/trait.Clone.html //! [`Eq`]: ../../std/cmp/trait.Eq.html //! 
[`Ord`]: ../../std/cmp/trait.Ord.html @@ -101,7 +101,7 @@ use core::cmp::Ordering::{self, Less}; use core::mem::size_of; use core::mem; use core::ptr; -use core::slice as core_slice; +use core::{u8, u16, u32}; use borrow::{Borrow, BorrowMut, ToOwned}; use boxed::Box; @@ -115,13 +115,13 @@ pub use core::slice::{Iter, IterMut}; pub use core::slice::{SplitMut, ChunksMut, Split}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut}; -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] pub use core::slice::{RSplit, RSplitMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{from_raw_parts, from_raw_parts_mut}; -#[unstable(feature = "from_ref", issue = "45703")] -pub use core::slice::{from_ref, from_ref_mut}; -#[unstable(feature = "slice_get_slice", issue = "35729")] +#[stable(feature = "from_ref", since = "1.28.0")] +pub use core::slice::{from_ref, from_mut}; +#[stable(feature = "slice_get_slice", since = "1.28.0")] pub use core::slice::SliceIndex; #[unstable(feature = "exact_chunks", issue = "47115")] pub use core::slice::{ExactChunks, ExactChunksMut}; @@ -170,1057 +170,9 @@ mod hack { } } -#[lang = "slice"] +#[lang = "slice_alloc"] #[cfg(not(test))] impl [T] { - /// Returns the number of elements in the slice. - /// - /// # Examples - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert_eq!(a.len(), 3); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn len(&self) -> usize { - core_slice::SliceExt::len(self) - } - - /// Returns `true` if the slice has a length of 0. - /// - /// # Examples - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert!(!a.is_empty()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn is_empty(&self) -> bool { - core_slice::SliceExt::is_empty(self) - } - - /// Returns the first element of the slice, or `None` if it is empty. 
- /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert_eq!(Some(&10), v.first()); - /// - /// let w: &[i32] = &[]; - /// assert_eq!(None, w.first()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn first(&self) -> Option<&T> { - core_slice::SliceExt::first(self) - } - - /// Returns a mutable pointer to the first element of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some(first) = x.first_mut() { - /// *first = 5; - /// } - /// assert_eq!(x, &[5, 1, 2]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn first_mut(&mut self) -> Option<&mut T> { - core_slice::SliceExt::first_mut(self) - } - - /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &[0, 1, 2]; - /// - /// if let Some((first, elements)) = x.split_first() { - /// assert_eq!(first, &0); - /// assert_eq!(elements, &[1, 2]); - /// } - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_first(&self) -> Option<(&T, &[T])> { - core_slice::SliceExt::split_first(self) - } - - /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some((first, elements)) = x.split_first_mut() { - /// *first = 3; - /// elements[0] = 4; - /// elements[1] = 5; - /// } - /// assert_eq!(x, &[3, 4, 5]); - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> { - core_slice::SliceExt::split_first_mut(self) - } - - /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. 
- /// - /// # Examples - /// - /// ``` - /// let x = &[0, 1, 2]; - /// - /// if let Some((last, elements)) = x.split_last() { - /// assert_eq!(last, &2); - /// assert_eq!(elements, &[0, 1]); - /// } - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_last(&self) -> Option<(&T, &[T])> { - core_slice::SliceExt::split_last(self) - - } - - /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some((last, elements)) = x.split_last_mut() { - /// *last = 3; - /// elements[0] = 4; - /// elements[1] = 5; - /// } - /// assert_eq!(x, &[4, 5, 3]); - /// ``` - #[stable(feature = "slice_splits", since = "1.5.0")] - #[inline] - pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> { - core_slice::SliceExt::split_last_mut(self) - } - - /// Returns the last element of the slice, or `None` if it is empty. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert_eq!(Some(&30), v.last()); - /// - /// let w: &[i32] = &[]; - /// assert_eq!(None, w.last()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn last(&self) -> Option<&T> { - core_slice::SliceExt::last(self) - } - - /// Returns a mutable pointer to the last item in the slice. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some(last) = x.last_mut() { - /// *last = 10; - /// } - /// assert_eq!(x, &[0, 1, 10]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn last_mut(&mut self) -> Option<&mut T> { - core_slice::SliceExt::last_mut(self) - } - - /// Returns a reference to an element or subslice depending on the type of - /// index. - /// - /// - If given a position, returns a reference to the element at that - /// position or `None` if out of bounds. 
- /// - If given a range, returns the subslice corresponding to that range, - /// or `None` if out of bounds. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert_eq!(Some(&40), v.get(1)); - /// assert_eq!(Some(&[10, 40][..]), v.get(0..2)); - /// assert_eq!(None, v.get(3)); - /// assert_eq!(None, v.get(0..4)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn get(&self, index: I) -> Option<&I::Output> - where I: SliceIndex - { - core_slice::SliceExt::get(self, index) - } - - /// Returns a mutable reference to an element or subslice depending on the - /// type of index (see [`get`]) or `None` if the index is out of bounds. - /// - /// [`get`]: #method.get - /// - /// # Examples - /// - /// ``` - /// let x = &mut [0, 1, 2]; - /// - /// if let Some(elem) = x.get_mut(1) { - /// *elem = 42; - /// } - /// assert_eq!(x, &[0, 42, 2]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn get_mut(&mut self, index: I) -> Option<&mut I::Output> - where I: SliceIndex - { - core_slice::SliceExt::get_mut(self, index) - } - - /// Returns a reference to an element or subslice, without doing bounds - /// checking. - /// - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`get`]. - /// - /// [`get`]: #method.get - /// - /// # Examples - /// - /// ``` - /// let x = &[1, 2, 4]; - /// - /// unsafe { - /// assert_eq!(x.get_unchecked(1), &2); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub unsafe fn get_unchecked(&self, index: I) -> &I::Output - where I: SliceIndex - { - core_slice::SliceExt::get_unchecked(self, index) - } - - /// Returns a mutable reference to an element or subslice, without doing - /// bounds checking. - /// - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`get_mut`]. 
- /// - /// [`get_mut`]: #method.get_mut - /// - /// # Examples - /// - /// ``` - /// let x = &mut [1, 2, 4]; - /// - /// unsafe { - /// let elem = x.get_unchecked_mut(1); - /// *elem = 13; - /// } - /// assert_eq!(x, &[1, 13, 4]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output - where I: SliceIndex - { - core_slice::SliceExt::get_unchecked_mut(self, index) - } - - /// Returns a raw pointer to the slice's buffer. - /// - /// The caller must ensure that the slice outlives the pointer this - /// function returns, or else it will end up pointing to garbage. - /// - /// Modifying the container referenced by this slice may cause its buffer - /// to be reallocated, which would also make any pointers to it invalid. - /// - /// # Examples - /// - /// ``` - /// let x = &[1, 2, 4]; - /// let x_ptr = x.as_ptr(); - /// - /// unsafe { - /// for i in 0..x.len() { - /// assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize)); - /// } - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn as_ptr(&self) -> *const T { - core_slice::SliceExt::as_ptr(self) - } - - /// Returns an unsafe mutable pointer to the slice's buffer. - /// - /// The caller must ensure that the slice outlives the pointer this - /// function returns, or else it will end up pointing to garbage. - /// - /// Modifying the container referenced by this slice may cause its buffer - /// to be reallocated, which would also make any pointers to it invalid. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [1, 2, 4]; - /// let x_ptr = x.as_mut_ptr(); - /// - /// unsafe { - /// for i in 0..x.len() { - /// *x_ptr.offset(i as isize) += 2; - /// } - /// } - /// assert_eq!(x, &[3, 4, 6]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - core_slice::SliceExt::as_mut_ptr(self) - } - - /// Swaps two elements in the slice. 
- /// - /// # Arguments - /// - /// * a - The index of the first element - /// * b - The index of the second element - /// - /// # Panics - /// - /// Panics if `a` or `b` are out of bounds. - /// - /// # Examples - /// - /// ``` - /// let mut v = ["a", "b", "c", "d"]; - /// v.swap(1, 3); - /// assert!(v == ["a", "d", "c", "b"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn swap(&mut self, a: usize, b: usize) { - core_slice::SliceExt::swap(self, a, b) - } - - /// Reverses the order of elements in the slice, in place. - /// - /// # Examples - /// - /// ``` - /// let mut v = [1, 2, 3]; - /// v.reverse(); - /// assert!(v == [3, 2, 1]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn reverse(&mut self) { - core_slice::SliceExt::reverse(self) - } - - /// Returns an iterator over the slice. - /// - /// # Examples - /// - /// ``` - /// let x = &[1, 2, 4]; - /// let mut iterator = x.iter(); - /// - /// assert_eq!(iterator.next(), Some(&1)); - /// assert_eq!(iterator.next(), Some(&2)); - /// assert_eq!(iterator.next(), Some(&4)); - /// assert_eq!(iterator.next(), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn iter(&self) -> Iter { - core_slice::SliceExt::iter(self) - } - - /// Returns an iterator that allows modifying each value. - /// - /// # Examples - /// - /// ``` - /// let x = &mut [1, 2, 4]; - /// for elem in x.iter_mut() { - /// *elem += 2; - /// } - /// assert_eq!(x, &[3, 4, 6]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn iter_mut(&mut self) -> IterMut { - core_slice::SliceExt::iter_mut(self) - } - - /// Returns an iterator over all contiguous windows of length - /// `size`. The windows overlap. If the slice is shorter than - /// `size`, the iterator returns no values. - /// - /// # Panics - /// - /// Panics if `size` is 0. 
- /// - /// # Examples - /// - /// ``` - /// let slice = ['r', 'u', 's', 't']; - /// let mut iter = slice.windows(2); - /// assert_eq!(iter.next().unwrap(), &['r', 'u']); - /// assert_eq!(iter.next().unwrap(), &['u', 's']); - /// assert_eq!(iter.next().unwrap(), &['s', 't']); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// If the slice is shorter than `size`: - /// - /// ``` - /// let slice = ['f', 'o', 'o']; - /// let mut iter = slice.windows(4); - /// assert!(iter.next().is_none()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn windows(&self, size: usize) -> Windows { - core_slice::SliceExt::windows(self, size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a - /// time. The chunks are slices and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last chunk will - /// not have length `chunk_size`. - /// - /// See [`exact_chunks`] for a variant of this iterator that returns chunks - /// of always exactly `chunk_size` elements. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// let slice = ['l', 'o', 'r', 'e', 'm']; - /// let mut iter = slice.chunks(2); - /// assert_eq!(iter.next().unwrap(), &['l', 'o']); - /// assert_eq!(iter.next().unwrap(), &['r', 'e']); - /// assert_eq!(iter.next().unwrap(), &['m']); - /// assert!(iter.next().is_none()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn chunks(&self, chunk_size: usize) -> Chunks { - core_slice::SliceExt::chunks(self, chunk_size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a - /// time. The chunks are slices and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last up to `chunk_size-1` - /// elements will be omitted. 
- /// - /// Due to each chunk having exactly `chunk_size` elements, the compiler - /// can often optimize the resulting code better than in the case of - /// [`chunks`]. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// #![feature(exact_chunks)] - /// - /// let slice = ['l', 'o', 'r', 'e', 'm']; - /// let mut iter = slice.exact_chunks(2); - /// assert_eq!(iter.next().unwrap(), &['l', 'o']); - /// assert_eq!(iter.next().unwrap(), &['r', 'e']); - /// assert!(iter.next().is_none()); - /// ``` - #[unstable(feature = "exact_chunks", issue = "47115")] - #[inline] - pub fn exact_chunks(&self, chunk_size: usize) -> ExactChunks { - core_slice::SliceExt::exact_chunks(self, chunk_size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable slices, and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last chunk will not - /// have length `chunk_size`. - /// - /// See [`exact_chunks_mut`] for a variant of this iterator that returns chunks - /// of always exactly `chunk_size` elements. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// let v = &mut [0, 0, 0, 0, 0]; - /// let mut count = 1; - /// - /// for chunk in v.chunks_mut(2) { - /// for elem in chunk.iter_mut() { - /// *elem += count; - /// } - /// count += 1; - /// } - /// assert_eq!(v, &[1, 1, 2, 2, 3]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut { - core_slice::SliceExt::chunks_mut(self, chunk_size) - } - - /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable slices, and do not overlap. If `chunk_size` does - /// not divide the length of the slice, then the last up to `chunk_size-1` - /// elements will be omitted. 
- /// - /// - /// Due to each chunk having exactly `chunk_size` elements, the compiler - /// can often optimize the resulting code better than in the case of - /// [`chunks_mut`]. - /// - /// # Panics - /// - /// Panics if `chunk_size` is 0. - /// - /// # Examples - /// - /// ``` - /// #![feature(exact_chunks)] - /// - /// let v = &mut [0, 0, 0, 0, 0]; - /// let mut count = 1; - /// - /// for chunk in v.exact_chunks_mut(2) { - /// for elem in chunk.iter_mut() { - /// *elem += count; - /// } - /// count += 1; - /// } - /// assert_eq!(v, &[1, 1, 2, 2, 0]); - /// ``` - #[unstable(feature = "exact_chunks", issue = "47115")] - #[inline] - pub fn exact_chunks_mut(&mut self, chunk_size: usize) -> ExactChunksMut { - core_slice::SliceExt::exact_chunks_mut(self, chunk_size) - } - - /// Divides one slice into two at an index. - /// - /// The first will contain all indices from `[0, mid)` (excluding - /// the index `mid` itself) and the second will contain all - /// indices from `[mid, len)` (excluding the index `len` itself). - /// - /// # Panics - /// - /// Panics if `mid > len`. - /// - /// # Examples - /// - /// ``` - /// let v = [1, 2, 3, 4, 5, 6]; - /// - /// { - /// let (left, right) = v.split_at(0); - /// assert!(left == []); - /// assert!(right == [1, 2, 3, 4, 5, 6]); - /// } - /// - /// { - /// let (left, right) = v.split_at(2); - /// assert!(left == [1, 2]); - /// assert!(right == [3, 4, 5, 6]); - /// } - /// - /// { - /// let (left, right) = v.split_at(6); - /// assert!(left == [1, 2, 3, 4, 5, 6]); - /// assert!(right == []); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_at(&self, mid: usize) -> (&[T], &[T]) { - core_slice::SliceExt::split_at(self, mid) - } - - /// Divides one mutable slice into two at an index. - /// - /// The first will contain all indices from `[0, mid)` (excluding - /// the index `mid` itself) and the second will contain all - /// indices from `[mid, len)` (excluding the index `len` itself). 
- /// - /// # Panics - /// - /// Panics if `mid > len`. - /// - /// # Examples - /// - /// ``` - /// let mut v = [1, 0, 3, 0, 5, 6]; - /// // scoped to restrict the lifetime of the borrows - /// { - /// let (left, right) = v.split_at_mut(2); - /// assert!(left == [1, 0]); - /// assert!(right == [3, 0, 5, 6]); - /// left[1] = 2; - /// right[1] = 4; - /// } - /// assert!(v == [1, 2, 3, 4, 5, 6]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { - core_slice::SliceExt::split_at_mut(self, mid) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`. The matched element is not contained in the subslices. - /// - /// # Examples - /// - /// ``` - /// let slice = [10, 40, 33, 20]; - /// let mut iter = slice.split(|num| num % 3 == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[10, 40]); - /// assert_eq!(iter.next().unwrap(), &[20]); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// If the first element is matched, an empty slice will be the first item - /// returned by the iterator. 
Similarly, if the last element in the slice - /// is matched, an empty slice will be the last item returned by the - /// iterator: - /// - /// ``` - /// let slice = [10, 40, 33]; - /// let mut iter = slice.split(|num| num % 3 == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[10, 40]); - /// assert_eq!(iter.next().unwrap(), &[]); - /// assert!(iter.next().is_none()); - /// ``` - /// - /// If two matched elements are directly adjacent, an empty slice will be - /// present between them: - /// - /// ``` - /// let slice = [10, 6, 33, 20]; - /// let mut iter = slice.split(|num| num % 3 == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[10]); - /// assert_eq!(iter.next().unwrap(), &[]); - /// assert_eq!(iter.next().unwrap(), &[20]); - /// assert!(iter.next().is_none()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split(&self, pred: F) -> Split - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::split(self, pred) - } - - /// Returns an iterator over mutable subslices separated by elements that - /// match `pred`. The matched element is not contained in the subslices. - /// - /// # Examples - /// - /// ``` - /// let mut v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.split_mut(|num| *num % 3 == 0) { - /// group[0] = 1; - /// } - /// assert_eq!(v, [1, 40, 30, 1, 60, 1]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_mut(&mut self, pred: F) -> SplitMut - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::split_mut(self, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`, starting at the end of the slice and working backwards. - /// The matched element is not contained in the subslices. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rsplit)] - /// - /// let slice = [11, 22, 33, 0, 44, 55]; - /// let mut iter = slice.rsplit(|num| *num == 0); - /// - /// assert_eq!(iter.next().unwrap(), &[44, 55]); - /// assert_eq!(iter.next().unwrap(), &[11, 22, 33]); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// As with `split()`, if the first or last element is matched, an empty - /// slice will be the first (or last) item returned by the iterator. - /// - /// ``` - /// #![feature(slice_rsplit)] - /// - /// let v = &[0, 1, 1, 2, 3, 5, 8]; - /// let mut it = v.rsplit(|n| *n % 2 == 0); - /// assert_eq!(it.next().unwrap(), &[]); - /// assert_eq!(it.next().unwrap(), &[3, 5]); - /// assert_eq!(it.next().unwrap(), &[1, 1]); - /// assert_eq!(it.next().unwrap(), &[]); - /// assert_eq!(it.next(), None); - /// ``` - #[unstable(feature = "slice_rsplit", issue = "41020")] - #[inline] - pub fn rsplit(&self, pred: F) -> RSplit - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplit(self, pred) - } - - /// Returns an iterator over mutable subslices separated by elements that - /// match `pred`, starting at the end of the slice and working - /// backwards. The matched element is not contained in the subslices. - /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rsplit)] - /// - /// let mut v = [100, 400, 300, 200, 600, 500]; - /// - /// let mut count = 0; - /// for group in v.rsplit_mut(|num| *num % 3 == 0) { - /// count += 1; - /// group[0] = count; - /// } - /// assert_eq!(v, [3, 400, 300, 2, 600, 1]); - /// ``` - /// - #[unstable(feature = "slice_rsplit", issue = "41020")] - #[inline] - pub fn rsplit_mut(&mut self, pred: F) -> RSplitMut - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplit_mut(self, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`, limited to returning at most `n` items. The matched element is - /// not contained in the subslices. 
- /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// Print the slice split once by numbers divisible by 3 (i.e. `[10, 40]`, - /// `[20, 60, 50]`): - /// - /// ``` - /// let v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.splitn(2, |num| *num % 3 == 0) { - /// println!("{:?}", group); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn splitn(&self, n: usize, pred: F) -> SplitN - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::splitn(self, n, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred`, limited to returning at most `n` items. The matched element is - /// not contained in the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// ``` - /// let mut v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.splitn_mut(2, |num| *num % 3 == 0) { - /// group[0] = 1; - /// } - /// assert_eq!(v, [1, 40, 30, 1, 60, 50]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn splitn_mut(&mut self, n: usize, pred: F) -> SplitNMut - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::splitn_mut(self, n, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred` limited to returning at most `n` items. This starts at the end of - /// the slice and works backwards. The matched element is not contained in - /// the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// Print the slice split once, starting from the end, by numbers divisible - /// by 3 (i.e. 
`[50]`, `[10, 40, 30, 20]`): - /// - /// ``` - /// let v = [10, 40, 30, 20, 60, 50]; - /// - /// for group in v.rsplitn(2, |num| *num % 3 == 0) { - /// println!("{:?}", group); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplitn(&self, n: usize, pred: F) -> RSplitN - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplitn(self, n, pred) - } - - /// Returns an iterator over subslices separated by elements that match - /// `pred` limited to returning at most `n` items. This starts at the end of - /// the slice and works backwards. The matched element is not contained in - /// the subslices. - /// - /// The last element returned, if any, will contain the remainder of the - /// slice. - /// - /// # Examples - /// - /// ``` - /// let mut s = [10, 40, 30, 20, 60, 50]; - /// - /// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) { - /// group[0] = 1; - /// } - /// assert_eq!(s, [1, 40, 30, 20, 60, 1]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplitn_mut(&mut self, n: usize, pred: F) -> RSplitNMut - where F: FnMut(&T) -> bool - { - core_slice::SliceExt::rsplitn_mut(self, n, pred) - } - - /// Returns `true` if the slice contains an element with the given value. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert!(v.contains(&30)); - /// assert!(!v.contains(&50)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn contains(&self, x: &T) -> bool - where T: PartialEq - { - core_slice::SliceExt::contains(self, x) - } - - /// Returns `true` if `needle` is a prefix of the slice. 
- /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert!(v.starts_with(&[10])); - /// assert!(v.starts_with(&[10, 40])); - /// assert!(!v.starts_with(&[50])); - /// assert!(!v.starts_with(&[10, 50])); - /// ``` - /// - /// Always returns `true` if `needle` is an empty slice: - /// - /// ``` - /// let v = &[10, 40, 30]; - /// assert!(v.starts_with(&[])); - /// let v: &[u8] = &[]; - /// assert!(v.starts_with(&[])); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn starts_with(&self, needle: &[T]) -> bool - where T: PartialEq - { - core_slice::SliceExt::starts_with(self, needle) - } - - /// Returns `true` if `needle` is a suffix of the slice. - /// - /// # Examples - /// - /// ``` - /// let v = [10, 40, 30]; - /// assert!(v.ends_with(&[30])); - /// assert!(v.ends_with(&[40, 30])); - /// assert!(!v.ends_with(&[50])); - /// assert!(!v.ends_with(&[50, 30])); - /// ``` - /// - /// Always returns `true` if `needle` is an empty slice: - /// - /// ``` - /// let v = &[10, 40, 30]; - /// assert!(v.ends_with(&[])); - /// let v: &[u8] = &[]; - /// assert!(v.ends_with(&[])); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn ends_with(&self, needle: &[T]) -> bool - where T: PartialEq - { - core_slice::SliceExt::ends_with(self, needle) - } - - /// Binary searches this sorted slice for a given element. - /// - /// If the value is found then `Ok` is returned, containing the - /// index of the matching element; if the value is not found then - /// `Err` is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// # Examples - /// - /// Looks up a series of four elements. The first is found, with a - /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1, 4]`. 
- /// - /// ``` - /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; - /// - /// assert_eq!(s.binary_search(&13), Ok(9)); - /// assert_eq!(s.binary_search(&4), Err(7)); - /// assert_eq!(s.binary_search(&100), Err(13)); - /// let r = s.binary_search(&1); - /// assert!(match r { Ok(1...4) => true, _ => false, }); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn binary_search(&self, x: &T) -> Result - where T: Ord - { - core_slice::SliceExt::binary_search(self, x) - } - - /// Binary searches this sorted slice with a comparator function. - /// - /// The comparator function should implement an order consistent - /// with the sort order of the underlying slice, returning an - /// order code that indicates whether its argument is `Less`, - /// `Equal` or `Greater` the desired target. - /// - /// If a matching value is found then returns `Ok`, containing - /// the index for the matched element; if no match is found then - /// `Err` is returned, containing the index where a matching - /// element could be inserted while maintaining sorted order. - /// - /// # Examples - /// - /// Looks up a series of four elements. The first is found, with a - /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1, 4]`. 
- /// - /// ``` - /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; - /// - /// let seek = 13; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9)); - /// let seek = 4; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7)); - /// let seek = 100; - /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13)); - /// let seek = 1; - /// let r = s.binary_search_by(|probe| probe.cmp(&seek)); - /// assert!(match r { Ok(1...4) => true, _ => false, }); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result - where F: FnMut(&'a T) -> Ordering - { - core_slice::SliceExt::binary_search_by(self, f) - } - - /// Binary searches this sorted slice with a key extraction function. - /// - /// Assumes that the slice is sorted by the key, for instance with - /// [`sort_by_key`] using the same key extraction function. - /// - /// If a matching value is found then returns `Ok`, containing the - /// index for the matched element; if no match is found then `Err` - /// is returned, containing the index where a matching element could - /// be inserted while maintaining sorted order. - /// - /// [`sort_by_key`]: #method.sort_by_key - /// - /// # Examples - /// - /// Looks up a series of four elements in a slice of pairs sorted by - /// their second elements. The first is found, with a uniquely - /// determined position; the second and third are not found; the - /// fourth could match any position in `[1, 4]`. 
- /// - /// ``` - /// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1), - /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13), - /// (1, 21), (2, 34), (4, 55)]; - /// - /// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9)); - /// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7)); - /// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13)); - /// let r = s.binary_search_by_key(&1, |&(a,b)| b); - /// assert!(match r { Ok(1...4) => true, _ => false, }); - /// ``` - #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] - #[inline] - pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result - where F: FnMut(&'a T) -> B, - B: Ord - { - core_slice::SliceExt::binary_search_by_key(self, b, f) - } - /// Sorts the slice. /// /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. @@ -1294,7 +246,8 @@ impl [T] { /// Sorts the slice with a key extraction function. /// - /// This sort is stable (i.e. does not reorder equal elements) and `O(n log n)` worst-case. + /// This sort is stable (i.e. does not reorder equal elements) and `O(m n log(m n))` + /// worst-case, where the key function is `O(m)`. /// /// When applicable, unstable sorting is preferred because it is generally faster than stable /// sorting and it doesn't allocate auxiliary memory. @@ -1320,16 +273,22 @@ impl [T] { /// ``` #[stable(feature = "slice_sort_by_key", since = "1.7.0")] #[inline] - pub fn sort_by_key(&mut self, mut f: F) - where F: FnMut(&T) -> B, B: Ord + pub fn sort_by_key(&mut self, mut f: F) + where F: FnMut(&T) -> K, K: Ord { merge_sort(self, |a, b| f(a).lt(&f(b))); } - /// Sorts the slice, but may not preserve the order of equal elements. + /// Sorts the slice with a key extraction function. /// - /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), - /// and `O(n log n)` worst-case. + /// During sorting, the key function is called only once per element. 
+ /// + /// This sort is stable (i.e. does not reorder equal elements) and `O(m n + n log n)` + /// worst-case, where the key function is `O(m)`. + /// + /// For simple key functions (e.g. functions that are property accesses or + /// basic operations), [`sort_by_key`](#method.sort_by_key) is likely to be + /// faster. /// /// # Current implementation /// @@ -1339,354 +298,56 @@ impl [T] { /// randomization to avoid degenerate cases, but with a fixed seed to always provide /// deterministic behavior. /// - /// It is typically faster than stable sorting, except in a few special cases, e.g. when the - /// slice consists of several concatenated sorted sequences. + /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the + /// length of the slice. /// /// # Examples /// /// ``` - /// let mut v = [-5, 4, 1, -3, 2]; + /// #![feature(slice_sort_by_cached_key)] + /// let mut v = [-5i32, 4, 32, -3, 2]; /// - /// v.sort_unstable(); - /// assert!(v == [-5, -3, 1, 2, 4]); + /// v.sort_by_cached_key(|k| k.to_string()); + /// assert!(v == [-3, -5, 2, 32, 4]); /// ``` /// /// [pdqsort]: https://github.com/orlp/pdqsort - #[stable(feature = "sort_unstable", since = "1.20.0")] + #[unstable(feature = "slice_sort_by_cached_key", issue = "34447")] #[inline] - pub fn sort_unstable(&mut self) - where T: Ord + pub fn sort_by_cached_key(&mut self, f: F) + where F: FnMut(&T) -> K, K: Ord { - core_slice::SliceExt::sort_unstable(self); - } + // Helper macro for indexing our vector by the smallest possible type, to reduce allocation. + macro_rules! sort_by_key { + ($t:ty, $slice:ident, $f:ident) => ({ + let mut indices: Vec<_> = + $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect(); + // The elements of `indices` are unique, as they are indexed, so any sort will be + // stable with respect to the original slice. We use `sort_unstable` here because + // it requires less memory allocation. 
+ indices.sort_unstable(); + for i in 0..$slice.len() { + let mut index = indices[i].1; + while (index as usize) < i { + index = indices[index as usize].1; + } + indices[i].1 = index; + $slice.swap(i, index as usize); + } + }) + } - /// Sorts the slice with a comparator function, but may not preserve the order of equal - /// elements. - /// - /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), - /// and `O(n log n)` worst-case. - /// - /// # Current implementation - /// - /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, - /// which combines the fast average case of randomized quicksort with the fast worst case of - /// heapsort, while achieving linear time on slices with certain patterns. It uses some - /// randomization to avoid degenerate cases, but with a fixed seed to always provide - /// deterministic behavior. - /// - /// It is typically faster than stable sorting, except in a few special cases, e.g. when the - /// slice consists of several concatenated sorted sequences. - /// - /// # Examples - /// - /// ``` - /// let mut v = [5, 4, 1, 3, 2]; - /// v.sort_unstable_by(|a, b| a.cmp(b)); - /// assert!(v == [1, 2, 3, 4, 5]); - /// - /// // reverse sorting - /// v.sort_unstable_by(|a, b| b.cmp(a)); - /// assert!(v == [5, 4, 3, 2, 1]); - /// ``` - /// - /// [pdqsort]: https://github.com/orlp/pdqsort - #[stable(feature = "sort_unstable", since = "1.20.0")] - #[inline] - pub fn sort_unstable_by(&mut self, compare: F) - where F: FnMut(&T, &T) -> Ordering - { - core_slice::SliceExt::sort_unstable_by(self, compare); - } + let sz_u8 = mem::size_of::<(K, u8)>(); + let sz_u16 = mem::size_of::<(K, u16)>(); + let sz_u32 = mem::size_of::<(K, u32)>(); + let sz_usize = mem::size_of::<(K, usize)>(); - /// Sorts the slice with a key extraction function, but may not preserve the order of equal - /// elements. - /// - /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. 
does not allocate), - /// and `O(n log n)` worst-case. - /// - /// # Current implementation - /// - /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, - /// which combines the fast average case of randomized quicksort with the fast worst case of - /// heapsort, while achieving linear time on slices with certain patterns. It uses some - /// randomization to avoid degenerate cases, but with a fixed seed to always provide - /// deterministic behavior. - /// - /// It is typically faster than stable sorting, except in a few special cases, e.g. when the - /// slice consists of several concatenated sorted sequences. - /// - /// # Examples - /// - /// ``` - /// let mut v = [-5i32, 4, 1, -3, 2]; - /// - /// v.sort_unstable_by_key(|k| k.abs()); - /// assert!(v == [1, 2, -3, 4, -5]); - /// ``` - /// - /// [pdqsort]: https://github.com/orlp/pdqsort - #[stable(feature = "sort_unstable", since = "1.20.0")] - #[inline] - pub fn sort_unstable_by_key(&mut self, f: F) - where F: FnMut(&T) -> B, - B: Ord - { - core_slice::SliceExt::sort_unstable_by_key(self, f); - } - - /// Rotates the slice in-place such that the first `mid` elements of the - /// slice move to the end while the last `self.len() - mid` elements move to - /// the front. After calling `rotate_left`, the element previously at index - /// `mid` will become the first element in the slice. - /// - /// # Panics - /// - /// This function will panic if `mid` is greater than the length of the - /// slice. Note that `mid == self.len()` does _not_ panic and is a no-op - /// rotation. - /// - /// # Complexity - /// - /// Takes linear (in `self.len()`) time. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a.rotate_left(2); - /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']); - /// ``` - /// - /// Rotating a subslice: - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a[1..5].rotate_left(1); - /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']); - /// ``` - #[unstable(feature = "slice_rotate", issue = "41891")] - pub fn rotate_left(&mut self, mid: usize) { - core_slice::SliceExt::rotate_left(self, mid); - } - - #[unstable(feature = "slice_rotate", issue = "41891")] - #[rustc_deprecated(since = "", reason = "renamed to `rotate_left`")] - pub fn rotate(&mut self, mid: usize) { - core_slice::SliceExt::rotate_left(self, mid); - } - - /// Rotates the slice in-place such that the first `self.len() - k` - /// elements of the slice move to the end while the last `k` elements move - /// to the front. After calling `rotate_right`, the element previously at - /// index `self.len() - k` will become the first element in the slice. - /// - /// # Panics - /// - /// This function will panic if `k` is greater than the length of the - /// slice. Note that `k == self.len()` does _not_ panic and is a no-op - /// rotation. - /// - /// # Complexity - /// - /// Takes linear (in `self.len()`) time. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a.rotate_right(2); - /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']); - /// ``` - /// - /// Rotate a subslice: - /// - /// ``` - /// #![feature(slice_rotate)] - /// - /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; - /// a[1..5].rotate_right(1); - /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']); - /// ``` - #[unstable(feature = "slice_rotate", issue = "41891")] - pub fn rotate_right(&mut self, k: usize) { - core_slice::SliceExt::rotate_right(self, k); - } - - /// Copies the elements from `src` into `self`. - /// - /// The length of `src` must be the same as `self`. - /// - /// If `src` implements `Copy`, it can be more performant to use - /// [`copy_from_slice`]. - /// - /// # Panics - /// - /// This function will panic if the two slices have different lengths. - /// - /// # Examples - /// - /// Cloning two elements from a slice into another: - /// - /// ``` - /// let src = [1, 2, 3, 4]; - /// let mut dst = [0, 0]; - /// - /// dst.clone_from_slice(&src[2..]); - /// - /// assert_eq!(src, [1, 2, 3, 4]); - /// assert_eq!(dst, [3, 4]); - /// ``` - /// - /// Rust enforces that there can only be one mutable reference with no - /// immutable references to a particular piece of data in a particular - /// scope. Because of this, attempting to use `clone_from_slice` on a - /// single slice will result in a compile failure: - /// - /// ```compile_fail - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// slice[..2].clone_from_slice(&slice[3..]); // compile fail! 
- /// ``` - /// - /// To work around this, we can use [`split_at_mut`] to create two distinct - /// sub-slices from a slice: - /// - /// ``` - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// { - /// let (left, right) = slice.split_at_mut(2); - /// left.clone_from_slice(&right[1..]); - /// } - /// - /// assert_eq!(slice, [4, 5, 3, 4, 5]); - /// ``` - /// - /// [`copy_from_slice`]: #method.copy_from_slice - /// [`split_at_mut`]: #method.split_at_mut - #[stable(feature = "clone_from_slice", since = "1.7.0")] - pub fn clone_from_slice(&mut self, src: &[T]) where T: Clone { - core_slice::SliceExt::clone_from_slice(self, src) - } - - /// Copies all elements from `src` into `self`, using a memcpy. - /// - /// The length of `src` must be the same as `self`. - /// - /// If `src` does not implement `Copy`, use [`clone_from_slice`]. - /// - /// # Panics - /// - /// This function will panic if the two slices have different lengths. - /// - /// # Examples - /// - /// Copying two elements from a slice into another: - /// - /// ``` - /// let src = [1, 2, 3, 4]; - /// let mut dst = [0, 0]; - /// - /// dst.copy_from_slice(&src[2..]); - /// - /// assert_eq!(src, [1, 2, 3, 4]); - /// assert_eq!(dst, [3, 4]); - /// ``` - /// - /// Rust enforces that there can only be one mutable reference with no - /// immutable references to a particular piece of data in a particular - /// scope. Because of this, attempting to use `copy_from_slice` on a - /// single slice will result in a compile failure: - /// - /// ```compile_fail - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// slice[..2].copy_from_slice(&slice[3..]); // compile fail! 
- /// ``` - /// - /// To work around this, we can use [`split_at_mut`] to create two distinct - /// sub-slices from a slice: - /// - /// ``` - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// { - /// let (left, right) = slice.split_at_mut(2); - /// left.copy_from_slice(&right[1..]); - /// } - /// - /// assert_eq!(slice, [4, 5, 3, 4, 5]); - /// ``` - /// - /// [`clone_from_slice`]: #method.clone_from_slice - /// [`split_at_mut`]: #method.split_at_mut - #[stable(feature = "copy_from_slice", since = "1.9.0")] - pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy { - core_slice::SliceExt::copy_from_slice(self, src) - } - - /// Swaps all elements in `self` with those in `other`. - /// - /// The length of `other` must be the same as `self`. - /// - /// # Panics - /// - /// This function will panic if the two slices have different lengths. - /// - /// # Example - /// - /// Swapping two elements across slices: - /// - /// ``` - /// #![feature(swap_with_slice)] - /// - /// let mut slice1 = [0, 0]; - /// let mut slice2 = [1, 2, 3, 4]; - /// - /// slice1.swap_with_slice(&mut slice2[2..]); - /// - /// assert_eq!(slice1, [3, 4]); - /// assert_eq!(slice2, [1, 2, 0, 0]); - /// ``` - /// - /// Rust enforces that there can only be one mutable reference to a - /// particular piece of data in a particular scope. Because of this, - /// attempting to use `swap_with_slice` on a single slice will result in - /// a compile failure: - /// - /// ```compile_fail - /// #![feature(swap_with_slice)] - /// - /// let mut slice = [1, 2, 3, 4, 5]; - /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail! 
- /// ``` - /// - /// To work around this, we can use [`split_at_mut`] to create two distinct - /// mutable sub-slices from a slice: - /// - /// ``` - /// #![feature(swap_with_slice)] - /// - /// let mut slice = [1, 2, 3, 4, 5]; - /// - /// { - /// let (left, right) = slice.split_at_mut(2); - /// left.swap_with_slice(&mut right[1..]); - /// } - /// - /// assert_eq!(slice, [4, 5, 3, 1, 2]); - /// ``` - /// - /// [`split_at_mut`]: #method.split_at_mut - #[unstable(feature = "swap_with_slice", issue = "44030")] - pub fn swap_with_slice(&mut self, other: &mut [T]) { - core_slice::SliceExt::swap_with_slice(self, other) + let len = self.len(); + if len < 2 { return } + if sz_u8 < sz_u16 && len <= ( u8::MAX as usize) { return sort_by_key!( u8, self, f) } + if sz_u16 < sz_u32 && len <= (u16::MAX as usize) { return sort_by_key!(u16, self, f) } + if sz_u32 < sz_usize && len <= (u32::MAX as usize) { return sort_by_key!(u32, self, f) } + sort_by_key!(usize, self, f) } /// Copies `self` into a new `Vec`. @@ -1728,18 +389,82 @@ impl [T] { // NB see hack module in this file hack::into_vec(self) } + + /// Creates a vector by repeating a slice `n` times. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(repeat_generic_slice)] + /// + /// fn main() { + /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]); + /// } + /// ``` + #[unstable(feature = "repeat_generic_slice", + reason = "it's on str, why not on slice?", + issue = "48784")] + pub fn repeat(&self, n: usize) -> Vec where T: Copy { + if n == 0 { + return Vec::new(); + } + + // If `n` is larger than zero, it can be split as + // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`. + // `2^expn` is the number represented by the leftmost '1' bit of `n`, + // and `rem` is the remaining part of `n`. + + // Using `Vec` to access `set_len()`. + let mut buf = Vec::with_capacity(self.len() * n); + + // `2^expn` repetition is done by doubling `buf` `expn`-times. 
+ buf.extend(self); + { + let mut m = n >> 1; + // If `m > 0`, there are remaining bits up to the leftmost '1'. + while m > 0 { + // `buf.extend(buf)`: + unsafe { + ptr::copy_nonoverlapping( + buf.as_ptr(), + (buf.as_mut_ptr() as *mut T).add(buf.len()), + buf.len(), + ); + // `buf` has capacity of `self.len() * n`. + let buf_len = buf.len(); + buf.set_len(buf_len * 2); + } + + m >>= 1; + } + } + + // `rem` (`= n - 2^expn`) repetition is done by copying + // first `rem` repetitions from `buf` itself. + let rem_len = self.len() * n - buf.len(); // `self.len() * rem` + if rem_len > 0 { + // `buf.extend(buf[0 .. rem_len])`: + unsafe { + // This is non-overlapping since `2^expn > rem`. + ptr::copy_nonoverlapping( + buf.as_ptr(), + (buf.as_mut_ptr() as *mut T).add(buf.len()), + rem_len, + ); + // `buf.len() + rem_len` equals to `buf.capacity()` (`= self.len() * n`). + let buf_cap = buf.capacity(); + buf.set_len(buf_cap); + } + } + buf + } } -#[lang = "slice_u8"] +#[lang = "slice_u8_alloc"] #[cfg(not(test))] impl [u8] { - /// Checks if all bytes in this slice are within the ASCII range. - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn is_ascii(&self) -> bool { - self.iter().all(|b| b.is_ascii()) - } - /// Returns a vector containing a copy of this slice where each byte /// is mapped to its ASCII upper case equivalent. /// @@ -1773,53 +498,6 @@ impl [u8] { me.make_ascii_lowercase(); me } - - /// Checks that two slices are an ASCII case-insensitive match. - /// - /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`, - /// but without allocating and copying temporaries. - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool { - self.len() == other.len() && - self.iter().zip(other).all(|(a, b)| { - a.eq_ignore_ascii_case(b) - }) - } - - /// Converts this slice to its ASCII upper case equivalent in-place. 
- /// - /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new uppercased value without modifying the existing one, use - /// [`to_ascii_uppercase`]. - /// - /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn make_ascii_uppercase(&mut self) { - for byte in self { - byte.make_ascii_uppercase(); - } - } - - /// Converts this slice to its ASCII lower case equivalent in-place. - /// - /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new lowercased value without modifying the existing one, use - /// [`to_ascii_lowercase`]. - /// - /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn make_ascii_lowercase(&mut self) { - for byte in self { - byte.make_ascii_lowercase(); - } - } } //////////////////////////////////////////////////////////////////////////////// @@ -1888,15 +566,17 @@ impl> SliceConcatExt for [V] { } fn join(&self, sep: &T) -> Vec { + let mut iter = self.iter(); + let first = match iter.next() { + Some(first) => first, + None => return vec![], + }; let size = self.iter().fold(0, |acc, v| acc + v.borrow().len()); let mut result = Vec::with_capacity(size + self.len()); - let mut first = true; - for v in self { - if first { - first = false - } else { - result.push(sep.clone()) - } + result.extend_from_slice(first.borrow()); + + for v in iter { + result.push(sep.clone()); result.extend_from_slice(v.borrow()) } result diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index a00e3d17dd00..870bf971cd3f 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -10,6 +10,8 @@ //! Unicode string slices. //! +//! *[See also the `str` primitive type](../../std/primitive.str.html).* +//! //! 
The `&str` type is one of the two main string types, the other being `String`. //! Unlike its `String` counterpart, its contents are borrowed. //! @@ -29,8 +31,6 @@ //! ``` //! let hello_world: &'static str = "Hello, world!"; //! ``` -//! -//! *[See also the `str` primitive type](../../std/primitive.str.html).* #![stable(feature = "rust1", since = "1.0.0")] @@ -43,16 +43,15 @@ use core::str as core_str; use core::str::pattern::Pattern; use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; +use core::ptr; use core::iter::FusedIterator; -use std_unicode::str::{UnicodeStr, Utf16Encoder}; +use core::unicode::conversions; -use vec_deque::VecDeque; use borrow::{Borrow, ToOwned}; -use string::String; -use std_unicode; -use vec::Vec; -use slice::{SliceConcatExt, SliceIndex}; use boxed::Box; +use slice::{SliceConcatExt, SliceIndex}; +use string::String; +use vec::Vec; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{FromStr, Utf8Error}; @@ -74,10 +73,13 @@ pub use core::str::{from_utf8, from_utf8_mut, Chars, CharIndices, Bytes}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError}; #[stable(feature = "rust1", since = "1.0.0")] -pub use std_unicode::str::SplitWhitespace; +pub use core::str::SplitWhitespace; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::pattern; - +#[stable(feature = "encode_utf16", since = "1.8.0")] +pub use core::str::EncodeUtf16; +#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +pub use core::str::SplitAsciiWhitespace; #[unstable(feature = "slice_concat_ext", reason = "trait should not have to exist", @@ -86,47 +88,13 @@ impl> SliceConcatExt for [S] { type Output = String; fn concat(&self) -> String { - if self.is_empty() { - return String::new(); - } - - // `len` calculation may overflow but push_str will check boundaries - let len = self.iter().map(|s| s.borrow().len()).sum(); - let mut result 
= String::with_capacity(len); - - for s in self { - result.push_str(s.borrow()) - } - - result + self.join("") } fn join(&self, sep: &str) -> String { - if self.is_empty() { - return String::new(); + unsafe { + String::from_utf8_unchecked( join_generic_copy(self, sep.as_bytes()) ) } - - // concat is faster - if sep.is_empty() { - return self.concat(); - } - - // this is wrong without the guarantee that `self` is non-empty - // `len` calculation may overflow but push_str but will check boundaries - let len = sep.len() * (self.len() - 1) + - self.iter().map(|s| s.borrow().len()).sum::(); - let mut result = String::with_capacity(len); - let mut first = true; - - for s in self { - if first { - first = false; - } else { - result.push_str(sep); - } - result.push_str(s.borrow()); - } - result } fn connect(&self, sep: &str) -> String { @@ -134,46 +102,96 @@ impl> SliceConcatExt for [S] { } } -/// An iterator of [`u16`] over the string encoded as UTF-16. -/// -/// [`u16`]: ../../std/primitive.u16.html -/// -/// This struct is created by the [`encode_utf16`] method on [`str`]. -/// See its documentation for more. -/// -/// [`encode_utf16`]: ../../std/primitive.str.html#method.encode_utf16 -/// [`str`]: ../../std/primitive.str.html -#[derive(Clone)] -#[stable(feature = "encode_utf16", since = "1.8.0")] -pub struct EncodeUtf16<'a> { - encoder: Utf16Encoder>, +macro_rules! 
spezialize_for_lengths { + ($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => { + let mut target = $target; + let iter = $iter; + let sep_bytes = $separator; + match $separator.len() { + $( + // loops with hardcoded sizes run much faster + // specialize the cases with small separator lengths + $num => { + for s in iter { + copy_slice_and_advance!(target, sep_bytes); + copy_slice_and_advance!(target, s.borrow().as_ref()); + } + }, + )* + _ => { + // arbitrary non-zero size fallback + for s in iter { + copy_slice_and_advance!(target, sep_bytes); + copy_slice_and_advance!(target, s.borrow().as_ref()); + } + } + } + }; } -#[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a> fmt::Debug for EncodeUtf16<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad("EncodeUtf16 { .. }") +macro_rules! copy_slice_and_advance { + ($target:expr, $bytes:expr) => { + let len = $bytes.len(); + let (head, tail) = {$target}.split_at_mut(len); + head.copy_from_slice($bytes); + $target = tail; } } -#[stable(feature = "encode_utf16", since = "1.8.0")] -impl<'a> Iterator for EncodeUtf16<'a> { - type Item = u16; +// Optimized join implementation that works for both Vec (T: Copy) and String's inner vec +// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262) +// For this reason SliceConcatExt is not specialized for T: Copy and SliceConcatExt is the +// only user of this function. It is left in place for the time when that is fixed. 
+// +// the bounds for String-join are S: Borrow and for Vec-join Borrow<[T]> +// [T] and str both impl AsRef<[T]> for some T +// => s.borrow().as_ref() and we always have slices +fn join_generic_copy(slice: &[S], sep: &[T]) -> Vec +where + T: Copy, + B: AsRef<[T]> + ?Sized, + S: Borrow, +{ + let sep_len = sep.len(); + let mut iter = slice.iter(); - #[inline] - fn next(&mut self) -> Option { - self.encoder.next() - } + // the first slice is the only one without a separator preceding it + let first = match iter.next() { + Some(first) => first, + None => return vec![], + }; - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.encoder.size_hint() + // compute the exact total length of the joined Vec + // if the `len` calculation overflows, we'll panic + // we would have run out of memory anyway and the rest of the function requires + // the entire Vec pre-allocated for safety + let len = sep_len.checked_mul(iter.len()).and_then(|n| { + slice.iter() + .map(|s| s.borrow().as_ref().len()) + .try_fold(n, usize::checked_add) + }).expect("attempt to join into collection with len > usize::MAX"); + + // crucial for safety + let mut result = Vec::with_capacity(len); + assert!(result.capacity() >= len); + + result.extend_from_slice(first.borrow().as_ref()); + + unsafe { + { + let pos = result.len(); + let target = result.get_unchecked_mut(pos..len); + + // copy separator and slices over without bounds checks + // generate loops with hardcoded offsets for small separators + // massive improvements possible (~ x2) + spezialize_for_lengths!(sep, target, iter; 0, 1, 2, 3, 4); + } + result.set_len(len); } + result } -#[unstable(feature = "fused", issue = "35602")] -impl<'a> FusedIterator for EncodeUtf16<'a> {} - #[stable(feature = "rust1", since = "1.0.0")] impl Borrow for String { #[inline] @@ -197,1606 +215,9 @@ impl ToOwned for str { } /// Methods for string slices. -#[lang = "str"] +#[lang = "str_alloc"] #[cfg(not(test))] impl str { - /// Returns the length of `self`. 
- /// - /// This length is in bytes, not [`char`]s or graphemes. In other words, - /// it may not be what a human considers the length of the string. - /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let len = "foo".len(); - /// assert_eq!(3, len); - /// - /// let len = "ƒoo".len(); // fancy f! - /// assert_eq!(4, len); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn len(&self) -> usize { - core_str::StrExt::len(self) - } - - /// Returns `true` if `self` has a length of zero bytes. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = ""; - /// assert!(s.is_empty()); - /// - /// let s = "not empty"; - /// assert!(!s.is_empty()); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn is_empty(&self) -> bool { - core_str::StrExt::is_empty(self) - } - - /// Checks that `index`-th byte lies at the start and/or end of a - /// UTF-8 code point sequence. - /// - /// The start and end of the string (when `index == self.len()`) are - /// considered to be - /// boundaries. - /// - /// Returns `false` if `index` is greater than `self.len()`. - /// - /// # Examples - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// assert!(s.is_char_boundary(0)); - /// // start of `老` - /// assert!(s.is_char_boundary(6)); - /// assert!(s.is_char_boundary(s.len())); - /// - /// // second byte of `ö` - /// assert!(!s.is_char_boundary(2)); - /// - /// // third byte of `老` - /// assert!(!s.is_char_boundary(8)); - /// ``` - #[stable(feature = "is_char_boundary", since = "1.9.0")] - #[inline] - pub fn is_char_boundary(&self, index: usize) -> bool { - core_str::StrExt::is_char_boundary(self, index) - } - - /// Converts a string slice to a byte slice. To convert the byte slice back - /// into a string slice, use the [`str::from_utf8`] function. 
- /// - /// [`str::from_utf8`]: ./str/fn.from_utf8.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bytes = "bors".as_bytes(); - /// assert_eq!(b"bors", bytes); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline(always)] - pub fn as_bytes(&self) -> &[u8] { - core_str::StrExt::as_bytes(self) - } - - /// Converts a mutable string slice to a mutable byte slice. To convert the - /// mutable byte slice back into a mutable string slice, use the - /// [`str::from_utf8_mut`] function. - /// - /// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut s = String::from("Hello"); - /// let bytes = unsafe { s.as_bytes_mut() }; - /// - /// assert_eq!(b"Hello", bytes); - /// ``` - /// - /// Mutability: - /// - /// ``` - /// let mut s = String::from("🗻∈🌏"); - /// - /// unsafe { - /// let bytes = s.as_bytes_mut(); - /// - /// bytes[0] = 0xF0; - /// bytes[1] = 0x9F; - /// bytes[2] = 0x8D; - /// bytes[3] = 0x94; - /// } - /// - /// assert_eq!("🍔∈🌏", s); - /// ``` - #[stable(feature = "str_mut_extras", since = "1.20.0")] - #[inline(always)] - pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { - core_str::StrExt::as_bytes_mut(self) - } - - /// Converts a string slice to a raw pointer. - /// - /// As string slices are a slice of bytes, the raw pointer points to a - /// [`u8`]. This pointer will be pointing to the first byte of the string - /// slice. - /// - /// [`u8`]: primitive.u8.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = "Hello"; - /// let ptr = s.as_ptr(); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn as_ptr(&self) -> *const u8 { - core_str::StrExt::as_ptr(self) - } - - /// Returns a subslice of `str`. - /// - /// This is the non-panicking alternative to indexing the `str`. Returns - /// [`None`] whenever equivalent indexing operation would panic. 
- /// - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// let v = String::from("🗻∈🌏"); - /// - /// assert_eq!(Some("🗻"), v.get(0..4)); - /// - /// // indices not on UTF-8 sequence boundaries - /// assert!(v.get(1..).is_none()); - /// assert!(v.get(..8).is_none()); - /// - /// // out of bounds - /// assert!(v.get(..42).is_none()); - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub fn get>(&self, i: I) -> Option<&I::Output> { - core_str::StrExt::get(self, i) - } - - /// Returns a mutable subslice of `str`. - /// - /// This is the non-panicking alternative to indexing the `str`. Returns - /// [`None`] whenever equivalent indexing operation would panic. - /// - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// ``` - /// let mut v = String::from("hello"); - /// // correct length - /// assert!(v.get_mut(0..5).is_some()); - /// // out of bounds - /// assert!(v.get_mut(..42).is_none()); - /// assert_eq!(Some("he"), v.get_mut(0..2).map(|v| &*v)); - /// - /// assert_eq!("hello", v); - /// { - /// let s = v.get_mut(0..2); - /// let s = s.map(|s| { - /// s.make_ascii_uppercase(); - /// &*s - /// }); - /// assert_eq!(Some("HE"), s); - /// } - /// assert_eq!("HEllo", v); - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub fn get_mut>(&mut self, i: I) -> Option<&mut I::Output> { - core_str::StrExt::get_mut(self, i) - } - - /// Returns a unchecked subslice of `str`. - /// - /// This is the unchecked alternative to indexing the `str`. - /// - /// # Safety - /// - /// Callers of this function are responsible that these preconditions are - /// satisfied: - /// - /// * The starting index must come before the ending index; - /// * Indexes must be within bounds of the original slice; - /// * Indexes must lie on UTF-8 sequence boundaries. 
- /// - /// Failing that, the returned string slice may reference invalid memory or - /// violate the invariants communicated by the `str` type. - /// - /// # Examples - /// - /// ``` - /// let v = "🗻∈🌏"; - /// unsafe { - /// assert_eq!("🗻", v.get_unchecked(0..4)); - /// assert_eq!("∈", v.get_unchecked(4..7)); - /// assert_eq!("🌏", v.get_unchecked(7..11)); - /// } - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub unsafe fn get_unchecked<I: SliceIndex<str>>(&self, i: I) -> &I::Output { - core_str::StrExt::get_unchecked(self, i) - } - - /// Returns a mutable, unchecked subslice of `str`. - /// - /// This is the unchecked alternative to indexing the `str`. - /// - /// # Safety - /// - /// Callers of this function are responsible that these preconditions are - /// satisfied: - /// - /// * The starting index must come before the ending index; - /// * Indexes must be within bounds of the original slice; - /// * Indexes must lie on UTF-8 sequence boundaries. - /// - /// Failing that, the returned string slice may reference invalid memory or - /// violate the invariants communicated by the `str` type. - /// - /// # Examples - /// - /// ``` - /// let mut v = String::from("🗻∈🌏"); - /// unsafe { - /// assert_eq!("🗻", v.get_unchecked_mut(0..4)); - /// assert_eq!("∈", v.get_unchecked_mut(4..7)); - /// assert_eq!("🌏", v.get_unchecked_mut(7..11)); - /// } - /// ``` - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - #[inline] - pub unsafe fn get_unchecked_mut<I: SliceIndex<str>>(&mut self, i: I) -> &mut I::Output { - core_str::StrExt::get_unchecked_mut(self, i) - } - - /// Creates a string slice from another string slice, bypassing safety - /// checks. - /// - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`str`] and [`Index`]. - /// - /// [`str`]: primitive.str.html - /// [`Index`]: ops/trait.Index.html - /// - /// This new slice goes from `begin` to `end`, including `begin` but - /// excluding `end`. 
- /// - /// To get a mutable string slice instead, see the - /// [`slice_mut_unchecked`] method. - /// - /// [`slice_mut_unchecked`]: #method.slice_mut_unchecked - /// - /// # Safety - /// - /// Callers of this function are responsible that three preconditions are - /// satisfied: - /// - /// * `begin` must come before `end`. - /// * `begin` and `end` must be byte positions within the string slice. - /// * `begin` and `end` must lie on UTF-8 sequence boundaries. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// unsafe { - /// assert_eq!("Löwe 老虎 Léopard", s.slice_unchecked(0, 21)); - /// } - /// - /// let s = "Hello, world!"; - /// - /// unsafe { - /// assert_eq!("world", s.slice_unchecked(7, 12)); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str { - core_str::StrExt::slice_unchecked(self, begin, end) - } - - /// Creates a string slice from another string slice, bypassing safety - /// checks. - /// This is generally not recommended, use with caution! For a safe - /// alternative see [`str`] and [`IndexMut`]. - /// - /// [`str`]: primitive.str.html - /// [`IndexMut`]: ops/trait.IndexMut.html - /// - /// This new slice goes from `begin` to `end`, including `begin` but - /// excluding `end`. - /// - /// To get an immutable string slice instead, see the - /// [`slice_unchecked`] method. - /// - /// [`slice_unchecked`]: #method.slice_unchecked - /// - /// # Safety - /// - /// Callers of this function are responsible that three preconditions are - /// satisfied: - /// - /// * `begin` must come before `end`. - /// * `begin` and `end` must be byte positions within the string slice. - /// * `begin` and `end` must lie on UTF-8 sequence boundaries. 
- #[stable(feature = "str_slice_mut", since = "1.5.0")] - #[inline] - pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str { - core_str::StrExt::slice_mut_unchecked(self, begin, end) - } - - /// Divide one string slice into two at an index. - /// - /// The argument, `mid`, should be a byte offset from the start of the - /// string. It must also be on the boundary of a UTF-8 code point. - /// - /// The two slices returned go from the start of the string slice to `mid`, - /// and from `mid` to the end of the string slice. - /// - /// To get mutable string slices instead, see the [`split_at_mut`] - /// method. - /// - /// [`split_at_mut`]: #method.split_at_mut - /// - /// # Panics - /// - /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is - /// beyond the last code point of the string slice. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = "Per Martin-Löf"; - /// - /// let (first, last) = s.split_at(3); - /// - /// assert_eq!("Per", first); - /// assert_eq!(" Martin-Löf", last); - /// ``` - #[inline] - #[stable(feature = "str_split_at", since = "1.4.0")] - pub fn split_at(&self, mid: usize) -> (&str, &str) { - core_str::StrExt::split_at(self, mid) - } - - /// Divide one mutable string slice into two at an index. - /// - /// The argument, `mid`, should be a byte offset from the start of the - /// string. It must also be on the boundary of a UTF-8 code point. - /// - /// The two slices returned go from the start of the string slice to `mid`, - /// and from `mid` to the end of the string slice. - /// - /// To get immutable string slices instead, see the [`split_at`] method. - /// - /// [`split_at`]: #method.split_at - /// - /// # Panics - /// - /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is - /// beyond the last code point of the string slice. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut s = "Per Martin-Löf".to_string(); - /// { - /// let (first, last) = s.split_at_mut(3); - /// first.make_ascii_uppercase(); - /// assert_eq!("PER", first); - /// assert_eq!(" Martin-Löf", last); - /// } - /// assert_eq!("PER Martin-Löf", s); - /// ``` - #[inline] - #[stable(feature = "str_split_at", since = "1.4.0")] - pub fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) { - core_str::StrExt::split_at_mut(self, mid) - } - - /// Returns an iterator over the [`char`]s of a string slice. - /// - /// As a string slice consists of valid UTF-8, we can iterate through a - /// string slice by [`char`]. This method returns such an iterator. - /// - /// It's important to remember that [`char`] represents a Unicode Scalar - /// Value, and may not match your idea of what a 'character' is. Iteration - /// over grapheme clusters may be what you actually want. - /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let word = "goodbye"; - /// - /// let count = word.chars().count(); - /// assert_eq!(7, count); - /// - /// let mut chars = word.chars(); - /// - /// assert_eq!(Some('g'), chars.next()); - /// assert_eq!(Some('o'), chars.next()); - /// assert_eq!(Some('o'), chars.next()); - /// assert_eq!(Some('d'), chars.next()); - /// assert_eq!(Some('b'), chars.next()); - /// assert_eq!(Some('y'), chars.next()); - /// assert_eq!(Some('e'), chars.next()); - /// - /// assert_eq!(None, chars.next()); - /// ``` - /// - /// Remember, [`char`]s may not match your human intuition about characters: - /// - /// ``` - /// let y = "y̆"; - /// - /// let mut chars = y.chars(); - /// - /// assert_eq!(Some('y'), chars.next()); // not 'y̆' - /// assert_eq!(Some('\u{0306}'), chars.next()); - /// - /// assert_eq!(None, chars.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn chars(&self) -> Chars { - 
core_str::StrExt::chars(self) - } - /// Returns an iterator over the [`char`]s of a string slice, and their - /// positions. - /// - /// As a string slice consists of valid UTF-8, we can iterate through a - /// string slice by [`char`]. This method returns an iterator of both - /// these [`char`]s, as well as their byte positions. - /// - /// The iterator yields tuples. The position is first, the [`char`] is - /// second. - /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let word = "goodbye"; - /// - /// let count = word.char_indices().count(); - /// assert_eq!(7, count); - /// - /// let mut char_indices = word.char_indices(); - /// - /// assert_eq!(Some((0, 'g')), char_indices.next()); - /// assert_eq!(Some((1, 'o')), char_indices.next()); - /// assert_eq!(Some((2, 'o')), char_indices.next()); - /// assert_eq!(Some((3, 'd')), char_indices.next()); - /// assert_eq!(Some((4, 'b')), char_indices.next()); - /// assert_eq!(Some((5, 'y')), char_indices.next()); - /// assert_eq!(Some((6, 'e')), char_indices.next()); - /// - /// assert_eq!(None, char_indices.next()); - /// ``` - /// - /// Remember, [`char`]s may not match your human intuition about characters: - /// - /// ``` - /// let yes = "y̆es"; - /// - /// let mut char_indices = yes.char_indices(); - /// - /// assert_eq!(Some((0, 'y')), char_indices.next()); // not (0, 'y̆') - /// assert_eq!(Some((1, '\u{0306}')), char_indices.next()); - /// - /// // note the 3 here - the last character took up two bytes - /// assert_eq!(Some((3, 'e')), char_indices.next()); - /// assert_eq!(Some((4, 's')), char_indices.next()); - /// - /// assert_eq!(None, char_indices.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn char_indices(&self) -> CharIndices { - core_str::StrExt::char_indices(self) - } - - /// An iterator over the bytes of a string slice. 
- /// - /// As a string slice consists of a sequence of bytes, we can iterate - /// through a string slice by byte. This method returns such an iterator. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut bytes = "bors".bytes(); - /// - /// assert_eq!(Some(b'b'), bytes.next()); - /// assert_eq!(Some(b'o'), bytes.next()); - /// assert_eq!(Some(b'r'), bytes.next()); - /// assert_eq!(Some(b's'), bytes.next()); - /// - /// assert_eq!(None, bytes.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn bytes(&self) -> Bytes { - core_str::StrExt::bytes(self) - } - - /// Split a string slice by whitespace. - /// - /// The iterator returned will return string slices that are sub-slices of - /// the original string slice, separated by any amount of whitespace. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let mut iter = "A few words".split_whitespace(); - /// - /// assert_eq!(Some("A"), iter.next()); - /// assert_eq!(Some("few"), iter.next()); - /// assert_eq!(Some("words"), iter.next()); - /// - /// assert_eq!(None, iter.next()); - /// ``` - /// - /// All kinds of whitespace are considered: - /// - /// ``` - /// let mut iter = " Mary had\ta\u{2009}little \n\t lamb".split_whitespace(); - /// assert_eq!(Some("Mary"), iter.next()); - /// assert_eq!(Some("had"), iter.next()); - /// assert_eq!(Some("a"), iter.next()); - /// assert_eq!(Some("little"), iter.next()); - /// assert_eq!(Some("lamb"), iter.next()); - /// - /// assert_eq!(None, iter.next()); - /// ``` - #[stable(feature = "split_whitespace", since = "1.1.0")] - #[inline] - pub fn split_whitespace(&self) -> SplitWhitespace { - UnicodeStr::split_whitespace(self) - } - - /// An iterator over the lines of a string, as string slices. 
- /// - /// Lines are ended with either a newline (`\n`) or a carriage return with - /// a line feed (`\r\n`). - /// - /// The final line ending is optional. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let text = "foo\r\nbar\n\nbaz\n"; - /// let mut lines = text.lines(); - /// - /// assert_eq!(Some("foo"), lines.next()); - /// assert_eq!(Some("bar"), lines.next()); - /// assert_eq!(Some(""), lines.next()); - /// assert_eq!(Some("baz"), lines.next()); - /// - /// assert_eq!(None, lines.next()); - /// ``` - /// - /// The final line ending isn't required: - /// - /// ``` - /// let text = "foo\nbar\n\r\nbaz"; - /// let mut lines = text.lines(); - /// - /// assert_eq!(Some("foo"), lines.next()); - /// assert_eq!(Some("bar"), lines.next()); - /// assert_eq!(Some(""), lines.next()); - /// assert_eq!(Some("baz"), lines.next()); - /// - /// assert_eq!(None, lines.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn lines(&self) -> Lines { - core_str::StrExt::lines(self) - } - - /// An iterator over the lines of a string. - #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_deprecated(since = "1.4.0", reason = "use lines() instead now")] - #[inline] - #[allow(deprecated)] - pub fn lines_any(&self) -> LinesAny { - core_str::StrExt::lines_any(self) - } - - /// Returns an iterator of `u16` over the string encoded as UTF-16. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let text = "Zażółć gęślą jaźń"; - /// - /// let utf8_len = text.len(); - /// let utf16_len = text.encode_utf16().count(); - /// - /// assert!(utf16_len <= utf8_len); - /// ``` - #[stable(feature = "encode_utf16", since = "1.8.0")] - pub fn encode_utf16(&self) -> EncodeUtf16 { - EncodeUtf16 { encoder: Utf16Encoder::new(self[..].chars()) } - } - - /// Returns `true` if the given pattern matches a sub-slice of - /// this string slice. - /// - /// Returns `false` if it does not. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bananas = "bananas"; - /// - /// assert!(bananas.contains("nana")); - /// assert!(!bananas.contains("apples")); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { - core_str::StrExt::contains(self, pat) - } - - /// Returns `true` if the given pattern matches a prefix of this - /// string slice. - /// - /// Returns `false` if it does not. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bananas = "bananas"; - /// - /// assert!(bananas.starts_with("bana")); - /// assert!(!bananas.starts_with("nana")); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { - core_str::StrExt::starts_with(self, pat) - } - - /// Returns `true` if the given pattern matches a suffix of this - /// string slice. - /// - /// Returns `false` if it does not. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let bananas = "bananas"; - /// - /// assert!(bananas.ends_with("anas")); - /// assert!(!bananas.ends_with("nana")); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn ends_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::ends_with(self, pat) - } - - /// Returns the byte index of the first character of this string slice that - /// matches the pattern. - /// - /// Returns [`None`] if the pattern doesn't match. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. 
- /// - /// [`char`]: primitive.char.html - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.find('L'), Some(0)); - /// assert_eq!(s.find('é'), Some(14)); - /// assert_eq!(s.find("Léopard"), Some(13)); - /// ``` - /// - /// More complex patterns using point-free style and closures: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.find(char::is_whitespace), Some(5)); - /// assert_eq!(s.find(char::is_lowercase), Some(1)); - /// assert_eq!(s.find(|c: char| c.is_whitespace() || c.is_lowercase()), Some(1)); - /// assert_eq!(s.find(|c: char| (c < 'o') && (c > 'a')), Some(4)); - /// ``` - /// - /// Not finding the pattern: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// let x: &[_] = &['1', '2']; - /// - /// assert_eq!(s.find(x), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> { - core_str::StrExt::find(self, pat) - } - - /// Returns the byte index of the last character of this string slice that - /// matches the pattern. - /// - /// Returns [`None`] if the pattern doesn't match. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. 
- /// - /// [`char`]: primitive.char.html - /// [`None`]: option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.rfind('L'), Some(13)); - /// assert_eq!(s.rfind('é'), Some(14)); - /// ``` - /// - /// More complex patterns with closures: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// - /// assert_eq!(s.rfind(char::is_whitespace), Some(12)); - /// assert_eq!(s.rfind(char::is_lowercase), Some(20)); - /// ``` - /// - /// Not finding the pattern: - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// let x: &[_] = &['1', '2']; - /// - /// assert_eq!(s.rfind(x), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rfind<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rfind(self, pat) - } - - /// An iterator over substrings of this string slice, separated by - /// characters matched by a pattern. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rsplit`] method can be used. 
- /// - /// [`char`]: primitive.char.html - /// [`rsplit`]: #method.rsplit - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect(); - /// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]); - /// - /// let v: Vec<&str> = "".split('X').collect(); - /// assert_eq!(v, [""]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect(); - /// assert_eq!(v, ["lion", "", "tiger", "leopard"]); - /// - /// let v: Vec<&str> = "lion::tiger::leopard".split("::").collect(); - /// assert_eq!(v, ["lion", "tiger", "leopard"]); - /// - /// let v: Vec<&str> = "abc1def2ghi".split(char::is_numeric).collect(); - /// assert_eq!(v, ["abc", "def", "ghi"]); - /// - /// let v: Vec<&str> = "lionXtigerXleopard".split(char::is_uppercase).collect(); - /// assert_eq!(v, ["lion", "tiger", "leopard"]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".split(|c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["abc", "def", "ghi"]); - /// ``` - /// - /// If a string contains multiple contiguous separators, you will end up - /// with empty strings in the output: - /// - /// ``` - /// let x = "||||a||b|c".to_string(); - /// let d: Vec<_> = x.split('|').collect(); - /// - /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); - /// ``` - /// - /// Contiguous separators are separated by the empty string. - /// - /// ``` - /// let x = "(///)".to_string(); - /// let d: Vec<_> = x.split('/').collect(); - /// - /// assert_eq!(d, &["(", "", "", ")"]); - /// ``` - /// - /// Separators at the start or end of a string are neighbored - /// by empty strings. - /// - /// ``` - /// let d: Vec<_> = "010".split("0").collect(); - /// assert_eq!(d, &["", "1", ""]); - /// ``` - /// - /// When the empty string is used as a separator, it separates - /// every character in the string, along with the beginning - /// and end of the string. 
- /// - /// ``` - /// let f: Vec<_> = "rust".split("").collect(); - /// assert_eq!(f, &["", "r", "u", "s", "t", ""]); - /// ``` - /// - /// Contiguous separators can lead to possibly surprising behavior - /// when whitespace is used as the separator. This code is correct: - /// - /// ``` - /// let x = " a b c".to_string(); - /// let d: Vec<_> = x.split(' ').collect(); - /// - /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); - /// ``` - /// - /// It does _not_ give you: - /// - /// ```,ignore - /// assert_eq!(d, &["a", "b", "c"]); - /// ``` - /// - /// Use [`split_whitespace`] for this behavior. - /// - /// [`split_whitespace`]: #method.split_whitespace - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> { - core_str::StrExt::split(self, pat) - } - - /// An iterator over substrings of the given string slice, separated by - /// characters matched by a pattern and yielded in reverse order. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse - /// search yields the same elements. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// For iterating from the front, the [`split`] method can be used. 
- /// - /// [`split`]: #method.split - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lamb".rsplit(' ').collect(); - /// assert_eq!(v, ["lamb", "little", "a", "had", "Mary"]); - /// - /// let v: Vec<&str> = "".rsplit('X').collect(); - /// assert_eq!(v, [""]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".rsplit('X').collect(); - /// assert_eq!(v, ["leopard", "tiger", "", "lion"]); - /// - /// let v: Vec<&str> = "lion::tiger::leopard".rsplit("::").collect(); - /// assert_eq!(v, ["leopard", "tiger", "lion"]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".rsplit(|c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["ghi", "def", "abc"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplit<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplit<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rsplit(self, pat) - } - - /// An iterator over substrings of the given string slice, separated by - /// characters matched by a pattern. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// Equivalent to [`split`], except that the trailing substring - /// is skipped if empty. - /// - /// [`split`]: #method.split - /// - /// This method can be used for string data that is _terminated_, - /// rather than _separated_ by a pattern. - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. 
- /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// [`char`]: primitive.char.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rsplit_terminator`] method can be used. - /// - /// [`rsplit_terminator`]: #method.rsplit_terminator - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<&str> = "A.B.".split_terminator('.').collect(); - /// assert_eq!(v, ["A", "B"]); - /// - /// let v: Vec<&str> = "A..B..".split_terminator(".").collect(); - /// assert_eq!(v, ["A", "", "B", ""]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P> { - core_str::StrExt::split_terminator(self, pat) - } - - /// An iterator over substrings of `self`, separated by characters - /// matched by a pattern and yielded in reverse order. - /// - /// The pattern can be a simple `&str`, [`char`], or a closure that - /// determines the split. - /// Additional libraries might provide more complex patterns like - /// regular expressions. - /// - /// [`char`]: primitive.char.html - /// - /// Equivalent to [`split`], except that the trailing substring is - /// skipped if empty. - /// - /// [`split`]: #method.split - /// - /// This method can be used for string data that is _terminated_, - /// rather than _separated_ by a pattern. - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a - /// reverse search, and it will be double ended if a forward/reverse - /// search yields the same elements. - /// - /// For iterating from the front, the [`split_terminator`] method can be - /// used. 
- /// - /// [`split_terminator`]: #method.split_terminator - /// - /// # Examples - /// - /// ``` - /// let v: Vec<&str> = "A.B.".rsplit_terminator('.').collect(); - /// assert_eq!(v, ["B", "A"]); - /// - /// let v: Vec<&str> = "A..B..".rsplit_terminator(".").collect(); - /// assert_eq!(v, ["", "B", "", "A"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplit_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplitTerminator<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rsplit_terminator(self, pat) - } - - /// An iterator over substrings of the given string slice, separated by a - /// pattern, restricted to returning at most `n` items. - /// - /// If `n` substrings are returned, the last substring (the `n`th substring) - /// will contain the remainder of the string. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will not be double ended, because it is - /// not efficient to support. - /// - /// If the pattern allows a reverse search, the [`rsplitn`] method can be - /// used. 
- /// - /// [`rsplitn`]: #method.rsplitn - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lambda".splitn(3, ' ').collect(); - /// assert_eq!(v, ["Mary", "had", "a little lambda"]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".splitn(3, "X").collect(); - /// assert_eq!(v, ["lion", "", "tigerXleopard"]); - /// - /// let v: Vec<&str> = "abcXdef".splitn(1, 'X').collect(); - /// assert_eq!(v, ["abcXdef"]); - /// - /// let v: Vec<&str> = "".splitn(1, 'X').collect(); - /// assert_eq!(v, [""]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".splitn(2, |c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["abc", "defXghi"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> { - core_str::StrExt::splitn(self, n, pat) - } - - /// An iterator over substrings of this string slice, separated by a - /// pattern, starting from the end of the string, restricted to returning - /// at most `n` items. - /// - /// If `n` substrings are returned, the last substring (the `n`th substring) - /// will contain the remainder of the string. - /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines the split. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will not be double ended, because it is not - /// efficient to support. - /// - /// For splitting from the front, the [`splitn`] method can be used. 
- /// - /// [`splitn`]: #method.splitn - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// let v: Vec<&str> = "Mary had a little lamb".rsplitn(3, ' ').collect(); - /// assert_eq!(v, ["lamb", "little", "Mary had a"]); - /// - /// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(3, 'X').collect(); - /// assert_eq!(v, ["leopard", "tiger", "lionX"]); - /// - /// let v: Vec<&str> = "lion::tiger::leopard".rsplitn(2, "::").collect(); - /// assert_eq!(v, ["leopard", "lion::tiger"]); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// let v: Vec<&str> = "abc1defXghi".rsplitn(2, |c| c == '1' || c == 'X').collect(); - /// assert_eq!(v, ["ghi", "abc1def"]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> RSplitN<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rsplitn(self, n, pat) - } - - /// An iterator over the disjoint matches of a pattern within the given string - /// slice. - /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines if a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// [`char`]: primitive.char.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rmatches`] method can be used. 
- /// - /// [`rmatches`]: #method.rmatches - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<&str> = "abcXXXabcYYYabc".matches("abc").collect(); - /// assert_eq!(v, ["abc", "abc", "abc"]); - /// - /// let v: Vec<&str> = "1abc2abc3".matches(char::is_numeric).collect(); - /// assert_eq!(v, ["1", "2", "3"]); - /// ``` - #[stable(feature = "str_matches", since = "1.2.0")] - #[inline] - pub fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P> { - core_str::StrExt::matches(self, pat) - } - - /// An iterator over the disjoint matches of a pattern within this string slice, - /// yielded in reverse order. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse - /// search yields the same elements. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// For iterating from the front, the [`matches`] method can be used. - /// - /// [`matches`]: #method.matches - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<&str> = "abcXXXabcYYYabc".rmatches("abc").collect(); - /// assert_eq!(v, ["abc", "abc", "abc"]); - /// - /// let v: Vec<&str> = "1abc2abc3".rmatches(char::is_numeric).collect(); - /// assert_eq!(v, ["3", "2", "1"]); - /// ``` - #[stable(feature = "str_matches", since = "1.2.0")] - #[inline] - pub fn rmatches<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatches<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rmatches(self, pat) - } - - /// An iterator over the disjoint matches of a pattern within this string - /// slice as well as the index that the match starts at. 
- /// - /// For matches of `pat` within `self` that overlap, only the indices - /// corresponding to the first match are returned. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines - /// if a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern - /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// If the pattern allows a reverse search but its results might differ - /// from a forward search, the [`rmatch_indices`] method can be used. - /// - /// [`rmatch_indices`]: #method.rmatch_indices - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<_> = "abcXXXabcYYYabc".match_indices("abc").collect(); - /// assert_eq!(v, [(0, "abc"), (6, "abc"), (12, "abc")]); - /// - /// let v: Vec<_> = "1abcabc2".match_indices("abc").collect(); - /// assert_eq!(v, [(1, "abc"), (4, "abc")]); - /// - /// let v: Vec<_> = "ababa".match_indices("aba").collect(); - /// assert_eq!(v, [(0, "aba")]); // only the first `aba` - /// ``` - #[stable(feature = "str_match_indices", since = "1.5.0")] - #[inline] - pub fn match_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> MatchIndices<'a, P> { - core_str::StrExt::match_indices(self, pat) - } - - /// An iterator over the disjoint matches of a pattern within `self`, - /// yielded in reverse order along with the index of the match. - /// - /// For matches of `pat` within `self` that overlap, only the indices - /// corresponding to the last match are returned. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if a - /// character matches. 
- /// - /// [`char`]: primitive.char.html - /// - /// # Iterator behavior - /// - /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse - /// search yields the same elements. - /// - /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html - /// - /// For iterating from the front, the [`match_indices`] method can be used. - /// - /// [`match_indices`]: #method.match_indices - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v: Vec<_> = "abcXXXabcYYYabc".rmatch_indices("abc").collect(); - /// assert_eq!(v, [(12, "abc"), (6, "abc"), (0, "abc")]); - /// - /// let v: Vec<_> = "1abcabc2".rmatch_indices("abc").collect(); - /// assert_eq!(v, [(4, "abc"), (1, "abc")]); - /// - /// let v: Vec<_> = "ababa".rmatch_indices("aba").collect(); - /// assert_eq!(v, [(2, "aba")]); // only the last `aba` - /// ``` - #[stable(feature = "str_match_indices", since = "1.5.0")] - #[inline] - pub fn rmatch_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatchIndices<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::rmatch_indices(self, pat) - } - - /// Returns a string slice with leading and trailing whitespace removed. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = " Hello\tworld\t"; - /// - /// assert_eq!("Hello\tworld", s.trim()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim(&self) -> &str { - UnicodeStr::trim(self) - } - - /// Returns a string slice with leading whitespace removed. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 
'Left' in this context means the first - /// position of that byte string; for a language like Arabic or Hebrew - /// which are 'right to left' rather than 'left to right', this will be - /// the _right_ side, not the left. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = " Hello\tworld\t"; - /// - /// assert_eq!("Hello\tworld\t", s.trim_left()); - /// ``` - /// - /// Directionality: - /// - /// ``` - /// let s = " English"; - /// assert!(Some('E') == s.trim_left().chars().next()); - /// - /// let s = " עברית"; - /// assert!(Some('ע') == s.trim_left().chars().next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_left(&self) -> &str { - UnicodeStr::trim_left(self) - } - - /// Returns a string slice with trailing whitespace removed. - /// - /// 'Whitespace' is defined according to the terms of the Unicode Derived - /// Core Property `White_Space`. - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Right' in this context means the last - /// position of that byte string; for a language like Arabic or Hebrew - /// which are 'right to left' rather than 'left to right', this will be - /// the _left_ side, not the right. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let s = " Hello\tworld\t"; - /// - /// assert_eq!(" Hello\tworld", s.trim_right()); - /// ``` - /// - /// Directionality: - /// - /// ``` - /// let s = "English "; - /// assert!(Some('h') == s.trim_right().chars().rev().next()); - /// - /// let s = "עברית "; - /// assert!(Some('ת') == s.trim_right().chars().rev().next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_right(&self) -> &str { - UnicodeStr::trim_right(self) - } - - /// Returns a string slice with all prefixes and suffixes that match a - /// pattern repeatedly removed. - /// - /// The pattern can be a [`char`] or a closure that determines if a - /// character matches. 
- /// - /// [`char`]: primitive.char.html - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar"); - /// assert_eq!("123foo1bar123".trim_matches(char::is_numeric), "foo1bar"); - /// - /// let x: &[_] = &['1', '2']; - /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar"); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// assert_eq!("1foo1barXX".trim_matches(|c| c == '1' || c == 'X'), "foo1bar"); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str - where P::Searcher: DoubleEndedSearcher<'a> - { - core_str::StrExt::trim_matches(self, pat) - } - - /// Returns a string slice with all prefixes that match a pattern - /// repeatedly removed. - /// - /// The pattern can be a `&str`, [`char`], or a closure that determines if - /// a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Left' in this context means the first - /// position of that byte string; for a language like Arabic or Hebrew - /// which are 'right to left' rather than 'left to right', this will be - /// the _right_ side, not the left. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11"); - /// assert_eq!("123foo1bar123".trim_left_matches(char::is_numeric), "foo1bar123"); - /// - /// let x: &[_] = &['1', '2']; - /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12"); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str { - core_str::StrExt::trim_left_matches(self, pat) - } - - /// Returns a string slice with all suffixes that match a pattern - /// repeatedly removed. 
- /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines if a character matches. - /// - /// [`char`]: primitive.char.html - /// - /// # Text directionality - /// - /// A string is a sequence of bytes. 'Right' in this context means the last - /// position of that byte string; for a language like Arabic or Hebrew - /// which are 'right to left' rather than 'left to right', this will be - /// the _left_ side, not the right. - /// - /// # Examples - /// - /// Simple patterns: - /// - /// ``` - /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar"); - /// assert_eq!("123foo1bar123".trim_right_matches(char::is_numeric), "123foo1bar"); - /// - /// let x: &[_] = &['1', '2']; - /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar"); - /// ``` - /// - /// A more complex pattern, using a closure: - /// - /// ``` - /// assert_eq!("1fooX".trim_right_matches(|c| c == '1' || c == 'X'), "1foo"); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str - where P::Searcher: ReverseSearcher<'a> - { - core_str::StrExt::trim_right_matches(self, pat) - } - - /// Parses this string slice into another type. - /// - /// Because `parse` is so general, it can cause problems with type - /// inference. As such, `parse` is one of the few times you'll see - /// the syntax affectionately known as the 'turbofish': `::<>`. This - /// helps the inference algorithm understand specifically which type - /// you're trying to parse into. - /// - /// `parse` can parse any type that implements the [`FromStr`] trait. - /// - /// [`FromStr`]: str/trait.FromStr.html - /// - /// # Errors - /// - /// Will return [`Err`] if it's not possible to parse this string slice into - /// the desired type. 
- /// - /// [`Err`]: str/trait.FromStr.html#associatedtype.Err - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// let four: u32 = "4".parse().unwrap(); - /// - /// assert_eq!(4, four); - /// ``` - /// - /// Using the 'turbofish' instead of annotating `four`: - /// - /// ``` - /// let four = "4".parse::(); - /// - /// assert_eq!(Ok(4), four); - /// ``` - /// - /// Failing to parse: - /// - /// ``` - /// let nope = "j".parse::(); - /// - /// assert!(nope.is_err()); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn parse(&self) -> Result { - core_str::StrExt::parse(self) - } - /// Converts a `Box` into a `Box<[u8]>` without copying or allocating. /// /// # Examples @@ -1810,6 +231,7 @@ impl str { /// assert_eq!(*boxed_bytes, *s.as_bytes()); /// ``` #[stable(feature = "str_box_extras", since = "1.20.0")] + #[inline] pub fn into_boxed_bytes(self: Box) -> Box<[u8]> { self.into() } @@ -1838,17 +260,19 @@ impl str { /// let s = "this is old"; /// assert_eq!(s, s.replace("cookie monster", "little lamb")); /// ``` + #[must_use = "this returns the replaced string as a new allocation, \ + without modifying the original"] #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String { let mut result = String::new(); let mut last_end = 0; for (start, part) in self.match_indices(from) { - result.push_str(unsafe { self.slice_unchecked(last_end, start) }); + result.push_str(unsafe { self.get_unchecked(last_end..start) }); result.push_str(to); last_end = start + part.len(); } - result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) }); + result.push_str(unsafe { self.get_unchecked(last_end..self.len()) }); result } @@ -1877,17 +301,19 @@ impl str { /// let s = "this is old"; /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10)); /// ``` + #[must_use = "this returns the replaced string as a new allocation, \ + without modifying the original"] 
#[stable(feature = "str_replacen", since = "1.16.0")] pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String { // Hope to reduce the times of re-allocation let mut result = String::with_capacity(32); let mut last_end = 0; for (start, part) in self.match_indices(pat).take(count) { - result.push_str(unsafe { self.slice_unchecked(last_end, start) }); + result.push_str(unsafe { self.get_unchecked(last_end..start) }); result.push_str(to); last_end = start + part.len(); } - result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) }); + result.push_str(unsafe { self.get_unchecked(last_end..self.len()) }); result } @@ -1944,7 +370,18 @@ impl str { // See https://github.com/rust-lang/rust/issues/26035 map_uppercase_sigma(self, i, &mut s) } else { - s.extend(c.to_lowercase()); + match conversions::to_lower(c) { + [a, '\0', _] => s.push(a), + [a, b, '\0'] => { + s.push(a); + s.push(b); + } + [a, b, c] => { + s.push(a); + s.push(b); + s.push(c); + } + } } } return s; @@ -1959,7 +396,7 @@ impl str { } fn case_ignoreable_then_cased>(iter: I) -> bool { - use std_unicode::derived_property::{Cased, Case_Ignorable}; + use core::unicode::derived_property::{Cased, Case_Ignorable}; match iter.skip_while(|&c| Case_Ignorable(c)).next() { Some(c) => Cased(c), None => false, @@ -1998,18 +435,40 @@ impl str { #[stable(feature = "unicode_case_mapping", since = "1.2.0")] pub fn to_uppercase(&self) -> String { let mut s = String::with_capacity(self.len()); - s.extend(self.chars().flat_map(|c| c.to_uppercase())); + for c in self[..].chars() { + match conversions::to_upper(c) { + [a, '\0', _] => s.push(a), + [a, b, '\0'] => { + s.push(a); + s.push(b); + } + [a, b, c] => { + s.push(a); + s.push(b); + s.push(c); + } + } + } return s; } /// Escapes each char in `s` with [`char::escape_debug`]. /// + /// Note: only extended grapheme codepoints that begin the string will be + /// escaped. 
+ /// /// [`char::escape_debug`]: primitive.char.html#method.escape_debug #[unstable(feature = "str_escape", reason = "return type may change to be an iterator", issue = "27791")] pub fn escape_debug(&self) -> String { - self.chars().flat_map(|c| c.escape_debug()).collect() + let mut string = String::with_capacity(self.len()); + let mut chars = self.chars(); + if let Some(first) = chars.next() { + string.extend(first.escape_debug_ext(true)) + } + string.extend(chars.flat_map(|c| c.escape_debug_ext(false))); + string } /// Escapes each char in `s` with [`char::escape_default`]. @@ -2048,6 +507,7 @@ impl str { /// assert_eq!(boxed_str.into_string(), string); /// ``` #[stable(feature = "box_str", since = "1.4.0")] + #[inline] pub fn into_string(self: Box) -> String { let slice = Box::<[u8]>::from(self); unsafe { String::from_utf8_unchecked(slice.into_vec()) } @@ -2066,29 +526,7 @@ impl str { /// ``` #[stable(feature = "repeat_str", since = "1.16.0")] pub fn repeat(&self, n: usize) -> String { - let mut s = String::with_capacity(self.len() * n); - s.extend((0..n).map(|_| self)); - s - } - - /// Checks if all characters in this string are within the ASCII range. - /// - /// # Examples - /// - /// ``` - /// let ascii = "hello!\n"; - /// let non_ascii = "Grüße, Jürgen ❤"; - /// - /// assert!(ascii.is_ascii()); - /// assert!(!non_ascii.is_ascii()); - /// ``` - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn is_ascii(&self) -> bool { - // We can treat each byte as character here: all multibyte characters - // start with a byte that is not in the ascii range, so we will stop - // there already. - self.bytes().all(|b| b.is_ascii()) + unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) } } /// Returns a copy of this string where each character is mapped to its @@ -2150,54 +588,6 @@ impl str { // make_ascii_lowercase() preserves the UTF-8 invariant. 
unsafe { String::from_utf8_unchecked(bytes) } } - - /// Checks that two strings are an ASCII case-insensitive match. - /// - /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`, - /// but without allocating and copying temporaries. - /// - /// # Examples - /// - /// ``` - /// assert!("Ferris".eq_ignore_ascii_case("FERRIS")); - /// assert!("Ferrös".eq_ignore_ascii_case("FERRöS")); - /// assert!(!"Ferrös".eq_ignore_ascii_case("FERRÖS")); - /// ``` - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - #[inline] - pub fn eq_ignore_ascii_case(&self, other: &str) -> bool { - self.as_bytes().eq_ignore_ascii_case(other.as_bytes()) - } - - /// Converts this string to its ASCII upper case equivalent in-place. - /// - /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new uppercased value without modifying the existing one, use - /// [`to_ascii_uppercase`]. - /// - /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - pub fn make_ascii_uppercase(&mut self) { - let me = unsafe { self.as_bytes_mut() }; - me.make_ascii_uppercase() - } - - /// Converts this string to its ASCII lower case equivalent in-place. - /// - /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', - /// but non-ASCII letters are unchanged. - /// - /// To return a new lowercased value without modifying the existing one, use - /// [`to_ascii_lowercase`]. 
- /// - /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase - #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] - pub fn make_ascii_lowercase(&mut self) { - let me = unsafe { self.as_bytes_mut() }; - me.make_ascii_lowercase() - } } /// Converts a boxed slice of bytes to a boxed string slice without checking @@ -2214,6 +604,7 @@ impl str { /// assert_eq!("☺", &*smile); /// ``` #[stable(feature = "str_box_extras", since = "1.20.0")] +#[inline] pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box { Box::from_raw(Box::into_raw(v) as *mut str) } diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs index 8d99d0bc8f4d..dd559df08cce 100644 --- a/src/liballoc/string.rs +++ b/src/liballoc/string.rs @@ -56,21 +56,21 @@ #![stable(feature = "rust1", since = "1.0.0")] +use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; use core::fmt; use core::hash; use core::iter::{FromIterator, FusedIterator}; -use core::ops::{self, Add, AddAssign, Index, IndexMut}; +use core::ops::Bound::{Excluded, Included, Unbounded}; +use core::ops::{self, Add, AddAssign, Index, IndexMut, RangeBounds}; use core::ptr; use core::str::pattern::Pattern; -use std_unicode::lossy; -use std_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER}; +use core::str::lossy; +use collections::CollectionAllocErr; use borrow::{Cow, ToOwned}; -use range::RangeArgument; -use Bound::{Excluded, Included, Unbounded}; +use boxed::Box; use str::{self, from_boxed_utf8_unchecked, FromStr, Utf8Error, Chars}; use vec::Vec; -use boxed::Box; /// A UTF-8 encoded, growable string. /// @@ -364,7 +364,7 @@ impl String { /// /// Given that the `String` is empty, this will not allocate any initial /// buffer. While that means that this initial operation is very - /// inexpensive, but may cause excessive allocation later, when you add + /// inexpensive, it may cause excessive allocation later when you add /// data. 
If you have an idea of how much data the `String` will hold, /// consider the [`with_capacity`] method to prevent excessive /// re-allocation. @@ -380,7 +380,8 @@ impl String { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new() -> String { + #[rustc_const_unstable(feature = "const_string_new")] + pub const fn new() -> String { String { vec: Vec::new() } } @@ -518,10 +519,11 @@ impl String { /// between the two. Not all byte slices are valid strings, however: strings /// are required to be valid UTF-8. During this conversion, /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with - /// `U+FFFD REPLACEMENT CHARACTER`, which looks like this: � + /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: � /// /// [`u8`]: ../../std/primitive.u8.html /// [byteslice]: ../../std/primitive.slice.html + /// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html /// /// If you are sure that the byte slice is valid UTF-8, and you don't want /// to incur the overhead of the conversion, there is an unsafe version @@ -620,7 +622,7 @@ impl String { } /// Decode a UTF-16 encoded slice `v` into a `String`, replacing - /// invalid data with the replacement character (U+FFFD). + /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD]. /// /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`], /// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8 @@ -628,6 +630,7 @@ impl String { /// /// [`from_utf8_lossy`]: #method.from_utf8_lossy /// [`Cow<'a, str>`]: ../borrow/enum.Cow.html + /// [U+FFFD]: ../char/constant.REPLACEMENT_CHARACTER.html /// /// # Examples /// @@ -920,6 +923,79 @@ impl String { self.vec.reserve_exact(additional) } + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `String`. The collection may reserve more space to avoid + /// frequent reallocations. 
After calling `reserve`, capacity will be + /// greater than or equal to `self.len() + additional`. Does nothing if + /// capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &str) -> Result { + /// let mut output = String::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.push_str(data); + /// + /// Ok(output) + /// } + /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.vec.try_reserve(additional) + } + + /// Tries to reserves the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `String`. After calling `reserve_exact`, + /// capacity will be greater than or equal to `self.len() + additional`. + /// Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity can not be relied upon to be precisely + /// minimal. Prefer `reserve` if future insertions are expected. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &str) -> Result { + /// let mut output = String::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.push_str(data); + /// + /// Ok(output) + /// } + /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.vec.try_reserve_exact(additional) + } + /// Shrinks the capacity of this `String` to match its length. /// /// # Examples @@ -941,6 +1017,34 @@ impl String { self.vec.shrink_to_fit() } + /// Shrinks the capacity of this `String` with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// let mut s = String::from("foo"); + /// + /// s.reserve(100); + /// assert!(s.capacity() >= 100); + /// + /// s.shrink_to(10); + /// assert!(s.capacity() >= 10); + /// s.shrink_to(0); + /// assert!(s.capacity() >= 3); + /// ``` + #[inline] + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.vec.shrink_to(min_capacity) + } + /// Appends the given [`char`] to the end of this `String`. 
/// /// [`char`]: ../../std/primitive.char.html @@ -1103,8 +1207,6 @@ impl String { /// # Examples /// /// ``` - /// #![feature(string_retain)] - /// /// let mut s = String::from("f_o_ob_ar"); /// /// s.retain(|c| c != '_'); @@ -1112,7 +1214,7 @@ impl String { /// assert_eq!(s, "foobar"); /// ``` #[inline] - #[unstable(feature = "string_retain", issue = "43874")] + #[stable(feature = "string_retain", since = "1.26.0")] pub fn retain(&mut self, mut f: F) where F: FnMut(char) -> bool { @@ -1122,7 +1224,7 @@ impl String { while idx < len { let ch = unsafe { - self.slice_unchecked(idx, len).chars().next().unwrap() + self.get_unchecked(idx..len).chars().next().unwrap() }; let ch_len = ch.len_utf8(); @@ -1384,7 +1486,7 @@ impl String { /// ``` #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self, range: R) -> Drain - where R: RangeArgument + where R: RangeBounds { // Memory safety // @@ -1393,12 +1495,12 @@ impl String { // Because the range removal happens in Drop, if the Drain iterator is leaked, // the removal will not happen. let len = self.len(); - let start = match range.start() { + let start = match range.start_bound() { Included(&n) => n, Excluded(&n) => n + 1, Unbounded => 0, }; - let end = match range.end() { + let end = match range.end_bound() { Included(&n) => n + 1, Excluded(&n) => n, Unbounded => len, @@ -1418,13 +1520,10 @@ impl String { } } - /// Creates a splicing iterator that removes the specified range in the string, + /// Removes the specified range in the string, /// and replaces it with the given string. /// The given string doesn't need to be the same length as the range. /// - /// Note: Unlike [`Vec::splice`], the replacement happens eagerly, and this - /// method does not return the removed chars. 
- /// /// # Panics /// /// Panics if the starting point or end point do not lie on a [`char`] @@ -1438,29 +1537,28 @@ impl String { /// Basic usage: /// /// ``` - /// #![feature(splice)] /// let mut s = String::from("α is alpha, β is beta"); /// let beta_offset = s.find('β').unwrap_or(s.len()); /// /// // Replace the range up until the β from the string - /// s.splice(..beta_offset, "Α is capital alpha; "); + /// s.replace_range(..beta_offset, "Α is capital alpha; "); /// assert_eq!(s, "Α is capital alpha; β is beta"); /// ``` - #[unstable(feature = "splice", reason = "recently added", issue = "44643")] - pub fn splice(&mut self, range: R, replace_with: &str) - where R: RangeArgument + #[stable(feature = "splice", since = "1.27.0")] + pub fn replace_range(&mut self, range: R, replace_with: &str) + where R: RangeBounds { // Memory safety // - // The String version of Splice does not have the memory safety issues + // Replace_range does not have the memory safety issues of a vector Splice. // of the vector version. The data is just plain bytes. 
- match range.start() { + match range.start_bound() { Included(&n) => assert!(self.is_char_boundary(n)), Excluded(&n) => assert!(self.is_char_boundary(n + 1)), Unbounded => {}, }; - match range.end() { + match range.end_bound() { Included(&n) => assert!(self.is_char_boundary(n + 1)), Excluded(&n) => assert!(self.is_char_boundary(n)), Unbounded => {}, @@ -1488,6 +1586,7 @@ impl String { /// let b = s.into_boxed_str(); /// ``` #[stable(feature = "box_str", since = "1.4.0")] + #[inline] pub fn into_boxed_str(self) -> Box { let slice = self.vec.into_boxed_slice(); unsafe { from_boxed_utf8_unchecked(slice) } @@ -1502,7 +1601,6 @@ impl FromUtf8Error { /// Basic usage: /// /// ``` - /// #![feature(from_utf8_error_as_bytes)] /// // some invalid bytes, in a vector /// let bytes = vec![0, 159]; /// @@ -1510,7 +1608,7 @@ impl FromUtf8Error { /// /// assert_eq!(&[0, 159], value.unwrap_err().as_bytes()); /// ``` - #[unstable(feature = "from_utf8_error_as_bytes", reason = "recently added", issue = "40895")] + #[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")] pub fn as_bytes(&self) -> &[u8] { &self.bytes[..] 
} @@ -1876,7 +1974,7 @@ impl ops::Index for String { unsafe { str::from_utf8_unchecked(&self.vec) } } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index> for String { type Output = str; @@ -1885,7 +1983,7 @@ impl ops::Index> for String { Index::index(&**self, index) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index> for String { type Output = str; @@ -1923,14 +2021,14 @@ impl ops::IndexMut for String { unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) } } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut> for String { #[inline] fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut str { IndexMut::index_mut(&mut **self, index) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut> for String { #[inline] fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut str { @@ -2144,6 +2242,14 @@ impl<'a> From for Cow<'a, str> { } } +#[stable(feature = "cow_from_string_ref", since = "1.28.0")] +impl<'a> From<&'a String> for Cow<'a, str> { + #[inline] + fn from(s: &'a String) -> Cow<'a, str> { + Cow::Borrowed(s.as_str()) + } +} + #[stable(feature = "cow_str_from_iter", since = "1.12.0")] impl<'a> FromIterator for Cow<'a, str> { fn from_iter>(it: I) -> Cow<'a, str> { @@ -2254,5 +2360,5 @@ impl<'a> DoubleEndedIterator for Drain<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for Drain<'a> {} diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs new file mode 100644 index 
000000000000..a00b6b4e435f --- /dev/null +++ b/src/liballoc/sync.rs @@ -0,0 +1,1944 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![stable(feature = "rust1", since = "1.0.0")] + +//! Thread-safe reference-counting pointers. +//! +//! See the [`Arc`][arc] documentation for more details. +//! +//! [arc]: struct.Arc.html + +use core::any::Any; +use core::sync::atomic; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; +use core::borrow; +use core::fmt; +use core::cmp::Ordering; +use core::intrinsics::abort; +use core::mem::{self, align_of_val, size_of_val}; +use core::ops::Deref; +use core::ops::CoerceUnsized; +use core::ptr::{self, NonNull}; +use core::marker::{Unsize, PhantomData}; +use core::hash::{Hash, Hasher}; +use core::{isize, usize}; +use core::convert::From; + +use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; +use boxed::Box; +use rc::is_dangling; +use string::String; +use vec::Vec; + +/// A soft limit on the amount of references that may be made to an `Arc`. +/// +/// Going above this limit will abort your program (although not +/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. +const MAX_REFCOUNT: usize = (isize::MAX) as usize; + +/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically +/// Reference Counted'. +/// +/// The type `Arc` provides shared ownership of a value of type `T`, +/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces +/// a new pointer to the same value in the heap. When the last `Arc` +/// pointer to a given value is destroyed, the pointed-to value is +/// also destroyed. 
+/// +/// Shared references in Rust disallow mutation by default, and `Arc` is no +/// exception: you cannot generally obtain a mutable reference to something +/// inside an `Arc`. If you need to mutate through an `Arc`, use +/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic] +/// types. +/// +/// ## Thread Safety +/// +/// Unlike [`Rc`], `Arc` uses atomic operations for its reference +/// counting. This means that it is thread-safe. The disadvantage is that +/// atomic operations are more expensive than ordinary memory accesses. If you +/// are not sharing reference-counted values between threads, consider using +/// [`Rc`] for lower overhead. [`Rc`] is a safe default, because the +/// compiler will catch any attempt to send an [`Rc`] between threads. +/// However, a library might choose `Arc` in order to give library consumers +/// more flexibility. +/// +/// `Arc` will implement [`Send`] and [`Sync`] as long as the `T` implements +/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an +/// `Arc` to make it thread-safe? This may be a bit counter-intuitive at +/// first: after all, isn't the point of `Arc` thread safety? The key is +/// this: `Arc` makes it thread safe to have multiple ownership of the same +/// data, but it doesn't add thread safety to its data. Consider +/// `Arc<`[`RefCell`]`>`. [`RefCell`] isn't [`Sync`], and if `Arc` was always +/// [`Send`], `Arc<`[`RefCell`]`>` would be as well. But then we'd have a problem: +/// [`RefCell`] is not thread safe; it keeps track of the borrowing count using +/// non-atomic operations. +/// +/// In the end, this means that you may need to pair `Arc` with some sort of +/// [`std::sync`] type, usually [`Mutex`][mutex]. +/// +/// ## Breaking cycles with `Weak` +/// +/// The [`downgrade`][downgrade] method can be used to create a non-owning +/// [`Weak`][weak] pointer. 
A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d +/// to an `Arc`, but this will return [`None`] if the value has already been +/// dropped. +/// +/// A cycle between `Arc` pointers will never be deallocated. For this reason, +/// [`Weak`][weak] is used to break cycles. For example, a tree could have +/// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak] +/// pointers from children back to their parents. +/// +/// # Cloning references +/// +/// Creating a new reference from an existing reference counted pointer is done using the +/// `Clone` trait implemented for [`Arc`][arc] and [`Weak`][weak]. +/// +/// ``` +/// use std::sync::Arc; +/// let foo = Arc::new(vec![1.0, 2.0, 3.0]); +/// // The two syntaxes below are equivalent. +/// let a = foo.clone(); +/// let b = Arc::clone(&foo); +/// // a and b both point to the same memory location as foo. +/// ``` +/// +/// The [`Arc::clone(&from)`] syntax is the most idiomatic because it conveys more explicitly +/// the meaning of the code. In the example above, this syntax makes it easier to see that +/// this code is creating a new reference rather than copying the whole content of foo. +/// +/// ## `Deref` behavior +/// +/// `Arc` automatically dereferences to `T` (via the [`Deref`][deref] trait), +/// so you can call `T`'s methods on a value of type `Arc`. To avoid name +/// clashes with `T`'s methods, the methods of `Arc` itself are [associated +/// functions][assoc], called using function-like syntax: +/// +/// ``` +/// use std::sync::Arc; +/// let my_arc = Arc::new(()); +/// +/// Arc::downgrade(&my_arc); +/// ``` +/// +/// [`Weak`][weak] does not auto-dereference to `T`, because the value may have +/// already been destroyed. 
+/// +/// [arc]: struct.Arc.html +/// [weak]: struct.Weak.html +/// [`Rc`]: ../../std/rc/struct.Rc.html +/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone +/// [mutex]: ../../std/sync/struct.Mutex.html +/// [rwlock]: ../../std/sync/struct.RwLock.html +/// [atomic]: ../../std/sync/atomic/index.html +/// [`Send`]: ../../std/marker/trait.Send.html +/// [`Sync`]: ../../std/marker/trait.Sync.html +/// [deref]: ../../std/ops/trait.Deref.html +/// [downgrade]: struct.Arc.html#method.downgrade +/// [upgrade]: struct.Weak.html#method.upgrade +/// [`None`]: ../../std/option/enum.Option.html#variant.None +/// [assoc]: ../../book/first-edition/method-syntax.html#associated-functions +/// [`RefCell`]: ../../std/cell/struct.RefCell.html +/// [`std::sync`]: ../../std/sync/index.html +/// [`Arc::clone(&from)`]: #method.clone +/// +/// # Examples +/// +/// Sharing some immutable data between threads: +/// +// Note that we **do not** run these tests here. The windows builders get super +// unhappy if a thread outlives the main thread and then exits at the same time +// (something deadlocks) so we just avoid this entirely by not running these +// tests. 
+/// ```no_run +/// use std::sync::Arc; +/// use std::thread; +/// +/// let five = Arc::new(5); +/// +/// for _ in 0..10 { +/// let five = Arc::clone(&five); +/// +/// thread::spawn(move || { +/// println!("{:?}", five); +/// }); +/// } +/// ``` +/// +/// Sharing a mutable [`AtomicUsize`]: +/// +/// [`AtomicUsize`]: ../../std/sync/atomic/struct.AtomicUsize.html +/// +/// ```no_run +/// use std::sync::Arc; +/// use std::sync::atomic::{AtomicUsize, Ordering}; +/// use std::thread; +/// +/// let val = Arc::new(AtomicUsize::new(5)); +/// +/// for _ in 0..10 { +/// let val = Arc::clone(&val); +/// +/// thread::spawn(move || { +/// let v = val.fetch_add(1, Ordering::SeqCst); +/// println!("{:?}", v); +/// }); +/// } +/// ``` +/// +/// See the [`rc` documentation][rc_examples] for more examples of reference +/// counting in general. +/// +/// [rc_examples]: ../../std/rc/index.html#examples +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Arc { + ptr: NonNull>, + phantom: PhantomData, +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Send for Arc {} +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Sync for Arc {} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U: ?Sized> CoerceUnsized> for Arc {} + +/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the +/// managed value. The value is accessed by calling [`upgrade`] on the `Weak` +/// pointer, which returns an [`Option`]`<`[`Arc`]`>`. +/// +/// Since a `Weak` reference does not count towards ownership, it will not +/// prevent the inner value from being dropped, and `Weak` itself makes no +/// guarantees about the value still being present and may return [`None`] +/// when [`upgrade`]d. +/// +/// A `Weak` pointer is useful for keeping a temporary reference to the value +/// within [`Arc`] without extending its lifetime. 
It is also used to prevent +/// circular references between [`Arc`] pointers, since mutual owning references +/// would never allow either [`Arc`] to be dropped. For example, a tree could +/// have strong [`Arc`] pointers from parent nodes to children, and `Weak` +/// pointers from children back to their parents. +/// +/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`]. +/// +/// [`Arc`]: struct.Arc.html +/// [`Arc::downgrade`]: struct.Arc.html#method.downgrade +/// [`upgrade`]: struct.Weak.html#method.upgrade +/// [`Option`]: ../../std/option/enum.Option.html +/// [`None`]: ../../std/option/enum.Option.html#variant.None +#[stable(feature = "arc_weak", since = "1.4.0")] +pub struct Weak { + // This is a `NonNull` to allow optimizing the size of this type in enums, + // but it is not necessarily a valid pointer. + // `Weak::new` sets this to `usize::MAX` so that it doesn’t need + // to allocate space on the heap. That's not a value a real pointer + // will ever have because RcBox has alignment at least 2. + ptr: NonNull>, +} + +#[stable(feature = "arc_weak", since = "1.4.0")] +unsafe impl Send for Weak {} +#[stable(feature = "arc_weak", since = "1.4.0")] +unsafe impl Sync for Weak {} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U: ?Sized> CoerceUnsized> for Weak {} + +#[stable(feature = "arc_weak", since = "1.4.0")] +impl fmt::Debug for Weak { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "(Weak)") + } +} + +struct ArcInner { + strong: atomic::AtomicUsize, + + // the value usize::MAX acts as a sentinel for temporarily "locking" the + // ability to upgrade weak pointers or downgrade strong ones; this is used + // to avoid races in `make_mut` and `get_mut`. + weak: atomic::AtomicUsize, + + data: T, +} + +unsafe impl Send for ArcInner {} +unsafe impl Sync for ArcInner {} + +impl Arc { + /// Constructs a new `Arc`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn new(data: T) -> Arc { + // Start the weak pointer count as 1 which is the weak pointer that's + // held by all the strong pointers (kinda), see std/rc.rs for more info + let x: Box<_> = box ArcInner { + strong: atomic::AtomicUsize::new(1), + weak: atomic::AtomicUsize::new(1), + data, + }; + Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData } + } + + /// Returns the contained value, if the `Arc` has exactly one strong reference. + /// + /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was + /// passed in. + /// + /// This will succeed even if there are outstanding weak references. + /// + /// [result]: ../../std/result/enum.Result.html + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new(3); + /// assert_eq!(Arc::try_unwrap(x), Ok(3)); + /// + /// let x = Arc::new(4); + /// let _y = Arc::clone(&x); + /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn try_unwrap(this: Self) -> Result { + // See `drop` for why all these atomics are like this + if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() { + return Err(this); + } + + atomic::fence(Acquire); + + unsafe { + let elem = ptr::read(&this.ptr.as_ref().data); + + // Make a weak pointer to clean up the implicit strong-weak reference + let _weak = Weak { ptr: this.ptr }; + mem::forget(this); + + Ok(elem) + } + } +} + +impl Arc { + /// Consumes the `Arc`, returning the wrapped pointer. + /// + /// To avoid a memory leak the pointer must be converted back to an `Arc` using + /// [`Arc::from_raw`][from_raw]. 
+ /// + /// [from_raw]: struct.Arc.html#method.from_raw + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new(10); + /// let x_ptr = Arc::into_raw(x); + /// assert_eq!(unsafe { *x_ptr }, 10); + /// ``` + #[stable(feature = "rc_raw", since = "1.17.0")] + pub fn into_raw(this: Self) -> *const T { + let ptr: *const T = &*this; + mem::forget(this); + ptr + } + + /// Constructs an `Arc` from a raw pointer. + /// + /// The raw pointer must have been previously returned by a call to a + /// [`Arc::into_raw`][into_raw]. + /// + /// This function is unsafe because improper use may lead to memory problems. For example, a + /// double-free may occur if the function is called twice on the same raw pointer. + /// + /// [into_raw]: struct.Arc.html#method.into_raw + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new(10); + /// let x_ptr = Arc::into_raw(x); + /// + /// unsafe { + /// // Convert back to an `Arc` to prevent leak. + /// let x = Arc::from_raw(x_ptr); + /// assert_eq!(*x, 10); + /// + /// // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe. + /// } + /// + /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! + /// ``` + #[stable(feature = "rc_raw", since = "1.17.0")] + pub unsafe fn from_raw(ptr: *const T) -> Self { + // Align the unsized value to the end of the ArcInner. + // Because it is ?Sized, it will always be the last field in memory. + let align = align_of_val(&*ptr); + let layout = Layout::new::>(); + let offset = (layout.size() + layout.padding_needed_for(align)) as isize; + + // Reverse the offset to find the original ArcInner. + let fake_ptr = ptr as *mut ArcInner; + let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset)); + + Arc { + ptr: NonNull::new_unchecked(arc_ptr), + phantom: PhantomData, + } + } + + /// Creates a new [`Weak`][weak] pointer to this value. 
+ /// + /// [weak]: struct.Weak.html + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// ``` + #[stable(feature = "arc_weak", since = "1.4.0")] + pub fn downgrade(this: &Self) -> Weak { + // This Relaxed is OK because we're checking the value in the CAS + // below. + let mut cur = this.inner().weak.load(Relaxed); + + loop { + // check if the weak counter is currently "locked"; if so, spin. + if cur == usize::MAX { + cur = this.inner().weak.load(Relaxed); + continue; + } + + // NOTE: this code currently ignores the possibility of overflow + // into usize::MAX; in general both Rc and Arc need to be adjusted + // to deal with overflow. + + // Unlike with Clone(), we need this to be an Acquire read to + // synchronize with the write coming from `is_unique`, so that the + // events prior to that write happen before this read. + match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { + Ok(_) => { + // Make sure we do not create a dangling Weak + debug_assert!(!is_dangling(this.ptr)); + return Weak { ptr: this.ptr }; + } + Err(old) => cur = old, + } + } + } + + /// Gets the number of [`Weak`][weak] pointers to this value. + /// + /// [weak]: struct.Weak.html + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the weak count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let _weak_five = Arc::downgrade(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` or `Weak` between threads. 
+ /// assert_eq!(1, Arc::weak_count(&five)); + /// ``` + #[inline] + #[stable(feature = "arc_counts", since = "1.15.0")] + pub fn weak_count(this: &Self) -> usize { + let cnt = this.inner().weak.load(SeqCst); + // If the weak count is currently locked, the value of the + // count was 0 just before taking the lock. + if cnt == usize::MAX { 0 } else { cnt - 1 } + } + + /// Gets the number of strong (`Arc`) pointers to this value. + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the strong count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let _also_five = Arc::clone(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` between threads. + /// assert_eq!(2, Arc::strong_count(&five)); + /// ``` + #[inline] + #[stable(feature = "arc_counts", since = "1.15.0")] + pub fn strong_count(this: &Self) -> usize { + this.inner().strong.load(SeqCst) + } + + #[inline] + fn inner(&self) -> &ArcInner { + // This unsafety is ok because while this arc is alive we're guaranteed + // that the inner pointer is valid. Furthermore, we know that the + // `ArcInner` structure itself is `Sync` because the inner data is + // `Sync` as well, so we're ok loaning out an immutable pointer to these + // contents. + unsafe { self.ptr.as_ref() } + } + + // Non-inlined part of `drop`. + #[inline(never)] + unsafe fn drop_slow(&mut self) { + // Destroy the data at this time, even though we may not free the box + // allocation itself (there may still be weak pointers lying around). 
+ ptr::drop_in_place(&mut self.ptr.as_mut().data); + + if self.inner().weak.fetch_sub(1, Release) == 1 { + atomic::fence(Acquire); + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) + } + } + + #[inline] + #[stable(feature = "ptr_eq", since = "1.17.0")] + /// Returns true if the two `Arc`s point to the same value (not + /// just values that compare as equal). + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let same_five = Arc::clone(&five); + /// let other_five = Arc::new(5); + /// + /// assert!(Arc::ptr_eq(&five, &same_five)); + /// assert!(!Arc::ptr_eq(&five, &other_five)); + /// ``` + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + this.ptr.as_ptr() == other.ptr.as_ptr() + } +} + +impl Arc { + // Allocates an `ArcInner` with sufficient space for an unsized value + unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner { + // Create a fake ArcInner to find allocation size and alignment + let fake_ptr = ptr as *mut ArcInner; + + let layout = Layout::for_value(&*fake_ptr); + + let mem = Global.alloc(layout) + .unwrap_or_else(|_| handle_alloc_error(layout)); + + // Initialize the real ArcInner + let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner; + + ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); + ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); + + inner + } + + fn from_box(v: Box) -> Arc { + unsafe { + let box_unique = Box::into_unique(v); + let bptr = box_unique.as_ptr(); + + let value_size = size_of_val(&*bptr); + let ptr = Self::allocate_for_ptr(bptr); + + // Copy value as bytes + ptr::copy_nonoverlapping( + bptr as *const T as *const u8, + &mut (*ptr).data as *mut _ as *mut u8, + value_size); + + // Free the allocation without dropping its contents + box_free(box_unique); + + Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } + } + } +} + +// Sets the data pointer of a `?Sized` raw pointer. 
+// +// For a slice/trait object, this sets the `data` field and leaves the rest +// unchanged. For a sized raw pointer, this simply sets the pointer. +unsafe fn set_data_ptr(mut ptr: *mut T, data: *mut U) -> *mut T { + ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8); + ptr +} + +impl Arc<[T]> { + // Copy elements from slice into newly allocated Arc<[T]> + // + // Unsafe because the caller must either take ownership or bind `T: Copy` + unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> { + let v_ptr = v as *const [T]; + let ptr = Self::allocate_for_ptr(v_ptr); + + ptr::copy_nonoverlapping( + v.as_ptr(), + &mut (*ptr).data as *mut [T] as *mut T, + v.len()); + + Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } + } +} + +// Specialization trait used for From<&[T]> +trait ArcFromSlice { + fn from_slice(slice: &[T]) -> Self; +} + +impl ArcFromSlice for Arc<[T]> { + #[inline] + default fn from_slice(v: &[T]) -> Self { + // Panic guard while cloning T elements. + // In the event of a panic, elements that have been written + // into the new ArcInner will be dropped, then the memory freed. + struct Guard { + mem: NonNull, + elems: *mut T, + layout: Layout, + n_elems: usize, + } + + impl Drop for Guard { + fn drop(&mut self) { + use core::slice::from_raw_parts_mut; + + unsafe { + let slice = from_raw_parts_mut(self.elems, self.n_elems); + ptr::drop_in_place(slice); + + Global.dealloc(self.mem.cast(), self.layout.clone()); + } + } + } + + unsafe { + let v_ptr = v as *const [T]; + let ptr = Self::allocate_for_ptr(v_ptr); + + let mem = ptr as *mut _ as *mut u8; + let layout = Layout::for_value(&*ptr); + + // Pointer to first element + let elems = &mut (*ptr).data as *mut [T] as *mut T; + + let mut guard = Guard{ + mem: NonNull::new_unchecked(mem), + elems: elems, + layout: layout, + n_elems: 0, + }; + + for (i, item) in v.iter().enumerate() { + ptr::write(elems.offset(i as isize), item.clone()); + guard.n_elems += 1; + } + + // All clear. 
Forget the guard so it doesn't free the new ArcInner. + mem::forget(guard); + + Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData } + } + } +} + +impl ArcFromSlice for Arc<[T]> { + #[inline] + fn from_slice(v: &[T]) -> Self { + unsafe { Arc::copy_from_slice(v) } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for Arc { + /// Makes a clone of the `Arc` pointer. + /// + /// This creates another pointer to the same inner value, increasing the + /// strong reference count. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// Arc::clone(&five); + /// ``` + #[inline] + fn clone(&self) -> Arc { + // Using a relaxed ordering is alright here, as knowledge of the + // original reference prevents other threads from erroneously deleting + // the object. + // + // As explained in the [Boost documentation][1], Increasing the + // reference counter can always be done with memory_order_relaxed: New + // references to an object can only be formed from an existing + // reference, and passing an existing reference from one thread to + // another must already provide any required synchronization. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + let old_size = self.inner().strong.fetch_add(1, Relaxed); + + // However we need to guard against massive refcounts in case someone + // is `mem::forget`ing Arcs. If we don't do this the count can overflow + // and users will use-after free. We racily saturate to `isize::MAX` on + // the assumption that there aren't ~2 billion threads incrementing + // the reference count at once. This branch will never be taken in + // any realistic program. + // + // We abort because such a program is incredibly degenerate, and we + // don't care to support it. 
+ if old_size > MAX_REFCOUNT { + unsafe { + abort(); + } + } + + Arc { ptr: self.ptr, phantom: PhantomData } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Deref for Arc { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + &self.inner().data + } +} + +impl Arc { + /// Makes a mutable reference into the given `Arc`. + /// + /// If there are other `Arc` or [`Weak`][weak] pointers to the same value, + /// then `make_mut` will invoke [`clone`][clone] on the inner value to + /// ensure unique ownership. This is also referred to as clone-on-write. + /// + /// See also [`get_mut`][get_mut], which will fail rather than cloning. + /// + /// [weak]: struct.Weak.html + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone + /// [get_mut]: struct.Arc.html#method.get_mut + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut data = Arc::new(5); + /// + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = Arc::clone(&data); // Won't clone inner data + /// *Arc::make_mut(&mut data) += 1; // Clones inner data + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// + /// // Now `data` and `other_data` point to different values. + /// assert_eq!(*data, 8); + /// assert_eq!(*other_data, 12); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn make_mut(this: &mut Self) -> &mut T { + // Note that we hold both a strong reference and a weak reference. + // Thus, releasing our strong reference only will not, by itself, cause + // the memory to be deallocated. + // + // Use Acquire to ensure that we see any writes to `weak` that happen + // before release writes (i.e., decrements) to `strong`. Since we hold a + // weak count, there's no chance the ArcInner itself could be + // deallocated. 
+ if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { + // Another strong pointer exists; clone + *this = Arc::new((**this).clone()); + } else if this.inner().weak.load(Relaxed) != 1 { + // Relaxed suffices in the above because this is fundamentally an + // optimization: we are always racing with weak pointers being + // dropped. Worst case, we end up allocated a new Arc unnecessarily. + + // We removed the last strong ref, but there are additional weak + // refs remaining. We'll move the contents to a new Arc, and + // invalidate the other weak refs. + + // Note that it is not possible for the read of `weak` to yield + // usize::MAX (i.e., locked), since the weak count can only be + // locked by a thread with a strong reference. + + // Materialize our own implicit weak pointer, so that it can clean + // up the ArcInner as needed. + let weak = Weak { ptr: this.ptr }; + + // mark the data itself as already deallocated + unsafe { + // there is no data race in the implicit write caused by `read` + // here (due to zeroing) because data is no longer accessed by + // other threads (due to there being no more strong refs at this + // point). + let mut swap = Arc::new(ptr::read(&weak.ptr.as_ref().data)); + mem::swap(this, &mut swap); + mem::forget(swap); + } + } else { + // We were the sole reference of either kind; bump back up the + // strong ref count. + this.inner().strong.store(1, Release); + } + + // As with `get_mut()`, the unsafety is ok because our reference was + // either unique to begin with, or became one upon cloning the contents. + unsafe { + &mut this.ptr.as_mut().data + } + } +} + +impl Arc { + /// Returns a mutable reference to the inner value, if there are + /// no other `Arc` or [`Weak`][weak] pointers to the same value. + /// + /// Returns [`None`][option] otherwise, because it is not safe to + /// mutate a shared value. 
+ /// + /// See also [`make_mut`][make_mut], which will [`clone`][clone] + /// the inner value when it's shared. + /// + /// [weak]: struct.Weak.html + /// [option]: ../../std/option/enum.Option.html + /// [make_mut]: struct.Arc.html#method.make_mut + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut x = Arc::new(3); + /// *Arc::get_mut(&mut x).unwrap() = 4; + /// assert_eq!(*x, 4); + /// + /// let _y = Arc::clone(&x); + /// assert!(Arc::get_mut(&mut x).is_none()); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn get_mut(this: &mut Self) -> Option<&mut T> { + if this.is_unique() { + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the Arc itself to be `mut`, so we're returning the only possible + // reference to the inner data. + unsafe { + Some(&mut this.ptr.as_mut().data) + } + } else { + None + } + } + + /// Determine whether this is the unique reference (including weak refs) to + /// the underlying data. + /// + /// Note that this requires locking the weak ref count. + fn is_unique(&mut self) -> bool { + // lock the weak pointer count if we appear to be the sole weak pointer + // holder. + // + // The acquire label here ensures a happens-before relationship with any + // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements + // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded + // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. 
+ if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { + // This needs to be an `Acquire` to synchronize with the decrement of the `strong` + // counter in `drop` -- the only access that happens when any but the last reference + // is being dropped. + let unique = self.inner().strong.load(Acquire) == 1; + + // The release write here synchronizes with a read in `downgrade`, + // effectively preventing the above read of `strong` from happening + // after the write. + self.inner().weak.store(1, Release); // release the lock + unique + } else { + false + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc { + /// Drops the `Arc`. + /// + /// This will decrement the strong reference count. If the strong reference + /// count reaches zero then the only other references (if any) are + /// [`Weak`][weak], so we `drop` the inner value. + /// + /// [weak]: struct.Weak.html + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// struct Foo; + /// + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } + /// } + /// + /// let foo = Arc::new(Foo); + /// let foo2 = Arc::clone(&foo); + /// + /// drop(foo); // Doesn't print anything + /// drop(foo2); // Prints "dropped!" + /// ``` + #[inline] + fn drop(&mut self) { + // Because `fetch_sub` is already atomic, we do not need to synchronize + // with other threads unless we are going to delete the object. This + // same logic applies to the below `fetch_sub` to the `weak` count. + if self.inner().strong.fetch_sub(1, Release) != 1 { + return; + } + + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. 
This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. + // + // In particular, while the contents of an Arc are usually immutable, it's + // possible to have interior writes to something like a Mutex. Since a + // Mutex is not acquired when it is deleted, we can't rely on its + // synchronization logic to make writes in thread A visible to a destructor + // running in thread B. + // + // Also note that the Acquire fence here could probably be replaced with an + // Acquire load, which could improve performance in highly-contended + // situations. See [2]. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + // [2]: (https://github.com/rust-lang/rust/pull/41714) + atomic::fence(Acquire); + + unsafe { + self.drop_slow(); + } + } +} + +impl Arc { + #[inline] + #[stable(feature = "rc_downcast", since = "1.29.0")] + /// Attempt to downcast the `Arc` to a concrete type. 
+ /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// use std::sync::Arc; + /// + /// fn print_if_string(value: Arc) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// fn main() { + /// let my_string = "Hello World".to_string(); + /// print_if_string(Arc::new(my_string)); + /// print_if_string(Arc::new(0i8)); + /// } + /// ``` + pub fn downcast(self) -> Result, Self> + where + T: Any + Send + Sync + 'static, + { + if (*self).is::() { + let ptr = self.ptr.cast::>(); + mem::forget(self); + Ok(Arc { ptr, phantom: PhantomData }) + } else { + Err(self) + } + } +} + +impl Weak { + /// Constructs a new `Weak`, without allocating any memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. + /// + /// [`upgrade`]: struct.Weak.html#method.upgrade + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// ``` + /// use std::sync::Weak; + /// + /// let empty: Weak = Weak::new(); + /// assert!(empty.upgrade().is_none()); + /// ``` + #[stable(feature = "downgraded_weak", since = "1.10.0")] + pub fn new() -> Weak { + Weak { + ptr: NonNull::new(usize::MAX as *mut ArcInner).expect("MAX is not 0"), + } + } +} + +impl Weak { + /// Attempts to upgrade the `Weak` pointer to an [`Arc`], extending + /// the lifetime of the value if successful. + /// + /// Returns [`None`] if the value has since been dropped. + /// + /// [`Arc`]: struct.Arc.html + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// + /// let strong_five: Option> = weak_five.upgrade(); + /// assert!(strong_five.is_some()); + /// + /// // Destroy all strong pointers. 
+ /// drop(strong_five); + /// drop(five); + /// + /// assert!(weak_five.upgrade().is_none()); + /// ``` + #[stable(feature = "arc_weak", since = "1.4.0")] + pub fn upgrade(&self) -> Option> { + // We use a CAS loop to increment the strong count instead of a + // fetch_add because once the count hits 0 it must never be above 0. + let inner = self.inner()?; + + // Relaxed load because any write of 0 that we can observe + // leaves the field in a permanently zero state (so a + // "stale" read of 0 is fine), and any other value is + // confirmed via the CAS below. + let mut n = inner.strong.load(Relaxed); + + loop { + if n == 0 { + return None; + } + + // See comments in `Arc::clone` for why we do this (for `mem::forget`). + if n > MAX_REFCOUNT { + unsafe { + abort(); + } + } + + // Relaxed is valid for the same reason it is on Arc's Clone impl + match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) { + Ok(_) => return Some(Arc { + // null checked above + ptr: self.ptr, + phantom: PhantomData, + }), + Err(old) => n = old, + } + } + } + + /// Return `None` when the pointer is dangling and there is no allocated `ArcInner`, + /// i.e. this `Weak` was created by `Weak::new` + #[inline] + fn inner(&self) -> Option<&ArcInner> { + if is_dangling(self.ptr) { + None + } else { + Some(unsafe { self.ptr.as_ref() }) + } + } +} + +#[stable(feature = "arc_weak", since = "1.4.0")] +impl Clone for Weak { + /// Makes a clone of the `Weak` pointer that points to the same value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::{Arc, Weak}; + /// + /// let weak_five = Arc::downgrade(&Arc::new(5)); + /// + /// Weak::clone(&weak_five); + /// ``` + #[inline] + fn clone(&self) -> Weak { + let inner = if let Some(inner) = self.inner() { + inner + } else { + return Weak { ptr: self.ptr }; + }; + // See comments in Arc::clone() for why this is relaxed. 
This can use a + // fetch_add (ignoring the lock) because the weak count is only locked + // where are *no other* weak pointers in existence. (So we can't be + // running this code in that case). + let old_size = inner.weak.fetch_add(1, Relaxed); + + // See comments in Arc::clone() for why we do this (for mem::forget). + if old_size > MAX_REFCOUNT { + unsafe { + abort(); + } + } + + return Weak { ptr: self.ptr }; + } +} + +#[stable(feature = "downgraded_weak", since = "1.10.0")] +impl Default for Weak { + /// Constructs a new `Weak`, without allocating memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. + /// + /// [`upgrade`]: struct.Weak.html#method.upgrade + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// ``` + /// use std::sync::Weak; + /// + /// let empty: Weak = Default::default(); + /// assert!(empty.upgrade().is_none()); + /// ``` + fn default() -> Weak { + Weak::new() + } +} + +#[stable(feature = "arc_weak", since = "1.4.0")] +impl Drop for Weak { + /// Drops the `Weak` pointer. + /// + /// # Examples + /// + /// ``` + /// use std::sync::{Arc, Weak}; + /// + /// struct Foo; + /// + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } + /// } + /// + /// let foo = Arc::new(Foo); + /// let weak_foo = Arc::downgrade(&foo); + /// let other_weak_foo = Weak::clone(&weak_foo); + /// + /// drop(weak_foo); // Doesn't print anything + /// drop(foo); // Prints "dropped!" + /// + /// assert!(other_weak_foo.upgrade().is_none()); + /// ``` + fn drop(&mut self) { + // If we find out that we were the last weak pointer, then its time to + // deallocate the data entirely. 
See the discussion in Arc::drop() about + // the memory orderings + // + // It's not necessary to check for the locked state here, because the + // weak count can only be locked if there was precisely one weak ref, + // meaning that drop could only subsequently run ON that remaining weak + // ref, which can only happen after the lock is released. + let inner = if let Some(inner) = self.inner() { + inner + } else { + return + }; + + if inner.weak.fetch_sub(1, Release) == 1 { + atomic::fence(Acquire); + unsafe { + Global.dealloc(self.ptr.cast(), Layout::for_value(self.ptr.as_ref())) + } + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialEq for Arc { + /// Equality for two `Arc`s. + /// + /// Two `Arc`s are equal if their inner values are equal. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five == Arc::new(5)); + /// ``` + fn eq(&self, other: &Arc) -> bool { + *(*self) == *(*other) + } + + /// Inequality for two `Arc`s. + /// + /// Two `Arc`s are unequal if their inner values are unequal. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five != Arc::new(6)); + /// ``` + fn ne(&self, other: &Arc) -> bool { + *(*self) != *(*other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for Arc { + /// Partial comparison for two `Arc`s. + /// + /// The two are compared by calling `partial_cmp()` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::cmp::Ordering; + /// + /// let five = Arc::new(5); + /// + /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6))); + /// ``` + fn partial_cmp(&self, other: &Arc) -> Option { + (**self).partial_cmp(&**other) + } + + /// Less-than comparison for two `Arc`s. + /// + /// The two are compared by calling `<` on their inner values. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five < Arc::new(6)); + /// ``` + fn lt(&self, other: &Arc) -> bool { + *(*self) < *(*other) + } + + /// 'Less than or equal to' comparison for two `Arc`s. + /// + /// The two are compared by calling `<=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five <= Arc::new(5)); + /// ``` + fn le(&self, other: &Arc) -> bool { + *(*self) <= *(*other) + } + + /// Greater-than comparison for two `Arc`s. + /// + /// The two are compared by calling `>` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five > Arc::new(4)); + /// ``` + fn gt(&self, other: &Arc) -> bool { + *(*self) > *(*other) + } + + /// 'Greater than or equal to' comparison for two `Arc`s. + /// + /// The two are compared by calling `>=` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// assert!(five >= Arc::new(5)); + /// ``` + fn ge(&self, other: &Arc) -> bool { + *(*self) >= *(*other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for Arc { + /// Comparison for two `Arc`s. + /// + /// The two are compared by calling `cmp()` on their inner values. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::cmp::Ordering; + /// + /// let five = Arc::new(5); + /// + /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6))); + /// ``` + fn cmp(&self, other: &Arc) -> Ordering { + (**self).cmp(&**other) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl Eq for Arc {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Display for Arc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Arc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Pointer for Arc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&(&**self as *const T), f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Default for Arc { + /// Creates a new `Arc`, with the `Default` value for `T`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x: Arc = Default::default(); + /// assert_eq!(*x, 0); + /// ``` + fn default() -> Arc { + Arc::new(Default::default()) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Hash for Arc { + fn hash(&self, state: &mut H) { + (**self).hash(state) + } +} + +#[stable(feature = "from_for_ptrs", since = "1.6.0")] +impl From for Arc { + fn from(t: T) -> Self { + Arc::new(t) + } +} + +#[stable(feature = "shared_from_slice", since = "1.21.0")] +impl<'a, T: Clone> From<&'a [T]> for Arc<[T]> { + #[inline] + fn from(v: &[T]) -> Arc<[T]> { + >::from_slice(v) + } +} + +#[stable(feature = "shared_from_slice", since = "1.21.0")] +impl<'a> From<&'a str> for Arc { + #[inline] + fn from(v: &str) -> Arc { + let arc = Arc::<[u8]>::from(v.as_bytes()); + unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) } + } +} + +#[stable(feature = "shared_from_slice", since = "1.21.0")] +impl From for Arc { + #[inline] + fn from(v: String) -> Arc { + Arc::from(&v[..]) + } +} + +#[stable(feature = "shared_from_slice", since = "1.21.0")] +impl From> for Arc { + #[inline] + fn from(v: Box) -> Arc { + Arc::from_box(v) + } +} + +#[stable(feature = "shared_from_slice", since = "1.21.0")] +impl From> for Arc<[T]> { + #[inline] + fn from(mut v: Vec) -> Arc<[T]> { + unsafe { + let arc = Arc::copy_from_slice(&v); + + // Allow the Vec to free its memory, but not destroy its contents + v.set_len(0); + + arc + } + } +} + +#[cfg(test)] +mod tests { + use std::boxed::Box; + use std::clone::Clone; + use std::sync::mpsc::channel; + use std::mem::drop; + use std::ops::Drop; + use std::option::Option; + use std::option::Option::{None, Some}; + use std::sync::atomic; + use std::sync::atomic::Ordering::{Acquire, SeqCst}; + use std::thread; + use std::sync::Mutex; + use std::convert::From; + + use super::{Arc, Weak}; + use vec::Vec; + + struct Canary(*mut atomic::AtomicUsize); + + impl Drop for Canary { + fn drop(&mut self) { + 
unsafe { + match *self { + Canary(c) => { + (*c).fetch_add(1, SeqCst); + } + } + } + } + } + + #[test] + #[cfg_attr(target_os = "emscripten", ignore)] + fn manually_share_arc() { + let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + let arc_v = Arc::new(v); + + let (tx, rx) = channel(); + + let _t = thread::spawn(move || { + let arc_v: Arc> = rx.recv().unwrap(); + assert_eq!((*arc_v)[3], 4); + }); + + tx.send(arc_v.clone()).unwrap(); + + assert_eq!((*arc_v)[2], 3); + assert_eq!((*arc_v)[4], 5); + } + + #[test] + fn test_arc_get_mut() { + let mut x = Arc::new(3); + *Arc::get_mut(&mut x).unwrap() = 4; + assert_eq!(*x, 4); + let y = x.clone(); + assert!(Arc::get_mut(&mut x).is_none()); + drop(y); + assert!(Arc::get_mut(&mut x).is_some()); + let _w = Arc::downgrade(&x); + assert!(Arc::get_mut(&mut x).is_none()); + } + + #[test] + fn try_unwrap() { + let x = Arc::new(3); + assert_eq!(Arc::try_unwrap(x), Ok(3)); + let x = Arc::new(4); + let _y = x.clone(); + assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4))); + let x = Arc::new(5); + let _w = Arc::downgrade(&x); + assert_eq!(Arc::try_unwrap(x), Ok(5)); + } + + #[test] + fn into_from_raw() { + let x = Arc::new(box "hello"); + let y = x.clone(); + + let x_ptr = Arc::into_raw(x); + drop(y); + unsafe { + assert_eq!(**x_ptr, "hello"); + + let x = Arc::from_raw(x_ptr); + assert_eq!(**x, "hello"); + + assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello")); + } + } + + #[test] + fn test_into_from_raw_unsized() { + use std::fmt::Display; + use std::string::ToString; + + let arc: Arc = Arc::from("foo"); + + let ptr = Arc::into_raw(arc.clone()); + let arc2 = unsafe { Arc::from_raw(ptr) }; + + assert_eq!(unsafe { &*ptr }, "foo"); + assert_eq!(arc, arc2); + + let arc: Arc = Arc::new(123); + + let ptr = Arc::into_raw(arc.clone()); + let arc2 = unsafe { Arc::from_raw(ptr) }; + + assert_eq!(unsafe { &*ptr }.to_string(), "123"); + assert_eq!(arc2.to_string(), "123"); + } + + #[test] + fn test_cowarc_clone_make_mut() { + let mut cow0 = 
Arc::new(75); + let mut cow1 = cow0.clone(); + let mut cow2 = cow1.clone(); + + assert!(75 == *Arc::make_mut(&mut cow0)); + assert!(75 == *Arc::make_mut(&mut cow1)); + assert!(75 == *Arc::make_mut(&mut cow2)); + + *Arc::make_mut(&mut cow0) += 1; + *Arc::make_mut(&mut cow1) += 2; + *Arc::make_mut(&mut cow2) += 3; + + assert!(76 == *cow0); + assert!(77 == *cow1); + assert!(78 == *cow2); + + // none should point to the same backing memory + assert!(*cow0 != *cow1); + assert!(*cow0 != *cow2); + assert!(*cow1 != *cow2); + } + + #[test] + fn test_cowarc_clone_unique2() { + let mut cow0 = Arc::new(75); + let cow1 = cow0.clone(); + let cow2 = cow1.clone(); + + assert!(75 == *cow0); + assert!(75 == *cow1); + assert!(75 == *cow2); + + *Arc::make_mut(&mut cow0) += 1; + assert!(76 == *cow0); + assert!(75 == *cow1); + assert!(75 == *cow2); + + // cow1 and cow2 should share the same contents + // cow0 should have a unique reference + assert!(*cow0 != *cow1); + assert!(*cow0 != *cow2); + assert!(*cow1 == *cow2); + } + + #[test] + fn test_cowarc_clone_weak() { + let mut cow0 = Arc::new(75); + let cow1_weak = Arc::downgrade(&cow0); + + assert!(75 == *cow0); + assert!(75 == *cow1_weak.upgrade().unwrap()); + + *Arc::make_mut(&mut cow0) += 1; + + assert!(76 == *cow0); + assert!(cow1_weak.upgrade().is_none()); + } + + #[test] + fn test_live() { + let x = Arc::new(5); + let y = Arc::downgrade(&x); + assert!(y.upgrade().is_some()); + } + + #[test] + fn test_dead() { + let x = Arc::new(5); + let y = Arc::downgrade(&x); + drop(x); + assert!(y.upgrade().is_none()); + } + + #[test] + fn weak_self_cyclic() { + struct Cycle { + x: Mutex>>, + } + + let a = Arc::new(Cycle { x: Mutex::new(None) }); + let b = Arc::downgrade(&a.clone()); + *a.x.lock().unwrap() = Some(b); + + // hopefully we don't double-free (or leak)... 
+ } + + #[test] + fn drop_arc() { + let mut canary = atomic::AtomicUsize::new(0); + let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); + drop(x); + assert!(canary.load(Acquire) == 1); + } + + #[test] + fn drop_arc_weak() { + let mut canary = atomic::AtomicUsize::new(0); + let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize)); + let arc_weak = Arc::downgrade(&arc); + assert!(canary.load(Acquire) == 0); + drop(arc); + assert!(canary.load(Acquire) == 1); + drop(arc_weak); + } + + #[test] + fn test_strong_count() { + let a = Arc::new(0); + assert!(Arc::strong_count(&a) == 1); + let w = Arc::downgrade(&a); + assert!(Arc::strong_count(&a) == 1); + let b = w.upgrade().expect(""); + assert!(Arc::strong_count(&b) == 2); + assert!(Arc::strong_count(&a) == 2); + drop(w); + drop(a); + assert!(Arc::strong_count(&b) == 1); + let c = b.clone(); + assert!(Arc::strong_count(&b) == 2); + assert!(Arc::strong_count(&c) == 2); + } + + #[test] + fn test_weak_count() { + let a = Arc::new(0); + assert!(Arc::strong_count(&a) == 1); + assert!(Arc::weak_count(&a) == 0); + let w = Arc::downgrade(&a); + assert!(Arc::strong_count(&a) == 1); + assert!(Arc::weak_count(&a) == 1); + let x = w.clone(); + assert!(Arc::weak_count(&a) == 2); + drop(w); + drop(x); + assert!(Arc::strong_count(&a) == 1); + assert!(Arc::weak_count(&a) == 0); + let c = a.clone(); + assert!(Arc::strong_count(&a) == 2); + assert!(Arc::weak_count(&a) == 0); + let d = Arc::downgrade(&c); + assert!(Arc::weak_count(&c) == 1); + assert!(Arc::strong_count(&c) == 2); + + drop(a); + drop(c); + drop(d); + } + + #[test] + fn show_arc() { + let a = Arc::new(5); + assert_eq!(format!("{:?}", a), "5"); + } + + // Make sure deriving works with Arc + #[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)] + struct Foo { + inner: Arc, + } + + #[test] + fn test_unsized() { + let x: Arc<[i32]> = Arc::new([1, 2, 3]); + assert_eq!(format!("{:?}", x), "[1, 2, 3]"); + let y = Arc::downgrade(&x.clone()); + 
drop(x); + assert!(y.upgrade().is_none()); + } + + #[test] + fn test_from_owned() { + let foo = 123; + let foo_arc = Arc::from(foo); + assert!(123 == *foo_arc); + } + + #[test] + fn test_new_weak() { + let foo: Weak = Weak::new(); + assert!(foo.upgrade().is_none()); + } + + #[test] + fn test_ptr_eq() { + let five = Arc::new(5); + let same_five = five.clone(); + let other_five = Arc::new(5); + + assert!(Arc::ptr_eq(&five, &same_five)); + assert!(!Arc::ptr_eq(&five, &other_five)); + } + + #[test] + #[cfg_attr(target_os = "emscripten", ignore)] + fn test_weak_count_locked() { + let mut a = Arc::new(atomic::AtomicBool::new(false)); + let a2 = a.clone(); + let t = thread::spawn(move || { + for _i in 0..1000000 { + Arc::get_mut(&mut a); + } + a.store(true, SeqCst); + }); + + while !a2.load(SeqCst) { + let n = Arc::weak_count(&a2); + assert!(n < 2, "bad weak count: {}", n); + } + t.join().unwrap(); + } + + #[test] + fn test_from_str() { + let r: Arc = Arc::from("foo"); + + assert_eq!(&r[..], "foo"); + } + + #[test] + fn test_copy_from_slice() { + let s: &[u32] = &[1, 2, 3]; + let r: Arc<[u32]> = Arc::from(s); + + assert_eq!(&r[..], [1, 2, 3]); + } + + #[test] + fn test_clone_from_slice() { + #[derive(Clone, Debug, Eq, PartialEq)] + struct X(u32); + + let s: &[X] = &[X(1), X(2), X(3)]; + let r: Arc<[X]> = Arc::from(s); + + assert_eq!(&r[..], s); + } + + #[test] + #[should_panic] + fn test_clone_from_slice_panic() { + use std::string::{String, ToString}; + + struct Fail(u32, String); + + impl Clone for Fail { + fn clone(&self) -> Fail { + if self.0 == 2 { + panic!(); + } + Fail(self.0, self.1.clone()) + } + } + + let s: &[Fail] = &[ + Fail(0, "foo".to_string()), + Fail(1, "bar".to_string()), + Fail(2, "baz".to_string()), + ]; + + // Should panic, but not cause memory corruption + let _r: Arc<[Fail]> = Arc::from(s); + } + + #[test] + fn test_from_box() { + let b: Box = box 123; + let r: Arc = Arc::from(b); + + assert_eq!(*r, 123); + } + + #[test] + fn test_from_box_str() { + 
use std::string::String; + + let s = String::from("foo").into_boxed_str(); + let r: Arc = Arc::from(s); + + assert_eq!(&r[..], "foo"); + } + + #[test] + fn test_from_box_slice() { + let s = vec![1, 2, 3].into_boxed_slice(); + let r: Arc<[u32]> = Arc::from(s); + + assert_eq!(&r[..], [1, 2, 3]); + } + + #[test] + fn test_from_box_trait() { + use std::fmt::Display; + use std::string::ToString; + + let b: Box = box 123; + let r: Arc = Arc::from(b); + + assert_eq!(r.to_string(), "123"); + } + + #[test] + fn test_from_box_trait_zero_sized() { + use std::fmt::Debug; + + let b: Box = box (); + let r: Arc = Arc::from(b); + + assert_eq!(format!("{:?}", r), "()"); + } + + #[test] + fn test_from_vec() { + let v = vec![1, 2, 3]; + let r: Arc<[u32]> = Arc::from(v); + + assert_eq!(&r[..], [1, 2, 3]); + } + + #[test] + fn test_downcast() { + use std::any::Any; + + let r1: Arc = Arc::new(i32::max_value()); + let r2: Arc = Arc::new("abc"); + + assert!(r1.clone().downcast::().is_err()); + + let r1i32 = r1.downcast::(); + assert!(r1i32.is_ok()); + assert_eq!(r1i32.unwrap(), Arc::new(i32::max_value())); + + assert!(r2.clone().downcast::().is_err()); + + let r2str = r2.downcast::<&'static str>(); + assert!(r2str.is_ok()); + assert_eq!(r2str.unwrap(), Arc::new("abc")); + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl borrow::Borrow for Arc { + fn borrow(&self) -> &T { + &**self + } +} + +#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] +impl AsRef for Arc { + fn as_ref(&self) -> &T { + &**self + } +} diff --git a/src/liballoc/task.rs b/src/liballoc/task.rs new file mode 100644 index 000000000000..7a4eda21a601 --- /dev/null +++ b/src/liballoc/task.rs @@ -0,0 +1,140 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Types and Traits for working with asynchronous tasks. + +pub use core::task::*; + +#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] +pub use self::if_arc::*; + +#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] +mod if_arc { + use super::*; + use core::marker::PhantomData; + use core::mem; + use core::ptr::{self, NonNull}; + use sync::Arc; + + /// A way of waking up a specific task. + /// + /// Any task executor must provide a way of signaling that a task it owns + /// is ready to be `poll`ed again. Executors do so by implementing this trait. + pub trait Wake: Send + Sync { + /// Indicates that the associated task is ready to make progress and should + /// be `poll`ed. + /// + /// Executors generally maintain a queue of "ready" tasks; `wake` should place + /// the associated task onto this queue. + fn wake(arc_self: &Arc); + + /// Indicates that the associated task is ready to make progress and should + /// be `poll`ed. This function is like `wake`, but can only be called from the + /// thread on which this `Wake` was created. + /// + /// Executors generally maintain a queue of "ready" tasks; `wake_local` should place + /// the associated task onto this queue. 
+ #[inline] + unsafe fn wake_local(arc_self: &Arc) { + Self::wake(arc_self); + } + } + + #[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] + struct ArcWrapped(PhantomData); + + unsafe impl UnsafeWake for ArcWrapped { + #[inline] + unsafe fn clone_raw(&self) -> Waker { + let me: *const ArcWrapped = self; + let arc = (*(&me as *const *const ArcWrapped as *const Arc)).clone(); + Waker::from(arc) + } + + #[inline] + unsafe fn drop_raw(&self) { + let mut me: *const ArcWrapped = self; + let me = &mut me as *mut *const ArcWrapped as *mut Arc; + ptr::drop_in_place(me); + } + + #[inline] + unsafe fn wake(&self) { + let me: *const ArcWrapped = self; + T::wake(&*(&me as *const *const ArcWrapped as *const Arc)) + } + + #[inline] + unsafe fn wake_local(&self) { + let me: *const ArcWrapped = self; + T::wake_local(&*(&me as *const *const ArcWrapped as *const Arc)) + } + } + + impl From> for Waker + where T: Wake + 'static, + { + fn from(rc: Arc) -> Self { + unsafe { + let ptr = mem::transmute::, NonNull>>(rc); + Waker::new(ptr) + } + } + } + + /// Creates a `LocalWaker` from a local `wake`. + /// + /// This function requires that `wake` is "local" (created on the current thread). + /// The resulting `LocalWaker` will call `wake.wake_local()` when awoken, and + /// will call `wake.wake()` if awoken after being converted to a `Waker`. + #[inline] + pub unsafe fn local_waker(wake: Arc) -> LocalWaker { + let ptr = mem::transmute::, NonNull>>(wake); + LocalWaker::new(ptr) + } + + struct NonLocalAsLocal(ArcWrapped); + + unsafe impl UnsafeWake for NonLocalAsLocal { + #[inline] + unsafe fn clone_raw(&self) -> Waker { + self.0.clone_raw() + } + + #[inline] + unsafe fn drop_raw(&self) { + self.0.drop_raw() + } + + #[inline] + unsafe fn wake(&self) { + self.0.wake() + } + + #[inline] + unsafe fn wake_local(&self) { + // Since we're nonlocal, we can't call wake_local + self.0.wake() + } + } + + /// Creates a `LocalWaker` from a non-local `wake`. 
+ /// + /// This function is similar to `local_waker`, but does not require that `wake` + /// is local to the current thread. The resulting `LocalWaker` will call + /// `wake.wake()` when awoken. + #[inline] + pub fn local_waker_from_nonlocal(wake: Arc) -> LocalWaker { + unsafe { + let ptr = mem::transmute::, NonNull>>(wake); + LocalWaker::new(ptr) + } + } +} diff --git a/src/liballoc/tests/arc.rs b/src/liballoc/tests/arc.rs new file mode 100644 index 000000000000..d90c22a3b189 --- /dev/null +++ b/src/liballoc/tests/arc.rs @@ -0,0 +1,55 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::any::Any; +use std::sync::{Arc, Weak}; + +#[test] +fn uninhabited() { + enum Void {} + let mut a = Weak::::new(); + a = a.clone(); + assert!(a.upgrade().is_none()); + + let mut a: Weak = a; // Unsizing + a = a.clone(); + assert!(a.upgrade().is_none()); +} + +#[test] +fn slice() { + let a: Arc<[u32; 3]> = Arc::new([3, 2, 1]); + let a: Arc<[u32]> = a; // Unsizing + let b: Arc<[u32]> = Arc::from(&[3, 2, 1][..]); // Conversion + assert_eq!(a, b); + + // Exercise is_dangling() with a DST + let mut a = Arc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); +} + +#[test] +fn trait_object() { + let a: Arc = Arc::new(4); + let a: Arc = a; // Unsizing + + // Exercise is_dangling() with a DST + let mut a = Arc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); + + let mut b = Weak::::new(); + b = b.clone(); + assert!(b.upgrade().is_none()); + let mut b: Weak = b; // Unsizing + b = b.clone(); + assert!(b.upgrade().is_none()); +} diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs index 06d585f8ea82..8494463463cb 
100644 --- a/src/liballoc/tests/binary_heap.rs +++ b/src/liballoc/tests/binary_heap.rs @@ -8,9 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::panic; +use std::cmp; use std::collections::BinaryHeap; use std::collections::binary_heap::{Drain, PeekMut}; +use std::panic::{self, AssertUnwindSafe}; +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + +use rand::{thread_rng, Rng}; #[test] fn test_iterator() { @@ -274,29 +278,86 @@ fn test_extend_specialization() { assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); } -#[test] -fn test_placement() { - let mut a = BinaryHeap::new(); - &mut a <- 2; - &mut a <- 4; - &mut a <- 3; - assert_eq!(a.peek(), Some(&4)); - assert_eq!(a.len(), 3); - &mut a <- 1; - assert_eq!(a.into_sorted_vec(), vec![1, 2, 3, 4]); -} - -#[test] -fn test_placement_panic() { - let mut heap = BinaryHeap::from(vec![1, 2, 3]); - fn mkpanic() -> usize { panic!() } - let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { &mut heap <- mkpanic(); })); - assert_eq!(heap.len(), 3); -} - #[allow(dead_code)] fn assert_covariance() { fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { d } } + +// old binaryheap failed this test +// +// Integrity means that all elements are present after a comparison panics, +// even if the order may not be correct. +// +// Destructors must be called exactly once per element. 
+#[test] +fn panic_safe() { + static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; + + #[derive(Eq, PartialEq, Ord, Clone, Debug)] + struct PanicOrd(T, bool); + + impl Drop for PanicOrd { + fn drop(&mut self) { + // update global drop count + DROP_COUNTER.fetch_add(1, Ordering::SeqCst); + } + } + + impl PartialOrd for PanicOrd { + fn partial_cmp(&self, other: &Self) -> Option { + if self.1 || other.1 { + panic!("Panicking comparison"); + } + self.0.partial_cmp(&other.0) + } + } + let mut rng = thread_rng(); + const DATASZ: usize = 32; + const NTEST: usize = 10; + + // don't use 0 in the data -- we want to catch the zeroed-out case. + let data = (1..DATASZ + 1).collect::>(); + + // since it's a fuzzy test, run several tries. + for _ in 0..NTEST { + for i in 1..DATASZ + 1 { + DROP_COUNTER.store(0, Ordering::SeqCst); + + let mut panic_ords: Vec<_> = data.iter() + .filter(|&&x| x != i) + .map(|&x| PanicOrd(x, false)) + .collect(); + let panic_item = PanicOrd(i, true); + + // heapify the sane items + rng.shuffle(&mut panic_ords); + let mut heap = BinaryHeap::from(panic_ords); + let inner_data; + + { + // push the panicking item to the heap and catch the panic + let thread_result = { + let mut heap_ref = AssertUnwindSafe(&mut heap); + panic::catch_unwind(move || { + heap_ref.push(panic_item); + }) + }; + assert!(thread_result.is_err()); + + // Assert no elements were dropped + let drops = DROP_COUNTER.load(Ordering::SeqCst); + assert!(drops == 0, "Must not drop items. 
drops={}", drops); + inner_data = heap.clone().into_vec(); + drop(heap); + } + let drops = DROP_COUNTER.load(Ordering::SeqCst); + assert_eq!(drops, DATASZ); + + let mut data_sorted = inner_data.into_iter().map(|p| p.0).collect::>(); + data_sorted.sort(); + assert_eq!(data_sorted, data); + } + } +} diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs index 2393101040d9..6ebdb86cc4a9 100644 --- a/src/liballoc/tests/btree/map.rs +++ b/src/liballoc/tests/btree/map.rs @@ -9,8 +9,8 @@ // except according to those terms. use std::collections::BTreeMap; -use std::collections::Bound::{self, Excluded, Included, Unbounded}; use std::collections::btree_map::Entry::{Occupied, Vacant}; +use std::ops::Bound::{self, Excluded, Included, Unbounded}; use std::rc::Rc; use std::iter::FromIterator; diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs index 6171b8ba624c..0330bda5e323 100644 --- a/src/liballoc/tests/btree/set.rs +++ b/src/liballoc/tests/btree/set.rs @@ -40,7 +40,7 @@ fn test_hash() { } fn check(a: &[i32], b: &[i32], expected: &[i32], f: F) - where F: FnOnce(&BTreeSet, &BTreeSet, &mut FnMut(&i32) -> bool) -> bool + where F: FnOnce(&BTreeSet, &BTreeSet, &mut dyn FnMut(&i32) -> bool) -> bool { let mut set_a = BTreeSet::new(); let mut set_b = BTreeSet::new(); diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs index d3ce12056bb4..6fa88ce969a0 100644 --- a/src/liballoc/tests/heap.rs +++ b/src/liballoc/tests/heap.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use alloc_system::System; -use std::heap::{Heap, Alloc, Layout}; +use std::alloc::{Global, Alloc, Layout}; /// https://github.com/rust-lang/rust/issues/45955 /// @@ -22,7 +22,7 @@ fn alloc_system_overaligned_request() { #[test] fn std_heap_overaligned_request() { - check_overalign_requests(Heap) + check_overalign_requests(Global) } fn check_overalign_requests(mut allocator: T) { @@ -34,7 +34,8 @@ fn check_overalign_requests(mut allocator: T) { allocator.alloc(Layout::from_size_align(size, align).unwrap()).unwrap() }).collect(); for &ptr in &pointers { - assert_eq!((ptr as usize) % align, 0, "Got a pointer less aligned than requested") + assert_eq!((ptr.as_ptr() as usize) % align, 0, + "Got a pointer less aligned than requested") } // Clean up diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index eee229bc6fdf..c12c7a81f79c 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -8,43 +8,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![deny(warnings)] - #![feature(allocator_api)] #![feature(alloc_system)] #![feature(attr_literals)] #![feature(box_syntax)] -#![feature(inclusive_range_syntax)] -#![feature(collection_placement)] #![feature(const_fn)] #![feature(drain_filter)] #![feature(exact_size_is_empty)] -#![feature(iterator_step_by)] #![feature(pattern)] -#![feature(placement_in_syntax)] -#![feature(rand)] -#![feature(repr_align)] -#![feature(slice_rotate)] -#![feature(splice)] +#![feature(slice_sort_by_cached_key)] #![feature(str_escape)] -#![feature(string_retain)] +#![feature(try_reserve)] #![feature(unboxed_closures)] -#![feature(unicode)] #![feature(exact_chunks)] +#![feature(repeat_generic_slice)] extern crate alloc_system; -extern crate std_unicode; +extern crate core; extern crate rand; use std::hash::{Hash, Hasher}; use std::collections::hash_map::DefaultHasher; +mod arc; mod binary_heap; mod btree; mod cow_str; mod fmt; mod heap; mod linked_list; +mod rc; mod slice; mod str; mod string; @@ -68,7 +61,7 @@ fn test_boxed_hasher() { 5u32.hash(&mut hasher_1); assert_eq!(ordinary_hash, hasher_1.finish()); - let mut hasher_2 = Box::new(DefaultHasher::new()) as Box; + let mut hasher_2 = Box::new(DefaultHasher::new()) as Box; 5u32.hash(&mut hasher_2); assert_eq!(ordinary_hash, hasher_2.finish()); } diff --git a/src/liballoc/tests/rc.rs b/src/liballoc/tests/rc.rs new file mode 100644 index 000000000000..9ec7c831444d --- /dev/null +++ b/src/liballoc/tests/rc.rs @@ -0,0 +1,55 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::any::Any; +use std::rc::{Rc, Weak}; + +#[test] +fn uninhabited() { + enum Void {} + let mut a = Weak::::new(); + a = a.clone(); + assert!(a.upgrade().is_none()); + + let mut a: Weak = a; // Unsizing + a = a.clone(); + assert!(a.upgrade().is_none()); +} + +#[test] +fn slice() { + let a: Rc<[u32; 3]> = Rc::new([3, 2, 1]); + let a: Rc<[u32]> = a; // Unsizing + let b: Rc<[u32]> = Rc::from(&[3, 2, 1][..]); // Conversion + assert_eq!(a, b); + + // Exercise is_dangling() with a DST + let mut a = Rc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); +} + +#[test] +fn trait_object() { + let a: Rc = Rc::new(4); + let a: Rc = a; // Unsizing + + // Exercise is_dangling() with a DST + let mut a = Rc::downgrade(&a); + a = a.clone(); + assert!(a.upgrade().is_some()); + + let mut b = Weak::::new(); + b = b.clone(); + assert!(b.upgrade().is_none()); + let mut b: Weak = b; // Unsizing + b = b.clone(); + assert!(b.upgrade().is_none()); +} diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs index 1a9d26fd1a29..df5e18a9a184 100644 --- a/src/liballoc/tests/slice.rs +++ b/src/liballoc/tests/slice.rs @@ -8,9 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cell::Cell; use std::cmp::Ordering::{Equal, Greater, Less}; +use std::cmp::Ordering; use std::mem; +use std::panic; use std::rc::Rc; +use std::sync::atomic::Ordering::Relaxed; +use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize}; +use std::thread; use rand::{Rng, thread_rng}; @@ -419,6 +425,14 @@ fn test_sort() { v.sort_by(|a, b| b.cmp(a)); assert!(v.windows(2).all(|w| w[0] >= w[1])); + // Sort in lexicographic order. + let mut v1 = orig.clone(); + let mut v2 = orig.clone(); + v1.sort_by_key(|x| x.to_string()); + v2.sort_by_cached_key(|x| x.to_string()); + assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string())); + assert!(v1 == v2); + // Sort with many pre-sorted runs. 
let mut v = orig.clone(); v.sort(); @@ -471,7 +485,7 @@ fn test_sort_stability() { // the second item represents which occurrence of that // number this element is, i.e. the second elements // will occur in sorted order. - let mut v: Vec<_> = (0..len) + let mut orig: Vec<_> = (0..len) .map(|_| { let n = thread_rng().gen::() % 10; counts[n] += 1; @@ -479,16 +493,21 @@ fn test_sort_stability() { }) .collect(); - // only sort on the first element, so an unstable sort + let mut v = orig.clone(); + // Only sort on the first element, so an unstable sort // may mix up the counts. v.sort_by(|&(a, _), &(b, _)| a.cmp(&b)); - // this comparison includes the count (the second item + // This comparison includes the count (the second item // of the tuple), so elements with equal first items // will need to be ordered with increasing // counts... i.e. exactly asserting that this sort is // stable. assert!(v.windows(2).all(|w| w[0] <= w[1])); + + let mut v = orig.clone(); + v.sort_by_cached_key(|&(x, _)| x); + assert!(v.windows(2).all(|w| w[0] <= w[1])); } } } @@ -590,6 +609,15 @@ fn test_join() { assert_eq!(v.join(&0), [1, 0, 2, 0, 3]); } +#[test] +fn test_join_nocopy() { + let v: [String; 0] = []; + assert_eq!(v.join(","), ""); + assert_eq!(["a".to_string(), "ab".into()].join(","), "a,ab"); + assert_eq!(["a".to_string(), "ab".into(), "abc".into()].join(","), "a,ab,abc"); + assert_eq!(["a".to_string(), "ab".into(), "".into()].join(","), "a,ab,"); +} + #[test] fn test_insert() { let mut a = vec![1, 2, 4]; @@ -1263,6 +1291,7 @@ fn test_box_slice_clone() { } #[test] +#[allow(unused_must_use)] // here, we care about the side effects of `.clone()` #[cfg_attr(target_os = "emscripten", ignore)] fn test_box_slice_clone_panics() { use std::sync::Arc; @@ -1341,3 +1370,173 @@ fn test_copy_from_slice_dst_shorter() { let mut dst = [0; 3]; dst.copy_from_slice(&src); } + +const MAX_LEN: usize = 80; + +static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [ + // FIXME(RFC 1109): AtomicUsize is not Copy. 
+ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), + AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), +]; + +static VERSIONS: AtomicUsize = ATOMIC_USIZE_INIT; + +#[derive(Clone, Eq)] +struct DropCounter { + x: u32, + id: usize, + version: Cell, +} + +impl PartialEq for DropCounter { + fn eq(&self, other: &Self) -> bool { + self.partial_cmp(other) == Some(Ordering::Equal) + } +} 
+ +impl PartialOrd for DropCounter { + fn partial_cmp(&self, other: &Self) -> Option { + self.version.set(self.version.get() + 1); + other.version.set(other.version.get() + 1); + VERSIONS.fetch_add(2, Relaxed); + self.x.partial_cmp(&other.x) + } +} + +impl Ord for DropCounter { + fn cmp(&self, other: &Self) -> Ordering { + self.partial_cmp(other).unwrap() + } +} + +impl Drop for DropCounter { + fn drop(&mut self) { + DROP_COUNTS[self.id].fetch_add(1, Relaxed); + VERSIONS.fetch_sub(self.version.get(), Relaxed); + } +} + +macro_rules! test { + ($input:ident, $func:ident) => { + let len = $input.len(); + + // Work out the total number of comparisons required to sort + // this array... + let mut count = 0usize; + $input.to_owned().$func(|a, b| { count += 1; a.cmp(b) }); + + // ... and then panic on each and every single one. + for panic_countdown in 0..count { + // Refresh the counters. + VERSIONS.store(0, Relaxed); + for i in 0..len { + DROP_COUNTS[i].store(0, Relaxed); + } + + let v = $input.to_owned(); + let _ = thread::spawn(move || { + let mut v = v; + let mut panic_countdown = panic_countdown; + v.$func(|a, b| { + if panic_countdown == 0 { + SILENCE_PANIC.with(|s| s.set(true)); + panic!(); + } + panic_countdown -= 1; + a.cmp(b) + }) + }).join(); + + // Check that the number of things dropped is exactly + // what we expect (i.e. the contents of `v`). + for (i, c) in DROP_COUNTS.iter().enumerate().take(len) { + let count = c.load(Relaxed); + assert!(count == 1, + "found drop count == {} for i == {}, len == {}", + count, i, len); + } + + // Check that the most recent versions of values were dropped. 
+ assert_eq!(VERSIONS.load(Relaxed), 0); + } + } +} + +thread_local!(static SILENCE_PANIC: Cell = Cell::new(false)); + +#[test] +#[cfg_attr(target_os = "emscripten", ignore)] // no threads +fn panic_safe() { + let prev = panic::take_hook(); + panic::set_hook(Box::new(move |info| { + if !SILENCE_PANIC.with(|s| s.get()) { + prev(info); + } + })); + + let mut rng = thread_rng(); + + for len in (1..20).chain(70..MAX_LEN) { + for &modulus in &[5, 20, 50] { + for &has_runs in &[false, true] { + let mut input = (0..len) + .map(|id| { + DropCounter { + x: rng.next_u32() % modulus, + id: id, + version: Cell::new(0), + } + }) + .collect::>(); + + if has_runs { + for c in &mut input { + c.x = c.id as u32; + } + + for _ in 0..5 { + let a = rng.gen::() % len; + let b = rng.gen::() % len; + if a < b { + input[a..b].reverse(); + } else { + input.swap(a, b); + } + } + } + + test!(input, sort_by); + test!(input, sort_unstable_by); + } + } + } +} + +#[test] +fn repeat_generic_slice() { + assert_eq!([1, 2].repeat(2), vec![1, 2, 1, 2]); + assert_eq!([1, 2, 3, 4].repeat(0), vec![]); + assert_eq!([1, 2, 3, 4].repeat(1), vec![1, 2, 3, 4]); + assert_eq!( + [1, 2, 3, 4].repeat(3), + vec![1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4] + ); +} diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs index a14a5d32738b..6275c7bb1120 100644 --- a/src/liballoc/tests/str.rs +++ b/src/liballoc/tests/str.rs @@ -162,11 +162,24 @@ fn test_join_for_different_lengths() { test_join!("-a-bc", ["", "a", "bc"], "-"); } +// join has fast paths for small separators up to 4 bytes +// this tests the slow paths. 
+#[test] +fn test_join_for_different_lengths_with_long_separator() { + assert_eq!("~~~~~".len(), 15); + + let empty: &[&str] = &[]; + test_join!("", empty, "~~~~~"); + test_join!("a", ["a"], "~~~~~"); + test_join!("a~~~~~b", ["a", "b"], "~~~~~"); + test_join!("~~~~~a~~~~~bc", ["", "a", "bc"], "~~~~~"); +} + #[test] fn test_unsafe_slice() { - assert_eq!("ab", unsafe {"abc".slice_unchecked(0, 2)}); - assert_eq!("bc", unsafe {"abc".slice_unchecked(1, 3)}); - assert_eq!("", unsafe {"abc".slice_unchecked(1, 1)}); + assert_eq!("ab", unsafe {"abc".get_unchecked(0..2)}); + assert_eq!("bc", unsafe {"abc".get_unchecked(1..3)}); + assert_eq!("", unsafe {"abc".get_unchecked(1..1)}); fn a_million_letter_a() -> String { let mut i = 0; let mut rs = String::new(); @@ -187,7 +200,7 @@ fn test_unsafe_slice() { } let letters = a_million_letter_a(); assert_eq!(half_a_million_letter_a(), - unsafe { letters.slice_unchecked(0, 500000)}); + unsafe { letters.get_unchecked(0..500000)}); } #[test] @@ -291,114 +304,410 @@ fn test_replace_pattern() { assert_eq!(data.replace(|c| c == 'γ', "😺😺😺"), "abcdαβ😺😺😺δabcdαβ😺😺😺δ"); } -#[test] -fn test_slice() { - assert_eq!("ab", &"abc"[0..2]); - assert_eq!("bc", &"abc"[1..3]); - assert_eq!("", &"abc"[1..1]); - assert_eq!("\u{65e5}", &"\u{65e5}\u{672c}"[0..3]); +// The current implementation of SliceIndex fails to handle methods +// orthogonally from range types; therefore, it is worth testing +// all of the indexing operations on each input. +mod slice_index { + // Test a slicing operation **that should succeed,** + // testing it on all of the indexing methods. + // + // This is not suitable for testing failure on invalid inputs. + macro_rules! 
assert_range_eq { + ($s:expr, $range:expr, $expected:expr) + => { + let mut s: String = $s.to_owned(); + let mut expected: String = $expected.to_owned(); + { + let s: &str = &s; + let expected: &str = &expected; - let data = "ประเทศไทย中华"; - assert_eq!("ป", &data[0..3]); - assert_eq!("ร", &data[3..6]); - assert_eq!("", &data[3..3]); - assert_eq!("华", &data[30..33]); + assert_eq!(&s[$range], expected, "(in assertion for: index)"); + assert_eq!(s.get($range), Some(expected), "(in assertion for: get)"); + unsafe { + assert_eq!( + s.get_unchecked($range), expected, + "(in assertion for: get_unchecked)", + ); + } + } + { + let s: &mut str = &mut s; + let expected: &mut str = &mut expected; - fn a_million_letter_x() -> String { - let mut i = 0; - let mut rs = String::new(); - while i < 100000 { - rs.push_str("华华华华华华华华华华"); - i += 1; + assert_eq!( + &mut s[$range], expected, + "(in assertion for: index_mut)", + ); + assert_eq!( + s.get_mut($range), Some(&mut expected[..]), + "(in assertion for: get_mut)", + ); + unsafe { + assert_eq!( + s.get_unchecked_mut($range), expected, + "(in assertion for: get_unchecked_mut)", + ); + } + } } - rs } - fn half_a_million_letter_x() -> String { - let mut i = 0; - let mut rs = String::new(); - while i < 100000 { - rs.push_str("华华华华华"); - i += 1; + + // Make sure the macro can actually detect bugs, + // because if it can't, then what are we even doing here? 
+ // + // (Be aware this only demonstrates the ability to detect bugs + // in the FIRST method that panics, as the macro is not designed + // to be used in `should_panic`) + #[test] + #[should_panic(expected = "out of bounds")] + fn assert_range_eq_can_fail_by_panic() { + assert_range_eq!("abc", 0..5, "abc"); + } + + // (Be aware this only demonstrates the ability to detect bugs + // in the FIRST method it calls, as the macro is not designed + // to be used in `should_panic`) + #[test] + #[should_panic(expected = "==")] + fn assert_range_eq_can_fail_by_inequality() { + assert_range_eq!("abc", 0..2, "abc"); + } + + // Generates test cases for bad index operations. + // + // This generates `should_panic` test cases for Index/IndexMut + // and `None` test cases for get/get_mut. + macro_rules! panic_cases { + ($( + in mod $case_name:ident { + data: $data:expr; + + // optional: + // + // a similar input for which DATA[input] succeeds, and the corresponding + // output str. This helps validate "critical points" where an input range + // straddles the boundary between valid and invalid. 
+ // (such as the input `len..len`, which is just barely valid) + $( + good: data[$good:expr] == $output:expr; + )* + + bad: data[$bad:expr]; + message: $expect_msg:expr; // must be a literal + } + )*) => {$( + mod $case_name { + #[test] + fn pass() { + let mut v: String = $data.into(); + + $( assert_range_eq!(v, $good, $output); )* + + { + let v: &str = &v; + assert_eq!(v.get($bad), None, "(in None assertion for get)"); + } + + { + let v: &mut str = &mut v; + assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)"); + } + } + + #[test] + #[should_panic(expected = $expect_msg)] + fn index_fail() { + let v: String = $data.into(); + let v: &str = &v; + let _v = &v[$bad]; + } + + #[test] + #[should_panic(expected = $expect_msg)] + fn index_mut_fail() { + let mut v: String = $data.into(); + let v: &mut str = &mut v; + let _v = &mut v[$bad]; + } + } + )*}; + } + + #[test] + fn simple_ascii() { + assert_range_eq!("abc", .., "abc"); + + assert_range_eq!("abc", 0..2, "ab"); + assert_range_eq!("abc", 0..=1, "ab"); + assert_range_eq!("abc", ..2, "ab"); + assert_range_eq!("abc", ..=1, "ab"); + + assert_range_eq!("abc", 1..3, "bc"); + assert_range_eq!("abc", 1..=2, "bc"); + assert_range_eq!("abc", 1..1, ""); + assert_range_eq!("abc", 1..=0, ""); + } + + #[test] + fn simple_unicode() { + // 日本 + assert_range_eq!("\u{65e5}\u{672c}", .., "\u{65e5}\u{672c}"); + + assert_range_eq!("\u{65e5}\u{672c}", 0..3, "\u{65e5}"); + assert_range_eq!("\u{65e5}\u{672c}", 0..=2, "\u{65e5}"); + assert_range_eq!("\u{65e5}\u{672c}", ..3, "\u{65e5}"); + assert_range_eq!("\u{65e5}\u{672c}", ..=2, "\u{65e5}"); + + assert_range_eq!("\u{65e5}\u{672c}", 3..6, "\u{672c}"); + assert_range_eq!("\u{65e5}\u{672c}", 3..=5, "\u{672c}"); + assert_range_eq!("\u{65e5}\u{672c}", 3.., "\u{672c}"); + + let data = "ประเทศไทย中华"; + assert_range_eq!(data, 0..3, "ป"); + assert_range_eq!(data, 3..6, "ร"); + assert_range_eq!(data, 3..3, ""); + assert_range_eq!(data, 30..33, "华"); + + /*0: 中 + 3: 华 + 6: V + 7: i 
+ 8: ệ + 11: t + 12: + 13: N + 14: a + 15: m */ + let ss = "中华Việt Nam"; + assert_range_eq!(ss, 3..6, "华"); + assert_range_eq!(ss, 6..16, "Việt Nam"); + assert_range_eq!(ss, 6..=15, "Việt Nam"); + assert_range_eq!(ss, 6.., "Việt Nam"); + + assert_range_eq!(ss, 0..3, "中"); + assert_range_eq!(ss, 3..7, "华V"); + assert_range_eq!(ss, 3..=6, "华V"); + assert_range_eq!(ss, 3..3, ""); + assert_range_eq!(ss, 3..=2, ""); + } + + #[test] + #[cfg(not(target_arch = "asmjs"))] // hits an OOM + fn simple_big() { + fn a_million_letter_x() -> String { + let mut i = 0; + let mut rs = String::new(); + while i < 100000 { + rs.push_str("华华华华华华华华华华"); + i += 1; + } + rs } - rs + fn half_a_million_letter_x() -> String { + let mut i = 0; + let mut rs = String::new(); + while i < 100000 { + rs.push_str("华华华华华"); + i += 1; + } + rs + } + let letters = a_million_letter_x(); + assert_range_eq!(letters, 0..3 * 500000, half_a_million_letter_x()); + } + + #[test] + #[should_panic] + fn test_slice_fail() { + &"中华Việt Nam"[0..2]; + } + + panic_cases! { + in mod rangefrom_len { + data: "abcdef"; + good: data[6..] == ""; + bad: data[7..]; + message: "out of bounds"; + } + + in mod rangeto_len { + data: "abcdef"; + good: data[..6] == "abcdef"; + bad: data[..7]; + message: "out of bounds"; + } + + in mod rangetoinclusive_len { + data: "abcdef"; + good: data[..=5] == "abcdef"; + bad: data[..=6]; + message: "out of bounds"; + } + + in mod range_len_len { + data: "abcdef"; + good: data[6..6] == ""; + bad: data[7..7]; + message: "out of bounds"; + } + + in mod rangeinclusive_len_len { + data: "abcdef"; + good: data[6..=5] == ""; + bad: data[7..=6]; + message: "out of bounds"; + } + } + + panic_cases! 
{ + in mod range_neg_width { + data: "abcdef"; + good: data[4..4] == ""; + bad: data[4..3]; + message: "begin <= end (4 <= 3)"; + } + + in mod rangeinclusive_neg_width { + data: "abcdef"; + good: data[4..=3] == ""; + bad: data[4..=2]; + message: "begin <= end (4 <= 3)"; + } + } + + mod overflow { + panic_cases! { + in mod rangeinclusive { + data: "hello"; + // note: using 0 specifically ensures that the result of overflowing is 0..0, + // so that `get` doesn't simply return None for the wrong reason. + bad: data[0..=usize::max_value()]; + message: "maximum usize"; + } + + in mod rangetoinclusive { + data: "hello"; + bad: data[..=usize::max_value()]; + message: "maximum usize"; + } + } + } + + mod boundary { + const DATA: &'static str = "abcαβγ"; + + const BAD_START: usize = 4; + const GOOD_START: usize = 3; + const BAD_END: usize = 6; + const GOOD_END: usize = 7; + const BAD_END_INCL: usize = BAD_END - 1; + const GOOD_END_INCL: usize = GOOD_END - 1; + + // it is especially important to test all of the different range types here + // because some of the logic may be duplicated as part of micro-optimizations + // to dodge unicode boundary checks on half-ranges. + panic_cases! 
{ + in mod range_1 { + data: super::DATA; + bad: data[super::BAD_START..super::GOOD_END]; + message: + "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of"; + } + + in mod range_2 { + data: super::DATA; + bad: data[super::GOOD_START..super::BAD_END]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + + in mod rangefrom { + data: super::DATA; + bad: data[super::BAD_START..]; + message: + "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of"; + } + + in mod rangeto { + data: super::DATA; + bad: data[..super::BAD_END]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + + in mod rangeinclusive_1 { + data: super::DATA; + bad: data[super::BAD_START..=super::GOOD_END_INCL]; + message: + "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of"; + } + + in mod rangeinclusive_2 { + data: super::DATA; + bad: data[super::GOOD_START..=super::BAD_END_INCL]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + + in mod rangetoinclusive { + data: super::DATA; + bad: data[..=super::BAD_END_INCL]; + message: + "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of"; + } + } + } + + const LOREM_PARAGRAPH: &'static str = "\ + Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem \ + sit amet dolor ultricies condimentum. Praesent iaculis purus elit, ac malesuada \ + quam malesuada in. Duis sed orci eros. Suspendisse sit amet magna mollis, mollis \ + nunc luctus, imperdiet mi. Integer fringilla non sem ut lacinia. Fusce varius \ + tortor a risus porttitor hendrerit. 
Morbi mauris dui, ultricies nec tempus vel, \ + gravida nec quam."; + + // check the panic includes the prefix of the sliced string + #[test] + #[should_panic(expected="byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")] + fn test_slice_fail_truncated_1() { + &LOREM_PARAGRAPH[..1024]; + } + // check the truncation in the panic message + #[test] + #[should_panic(expected="luctus, im`[...]")] + fn test_slice_fail_truncated_2() { + &LOREM_PARAGRAPH[..1024]; } - let letters = a_million_letter_x(); - assert_eq!(half_a_million_letter_x(), &letters[0..3 * 500000]); } #[test] -fn test_slice_2() { - let ss = "中华Việt Nam"; - - assert_eq!("华", &ss[3..6]); - assert_eq!("Việt Nam", &ss[6..16]); - - assert_eq!("ab", &"abc"[0..2]); - assert_eq!("bc", &"abc"[1..3]); - assert_eq!("", &"abc"[1..1]); - - assert_eq!("中", &ss[0..3]); - assert_eq!("华V", &ss[3..7]); - assert_eq!("", &ss[3..3]); - /*0: 中 - 3: 华 - 6: V - 7: i - 8: ệ - 11: t - 12: - 13: N - 14: a - 15: m */ +fn test_str_slice_rangetoinclusive_ok() { + let s = "abcαβγ"; + assert_eq!(&s[..=2], "abc"); + assert_eq!(&s[..=4], "abcα"); } #[test] #[should_panic] -fn test_slice_fail() { - &"中华Việt Nam"[0..2]; +fn test_str_slice_rangetoinclusive_notok() { + let s = "abcαβγ"; + &s[..=3]; } #[test] -#[should_panic] -fn test_str_slice_rangetoinclusive_max_panics() { - &"hello"[..=usize::max_value()]; -} - -#[test] -#[should_panic] -fn test_str_slice_rangeinclusive_max_panics() { - &"hello"[1..=usize::max_value()]; -} - -#[test] -#[should_panic] -fn test_str_slicemut_rangetoinclusive_max_panics() { - let mut s = "hello".to_owned(); +fn test_str_slicemut_rangetoinclusive_ok() { + let mut s = "abcαβγ".to_owned(); let s: &mut str = &mut s; - &mut s[..=usize::max_value()]; + assert_eq!(&mut s[..=2], "abc"); + assert_eq!(&mut s[..=4], "abcα"); } #[test] #[should_panic] -fn test_str_slicemut_rangeinclusive_max_panics() { - let mut s = "hello".to_owned(); +fn test_str_slicemut_rangetoinclusive_notok() { + let mut s = 
"abcαβγ".to_owned(); let s: &mut str = &mut s; - &mut s[1..=usize::max_value()]; -} - -#[test] -fn test_str_get_maxinclusive() { - let mut s = "hello".to_owned(); - { - let s: &str = &s; - assert_eq!(s.get(..=usize::max_value()), None); - assert_eq!(s.get(1..=usize::max_value()), None); - } - { - let s: &mut str = &mut s; - assert_eq!(s.get(..=usize::max_value()), None); - assert_eq!(s.get(1..=usize::max_value()), None); - } + &mut s[..=3]; } #[test] @@ -416,50 +725,6 @@ fn test_is_char_boundary() { } } } -const LOREM_PARAGRAPH: &'static str = "\ -Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \ -ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \ -eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \ -sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \ -tempus vel, gravida nec quam."; - -// check the panic includes the prefix of the sliced string -#[test] -#[should_panic(expected="byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")] -fn test_slice_fail_truncated_1() { - &LOREM_PARAGRAPH[..1024]; -} -// check the truncation in the panic message -#[test] -#[should_panic(expected="luctus, im`[...]")] -fn test_slice_fail_truncated_2() { - &LOREM_PARAGRAPH[..1024]; -} - -#[test] -#[should_panic(expected="byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of")] -fn test_slice_fail_boundary_1() { - &"abcαβγ"[4..]; -} - -#[test] -#[should_panic(expected="byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of")] -fn test_slice_fail_boundary_2() { - &"abcαβγ"[2..6]; -} - -#[test] -fn test_slice_from() { - assert_eq!(&"abcd"[0..], "abcd"); - assert_eq!(&"abcd"[2..], "cd"); - assert_eq!(&"abcd"[4..], ""); -} -#[test] -fn test_slice_to() { - assert_eq!(&"abcd"[..0], ""); - assert_eq!(&"abcd"[..2], "ab"); - assert_eq!(&"abcd"[..4], "abcd"); -} 
#[test] fn test_trim_left_matches() { @@ -737,6 +1002,12 @@ fn test_escape_unicode() { #[test] fn test_escape_debug() { + // Note that there are subtleties with the number of backslashes + // on the left- and right-hand sides. In particular, Unicode code points + // are usually escaped with two backslashes on the right-hand side, as + // they are escaped. However, when the character is unescaped (e.g. for + // printable characters), only a single backslash appears (as the character + // itself appears in the debug string). assert_eq!("abc".escape_debug(), "abc"); assert_eq!("a c".escape_debug(), "a c"); assert_eq!("éèê".escape_debug(), "éèê"); @@ -747,6 +1018,7 @@ fn test_escape_debug() { assert_eq!("\u{10000}\u{10ffff}".escape_debug(), "\u{10000}\\u{10ffff}"); assert_eq!("ab\u{200b}".escape_debug(), "ab\\u{200b}"); assert_eq!("\u{10d4ea}\r".escape_debug(), "\\u{10d4ea}\\r"); + assert_eq!("\u{301}a\u{301}bé\u{e000}".escape_debug(), "\\u{301}a\u{301}bé\\u{e000}"); } #[test] @@ -1054,6 +1326,7 @@ fn test_str_default() { t::<&str>(); t::(); + t::<&mut str>(); } #[test] @@ -1204,8 +1477,7 @@ fn test_rev_split_char_iterator_no_trailing() { #[test] fn test_utf16_code_units() { - use std_unicode::str::Utf16Encoder; - assert_eq!(Utf16Encoder::new(vec!['é', '\u{1F4A9}'].into_iter()).collect::>(), + assert_eq!("é\u{1F4A9}".encode_utf16().collect::>(), [0xE9, 0xD83D, 0xDCA9]) } diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs index ef6f5e10a72d..befb36baeef1 100644 --- a/src/liballoc/tests/string.rs +++ b/src/liballoc/tests/string.rs @@ -9,6 +9,9 @@ // except according to those terms. 
use std::borrow::Cow; +use std::collections::CollectionAllocErr::*; +use std::mem::size_of; +use std::{usize, isize}; pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { fn into_cow(self) -> Cow<'a, B>; @@ -129,7 +132,7 @@ fn test_from_utf16() { let s_as_utf16 = s.encode_utf16().collect::>(); let u_as_string = String::from_utf16(&u).unwrap(); - assert!(::std_unicode::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok())); + assert!(::core::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok())); assert_eq!(s_as_utf16, u); assert_eq!(u_as_string, s); @@ -440,53 +443,53 @@ fn test_drain() { } #[test] -fn test_splice() { +fn test_replace_range() { let mut s = "Hello, world!".to_owned(); - s.splice(7..12, "世界"); + s.replace_range(7..12, "世界"); assert_eq!(s, "Hello, 世界!"); } #[test] #[should_panic] -fn test_splice_char_boundary() { +fn test_replace_range_char_boundary() { let mut s = "Hello, 世界!".to_owned(); - s.splice(..8, ""); + s.replace_range(..8, ""); } #[test] -fn test_splice_inclusive_range() { +fn test_replace_range_inclusive_range() { let mut v = String::from("12345"); - v.splice(2..=3, "789"); + v.replace_range(2..=3, "789"); assert_eq!(v, "127895"); - v.splice(1..=2, "A"); + v.replace_range(1..=2, "A"); assert_eq!(v, "1A895"); } #[test] #[should_panic] -fn test_splice_out_of_bounds() { +fn test_replace_range_out_of_bounds() { let mut s = String::from("12345"); - s.splice(5..6, "789"); + s.replace_range(5..6, "789"); } #[test] #[should_panic] -fn test_splice_inclusive_out_of_bounds() { +fn test_replace_range_inclusive_out_of_bounds() { let mut s = String::from("12345"); - s.splice(5..=5, "789"); + s.replace_range(5..=5, "789"); } #[test] -fn test_splice_empty() { +fn test_replace_range_empty() { let mut s = String::from("12345"); - s.splice(1..2, ""); + s.replace_range(1..2, ""); assert_eq!(s, "1345"); } #[test] -fn test_splice_unbounded() { +fn test_replace_range_unbounded() { let mut s = String::from("12345"); - s.splice(.., ""); + s.replace_range(.., 
""); assert_eq!(s, ""); } @@ -504,3 +507,163 @@ fn test_into_boxed_str() { let ys = xs.into_boxed_str(); assert_eq!(&*ys, "hello my name is bob"); } + +#[test] +fn test_reserve_exact() { + // This is all the same as test_reserve + + let mut s = String::new(); + assert_eq!(s.capacity(), 0); + + s.reserve_exact(2); + assert!(s.capacity() >= 2); + + for _i in 0..16 { + s.push('0'); + } + + assert!(s.capacity() >= 16); + s.reserve_exact(16); + assert!(s.capacity() >= 32); + + s.push('0'); + + s.reserve_exact(16); + assert!(s.capacity() >= 33) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of:: (to get bytes) + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. + let guards_against_isize = size_of::() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_string: String = String::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! 
(just to be sure) + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX + 1 is an OOM + if let Err(AllocErr) = empty_string.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + // Check usize::MAX is an OOM + if let Err(AllocErr) = empty_string.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: String = String::from("0123456789"); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. 
+ + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::() < 8; + + { + let mut empty_string: String = String::new(); + + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + if let Err(AllocErr) = empty_string.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + if let Err(AllocErr) = empty_string.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: String = String::from("0123456789"); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs index 9cfde5dcc73c..e329b45a6175 100644 --- a/src/liballoc/tests/vec.rs +++ 
b/src/liballoc/tests/vec.rs @@ -10,8 +10,9 @@ use std::borrow::Cow; use std::mem::size_of; -use std::panic; +use std::{usize, isize}; use std::vec::{Drain, IntoIter}; +use std::collections::CollectionAllocErr::*; struct DropCounter<'a> { count: &'a mut u32, @@ -752,24 +753,6 @@ fn assert_covariance() { } } -#[test] -fn test_placement() { - let mut vec = vec![1]; - assert_eq!(vec.place_back() <- 2, &2); - assert_eq!(vec.len(), 2); - assert_eq!(vec.place_back() <- 3, &3); - assert_eq!(vec.len(), 3); - assert_eq!(&vec, &[1, 2, 3]); -} - -#[test] -fn test_placement_panic() { - let mut vec = vec![1, 2, 3]; - fn mkpanic() -> usize { panic!() } - let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| { vec.place_back() <- mkpanic(); })); - assert_eq!(vec.len(), 3); -} - #[test] fn from_into_inner() { let vec = vec![1, 2, 3]; @@ -965,3 +948,209 @@ fn drain_filter_complex() { assert_eq!(vec, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]); } } + +#[test] +fn test_reserve_exact() { + // This is all the same as test_reserve + + let mut v = Vec::new(); + assert_eq!(v.capacity(), 0); + + v.reserve_exact(2); + assert!(v.capacity() >= 2); + + for i in 0..16 { + v.push(i); + } + + assert!(v.capacity() >= 16); + v.reserve_exact(16); + assert!(v.capacity() >= 32); + + v.push(16); + + v.reserve_exact(16); + assert!(v.capacity() >= 33) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of:: (to get bytes) + + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. 
+ // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. + let guards_against_isize = size_of::() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_bytes: Vec = Vec::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! (just to be sure) + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX + 1 is an OOM + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + // Check usize::MAX is an OOM + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 
should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + // Same basic idea, but with interesting type size + let mut ten_u32s: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should fail in the mul-by-size + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. 
+ + const MAX_CAP: usize = isize::MAX as usize; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::() < 8; + + { + let mut empty_bytes: Vec = Vec::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + if let Err(AllocErr) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + + if let Err(AllocErr) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + let mut ten_u32s: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; + + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + 
panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + +} diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs index f2935c05d4f7..3ea6c87a6516 100644 --- a/src/liballoc/tests/vec_deque.rs +++ b/src/liballoc/tests/vec_deque.rs @@ -11,6 +11,9 @@ use std::collections::VecDeque; use std::fmt::Debug; use std::collections::vec_deque::{Drain}; +use std::collections::CollectionAllocErr::*; +use std::mem::size_of; +use std::{usize, isize}; use self::Taggy::*; use self::Taggypar::*; @@ -925,6 +928,107 @@ fn test_append() { assert_eq!(a.iter().cloned().collect::>(), []); } +#[test] +fn test_append_permutations() { + fn construct_vec_deque( + push_back: usize, + pop_back: usize, + push_front: usize, + pop_front: usize, + ) -> VecDeque { + let mut out = VecDeque::new(); + for a in 0..push_back { + out.push_back(a); + } + for b in 0..push_front { + out.push_front(push_back + b); + } + for _ in 0..pop_back { + out.pop_back(); + } + for _ in 0..pop_front { + out.pop_front(); + } + out + } + + const MAX: usize = 5; + + // Many different permutations of both the `VecDeque` getting appended to + // and the one getting appended are generated to check `append`. + // This ensures all 6 code paths of `append` are tested. 
+ for src_push_back in 0..MAX { + for src_push_front in 0..MAX { + // doesn't pop more values than are pushed + for src_pop_back in 0..(src_push_back + src_push_front) { + for src_pop_front in 0..(src_push_back + src_push_front - src_pop_back) { + + let src = construct_vec_deque( + src_push_back, + src_pop_back, + src_push_front, + src_pop_front, + ); + + for dst_push_back in 0..MAX { + for dst_push_front in 0..MAX { + for dst_pop_back in 0..(dst_push_back + dst_push_front) { + for dst_pop_front + in 0..(dst_push_back + dst_push_front - dst_pop_back) + { + let mut dst = construct_vec_deque( + dst_push_back, + dst_pop_back, + dst_push_front, + dst_pop_front, + ); + let mut src = src.clone(); + + // Assert that appending `src` to `dst` gives the same order + // of values as iterating over both in sequence. + let correct = dst + .iter() + .chain(src.iter()) + .cloned() + .collect::>(); + dst.append(&mut src); + assert_eq!(dst, correct); + assert!(src.is_empty()); + } + } + } + } + } + } + } + } +} + +struct DropCounter<'a> { + count: &'a mut u32, +} + +impl<'a> Drop for DropCounter<'a> { + fn drop(&mut self) { + *self.count += 1; + } +} + +#[test] +fn test_append_double_drop() { + let (mut count_a, mut count_b) = (0, 0); + { + let mut a = VecDeque::new(); + let mut b = VecDeque::new(); + a.push_back(DropCounter { count: &mut count_a }); + b.push_back(DropCounter { count: &mut count_b }); + + a.append(&mut b); + } + assert_eq!(count_a, 1); + assert_eq!(count_b, 1); +} + #[test] fn test_retain() { let mut buf = VecDeque::new(); @@ -1002,23 +1106,206 @@ fn test_is_empty() { } #[test] -fn test_placement_in() { - let mut buf: VecDeque = VecDeque::new(); - buf.place_back() <- 1; - buf.place_back() <- 2; - assert_eq!(buf, [1,2]); +fn test_reserve_exact_2() { + // This is all the same as test_reserve - buf.place_front() <- 3; - buf.place_front() <- 4; - assert_eq!(buf, [4,3,1,2]); + let mut v = VecDeque::new(); - { - let ptr_head = buf.place_front() <- 5; - 
assert_eq!(*ptr_head, 5); + v.reserve_exact(2); + assert!(v.capacity() >= 2); + + for i in 0..16 { + v.push_back(i); } - { - let ptr_tail = buf.place_back() <- 6; - assert_eq!(*ptr_tail, 6); - } - assert_eq!(buf, [5,4,3,1,2,6]); + + assert!(v.capacity() >= 16); + v.reserve_exact(16); + assert!(v.capacity() >= 32); + + v.push_back(16); + + v.reserve_exact(16); + assert!(v.capacity() >= 48) +} + +#[test] +fn test_try_reserve() { + + // These are the interesting cases: + // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM) + // * > isize::MAX should always fail + // * On 16/32-bit should CapacityOverflow + // * On 64-bit should OOM + // * overflow may trigger when adding `len` to `cap` (in number of elements) + // * overflow may trigger when multiplying `new_cap` by size_of:: (to get bytes) + + const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; + const MAX_USIZE: usize = usize::MAX; + + // On 16/32-bit, we check that allocations don't exceed isize::MAX, + // on 64-bit, we assume the OS will give an OOM for such a ridiculous size. + // Any platform that succeeds for these requests is technically broken with + // ptr::offset because LLVM is the worst. + let guards_against_isize = size_of::() < 8; + + { + // Note: basic stuff is checked by test_reserve + let mut empty_bytes: VecDeque = VecDeque::new(); + + // Check isize::MAX doesn't count as an overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + // Play it again, frank! 
(just to be sure) + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + // Check isize::MAX + 1 does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + // Check usize::MAX does count as overflow + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX is an OOM + // VecDeque starts with capacity 7, always adds 1 to the capacity + // and also rounds the number to next power of 2 so this is the + // furthest we can go without triggering CapacityOverflow + if let Err(AllocErr) = empty_bytes.try_reserve(MAX_CAP) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + } + + + { + // Same basic idea, but with non-zero len + let mut ten_bytes: VecDeque = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should always overflow in the add-to-len + if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + // Same basic idea, but with interesting type size + let mut ten_u32s: VecDeque = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = 
ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + // Should fail in the mul-by-size + if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_USIZE - 20) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + } + +} + +#[test] +fn test_try_reserve_exact() { + + // This is exactly the same as test_try_reserve with the method changed. + // See that test for comments. + + const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1; + const MAX_USIZE: usize = usize::MAX; + + let guards_against_isize = size_of::() < 8; + + { + let mut empty_bytes: VecDeque = VecDeque::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + + if guards_against_isize { + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP + 1) { + } else { panic!("isize::MAX + 1 should trigger an overflow!") } + + if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } else { + // Check isize::MAX is an OOM + // VecDeque starts with capacity 7, always adds 1 to the capacity + // and also rounds the number to next power of 2 so this is the + // furthest we can go without triggering CapacityOverflow + if let Err(AllocErr) = empty_bytes.try_reserve_exact(MAX_CAP) { + } else { panic!("isize::MAX + 1 
should trigger an OOM!") } + } + } + + + { + let mut ten_bytes: VecDeque = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_bytes.try_reserve_exact(MAX_CAP - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_bytes.try_reserve_exact(MAX_USIZE) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + + + { + let mut ten_u32s: VecDeque = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect(); + + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 10) { + panic!("isize::MAX shouldn't trigger an overflow!"); + } + if guards_against_isize { + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an overflow!"); } + } else { + if let Err(AllocErr) = ten_u32s.try_reserve_exact(MAX_CAP/4 - 9) { + } else { panic!("isize::MAX + 1 should trigger an OOM!") } + } + if let Err(CapacityOverflow) = ten_u32s.try_reserve_exact(MAX_USIZE - 20) { + } else { panic!("usize::MAX should trigger an overflow!") } + } + } diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index 301e44632b82..cc913dfbb4b0 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -66,27 +66,25 @@ #![stable(feature = "rust1", since = "1.0.0")] -use core::cmp::Ordering; +use core::cmp::{self, Ordering}; use core::fmt; use 
core::hash::{self, Hash}; use core::intrinsics::{arith_offset, assume}; use core::iter::{FromIterator, FusedIterator, TrustedLen}; use core::marker::PhantomData; use core::mem; -#[cfg(not(test))] -use core::num::Float; -use core::ops::{InPlace, Index, IndexMut, Place, Placer}; +use core::ops::Bound::{Excluded, Included, Unbounded}; +use core::ops::{Index, IndexMut, RangeBounds}; use core::ops; use core::ptr; -use core::ptr::Shared; +use core::ptr::NonNull; use core::slice; +use collections::CollectionAllocErr; use borrow::ToOwned; use borrow::Cow; use boxed::Box; use raw_vec::RawVec; -use super::range::RangeArgument; -use Bound::{Excluded, Included, Unbounded}; /// A contiguous growable array type, written `Vec` but pronounced 'vector'. /// @@ -231,9 +229,9 @@ use Bound::{Excluded, Included, Unbounded}; /// /// If a `Vec` *has* allocated memory, then the memory it points to is on the heap /// (as defined by the allocator Rust is configured to use by default), and its -/// pointer points to [`len`] initialized elements in order (what you would see -/// if you coerced it to a slice), followed by [`capacity`]` - `[`len`] -/// logically uninitialized elements. +/// pointer points to [`len`] initialized, contiguous elements in order (what +/// you would see if you coerced it to a slice), followed by [`capacity`]` - +/// `[`len`] logically uninitialized, contiguous elements. /// /// `Vec` will never perform a "small optimization" where elements are actually /// stored on the stack for two reasons: @@ -281,8 +279,8 @@ use Bound::{Excluded, Included, Unbounded}; /// not break, however: using `unsafe` code to write to the excess capacity, /// and then increasing the length to match, is always valid. /// -/// `Vec` does not currently guarantee the order in which elements are dropped -/// (the order has changed in the past, and may change again). +/// `Vec` does not currently guarantee the order in which elements are dropped. 
+/// The order has changed in the past and may change again. /// /// [`vec!`]: ../../std/macro.vec.html /// [`Index`]: ../../std/ops/trait.Index.html @@ -321,7 +319,8 @@ impl Vec { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new() -> Vec { + #[rustc_const_unstable(feature = "const_vec_new")] + pub const fn new() -> Vec { Vec { buf: RawVec::new(), len: 0, @@ -333,9 +332,10 @@ impl Vec { /// The vector will be able to hold exactly `capacity` elements without /// reallocating. If `capacity` is 0, the vector will not allocate. /// - /// It is important to note that this function does not specify the *length* - /// of the returned vector, but only the *capacity*. For an explanation of - /// the difference between length and capacity, see *[Capacity and reallocation]*. + /// It is important to note that although the returned vector has the + /// *capacity* specified, the vector will have a zero *length*. For an + /// explanation of the difference between length and capacity, see + /// *[Capacity and reallocation]*. /// /// [Capacity and reallocation]: #capacity-and-reallocation /// @@ -489,6 +489,83 @@ impl Vec { self.buf.reserve_exact(self.len, additional); } + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `Vec`. The collection may reserve more space to avoid + /// frequent reallocations. After calling `reserve`, capacity will be + /// greater than or equal to `self.len() + additional`. Does nothing if + /// capacity is already sufficient. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &[u32]) -> Result, CollectionAllocErr> { + /// let mut output = Vec::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.extend(data.iter().map(|&val| { + /// val * 2 + 5 // very complicated + /// })); + /// + /// Ok(output) + /// } + /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.buf.try_reserve(self.len, additional) + } + + /// Tries to reserves the minimum capacity for exactly `additional` more elements to + /// be inserted in the given `Vec`. After calling `reserve_exact`, + /// capacity will be greater than or equal to `self.len() + additional`. + /// Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it + /// requests. Therefore capacity can not be relied upon to be precisely + /// minimal. Prefer `reserve` if future insertions are expected. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(try_reserve)] + /// use std::collections::CollectionAllocErr; + /// + /// fn process_data(data: &[u32]) -> Result, CollectionAllocErr> { + /// let mut output = Vec::new(); + /// + /// // Pre-reserve the memory, exiting if we can't + /// output.try_reserve(data.len())?; + /// + /// // Now we know this can't OOM in the middle of our complex work + /// output.extend(data.iter().map(|&val| { + /// val * 2 + 5 // very complicated + /// })); + /// + /// Ok(output) + /// } + /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?"); + /// ``` + #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.buf.try_reserve_exact(self.len, additional) + } + /// Shrinks the capacity of the vector as much as possible. /// /// It will drop down as close as possible to the length but the allocator @@ -505,7 +582,34 @@ impl Vec { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn shrink_to_fit(&mut self) { - self.buf.shrink_to_fit(self.len); + if self.capacity() != self.len { + self.buf.shrink_to_fit(self.len); + } + } + + /// Shrinks the capacity of the vector with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// let mut vec = Vec::with_capacity(10); + /// vec.extend([1, 2, 3].iter().cloned()); + /// assert_eq!(vec.capacity(), 10); + /// vec.shrink_to(4); + /// assert!(vec.capacity() >= 4); + /// vec.shrink_to(0); + /// assert!(vec.capacity() >= 3); + /// ``` + #[unstable(feature = "shrink_to", reason = "new API", issue="0")] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.buf.shrink_to_fit(cmp::max(self.len, min_capacity)); } /// Converts the vector into [`Box<[T]>`][owned slice]. @@ -586,14 +690,20 @@ impl Vec { /// [`drain`]: #method.drain #[stable(feature = "rust1", since = "1.0.0")] pub fn truncate(&mut self, len: usize) { + let current_len = self.len; unsafe { + let mut ptr = self.as_mut_ptr().offset(self.len as isize); + // Set the final length at the end, keeping in mind that + // dropping an element might panic. Works around a missed + // optimization, as seen in the following issue: + // https://github.com/rust-lang/rust/issues/51802 + let mut local_len = SetLenOnDrop::new(&mut self.len); + // drop any extra elements - while len < self.len { - // decrement len before the drop_in_place(), so a panic on Drop - // doesn't re-drop the just-failed value. - self.len -= 1; - let len = self.len; - ptr::drop_in_place(self.get_unchecked_mut(len)); + for _ in len..current_len { + local_len.decrement_len(1); + ptr = ptr.offset(-1); + ptr::drop_in_place(ptr); } } } @@ -705,9 +815,15 @@ impl Vec { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn swap_remove(&mut self, index: usize) -> T { - let length = self.len(); - self.swap(index, length - 1); - self.pop().unwrap() + unsafe { + // We replace self[index] with the last element. Note that if the + // bounds check on hole succeeds there must be a last element (which + // can be self[index] itself). 
+ let hole: *mut T = &mut self[index]; + let last = ptr::read(self.get_unchecked(self.len - 1)); + self.len -= 1; + ptr::replace(hole, last) + } } /// Inserts an element at position `index` within the vector, shifting all @@ -733,7 +849,7 @@ impl Vec { // space for the new element if len == self.buf.cap() { - self.buf.double(); + self.reserve(1); } unsafe { @@ -805,22 +921,7 @@ impl Vec { pub fn retain(&mut self, mut f: F) where F: FnMut(&T) -> bool { - let len = self.len(); - let mut del = 0; - { - let v = &mut **self; - - for i in 0..len { - if !f(&v[i]) { - del += 1; - } else if del > 0 { - v.swap(i - del, i); - } - } - } - if del > 0 { - self.truncate(len - del); - } + self.drain_filter(|x| !f(x)); } /// Removes all but the first of consecutive elements in the vector that resolve to the same @@ -968,7 +1069,7 @@ impl Vec { // This will panic or abort if we would allocate > isize::MAX bytes // or if the length increment would overflow for zero-sized types. if self.len == self.buf.cap() { - self.buf.double(); + self.reserve(1); } unsafe { let end = self.as_mut_ptr().offset(self.len as isize); @@ -977,29 +1078,6 @@ impl Vec { } } - /// Returns a place for insertion at the back of the `Vec`. - /// - /// Using this method with placement syntax is equivalent to [`push`](#method.push), - /// but may be more efficient. - /// - /// # Examples - /// - /// ``` - /// #![feature(collection_placement)] - /// #![feature(placement_in_syntax)] - /// - /// let mut vec = vec![1, 2]; - /// vec.place_back() <- 3; - /// vec.place_back() <- 4; - /// assert_eq!(&vec, &[1, 2, 3, 4]); - /// ``` - #[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] - pub fn place_back(&mut self) -> PlaceBack { - PlaceBack { vec: self } - } - /// Removes the last element from a vector and returns it, or [`None`] if it /// is empty. 
/// @@ -1087,7 +1165,7 @@ impl Vec { /// ``` #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self, range: R) -> Drain - where R: RangeArgument + where R: RangeBounds { // Memory safety // @@ -1100,12 +1178,12 @@ impl Vec { // the hole, and the vector length is restored to the new length. // let len = self.len(); - let start = match range.start() { + let start = match range.start_bound() { Included(&n) => n, Excluded(&n) => n + 1, Unbounded => 0, }; - let end = match range.end() { + let end = match range.end_bound() { Included(&n) => n + 1, Excluded(&n) => n, Unbounded => len, @@ -1124,7 +1202,7 @@ impl Vec { tail_start: end, tail_len: len - end, iter: range_slice.iter(), - vec: Shared::from(self), + vec: NonNull::from(self), } } } @@ -1218,6 +1296,49 @@ impl Vec { } other } + + /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. + /// + /// If `new_len` is greater than `len`, the `Vec` is extended by the + /// difference, with each additional slot filled with the result of + /// calling the closure `f`. The return values from `f` will end up + /// in the `Vec` in the order they have been generated. + /// + /// If `new_len` is less than `len`, the `Vec` is simply truncated. + /// + /// This method uses a closure to create new values on every push. If + /// you'd rather [`Clone`] a given value, use [`resize`]. If you want + /// to use the [`Default`] trait to generate values, you can pass + /// [`Default::default()`] as the second argument.. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(vec_resize_with)] + /// + /// let mut vec = vec![1, 2, 3]; + /// vec.resize_with(5, Default::default); + /// assert_eq!(vec, [1, 2, 3, 0, 0]); + /// + /// let mut vec = vec![]; + /// let mut p = 1; + /// vec.resize_with(4, || { p *= 2; p }); + /// assert_eq!(vec, [2, 4, 8, 16]); + /// ``` + /// + /// [`resize`]: #method.resize + /// [`Clone`]: ../../std/clone/trait.Clone.html + #[unstable(feature = "vec_resize_with", issue = "41758")] + pub fn resize_with(&mut self, new_len: usize, f: F) + where F: FnMut() -> T + { + let len = self.len(); + if new_len > len { + self.extend_with(new_len - len, ExtendFunc(f)); + } else { + self.truncate(new_len); + } + } } impl Vec { @@ -1227,8 +1348,9 @@ impl Vec { /// difference, with each additional slot filled with `value`. /// If `new_len` is less than `len`, the `Vec` is simply truncated. /// - /// This method requires `Clone` to clone the passed value. If you'd - /// rather create a value with `Default` instead, see [`resize_default`]. + /// This method requires [`Clone`] to be able clone the passed value. If + /// you need more flexibility (or want to rely on [`Default`] instead of + /// [`Clone`]), use [`resize_with`]. /// /// # Examples /// @@ -1242,7 +1364,9 @@ impl Vec { /// assert_eq!(vec, [1, 2]); /// ``` /// - /// [`resize_default`]: #method.resize_default + /// [`Clone`]: ../../std/clone/trait.Clone.html + /// [`Default`]: ../../std/default/trait.Default.html + /// [`resize_with`]: #method.resize_with #[stable(feature = "vec_resize", since = "1.5.0")] pub fn resize(&mut self, new_len: usize, value: T) { let len = self.len(); @@ -1259,7 +1383,7 @@ impl Vec { /// Iterates over the slice `other`, clones each element, and then appends /// it to this `Vec`. The `other` vector is traversed in-order. 
/// - /// Note that this function is same as `extend` except that it is + /// Note that this function is same as [`extend`] except that it is /// specialized to work with slices instead. If and when Rust gets /// specialization this function will likely be deprecated (but still /// available). @@ -1271,6 +1395,8 @@ impl Vec { /// vec.extend_from_slice(&[2, 3, 4]); /// assert_eq!(vec, [1, 2, 3, 4]); /// ``` + /// + /// [`extend`]: #method.extend #[stable(feature = "vec_extend_from_slice", since = "1.6.0")] pub fn extend_from_slice(&mut self, other: &[T]) { self.spec_extend(other.iter()) @@ -1281,12 +1407,11 @@ impl Vec { /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. /// /// If `new_len` is greater than `len`, the `Vec` is extended by the - /// difference, with each additional slot filled with `Default::default()`. + /// difference, with each additional slot filled with [`Default::default()`]. /// If `new_len` is less than `len`, the `Vec` is simply truncated. /// - /// This method uses `Default` to create new values on every push. If - /// you'd rather `Clone` a given value, use [`resize`]. - /// + /// This method uses [`Default`] to create new values on every push. If + /// you'd rather [`Clone`] a given value, use [`resize`]. /// /// # Examples /// @@ -1303,6 +1428,9 @@ impl Vec { /// ``` /// /// [`resize`]: #method.resize + /// [`Default::default()`]: ../../std/default/trait.Default.html#tymethod.default + /// [`Default`]: ../../std/default/trait.Default.html + /// [`Clone`]: ../../std/clone/trait.Clone.html #[unstable(feature = "vec_resize_default", issue = "41758")] pub fn resize_default(&mut self, new_len: usize) { let len = self.len(); @@ -1317,24 +1445,31 @@ impl Vec { // This code generalises `extend_with_{element,default}`. 
trait ExtendWith { - fn next(&self) -> T; + fn next(&mut self) -> T; fn last(self) -> T; } struct ExtendElement(T); impl ExtendWith for ExtendElement { - fn next(&self) -> T { self.0.clone() } + fn next(&mut self) -> T { self.0.clone() } fn last(self) -> T { self.0 } } struct ExtendDefault; impl ExtendWith for ExtendDefault { - fn next(&self) -> T { Default::default() } + fn next(&mut self) -> T { Default::default() } fn last(self) -> T { Default::default() } } + +struct ExtendFunc(F); +impl T> ExtendWith for ExtendFunc { + fn next(&mut self) -> T { (self.0)() } + fn last(mut self) -> T { (self.0)() } +} + impl Vec { /// Extend the vector by `n` values, using the given generator. - fn extend_with>(&mut self, n: usize, value: E) { + fn extend_with>(&mut self, n: usize, mut value: E) { self.reserve(n); unsafe { @@ -1383,6 +1518,11 @@ impl<'a> SetLenOnDrop<'a> { fn increment_len(&mut self, increment: usize) { self.local_len += increment; } + + #[inline] + fn decrement_len(&mut self, decrement: usize) { + self.local_len -= decrement; + } } impl<'a> Drop for SetLenOnDrop<'a> { @@ -1472,40 +1612,69 @@ impl SpecFromElem for u8 { } } -macro_rules! 
impl_spec_from_elem { - ($t: ty, $is_zero: expr) => { - impl SpecFromElem for $t { - #[inline] - fn from_elem(elem: $t, n: usize) -> Vec<$t> { - if $is_zero(elem) { - return Vec { - buf: RawVec::with_capacity_zeroed(n), - len: n, - } - } - let mut v = Vec::with_capacity(n); - v.extend_with(n, ExtendElement(elem)); - v +impl SpecFromElem for T { + #[inline] + fn from_elem(elem: T, n: usize) -> Vec { + if elem.is_zero() { + return Vec { + buf: RawVec::with_capacity_zeroed(n), + len: n, } } - }; + let mut v = Vec::with_capacity(n); + v.extend_with(n, ExtendElement(elem)); + v + } } -impl_spec_from_elem!(i8, |x| x == 0); -impl_spec_from_elem!(i16, |x| x == 0); -impl_spec_from_elem!(i32, |x| x == 0); -impl_spec_from_elem!(i64, |x| x == 0); -impl_spec_from_elem!(i128, |x| x == 0); -impl_spec_from_elem!(isize, |x| x == 0); +unsafe trait IsZero { + /// Whether this value is zero + fn is_zero(&self) -> bool; +} -impl_spec_from_elem!(u16, |x| x == 0); -impl_spec_from_elem!(u32, |x| x == 0); -impl_spec_from_elem!(u64, |x| x == 0); -impl_spec_from_elem!(u128, |x| x == 0); -impl_spec_from_elem!(usize, |x| x == 0); +macro_rules! 
impl_is_zero { + ($t: ty, $is_zero: expr) => { + unsafe impl IsZero for $t { + #[inline] + fn is_zero(&self) -> bool { + $is_zero(*self) + } + } + } +} + +impl_is_zero!(i8, |x| x == 0); +impl_is_zero!(i16, |x| x == 0); +impl_is_zero!(i32, |x| x == 0); +impl_is_zero!(i64, |x| x == 0); +impl_is_zero!(i128, |x| x == 0); +impl_is_zero!(isize, |x| x == 0); + +impl_is_zero!(u16, |x| x == 0); +impl_is_zero!(u32, |x| x == 0); +impl_is_zero!(u64, |x| x == 0); +impl_is_zero!(u128, |x| x == 0); +impl_is_zero!(usize, |x| x == 0); + +impl_is_zero!(char, |x| x == '\0'); + +impl_is_zero!(f32, |x: f32| x.to_bits() == 0); +impl_is_zero!(f64, |x: f64| x.to_bits() == 0); + +unsafe impl IsZero for *const T { + #[inline] + fn is_zero(&self) -> bool { + (*self).is_null() + } +} + +unsafe impl IsZero for *mut T { + #[inline] + fn is_zero(&self) -> bool { + (*self).is_null() + } +} -impl_spec_from_elem!(f32, |x: f32| x == 0. && x.is_sign_positive()); -impl_spec_from_elem!(f64, |x: f64| x == 0. && x.is_sign_positive()); //////////////////////////////////////////////////////////////////////////////// // Common trait implementations for Vec @@ -1541,143 +1710,33 @@ impl Hash for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl Index for Vec { - type Output = T; +#[rustc_on_unimplemented( + message="vector indices are of type `usize` or ranges of `usize`", + label="vector indices are of type `usize` or ranges of `usize`", +)] +impl Index for Vec +where + I: ::core::slice::SliceIndex<[T]>, +{ + type Output = I::Output; #[inline] - fn index(&self, index: usize) -> &T { - // NB built-in indexing via `&[T]` - &(**self)[index] - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl IndexMut for Vec { - #[inline] - fn index_mut(&mut self, index: usize) -> &mut T { - // NB built-in indexing via `&mut [T]` - &mut 
(**self)[index] - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::Index> for Vec { - type Output = [T]; - - #[inline] - fn index(&self, index: ops::Range) -> &[T] { + fn index(&self, index: I) -> &Self::Output { Index::index(&**self, index) } } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::Index> for Vec { - type Output = [T]; - +#[rustc_on_unimplemented( + message="vector indices are of type `usize` or ranges of `usize`", + label="vector indices are of type `usize` or ranges of `usize`", +)] +impl IndexMut for Vec +where + I: ::core::slice::SliceIndex<[T]>, +{ #[inline] - fn index(&self, index: ops::RangeTo) -> &[T] { - Index::index(&**self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::Index> for Vec { - type Output = [T]; - - #[inline] - fn index(&self, index: ops::RangeFrom) -> &[T] { - Index::index(&**self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::Index for Vec { - type Output = [T]; - - #[inline] - fn index(&self, _index: ops::RangeFull) -> &[T] { - self - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::Index> for Vec { - type Output = [T]; - - #[inline] - fn index(&self, index: ops::RangeInclusive) -> &[T] { - Index::index(&**self, index) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::Index> for Vec { - type Output = 
[T]; - - #[inline] - fn index(&self, index: ops::RangeToInclusive) -> &[T] { - Index::index(&**self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::IndexMut> for Vec { - #[inline] - fn index_mut(&mut self, index: ops::Range) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::IndexMut> for Vec { - #[inline] - fn index_mut(&mut self, index: ops::RangeTo) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::IndexMut> for Vec { - #[inline] - fn index_mut(&mut self, index: ops::RangeFrom) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::IndexMut for Vec { - #[inline] - fn index_mut(&mut self, _index: ops::RangeFull) -> &mut [T] { - self - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::IndexMut> for Vec { - #[inline] - fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut [T] { - IndexMut::index_mut(&mut **self, index) - } -} - -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -#[rustc_on_unimplemented = "vector indices are of type `usize` or ranges of `usize`"] -impl ops::IndexMut> for Vec { - #[inline] - fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut [T] { + fn index_mut(&mut self, index: I) -> &mut Self::Output { IndexMut::index_mut(&mut **self, index) } } @@ 
-1745,7 +1804,7 @@ impl IntoIterator for Vec { let cap = self.buf.cap(); mem::forget(self); IntoIter { - buf: Shared::new_unchecked(begin), + buf: NonNull::new_unchecked(begin), phantom: PhantomData, cap, ptr: begin, @@ -1970,7 +2029,7 @@ impl Vec { #[inline] #[stable(feature = "vec_splice", since = "1.21.0")] pub fn splice(&mut self, range: R, replace_with: I) -> Splice - where R: RangeArgument, I: IntoIterator + where R: RangeBounds, I: IntoIterator { Splice { drain: self.drain(range), @@ -1981,8 +2040,8 @@ impl Vec { /// Creates an iterator which uses a closure to determine if an element should be removed. /// /// If the closure returns true, then the element is removed and yielded. - /// If the closure returns false, it will try again, and call the closure - /// on the next element, seeing if it passes the test. + /// If the closure returns false, the element will remain in the vector and will not be yielded + /// by the iterator. /// /// Using this method is equivalent to the following code: /// @@ -2247,6 +2306,13 @@ impl<'a, T: Clone> From> for Cow<'a, [T]> { } } +#[stable(feature = "cow_from_vec_ref", since = "1.28.0")] +impl<'a, T: Clone> From<&'a Vec> for Cow<'a, [T]> { + fn from(v: &'a Vec) -> Cow<'a, [T]> { + Cow::Borrowed(v.as_slice()) + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> FromIterator for Cow<'a, [T]> where T: Clone { fn from_iter>(it: I) -> Cow<'a, [T]> { @@ -2267,7 +2333,7 @@ impl<'a, T> FromIterator for Cow<'a, [T]> where T: Clone { /// [`IntoIterator`]: ../../std/iter/trait.IntoIterator.html #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { - buf: Shared, + buf: NonNull, phantom: PhantomData, cap: usize, ptr: *const T, @@ -2359,9 +2425,10 @@ impl Iterator for IntoIter { #[inline] fn size_hint(&self) -> (usize, Option) { - let exact = match self.ptr.offset_to(self.end) { - Some(x) => x as usize, - None => (self.end as usize).wrapping_sub(self.ptr as usize), + let exact = if mem::size_of::() == 0 { + 
(self.end as usize).wrapping_sub(self.ptr as usize) + } else { + unsafe { self.end.offset_from(self.ptr) as usize } }; (exact, Some(exact)) } @@ -2404,7 +2471,7 @@ impl ExactSizeIterator for IntoIter { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -2442,7 +2509,7 @@ pub struct Drain<'a, T: 'a> { tail_len: usize, /// Current remaining range to remove iter: slice::Iter<'a, T>, - vec: Shared>, + vec: NonNull>, } #[stable(feature = "collection_debug", since = "1.17.0")] @@ -2485,7 +2552,7 @@ impl<'a, T> DoubleEndedIterator for Drain<'a, T> { impl<'a, T> Drop for Drain<'a, T> { fn drop(&mut self) { // exhaust self first - while let Some(_) = self.next() {} + self.for_each(drop); if self.tail_len > 0 { unsafe { @@ -2493,9 +2560,11 @@ impl<'a, T> Drop for Drain<'a, T> { // memmove back untouched tail, update to new length let start = source_vec.len(); let tail = self.tail_start; - let src = source_vec.as_ptr().offset(tail as isize); - let dst = source_vec.as_mut_ptr().offset(start as isize); - ptr::copy(src, dst, self.tail_len); + if tail != start { + let src = source_vec.as_ptr().offset(tail as isize); + let dst = source_vec.as_mut_ptr().offset(start as isize); + ptr::copy(src, dst, self.tail_len); + } source_vec.set_len(start + self.tail_len); } } @@ -2510,60 +2579,9 @@ impl<'a, T> ExactSizeIterator for Drain<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Drain<'a, T> {} -/// A place for insertion at the back of a `Vec`. -/// -/// See [`Vec::place_back`](struct.Vec.html#method.place_back) for details. 
-#[must_use = "places do nothing unless written to with `<-` syntax"] -#[unstable(feature = "collection_placement", - reason = "struct name and placement protocol are subject to change", - issue = "30172")] -#[derive(Debug)] -pub struct PlaceBack<'a, T: 'a> { - vec: &'a mut Vec, -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Placer for PlaceBack<'a, T> { - type Place = PlaceBack<'a, T>; - - fn make_place(self) -> Self { - // This will panic or abort if we would allocate > isize::MAX bytes - // or if the length increment would overflow for zero-sized types. - if self.vec.len == self.vec.buf.cap() { - self.vec.buf.double(); - } - self - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> Place for PlaceBack<'a, T> { - fn pointer(&mut self) -> *mut T { - unsafe { self.vec.as_mut_ptr().offset(self.vec.len as isize) } - } -} - -#[unstable(feature = "collection_placement", - reason = "placement protocol is subject to change", - issue = "30172")] -impl<'a, T> InPlace for PlaceBack<'a, T> { - type Owner = &'a mut T; - - unsafe fn finalize(mut self) -> &'a mut T { - let ptr = self.pointer(); - self.vec.len += 1; - &mut *ptr - } -} - - /// A splicing iterator for `Vec`. /// /// This struct is created by the [`splice()`] method on [`Vec`]. 
See its @@ -2605,9 +2623,7 @@ impl<'a, I: Iterator> ExactSizeIterator for Splice<'a, I> {} #[stable(feature = "vec_splice", since = "1.21.0")] impl<'a, I: Iterator> Drop for Splice<'a, I> { fn drop(&mut self) { - // exhaust drain first - while let Some(_) = self.drain.next() {} - + self.drain.by_ref().for_each(drop); unsafe { if self.drain.tail_len == 0 { @@ -2736,8 +2752,7 @@ impl<'a, T, F> Drop for DrainFilter<'a, T, F> where F: FnMut(&mut T) -> bool, { fn drop(&mut self) { - for _ in self.by_ref() { } - + self.for_each(drop); unsafe { self.vec.set_len(self.old_len - self.del); } diff --git a/src/liballoc_jemalloc/Cargo.toml b/src/liballoc_jemalloc/Cargo.toml index 6d7d83dd9938..7986d5dd2eb5 100644 --- a/src/liballoc_jemalloc/Cargo.toml +++ b/src/liballoc_jemalloc/Cargo.toml @@ -12,10 +12,9 @@ test = false doc = false [dependencies] -alloc = { path = "../liballoc" } -alloc_system = { path = "../liballoc_system" } core = { path = "../libcore" } libc = { path = "../rustc/libc_shim" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } [build-dependencies] build_helper = { path = "../build_helper" } diff --git a/src/liballoc_jemalloc/build.rs b/src/liballoc_jemalloc/build.rs index 440c9fbf2f66..fbda425a70bf 100644 --- a/src/liballoc_jemalloc/build.rs +++ b/src/liballoc_jemalloc/build.rs @@ -29,13 +29,20 @@ fn main() { // for targets like emscripten, even if we don't use it. 
let target = env::var("TARGET").expect("TARGET was not set"); let host = env::var("HOST").expect("HOST was not set"); - if target.contains("bitrig") || target.contains("cloudabi") || target.contains("emscripten") || - target.contains("fuchsia") || target.contains("msvc") || target.contains("openbsd") || - target.contains("redox") || target.contains("rumprun") || target.contains("wasm32") { + if target.contains("bitrig") || target.contains("emscripten") || target.contains("fuchsia") || + target.contains("msvc") || target.contains("openbsd") || target.contains("redox") || + target.contains("rumprun") || target.contains("wasm32") { println!("cargo:rustc-cfg=dummy_jemalloc"); return; } + // CloudABI ships with a copy of jemalloc that has been patched to + // work well with sandboxing. Don't attempt to build our own copy, + // as it won't build. + if target.contains("cloudabi") { + return; + } + if target.contains("android") { println!("cargo:rustc-link-lib=gcc"); } else if !target.contains("windows") && !target.contains("musl") { @@ -98,11 +105,10 @@ fn main() { cmd.arg("--with-jemalloc-prefix=je_"); } - // FIXME: building with jemalloc assertions is currently broken. - // See . - //if cfg!(feature = "debug") { - // cmd.arg("--enable-debug"); - //} + if cfg!(feature = "debug") { + // Enable jemalloc assertions. 
+ cmd.arg("--enable-debug"); + } cmd.arg(format!("--host={}", build_helper::gnu_target(&target))); cmd.arg(format!("--build={}", build_helper::gnu_target(&host))); diff --git a/src/liballoc_jemalloc/lib.rs b/src/liballoc_jemalloc/lib.rs index d7370ae400da..480a24b9bd1f 100644 --- a/src/liballoc_jemalloc/lib.rs +++ b/src/liballoc_jemalloc/lib.rs @@ -11,32 +11,24 @@ #![no_std] #![allow(unused_attributes)] #![unstable(feature = "alloc_jemalloc", - reason = "this library is unlikely to be stabilized in its current \ - form or name", - issue = "27783")] -#![deny(warnings)] -#![feature(alloc)] -#![feature(alloc_system)] + reason = "implementation detail of std, does not provide any public API", + issue = "0")] +#![feature(core_intrinsics)] #![feature(libc)] #![feature(linkage)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(staged_api)] #![feature(rustc_attrs)] #![cfg_attr(dummy_jemalloc, allow(dead_code, unused_extern_crates))] #![cfg_attr(not(dummy_jemalloc), feature(allocator_api))] #![rustc_alloc_kind = "exe"] -extern crate alloc; -extern crate alloc_system; extern crate libc; #[cfg(not(dummy_jemalloc))] pub use contents::*; #[cfg(not(dummy_jemalloc))] mod contents { - use core::ptr; - - use alloc::heap::{Alloc, AllocErr, Layout}; - use alloc_system::System; use libc::{c_int, c_void, size_t}; // Note that the symbols here are prefixed by default on macOS and Windows (we @@ -55,18 +47,10 @@ mod contents { target_os = "dragonfly", target_os = "windows", target_env = "musl"), link_name = "je_rallocx")] fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; - #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", - target_os = "dragonfly", target_os = "windows", target_env = "musl"), - link_name = "je_xallocx")] - fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", target_os = "dragonfly", target_os = "windows", 
target_env = "musl"), link_name = "je_sdallocx")] fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); - #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", - target_os = "dragonfly", target_os = "windows", target_env = "musl"), - link_name = "je_nallocx")] - fn nallocx(size: size_t, flags: c_int) -> size_t; } const MALLOCX_ZERO: c_int = 0x40; @@ -107,25 +91,12 @@ mod contents { #[no_mangle] #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_alloc(size: usize, - align: usize, - err: *mut u8) -> *mut u8 { + pub unsafe extern fn __rde_alloc(size: usize, align: usize) -> *mut u8 { let flags = align_to_flags(align, size); let ptr = mallocx(size as size_t, flags) as *mut u8; - if ptr.is_null() { - let layout = Layout::from_size_align_unchecked(size, align); - ptr::write(err as *mut AllocErr, - AllocErr::Exhausted { request: layout }); - } ptr } - #[no_mangle] - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_oom(err: *const u8) -> ! { - System.oom((*(err as *const AllocErr)).clone()) - } - #[no_mangle] #[rustc_std_internal_symbol] pub unsafe extern fn __rde_dealloc(ptr: *mut u8, @@ -135,118 +106,26 @@ mod contents { sdallocx(ptr as *mut c_void, size, flags); } - #[no_mangle] - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_usable_size(layout: *const u8, - min: *mut usize, - max: *mut usize) { - let layout = &*(layout as *const Layout); - let flags = align_to_flags(layout.align(), layout.size()); - let size = nallocx(layout.size(), flags) as usize; - *min = layout.size(); - if size > 0 { - *max = size; - } else { - *max = layout.size(); - } - } - #[no_mangle] #[rustc_std_internal_symbol] pub unsafe extern fn __rde_realloc(ptr: *mut u8, _old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize, - err: *mut u8) -> *mut u8 { - if new_align != old_align { - ptr::write(err as *mut AllocErr, - AllocErr::Unsupported { details: "can't change alignments" }); - return 0 as *mut u8 - } - - let flags = 
align_to_flags(new_align, new_size); + align: usize, + new_size: usize) -> *mut u8 { + let flags = align_to_flags(align, new_size); let ptr = rallocx(ptr as *mut c_void, new_size, flags) as *mut u8; - if ptr.is_null() { - let layout = Layout::from_size_align_unchecked(new_size, new_align); - ptr::write(err as *mut AllocErr, - AllocErr::Exhausted { request: layout }); - } ptr } #[no_mangle] #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_alloc_zeroed(size: usize, - align: usize, - err: *mut u8) -> *mut u8 { + pub unsafe extern fn __rde_alloc_zeroed(size: usize, align: usize) -> *mut u8 { let ptr = if align <= MIN_ALIGN && align <= size { calloc(size as size_t, 1) as *mut u8 } else { let flags = align_to_flags(align, size) | MALLOCX_ZERO; mallocx(size as size_t, flags) as *mut u8 }; - if ptr.is_null() { - let layout = Layout::from_size_align_unchecked(size, align); - ptr::write(err as *mut AllocErr, - AllocErr::Exhausted { request: layout }); - } ptr } - - #[no_mangle] - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_alloc_excess(size: usize, - align: usize, - excess: *mut usize, - err: *mut u8) -> *mut u8 { - let p = __rde_alloc(size, align, err); - if !p.is_null() { - let flags = align_to_flags(align, size); - *excess = nallocx(size, flags) as usize; - } - return p - } - - #[no_mangle] - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_realloc_excess(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize, - excess: *mut usize, - err: *mut u8) -> *mut u8 { - let p = __rde_realloc(ptr, old_size, old_align, new_size, new_align, err); - if !p.is_null() { - let flags = align_to_flags(new_align, new_size); - *excess = nallocx(new_size, flags) as usize; - } - p - } - - #[no_mangle] - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_grow_in_place(ptr: *mut u8, - old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize) -> u8 { - __rde_shrink_in_place(ptr, old_size, old_align, 
new_size, new_align) - } - - #[no_mangle] - #[rustc_std_internal_symbol] - pub unsafe extern fn __rde_shrink_in_place(ptr: *mut u8, - _old_size: usize, - old_align: usize, - new_size: usize, - new_align: usize) -> u8 { - if old_align == new_align { - let flags = align_to_flags(new_align, new_size); - (xallocx(ptr as *mut c_void, new_size, 0, flags) == new_size) as u8 - } else { - 0 - } - } } diff --git a/src/liballoc_system/Cargo.toml b/src/liballoc_system/Cargo.toml index f9a57f7d97a7..c34e2f203a83 100644 --- a/src/liballoc_system/Cargo.toml +++ b/src/liballoc_system/Cargo.toml @@ -10,9 +10,9 @@ test = false doc = false [dependencies] -alloc = { path = "../liballoc" } core = { path = "../libcore" } libc = { path = "../rustc/libc_shim" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } # See comments in the source for what this dependency is [target.'cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))'.dependencies] diff --git a/src/liballoc_system/lib.rs b/src/liballoc_system/lib.rs index 1d5e7b73be55..c5e056f6b12b 100644 --- a/src/liballoc_system/lib.rs +++ b/src/liballoc_system/lib.rs @@ -10,15 +10,14 @@ #![no_std] #![allow(unused_attributes)] -#![deny(warnings)] #![unstable(feature = "alloc_system", reason = "this library is unlikely to be stabilized in its current \ form or name", issue = "32838")] -#![feature(global_allocator)] + #![feature(allocator_api)] -#![feature(alloc)] #![feature(core_intrinsics)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(staged_api)] #![feature(rustc_attrs)] #![cfg_attr(any(unix, target_os = "cloudabi", target_os = "redox"), feature(libc))] @@ -43,76 +42,82 @@ const MIN_ALIGN: usize = 8; #[allow(dead_code)] const MIN_ALIGN: usize = 16; -extern crate alloc; +use core::alloc::{Alloc, GlobalAlloc, AllocErr, Layout}; +use core::ptr::NonNull; -use self::alloc::heap::{Alloc, AllocErr, Layout, Excess, CannotReallocInPlace}; - -#[unstable(feature = "allocator_api", issue = "32838")] +/// The default 
memory allocator provided by the operating system. +/// +/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows, +/// plus related functions. +/// +/// This type can be used in a `static` item +/// with the `#[global_allocator]` attribute +/// to force the global allocator to be the system’s one. +/// (The default is jemalloc for executables, on some platforms.) +/// +/// ```rust +/// use std::alloc::System; +/// +/// #[global_allocator] +/// static A: System = System; +/// +/// fn main() { +/// let a = Box::new(4); // Allocates from the system allocator. +/// println!("{}", a); +/// } +/// ``` +/// +/// It can also be used directly to allocate memory +/// independently of the standard library’s global allocator. +#[stable(feature = "alloc_system_type", since = "1.28.0")] pub struct System; #[unstable(feature = "allocator_api", issue = "32838")] unsafe impl Alloc for System { #[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - (&*self).alloc(layout) + unsafe fn alloc(&mut self, layout: Layout) -> Result, AllocErr> { + NonNull::new(GlobalAlloc::alloc(self, layout)).ok_or(AllocErr) } #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) - -> Result<*mut u8, AllocErr> - { - (&*self).alloc_zeroed(layout) + unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result, AllocErr> { + NonNull::new(GlobalAlloc::alloc_zeroed(self, layout)).ok_or(AllocErr) } #[inline] - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - (&*self).dealloc(ptr, layout) + unsafe fn dealloc(&mut self, ptr: NonNull, layout: Layout) { + GlobalAlloc::dealloc(self, ptr.as_ptr(), layout) } #[inline] unsafe fn realloc(&mut self, - ptr: *mut u8, - old_layout: Layout, - new_layout: Layout) -> Result<*mut u8, AllocErr> { - (&*self).realloc(ptr, old_layout, new_layout) + ptr: NonNull, + layout: Layout, + new_size: usize) -> Result, AllocErr> { + NonNull::new(GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size)).ok_or(AllocErr) 
} +} - fn oom(&mut self, err: AllocErr) -> ! { - (&*self).oom(err) - } +#[cfg(any(windows, unix, target_os = "cloudabi", target_os = "redox"))] +mod realloc_fallback { + use core::alloc::{GlobalAlloc, Layout}; + use core::cmp; + use core::ptr; - #[inline] - fn usable_size(&self, layout: &Layout) -> (usize, usize) { - (&self).usable_size(layout) - } + impl super::System { + pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout, + new_size: usize) -> *mut u8 { + // Docs for GlobalAlloc::realloc require this to be valid: + let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); - #[inline] - unsafe fn alloc_excess(&mut self, layout: Layout) -> Result { - (&*self).alloc_excess(layout) - } - - #[inline] - unsafe fn realloc_excess(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result { - (&*self).realloc_excess(ptr, layout, new_layout) - } - - #[inline] - unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - (&*self).grow_in_place(ptr, layout, new_layout) - } - - #[inline] - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - (&*self).shrink_in_place(ptr, layout, new_layout) + let new_ptr = GlobalAlloc::alloc(self, new_layout); + if !new_ptr.is_null() { + let size = cmp::min(old_layout.size(), new_size); + ptr::copy_nonoverlapping(ptr, new_ptr, size); + GlobalAlloc::dealloc(self, ptr, old_layout); + } + new_ptr + } } } @@ -120,122 +125,61 @@ unsafe impl Alloc for System { mod platform { extern crate libc; - use core::cmp; use core::ptr; use MIN_ALIGN; use System; - use alloc::heap::{Alloc, AllocErr, Layout}; + use core::alloc::{GlobalAlloc, Layout}; - #[unstable(feature = "allocator_api", issue = "32838")] - unsafe impl<'a> Alloc for &'a System { + #[stable(feature = "alloc_system_type", since = "1.28.0")] + unsafe impl GlobalAlloc for System { 
#[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - let ptr = if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { libc::malloc(layout.size()) as *mut u8 } else { + #[cfg(target_os = "macos")] + { + if layout.align() > (1 << 31) { + return ptr::null_mut() + } + } aligned_malloc(&layout) - }; - if !ptr.is_null() { - Ok(ptr) - } else { - Err(AllocErr::Exhausted { request: layout }) } } #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) - -> Result<*mut u8, AllocErr> - { + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() { - let ptr = libc::calloc(layout.size(), 1) as *mut u8; - if !ptr.is_null() { - Ok(ptr) - } else { - Err(AllocErr::Exhausted { request: layout }) - } + libc::calloc(layout.size(), 1) as *mut u8 } else { - let ret = self.alloc(layout.clone()); - if let Ok(ptr) = ret { + let ptr = self.alloc(layout.clone()); + if !ptr.is_null() { ptr::write_bytes(ptr, 0, layout.size()); } - ret + ptr } } #[inline] - unsafe fn dealloc(&mut self, ptr: *mut u8, _layout: Layout) { + unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { libc::free(ptr as *mut libc::c_void) } #[inline] - unsafe fn realloc(&mut self, - ptr: *mut u8, - old_layout: Layout, - new_layout: Layout) -> Result<*mut u8, AllocErr> { - if old_layout.align() != new_layout.align() { - return Err(AllocErr::Unsupported { - details: "cannot change alignment on `realloc`", - }) - } - - if new_layout.align() <= MIN_ALIGN && new_layout.align() <= new_layout.size(){ - let ptr = libc::realloc(ptr as *mut libc::c_void, new_layout.size()); - if !ptr.is_null() { - Ok(ptr as *mut u8) - } else { - Err(AllocErr::Exhausted { request: new_layout }) - } + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= 
MIN_ALIGN && layout.align() <= new_size { + libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8 } else { - let res = self.alloc(new_layout.clone()); - if let Ok(new_ptr) = res { - let size = cmp::min(old_layout.size(), new_layout.size()); - ptr::copy_nonoverlapping(ptr, new_ptr, size); - self.dealloc(ptr, old_layout); - } - res - } - } - - fn oom(&mut self, err: AllocErr) -> ! { - use core::fmt::{self, Write}; - - // Print a message to stderr before aborting to assist with - // debugging. It is critical that this code does not allocate any - // memory since we are in an OOM situation. Any errors are ignored - // while printing since there's nothing we can do about them and we - // are about to exit anyways. - drop(writeln!(Stderr, "fatal runtime error: {}", err)); - unsafe { - ::core::intrinsics::abort(); - } - - struct Stderr; - - impl Write for Stderr { - #[cfg(target_os = "cloudabi")] - fn write_str(&mut self, _: &str) -> fmt::Result { - // CloudABI does not have any reserved file descriptor - // numbers. We should not attempt to write to file - // descriptor #2, as it may be associated with any kind of - // resource. 
- Ok(()) - } - - #[cfg(not(target_os = "cloudabi"))] - fn write_str(&mut self, s: &str) -> fmt::Result { - unsafe { - libc::write(libc::STDERR_FILENO, - s.as_ptr() as *const libc::c_void, - s.len()); - } - Ok(()) - } + self.realloc_fallback(ptr, layout, new_size) } } } - #[cfg(any(target_os = "android", target_os = "redox", target_os = "solaris"))] + #[cfg(any(target_os = "android", + target_os = "hermit", + target_os = "redox", + target_os = "solaris"))] #[inline] unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { // On android we currently target API level 9 which unfortunately @@ -258,7 +202,10 @@ mod platform { libc::memalign(layout.align(), layout.size()) as *mut u8 } - #[cfg(not(any(target_os = "android", target_os = "redox", target_os = "solaris")))] + #[cfg(not(any(target_os = "android", + target_os = "hermit", + target_os = "redox", + target_os = "solaris")))] #[inline] unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { let mut out = ptr::null_mut(); @@ -274,22 +221,15 @@ mod platform { #[cfg(windows)] #[allow(bad_style)] mod platform { - use core::cmp; - use core::ptr; - use MIN_ALIGN; use System; - use alloc::heap::{Alloc, AllocErr, Layout, CannotReallocInPlace}; + use core::alloc::{GlobalAlloc, Layout}; type LPVOID = *mut u8; type HANDLE = LPVOID; type SIZE_T = usize; type DWORD = u32; type BOOL = i32; - type LPDWORD = *mut DWORD; - type LPOVERLAPPED = *mut u8; - - const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD; extern "system" { fn GetProcessHeap() -> HANDLE; @@ -297,20 +237,12 @@ mod platform { fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID; fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; fn GetLastError() -> DWORD; - fn WriteFile(hFile: HANDLE, - lpBuffer: LPVOID, - nNumberOfBytesToWrite: DWORD, - lpNumberOfBytesWritten: LPDWORD, - lpOverlapped: LPOVERLAPPED) - -> BOOL; - fn GetStdHandle(which: DWORD) -> HANDLE; } #[repr(C)] struct Header(*mut u8); const HEAP_ZERO_MEMORY: DWORD = 
0x00000008; - const HEAP_REALLOC_IN_PLACE_ONLY: DWORD = 0x00000010; unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header { &mut *(ptr as *mut Header).offset(-1) @@ -323,9 +255,7 @@ mod platform { } #[inline] - unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) - -> Result<*mut u8, AllocErr> - { + unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 { let ptr = if layout.align() <= MIN_ALIGN { HeapAlloc(GetProcessHeap(), flags, layout.size()) } else { @@ -337,29 +267,23 @@ mod platform { align_ptr(ptr, layout.align()) } }; - if ptr.is_null() { - Err(AllocErr::Exhausted { request: layout }) - } else { - Ok(ptr as *mut u8) - } + ptr as *mut u8 } - #[unstable(feature = "allocator_api", issue = "32838")] - unsafe impl<'a> Alloc for &'a System { + #[stable(feature = "alloc_system_type", since = "1.28.0")] + unsafe impl GlobalAlloc for System { #[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { allocate_with_flags(layout, 0) } #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) - -> Result<*mut u8, AllocErr> - { + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { allocate_with_flags(layout, HEAP_ZERO_MEMORY) } #[inline] - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { if layout.align() <= MIN_ALIGN { let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID); debug_assert!(err != 0, "Failed to free heap memory: {}", @@ -373,98 +297,11 @@ mod platform { } #[inline] - unsafe fn realloc(&mut self, - ptr: *mut u8, - old_layout: Layout, - new_layout: Layout) -> Result<*mut u8, AllocErr> { - if old_layout.align() != new_layout.align() { - return Err(AllocErr::Unsupported { - details: "cannot change alignment on `realloc`", - }) - } - - if new_layout.align() <= MIN_ALIGN { - let ptr = HeapReAlloc(GetProcessHeap(), - 0, - ptr as LPVOID, - new_layout.size()); - if 
!ptr.is_null() { - Ok(ptr as *mut u8) - } else { - Err(AllocErr::Exhausted { request: new_layout }) - } + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + if layout.align() <= MIN_ALIGN { + HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8 } else { - let res = self.alloc(new_layout.clone()); - if let Ok(new_ptr) = res { - let size = cmp::min(old_layout.size(), new_layout.size()); - ptr::copy_nonoverlapping(ptr, new_ptr, size); - self.dealloc(ptr, old_layout); - } - res - } - } - - #[inline] - unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - self.shrink_in_place(ptr, layout, new_layout) - } - - #[inline] - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - old_layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - if old_layout.align() != new_layout.align() { - return Err(CannotReallocInPlace) - } - - let new = if new_layout.align() <= MIN_ALIGN { - HeapReAlloc(GetProcessHeap(), - HEAP_REALLOC_IN_PLACE_ONLY, - ptr as LPVOID, - new_layout.size()) - } else { - let header = get_header(ptr); - HeapReAlloc(GetProcessHeap(), - HEAP_REALLOC_IN_PLACE_ONLY, - header.0 as LPVOID, - new_layout.size() + new_layout.align()) - }; - if new.is_null() { - Err(CannotReallocInPlace) - } else { - Ok(()) - } - } - - fn oom(&mut self, err: AllocErr) -> ! { - use core::fmt::{self, Write}; - - // Same as with unix we ignore all errors here - drop(writeln!(Stderr, "fatal runtime error: {}", err)); - unsafe { - ::core::intrinsics::abort(); - } - - struct Stderr; - - impl Write for Stderr { - fn write_str(&mut self, s: &str) -> fmt::Result { - unsafe { - // WriteFile silently fails if it is passed an invalid - // handle, so there is no need to check the result of - // GetStdHandle. 
- WriteFile(GetStdHandle(STD_ERROR_HANDLE), - s.as_ptr() as LPVOID, - s.len() as DWORD, - ptr::null_mut(), - ptr::null_mut()); - } - Ok(()) - } + self.realloc_fallback(ptr, layout, new_size) } } } @@ -491,69 +328,32 @@ mod platform { mod platform { extern crate dlmalloc; - use alloc::heap::{Alloc, AllocErr, Layout, Excess, CannotReallocInPlace}; + use core::alloc::{GlobalAlloc, Layout}; use System; - use self::dlmalloc::GlobalDlmalloc; - #[unstable(feature = "allocator_api", issue = "32838")] - unsafe impl<'a> Alloc for &'a System { + // No need for synchronization here as wasm is currently single-threaded + static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT; + + #[stable(feature = "alloc_system_type", since = "1.28.0")] + unsafe impl GlobalAlloc for System { #[inline] - unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> { - GlobalDlmalloc.alloc(layout) + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + DLMALLOC.malloc(layout.size(), layout.align()) } #[inline] - unsafe fn alloc_zeroed(&mut self, layout: Layout) - -> Result<*mut u8, AllocErr> - { - GlobalDlmalloc.alloc_zeroed(layout) + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + DLMALLOC.calloc(layout.size(), layout.align()) } #[inline] - unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { - GlobalDlmalloc.dealloc(ptr, layout) + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + DLMALLOC.free(ptr, layout.size(), layout.align()) } #[inline] - unsafe fn realloc(&mut self, - ptr: *mut u8, - old_layout: Layout, - new_layout: Layout) -> Result<*mut u8, AllocErr> { - GlobalDlmalloc.realloc(ptr, old_layout, new_layout) - } - - #[inline] - fn usable_size(&self, layout: &Layout) -> (usize, usize) { - GlobalDlmalloc.usable_size(layout) - } - - #[inline] - unsafe fn alloc_excess(&mut self, layout: Layout) -> Result { - GlobalDlmalloc.alloc_excess(layout) - } - - #[inline] - unsafe fn realloc_excess(&mut self, - ptr: *mut u8, - layout: Layout, - 
new_layout: Layout) -> Result { - GlobalDlmalloc.realloc_excess(ptr, layout, new_layout) - } - - #[inline] - unsafe fn grow_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - GlobalDlmalloc.grow_in_place(ptr, layout, new_layout) - } - - #[inline] - unsafe fn shrink_in_place(&mut self, - ptr: *mut u8, - layout: Layout, - new_layout: Layout) -> Result<(), CannotReallocInPlace> { - GlobalDlmalloc.shrink_in_place(ptr, layout, new_layout) + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) } } } diff --git a/src/libarena/Cargo.toml b/src/libarena/Cargo.toml index b53c0a2f48bf..e2af67dd9286 100644 --- a/src/libarena/Cargo.toml +++ b/src/libarena/Cargo.toml @@ -7,3 +7,6 @@ version = "0.0.0" name = "arena" path = "lib.rs" crate-type = ["dylib"] + +[dependencies] +rustc_data_structures = { path = "../librustc_data_structures" } \ No newline at end of file diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index 72fa3148fe54..265721c74975 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -22,17 +22,20 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", test(no_crate_inject, attr(deny(warnings))))] -#![deny(warnings)] #![feature(alloc)] #![feature(core_intrinsics)] #![feature(dropck_eyepatch)] -#![feature(generic_param_attrs)] +#![cfg_attr(not(stage0), feature(nll))] +#![feature(raw_vec_internals)] #![cfg_attr(test, feature(test))] #![allow(deprecated)] extern crate alloc; +extern crate rustc_data_structures; + +use rustc_data_structures::sync::MTLock; use std::cell::{Cell, RefCell}; use std::cmp; @@ -291,6 +294,8 @@ pub struct DroplessArena { chunks: RefCell>>, } +unsafe impl Send for DroplessArena {} + impl DroplessArena { pub fn new() -> DroplessArena { DroplessArena { @@ -311,8 +316,7 @@ impl DroplessArena { false } - fn 
align_for(&self) { - let align = mem::align_of::(); + fn align(&self, align: usize) { let final_address = ((self.ptr.get() as usize) + align - 1) & !(align - 1); self.ptr.set(final_address as *mut u8); assert!(self.ptr <= self.end); @@ -320,8 +324,7 @@ impl DroplessArena { #[inline(never)] #[cold] - fn grow(&self, n: usize) { - let needed_bytes = n * mem::size_of::(); + fn grow(&self, needed_bytes: usize) { unsafe { let mut chunks = self.chunks.borrow_mut(); let (chunk, mut new_capacity); @@ -353,25 +356,38 @@ impl DroplessArena { } #[inline] - pub fn alloc(&self, object: T) -> &mut T { + pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] { unsafe { - assert!(!mem::needs_drop::()); - assert!(mem::size_of::() != 0); + assert!(bytes != 0); - self.align_for::(); - let future_end = intrinsics::arith_offset(self.ptr.get(), mem::size_of::() as isize); + self.align(align); + + let future_end = intrinsics::arith_offset(self.ptr.get(), bytes as isize); if (future_end as *mut u8) >= self.end.get() { - self.grow::(1) + self.grow(bytes); } let ptr = self.ptr.get(); // Set the pointer past ourselves self.ptr.set( - intrinsics::arith_offset(self.ptr.get(), mem::size_of::() as isize) as *mut u8, + intrinsics::arith_offset(self.ptr.get(), bytes as isize) as *mut u8, ); + slice::from_raw_parts_mut(ptr, bytes) + } + } + + #[inline] + pub fn alloc(&self, object: T) -> &mut T { + assert!(!mem::needs_drop::()); + + let mem = self.alloc_raw( + mem::size_of::(), + mem::align_of::()) as *mut _ as *mut T; + + unsafe { // Write into uninitialized memory. 
- ptr::write(ptr as *mut T, object); - &mut *(ptr as *mut T) + ptr::write(mem, object); + &mut *mem } } @@ -390,27 +406,91 @@ impl DroplessArena { assert!(!mem::needs_drop::()); assert!(mem::size_of::() != 0); assert!(slice.len() != 0); - self.align_for::(); - let future_end = unsafe { - intrinsics::arith_offset(self.ptr.get(), (slice.len() * mem::size_of::()) as isize) - }; - if (future_end as *mut u8) >= self.end.get() { - self.grow::(slice.len()); - } + let mem = self.alloc_raw( + slice.len() * mem::size_of::(), + mem::align_of::()) as *mut _ as *mut T; unsafe { - let arena_slice = slice::from_raw_parts_mut(self.ptr.get() as *mut T, slice.len()); - self.ptr.set(intrinsics::arith_offset( - self.ptr.get(), - (slice.len() * mem::size_of::()) as isize, - ) as *mut u8); + let arena_slice = slice::from_raw_parts_mut(mem, slice.len()); arena_slice.copy_from_slice(slice); arena_slice } } } +pub struct SyncTypedArena { + lock: MTLock>, +} + +impl SyncTypedArena { + #[inline(always)] + pub fn new() -> SyncTypedArena { + SyncTypedArena { + lock: MTLock::new(TypedArena::new()) + } + } + + #[inline(always)] + pub fn alloc(&self, object: T) -> &mut T { + // Extend the lifetime of the result since it's limited to the lock guard + unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) } + } + + #[inline(always)] + pub fn alloc_slice(&self, slice: &[T]) -> &mut [T] + where + T: Copy, + { + // Extend the lifetime of the result since it's limited to the lock guard + unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) } + } + + #[inline(always)] + pub fn clear(&mut self) { + self.lock.get_mut().clear(); + } +} + +pub struct SyncDroplessArena { + lock: MTLock, +} + +impl SyncDroplessArena { + #[inline(always)] + pub fn new() -> SyncDroplessArena { + SyncDroplessArena { + lock: MTLock::new(DroplessArena::new()) + } + } + + #[inline(always)] + pub fn in_arena(&self, ptr: *const T) -> bool { + self.lock.lock().in_arena(ptr) + } + + #[inline(always)] + pub fn 
alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] { + // Extend the lifetime of the result since it's limited to the lock guard + unsafe { &mut *(self.lock.lock().alloc_raw(bytes, align) as *mut [u8]) } + } + + #[inline(always)] + pub fn alloc(&self, object: T) -> &mut T { + // Extend the lifetime of the result since it's limited to the lock guard + unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) } + } + + #[inline(always)] + pub fn alloc_slice(&self, slice: &[T]) -> &mut [T] + where + T: Copy, + { + // Extend the lifetime of the result since it's limited to the lock guard + unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) } + } +} + #[cfg(test)] mod tests { extern crate test; diff --git a/src/libbacktrace b/src/libbacktrace new file mode 160000 index 000000000000..f4d02bbdbf8a --- /dev/null +++ b/src/libbacktrace @@ -0,0 +1 @@ +Subproject commit f4d02bbdbf8a2c5a31f0801dfef597a86caad9e3 diff --git a/src/libbacktrace/ChangeLog b/src/libbacktrace/ChangeLog deleted file mode 100644 index acc07047f672..000000000000 --- a/src/libbacktrace/ChangeLog +++ /dev/null @@ -1,590 +0,0 @@ -2016-05-18 Uros Bizjak - - PR target/71161 - * elf.c (phdr_callback) [__i386__]: Add - __attribute__((__force_align_arg_pointer__)). - -2016-03-02 Maxim Ostapenko - - * elf.c (backtrace_initialize): Properly initialize elf_fileline_fn to - avoid possible crash. - (elf_add): Don't set *fileline_fn to elf_nodebug value in case of - missing debug info anymore. - -2016-02-06 John David Anglin - - * mmap.c (MAP_FAILED): Define if not defined. - -2016-01-04 Jakub Jelinek - - Update copyright years. - -2015-12-18 Andris Pavenis - - * configure.ac: Specify that DJGPP do not have mmap - even when sys/mman.h exists. - * configure: Regenerate - -2015-12-09 John David Anglin - - PR libgfortran/68115 - * configure.ac: Set libbacktrace_cv_sys_sync to no on hppa*-*-hpux*. - * configure: Regenerate. 
- * elf.c (backtrace_initialize): Cast __sync_bool_compare_and_swap call - to void. - -2015-09-17 Ian Lance Taylor - - * posix.c (backtrace_open): Cast second argument of open() to int. - -2015-09-11 Ian Lance Taylor - - * Makefile.am (backtrace.lo): Depend on internal.h. - (sort.lo, stest.lo): Add explicit dependencies. - * Makefile.in: Rebuild. - -2015-09-09 Hans-Peter Nilsson - - * backtrace.c: #include . - -2015-09-08 Ian Lance Taylor - - PR other/67457 - * backtrace.c: #include "internal.h". - (struct backtrace_data): Add can_alloc field. - (unwind): If can_alloc is false, don't try to get file/line - information. - (backtrace_full): Set can_alloc field in bdata. - * alloc.c (backtrace_alloc): Don't call error_callback if it is - NULL. - * mmap.c (backtrace_alloc): Likewise. - * internal.h: Update comments for backtrace_alloc and - backtrace_free. - -2015-09-08 Ian Lance Taylor - - PR other/67457 - * mmap.c (backtrace_alloc): Correct test for mmap failure. - -2015-08-31 Ulrich Weigand - - * configure.ac: For spu-*-* targets, set have_fcntl to no. - * configure: Regenerate. - -2015-08-27 Ulrich Weigand - - * configure.ac: Remove [disable-shared] argument to LT_INIT. - Remove setting PIC_FLAG when building as target library. - * configure: Regenerate. - -2015-08-26 Hans-Peter Nilsson - - * configure.ac: Only compile with -fPIC if the target - supports it. - * configure: Regenerate. - -2015-08-24 Ulrich Weigand - - * configure.ac: Set have_mmap to no on spu-*-* targets. - * configure: Regenerate. - -2015-08-13 Ian Lance Taylor - - * dwarf.c (read_function_entry): Add vec_inlined parameter. - Change all callers. - -2015-06-11 Martin Sebor - - PR sanitizer/65479 - * dwarf.c (struct line): Add new field idx. - (line_compare): Use it. - (add_line): Set it. - (read_line_info): Reset it. - -2015-05-29 Tristan Gingold - - * pecoff.c: New file. - * Makefile.am (FORMAT_FILES): Add pecoff.c and dependencies. - * Makefile.in: Regenerate. - * filetype.awk: Detect pecoff. 
- * configure.ac: Define BACKTRACE_SUPPORTS_DATA on elf platforms. - Add pecoff. - * btest.c (test5): Test enabled only if BACKTRACE_SUPPORTS_DATA is - true. - * backtrace-supported.h.in (BACKTRACE_SUPPORTS_DATA): Define. - * configure: Regenerate. - * pecoff.c: New file. - -2015-05-13 Michael Haubenwallner - - * Makefile.in: Regenerated with automake-1.11.6. - * aclocal.m4: Likewise. - * configure: Likewise. - -2015-01-24 Matthias Klose - - * configure.ac: Move AM_ENABLE_MULTILIB before AC_PROG_CC. - * configure: Regenerate. - -2015-01-05 Jakub Jelinek - - Update copyright years. - -2014-11-21 H.J. Lu - - PR bootstrap/63784 - * configure: Regenerated. - -2014-11-11 David Malcolm - - * ChangeLog.jit: New. - -2014-11-11 Francois-Xavier Coudert - - PR target/63610 - * configure: Regenerate. - -2014-10-23 Ian Lance Taylor - - * internal.h (backtrace_atomic_load_pointer) [no atomic or sync]: - Fix to return void *. - -2014-05-08 Ian Lance Taylor - - * mmap.c (backtrace_free): If freeing a large aligned block of - memory, call munmap rather than holding onto it. - (backtrace_vector_grow): When growing a vector, double the number - of pages requested. When releasing the old version of a grown - vector, pass the correct size to backtrace_free. - -2014-03-07 Ian Lance Taylor - - * sort.c (backtrace_qsort): Use middle element as pivot. - -2014-03-06 Ian Lance Taylor - - * sort.c: New file. - * stest.c: New file. - * internal.h (backtrace_qsort): Declare. - * dwarf.c (read_abbrevs): Call backtrace_qsort instead of qsort. - (read_line_info, read_function_entry): Likewise. - (read_function_info, build_dwarf_data): Likewise. - * elf.c (elf_initialize_syminfo): Likewise. - * Makefile.am (libbacktrace_la_SOURCES): Add sort.c. - (stest_SOURCES, stest_LDADD): Define. - (check_PROGRAMS): Add stest. - -2014-02-07 Misty De Meo - - PR target/58710 - * configure.ac: Use AC_LINK_IFELSE in check for - _Unwind_GetIPInfo. - * configure: Regenerate. 
- -2014-01-02 Richard Sandiford - - Update copyright years - -2013-12-06 Jakub Jelinek - - * elf.c (ET_DYN): Undefine and define again. - (elf_add): Add exe argument, if true and ehdr.e_type is ET_DYN, - return early -1 without closing the descriptor. - (struct phdr_data): Add exe_descriptor. - (phdr_callback): If pd->exe_descriptor is not -1, for very first - call if dlpi_name is NULL just call elf_add with the exe_descriptor, - otherwise backtrace_close the exe_descriptor if not -1. Adjust - call to elf_add. - (backtrace_initialize): Adjust call to elf_add. If it returns - -1, set pd.exe_descriptor to descriptor, otherwise set it to -1. - -2013-12-05 Ian Lance Taylor - - * alloc.c (backtrace_vector_finish): Add error_callback and data - parameters. Call backtrace_vector_release. Return address base. - * mmap.c (backtrace_vector_finish): Add error_callback and data - parameters. Return address base. - * dwarf.c (read_function_info): Get new address base from - backtrace_vector_finish. - * internal.h (backtrace_vector_finish): Update declaration. - -2013-11-27 Ian Lance Taylor - - * dwarf.c (find_address_ranges): New static function, broken out - of build_address_map. - (build_address_map): Call it. - * btest.c (check): Check for missing filename or function, rather - than crashing. - (f3): Check that enough frames were returned. - -2013-11-19 Jakub Jelinek - - * backtrace.h (backtrace_syminfo_callback): Add symsize argument. - * elf.c (elf_syminfo): Pass 0 or sym->size to the callback as - last argument. - * btest.c (struct symdata): Add size field. - (callback_three): Add symsize argument. Copy it to the data->size - field. - (f23): Set symdata.size to 0. - (test5): Likewise. If sizeof (int) > 1, lookup address of - ((uintptr_t) &global) + 1. Verify symdata.val and symdata.size - values. - - * atomic.c: Include sys/types.h. - -2013-11-18 Ian Lance Taylor - - * configure.ac: Check for support of __atomic extensions. 
- * internal.h: Declare or #define atomic functions for use in - backtrace code. - * atomic.c: New file. - * dwarf.c (dwarf_lookup_pc): Use atomic functions. - (dwarf_fileline, backtrace_dwarf_add): Likewise. - * elf.c (elf_add_syminfo_data, elf_syminfo): Likewise. - (backtrace_initialize): Likewise. - * fileline.c (fileline_initialize): Likewise. - * Makefile.am (libbacktrace_la_SOURCES): Add atomic.c. - * configure, config.h.in, Makefile.in: Rebuild. - -2013-11-18 Jakub Jelinek - - * elf.c (SHN_UNDEF): Define. - (elf_initialize_syminfo): Add base_address argument. Ignore symbols - with st_shndx == SHN_UNDEF. Add base_address to address fields. - (elf_add): Adjust caller. - - * elf.c (phdr_callback): Process info->dlpi_addr == 0 normally. - -2013-11-16 Ian Lance Taylor - - * backtrace.h (backtrace_create_state): Correct comment about - threading. - -2013-11-15 Ian Lance Taylor - - * backtrace.h (backtrace_syminfo): Update comment and parameter - name to take any address, not just a PC value. - * elf.c (STT_OBJECT): Define. - (elf_nosyms): Rename parameter pc to addr. - (elf_symbol_search): Rename local variable pc to addr. - (elf_initialize_syminfo): Add STT_OBJECT symbols to elf_symbols. - (elf_syminfo): Rename parameter pc to addr. - * btest.c (global): New global variable. - (test5): New test. - (main): Call test5. - -2013-10-17 Ian Lance Taylor - - * elf.c (elf_add): Don't get the wrong offsets if a debug section - is missing. - -2013-10-15 David Malcolm - - * configure.ac: Add --enable-host-shared, setting up - pre-existing PIC_FLAG variable within Makefile.am et al. - * configure: Regenerate. - -2013-09-20 Alan Modra - - * configure: Regenerate. - -2013-07-23 Alexander Monakov - - * elf.c (elf_syminfo): Loop over the elf_syminfo_data chain. - -2013-07-23 Alexander Monakov - - * elf.c (backtrace_initialize): Pass elf_fileline_fn to - dl_iterate_phdr callbacks. - -2013-03-25 Ian Lance Taylor - - * alloc.c: #include . - * mmap.c: Likewise. 
- -2013-01-31 Ian Lance Taylor - - * dwarf.c (read_function_info): Permit fvec parameter to be NULL. - (dwarf_lookup_pc): Don't use ddata->fvec if threaded. - -2013-01-25 Jakub Jelinek - - PR other/56076 - * dwarf.c (read_line_header): Don't crash if DW_AT_comp_dir - attribute was not seen. - -2013-01-16 Ian Lance Taylor - - * dwarf.c (struct unit): Add filename and abs_filename fields. - (build_address_map): Set new fields when reading unit. - (dwarf_lookup_pc): If we don't find an entry in the line table, - just return the main file name. - -2013-01-14 Richard Sandiford - - Update copyright years. - -2013-01-01 Ian Lance Taylor - - PR bootstrap/54834 - * Makefile.am (AM_CPPFLAGS): Remove -I ../gcc/include and -I - $(MULTIBUILDTOP)/../../gcc/include. - * Makefile.in: Rebuild. - -2013-01-01 Ian Lance Taylor - - PR other/55536 - * mmap.c (backtrace_alloc): Don't call sync functions if not - threaded. - (backtrace_free): Likewise. - -2012-12-12 John David Anglin - - * mmapio.c: Define MAP_FAILED if not defined. - -2012-12-11 Jakub Jelinek - - PR bootstrap/54926 - * Makefile.am (AM_CFLAGS): Remove -frandom-seed=$@. - * configure.ac: If --with-target-subdir, add -frandom-seed=$@ - to EXTRA_FLAGS unconditionally, otherwise check whether the compiler - accepts it. - * Makefile.in: Regenerated. - * configure: Regenerated. - -2012-12-07 Jakub Jelinek - - PR bootstrap/54926 - * Makefile.am (AM_CFLAGS): Add -frandom-seed=$@. - * Makefile.in: Regenerated. - -2012-11-20 Ian Lance Taylor - - * dwarf.c (read_attribute): Always clear val. - -2012-11-13 Ian Lance Taylor - - PR other/55312 - * configure.ac: Only add -Werror if building a target library. - * configure: Rebuild. - -2012-11-12 Ian Lance Taylor - Rainer Orth - Gerald Pfeifer - - * configure.ac: Check for getexecname. - * fileline.c: #include . Define getexecname if not - available. - (fileline_initialize): Try to find the executable in a few - different ways. 
- * print.c (error_callback): Only print the filename if it came - from the backtrace state. - * configure, config.h.in: Rebuild. - -2012-10-29 Ian Lance Taylor - - * mmap.c (backtrace_vector_release): Correct last patch: add - aligned, not size. - -2012-10-29 Ian Lance Taylor - - * mmap.c (backtrace_vector_release): Make sure freed block is - aligned on 8-byte boundary. - -2012-10-26 Ian Lance Taylor - - PR other/55087 - * posix.c (backtrace_open): Add does_not_exist parameter. - * elf.c (phdr_callback): Do not warn if shared library could not - be opened. - * fileline.c (fileline_initialize): Update calls to - backtrace_open. - * internal.h (backtrace_open): Update declaration. - -2012-10-26 Jack Howarth - - PR target/55061 - * configure.ac: Check for _Unwind_GetIPInfo function declaration. - * configure: Regenerate. - -2012-10-24 Ian Lance Taylor - - PR target/55061 - * configure.ac: Check whether -funwind-tables option works. - * configure: Rebuild. - -2012-10-11 Ian Lance Taylor - - * configure.ac: Do not use dl_iterate_phdr on Solaris 10. - * configure: Rebuild. - -2012-10-10 Ian Lance Taylor - - * elf.c: Rename all Elf typedefs to start with b_elf, and be all - lower case. - -2012-10-10 Hans-Peter Nilsson - - * elf.c (elf_add_syminfo_data): Add casts to avoid warning. - -2012-10-09 Ian Lance Taylor - - * dwarf.c (dwarf_fileline): Add cast to avoid warning. - (backtrace_dwarf_add): Likewise. - -2012-10-09 Ian Lance Taylor - - Add support for tracing through shared libraries. - * configure.ac: Check for link.h and dl_iterate_phdr. - * elf.c: #include if system has dl_iterate_phdr. #undef - ELF macros before #defining them. - (dl_phdr_info, dl_iterate_phdr): Define if system does not have - dl_iterate_phdr. - (struct elf_syminfo_data): Add next field. - (elf_initialize_syminfo): Initialize next field. - (elf_add_syminfo_data): New static function. - (elf_add): New static function, broken out of - backtrace_initialize. 
Call backtrace_dwarf_add instead of - backtrace_dwarf_initialize. - (struct phdr_data): Define. - (phdr_callback): New static function. - (backtrace_initialize): Call elf_add. - * dwarf.c (struct dwarf_data): Add next and base_address fields. - (add_unit_addr): Add base_address parameter. Change all callers. - (add_unit_ranges, build_address_map): Likewise. - (add_line): Add ddata parameter. Change all callers. - (read_line_program, add_function_range): Likewise. - (dwarf_lookup_pc): New static function, broken out of - dwarf_fileline. - (dwarf_fileline): Call dwarf_lookup_pc. - (build_dwarf_data): New static function. - (backtrace_dwarf_add): New function. - (backtrace_dwarf_initialize): Remove. - * internal.h (backtrace_dwarf_initialize): Don't declare. - (backtrace_dwarf_add): Declare. - * configure, config.h.in: Rebuild. - -2012-10-04 Gerald Pfeifer - - * btest.c (f23): Avoid uninitialized variable warning. - -2012-10-04 Ian Lance Taylor - - * dwarf.c: If the system header files do not declare strnlen, - provide our own version. - -2012-10-03 Ian Lance Taylor - - * dwarf.c (read_uleb128): Fix overflow test. - (read_sleb128): Likewise. - (build_address_map): Don't change unit_buf.start. - -2012-10-02 Uros Bizjak - - PR other/54761 - * configure.ac (EXTRA_FLAGS): New. - * Makefile.am (AM_FLAGS): Add $(EXTRA_FLAGS). - * configure, Makefile.in: Regenerate. - -2012-09-29 Ian Lance Taylor - - PR other/54749 - * fileline.c (fileline_initialize): Pass errnum as -1 when - reporting that we could not read executable information after a - previous failure. - -2012-09-27 Ian Lance Taylor - - PR bootstrap/54732 - * configure.ac: Add no-dependencies to AM_INIT_AUTOMAKE. - * Makefile.am: Add dependencies for all objects. - * configure, aclocal.m4, Makefile.in: Rebuild. - -2012-09-27 Ian Lance Taylor - - PR other/54726 - * elf.c (backtrace_initialize): Set *fileln_fn, not - state->fileln_fn. 
- -2012-09-19 Ian Lance Taylor - - * configure.ac: Only use GCC_CHECK_UNWIND_GETIPINFO when compiled - as a target library. - * configure: Rebuild. - -2012-09-19 Rainer Orth - Ian Lance Taylor - - * configure.ac (GCC_HEADER_STDINT): Invoke. - * backtrace.h: If we can't find , use "gstdint.h". - * btest.c: Don't include . - * dwarf.c: Likewise. - * configure, aclocal.m4, Makefile.in, config.h.in: Rebuild. - -2012-09-18 Ian Lance Taylor - - PR bootstrap/54623 - * Makefile.am (AM_CPPFLAGS): Define. - (AM_CFLAGS): Remove -I options. - * Makefile.in: Rebuild. - -2012-09-18 Ian Lance Taylor - - * posix.c (O_BINARY): Define if not defined. - (backtrace_open): Pass O_BINARY to open. Only call fcntl if - HAVE_FCNTL is defined. - * configure.ac: Test for the fcntl function. - * configure, config.h.in: Rebuild. - -2012-09-18 Ian Lance Taylor - - * btest.c (test1, test2, test3, test4): Add the unused attribute. - -2012-09-18 Ian Lance Taylor - - * dwarf.c: Correct test of HAVE_DECL_STRNLEN. - -2012-09-18 Ian Lance Taylor - - * configure.ac: Add AC_USE_SYSTEM_EXTENSIONS. - * mmapio.c: Don't define _GNU_SOURCE. - * configure, config.h.in: Rebuild. - -2012-09-18 Ian Lance Taylor - - * configure.ac: Check whether strnlen is declared. - * dwarf.c: Declare strnlen if not declared. - * configure, config.h.in: Rebuild. - -2012-09-18 Rainer Orth - - * fileline.c: Include . - * mmap.c: Likewise. - -2012-09-17 Ian Lance Taylor - - PR bootstrap/54611 - * nounwind.c (backtrace_full): Rename from backtrace. Add state - parameter. - -2012-09-17 Gerald Pfeifer - - PR bootstrap/54611 - * nounwind.c (backtrace_simple): Add state parameter. - -2012-09-17 Ian Lance Taylor - - PR bootstrap/54609 - * unknown.c (unknown_fileline): Add state parameter, remove - fileline_data parameter, name error_callback parameter. - (backtrace_initialize): Add state parameter. - -2012-09-17 Ian Lance Taylor - - * Initial implementation. - -Copyright (C) 2012-2016 Free Software Foundation, Inc. 
- -Copying and distribution of this file, with or without modification, -are permitted in any medium without royalty provided the copyright -notice and this notice are preserved. diff --git a/src/libbacktrace/ChangeLog.jit b/src/libbacktrace/ChangeLog.jit deleted file mode 100644 index 6b60e3b3b073..000000000000 --- a/src/libbacktrace/ChangeLog.jit +++ /dev/null @@ -1,14 +0,0 @@ -2014-09-24 David Malcolm - - * ChangeLog.jit: Add copyright footer. - -2013-10-03 David Malcolm - - * configure.ac: Add --enable-host-shared. - * configure: Regenerate. - -Copyright (C) 2013-2014 Free Software Foundation, Inc. - -Copying and distribution of this file, with or without modification, -are permitted in any medium without royalty provided the copyright -notice and this notice are preserved. diff --git a/src/libbacktrace/Makefile.am b/src/libbacktrace/Makefile.am deleted file mode 100644 index a7df02590982..000000000000 --- a/src/libbacktrace/Makefile.am +++ /dev/null @@ -1,136 +0,0 @@ -# Makefile.am -- Backtrace Makefile. -# Copyright (C) 2012-2016 Free Software Foundation, Inc. - -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: - -# (1) Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. - -# (2) Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. - -# (3) The name of the author may not be used to -# endorse or promote products derived from this software without -# specific prior written permission. - -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -ACLOCAL_AMFLAGS = -I .. -I ../config - -AM_CPPFLAGS = -I $(top_srcdir)/../include -I $(top_srcdir)/../libgcc \ - -I ../libgcc - -AM_CFLAGS = $(EXTRA_FLAGS) $(WARN_FLAGS) $(PIC_FLAG) - -noinst_LTLIBRARIES = libbacktrace.la - -libbacktrace_la_SOURCES = \ - backtrace.h \ - atomic.c \ - dwarf.c \ - fileline.c \ - internal.h \ - posix.c \ - print.c \ - sort.c \ - state.c - -BACKTRACE_FILES = \ - backtrace.c \ - simple.c \ - nounwind.c - -FORMAT_FILES = \ - elf.c \ - pecoff.c \ - unknown.c - -VIEW_FILES = \ - read.c \ - mmapio.c - -ALLOC_FILES = \ - alloc.c \ - mmap.c - -EXTRA_libbacktrace_la_SOURCES = \ - $(BACKTRACE_FILES) \ - $(FORMAT_FILES) \ - $(VIEW_FILES) \ - $(ALLOC_FILES) - -libbacktrace_la_LIBADD = \ - $(BACKTRACE_FILE) \ - $(FORMAT_FILE) \ - $(VIEW_FILE) \ - $(ALLOC_FILE) - -libbacktrace_la_DEPENDENCIES = $(libbacktrace_la_LIBADD) - -# Testsuite. - -check_PROGRAMS = - -TESTS = $(check_PROGRAMS) - -if NATIVE - -btest_SOURCES = btest.c -btest_CFLAGS = $(AM_CFLAGS) -g -O -btest_LDADD = libbacktrace.la - -check_PROGRAMS += btest - -stest_SOURCES = stest.c -stest_LDADD = libbacktrace.la - -check_PROGRAMS += stest - -endif NATIVE - -# We can't use automake's automatic dependency tracking, because it -# breaks when using bootstrap-lean. Automatic dependency tracking -# with GCC bootstrap will cause some of the objects to depend on -# header files in prev-gcc/include, e.g., stddef.h and stdarg.h. When -# using bootstrap-lean, prev-gcc is removed after each stage. 
When -# running "make install", those header files will be gone, causing the -# library to be rebuilt at install time. That may not succeed. - -# These manual dependencies do not include dependencies on unwind.h, -# even though that is part of GCC, because where to find it depends on -# whether we are being built as a host library or a target library. - -INCDIR = $(top_srcdir)/../include -alloc.lo: config.h backtrace.h internal.h -backtrace.lo: config.h backtrace.h internal.h -btest.lo: (INCDIR)/filenames.h backtrace.h backtrace-supported.h -dwarf.lo: config.h $(INCDIR)/dwarf2.h $(INCDIR)/dwarf2.def \ - $(INCDIR)/filenames.h backtrace.h internal.h -elf.lo: config.h backtrace.h internal.h -fileline.lo: config.h backtrace.h internal.h -mmap.lo: config.h backtrace.h internal.h -mmapio.lo: config.h backtrace.h internal.h -nounwind.lo: config.h internal.h -pecoff.lo: config.h backtrace.h internal.h -posix.lo: config.h backtrace.h internal.h -print.lo: config.h backtrace.h internal.h -read.lo: config.h backtrace.h internal.h -simple.lo: config.h backtrace.h internal.h -sort.lo: config.h backtrace.h internal.h -stest.lo: config.h backtrace.h internal.h -state.lo: config.h backtrace.h backtrace-supported.h internal.h -unknown.lo: config.h backtrace.h internal.h diff --git a/src/libbacktrace/Makefile.in b/src/libbacktrace/Makefile.in deleted file mode 100644 index 586b6a6eaa10..000000000000 --- a/src/libbacktrace/Makefile.in +++ /dev/null @@ -1,770 +0,0 @@ -# Makefile.in generated by automake 1.11.6 from Makefile.am. -# @configure_input@ - -# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software -# Foundation, Inc. -# This Makefile.in is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -@SET_MAKE@ - -# Makefile.am -- Backtrace Makefile. -# Copyright (C) 2012-2015 Free Software Foundation, Inc. - -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: - -# (1) Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. - -# (2) Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. - -# (3) The name of the author may not be used to -# endorse or promote products derived from this software without -# specific prior written permission. - -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
- -VPATH = @srcdir@ -am__make_dryrun = \ - { \ - am__dry=no; \ - case $$MAKEFLAGS in \ - *\\[\ \ ]*) \ - echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \ - | grep '^AM OK$$' >/dev/null || am__dry=yes;; \ - *) \ - for am__flg in $$MAKEFLAGS; do \ - case $$am__flg in \ - *=*|--*) ;; \ - *n*) am__dry=yes; break;; \ - esac; \ - done;; \ - esac; \ - test $$am__dry = yes; \ - } -pkgdatadir = $(datadir)/@PACKAGE@ -pkgincludedir = $(includedir)/@PACKAGE@ -pkglibdir = $(libdir)/@PACKAGE@ -pkglibexecdir = $(libexecdir)/@PACKAGE@ -am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd -install_sh_DATA = $(install_sh) -c -m 644 -install_sh_PROGRAM = $(install_sh) -c -install_sh_SCRIPT = $(install_sh) -c -INSTALL_HEADER = $(INSTALL_DATA) -transform = $(program_transform_name) -NORMAL_INSTALL = : -PRE_INSTALL = : -POST_INSTALL = : -NORMAL_UNINSTALL = : -PRE_UNINSTALL = : -POST_UNINSTALL = : -build_triplet = @build@ -host_triplet = @host@ -target_triplet = @target@ -check_PROGRAMS = $(am__EXEEXT_1) -@NATIVE_TRUE@am__append_1 = btest stest -subdir = . 
-DIST_COMMON = README ChangeLog $(srcdir)/Makefile.in \ - $(srcdir)/Makefile.am $(top_srcdir)/configure \ - $(am__configure_deps) $(srcdir)/config.h.in \ - $(srcdir)/../mkinstalldirs $(srcdir)/backtrace-supported.h.in -ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 -am__aclocal_m4_deps = $(top_srcdir)/../config/lead-dot.m4 \ - $(top_srcdir)/../config/multi.m4 \ - $(top_srcdir)/../config/override.m4 \ - $(top_srcdir)/../config/stdint.m4 \ - $(top_srcdir)/../config/unwind_ipinfo.m4 \ - $(top_srcdir)/../config/warnings.m4 \ - $(top_srcdir)/../libtool.m4 $(top_srcdir)/../ltoptions.m4 \ - $(top_srcdir)/../ltsugar.m4 $(top_srcdir)/../ltversion.m4 \ - $(top_srcdir)/../lt~obsolete.m4 $(top_srcdir)/configure.ac -am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ - $(ACLOCAL_M4) -am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ - configure.lineno config.status.lineno -mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs -CONFIG_HEADER = config.h -CONFIG_CLEAN_FILES = backtrace-supported.h -CONFIG_CLEAN_VPATH_FILES = -LTLIBRARIES = $(noinst_LTLIBRARIES) -am__DEPENDENCIES_1 = -am_libbacktrace_la_OBJECTS = atomic.lo dwarf.lo fileline.lo posix.lo \ - print.lo sort.lo state.lo -libbacktrace_la_OBJECTS = $(am_libbacktrace_la_OBJECTS) -@NATIVE_TRUE@am__EXEEXT_1 = btest$(EXEEXT) stest$(EXEEXT) -@NATIVE_TRUE@am_btest_OBJECTS = btest-btest.$(OBJEXT) -btest_OBJECTS = $(am_btest_OBJECTS) -@NATIVE_TRUE@btest_DEPENDENCIES = libbacktrace.la -btest_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ - --mode=link $(CCLD) $(btest_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ - $(LDFLAGS) -o $@ -@NATIVE_TRUE@am_stest_OBJECTS = stest.$(OBJEXT) -stest_OBJECTS = $(am_stest_OBJECTS) -@NATIVE_TRUE@stest_DEPENDENCIES = libbacktrace.la -DEFAULT_INCLUDES = -I.@am__isrc@ -depcomp = -am__depfiles_maybe = -COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ - $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) 
$(LIBTOOLFLAGS) \ - --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ - $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -CCLD = $(CC) -LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ - --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ - $(LDFLAGS) -o $@ -SOURCES = $(libbacktrace_la_SOURCES) $(EXTRA_libbacktrace_la_SOURCES) \ - $(btest_SOURCES) $(stest_SOURCES) -MULTISRCTOP = -MULTIBUILDTOP = -MULTIDIRS = -MULTISUBDIR = -MULTIDO = true -MULTICLEAN = true -am__can_run_installinfo = \ - case $$AM_UPDATE_INFO_DIR in \ - n|no|NO) false;; \ - *) (install-info --version) >/dev/null 2>&1;; \ - esac -ETAGS = etags -CTAGS = ctags -am__tty_colors = \ -red=; grn=; lgn=; blu=; std= -ACLOCAL = @ACLOCAL@ -ALLOC_FILE = @ALLOC_FILE@ -AMTAR = @AMTAR@ -AR = @AR@ -AUTOCONF = @AUTOCONF@ -AUTOHEADER = @AUTOHEADER@ -AUTOMAKE = @AUTOMAKE@ -AWK = @AWK@ -BACKTRACE_FILE = @BACKTRACE_FILE@ -BACKTRACE_SUPPORTED = @BACKTRACE_SUPPORTED@ -BACKTRACE_SUPPORTS_DATA = @BACKTRACE_SUPPORTS_DATA@ -BACKTRACE_SUPPORTS_THREADS = @BACKTRACE_SUPPORTS_THREADS@ -BACKTRACE_USES_MALLOC = @BACKTRACE_USES_MALLOC@ -CC = @CC@ -CFLAGS = @CFLAGS@ -CPP = @CPP@ -CPPFLAGS = @CPPFLAGS@ -CYGPATH_W = @CYGPATH_W@ -DEFS = @DEFS@ -DSYMUTIL = @DSYMUTIL@ -DUMPBIN = @DUMPBIN@ -ECHO_C = @ECHO_C@ -ECHO_N = @ECHO_N@ -ECHO_T = @ECHO_T@ -EGREP = @EGREP@ -EXEEXT = @EXEEXT@ -EXTRA_FLAGS = @EXTRA_FLAGS@ -FGREP = @FGREP@ -FORMAT_FILE = @FORMAT_FILE@ -GREP = @GREP@ -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ -INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ -LD = @LD@ -LDFLAGS = @LDFLAGS@ -LIBOBJS = @LIBOBJS@ -LIBS = @LIBS@ -LIBTOOL = @LIBTOOL@ -LIPO = @LIPO@ -LN_S = @LN_S@ -LTLIBOBJS = @LTLIBOBJS@ -MAINT = @MAINT@ -MAKEINFO = @MAKEINFO@ -MKDIR_P = @MKDIR_P@ -NM = @NM@ -NMEDIT = @NMEDIT@ -OBJDUMP = @OBJDUMP@ -OBJEXT = @OBJEXT@ -OTOOL = @OTOOL@ -OTOOL64 = @OTOOL64@ -PACKAGE = @PACKAGE@ -PACKAGE_BUGREPORT = 
@PACKAGE_BUGREPORT@ -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_STRING = @PACKAGE_STRING@ -PACKAGE_TARNAME = @PACKAGE_TARNAME@ -PACKAGE_URL = @PACKAGE_URL@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -PATH_SEPARATOR = @PATH_SEPARATOR@ -PIC_FLAG = @PIC_FLAG@ -RANLIB = @RANLIB@ -SED = @SED@ -SET_MAKE = @SET_MAKE@ -SHELL = @SHELL@ -STRIP = @STRIP@ -VERSION = @VERSION@ -VIEW_FILE = @VIEW_FILE@ -WARN_FLAGS = @WARN_FLAGS@ -abs_builddir = @abs_builddir@ -abs_srcdir = @abs_srcdir@ -abs_top_builddir = @abs_top_builddir@ -abs_top_srcdir = @abs_top_srcdir@ -ac_ct_CC = @ac_ct_CC@ -ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ -am__leading_dot = @am__leading_dot@ -am__tar = @am__tar@ -am__untar = @am__untar@ -bindir = @bindir@ -build = @build@ -build_alias = @build_alias@ -build_cpu = @build_cpu@ -build_os = @build_os@ -build_vendor = @build_vendor@ -builddir = @builddir@ -datadir = @datadir@ -datarootdir = @datarootdir@ -docdir = @docdir@ -dvidir = @dvidir@ -exec_prefix = @exec_prefix@ -host = @host@ -host_alias = @host_alias@ -host_cpu = @host_cpu@ -host_os = @host_os@ -host_vendor = @host_vendor@ -htmldir = @htmldir@ -includedir = @includedir@ -infodir = @infodir@ -install_sh = @install_sh@ -libdir = @libdir@ -libexecdir = @libexecdir@ -libtool_VERSION = @libtool_VERSION@ -localedir = @localedir@ -localstatedir = @localstatedir@ -mandir = @mandir@ -mkdir_p = @mkdir_p@ -multi_basedir = @multi_basedir@ -oldincludedir = @oldincludedir@ -pdfdir = @pdfdir@ -prefix = @prefix@ -program_transform_name = @program_transform_name@ -psdir = @psdir@ -sbindir = @sbindir@ -sharedstatedir = @sharedstatedir@ -srcdir = @srcdir@ -sysconfdir = @sysconfdir@ -target = @target@ -target_alias = @target_alias@ -target_cpu = @target_cpu@ -target_os = @target_os@ -target_vendor = @target_vendor@ -top_build_prefix = @top_build_prefix@ -top_builddir = @top_builddir@ -top_srcdir = @top_srcdir@ -ACLOCAL_AMFLAGS = -I .. 
-I ../config -AM_CPPFLAGS = -I $(top_srcdir)/../include -I $(top_srcdir)/../libgcc \ - -I ../libgcc - -AM_CFLAGS = $(EXTRA_FLAGS) $(WARN_FLAGS) $(PIC_FLAG) -noinst_LTLIBRARIES = libbacktrace.la -libbacktrace_la_SOURCES = \ - backtrace.h \ - atomic.c \ - dwarf.c \ - fileline.c \ - internal.h \ - posix.c \ - print.c \ - sort.c \ - state.c - -BACKTRACE_FILES = \ - backtrace.c \ - simple.c \ - nounwind.c - -FORMAT_FILES = \ - elf.c \ - pecoff.c \ - unknown.c - -VIEW_FILES = \ - read.c \ - mmapio.c - -ALLOC_FILES = \ - alloc.c \ - mmap.c - -EXTRA_libbacktrace_la_SOURCES = \ - $(BACKTRACE_FILES) \ - $(FORMAT_FILES) \ - $(VIEW_FILES) \ - $(ALLOC_FILES) - -libbacktrace_la_LIBADD = \ - $(BACKTRACE_FILE) \ - $(FORMAT_FILE) \ - $(VIEW_FILE) \ - $(ALLOC_FILE) - -libbacktrace_la_DEPENDENCIES = $(libbacktrace_la_LIBADD) -TESTS = $(check_PROGRAMS) -@NATIVE_TRUE@btest_SOURCES = btest.c -@NATIVE_TRUE@btest_CFLAGS = $(AM_CFLAGS) -g -O -@NATIVE_TRUE@btest_LDADD = libbacktrace.la -@NATIVE_TRUE@stest_SOURCES = stest.c -@NATIVE_TRUE@stest_LDADD = libbacktrace.la - -# We can't use automake's automatic dependency tracking, because it -# breaks when using bootstrap-lean. Automatic dependency tracking -# with GCC bootstrap will cause some of the objects to depend on -# header files in prev-gcc/include, e.g., stddef.h and stdarg.h. When -# using bootstrap-lean, prev-gcc is removed after each stage. When -# running "make install", those header files will be gone, causing the -# library to be rebuilt at install time. That may not succeed. - -# These manual dependencies do not include dependencies on unwind.h, -# even though that is part of GCC, because where to find it depends on -# whether we are being built as a host library or a target library. 
-INCDIR = $(top_srcdir)/../include -all: config.h - $(MAKE) $(AM_MAKEFLAGS) all-am - -.SUFFIXES: -.SUFFIXES: .c .lo .o .obj -am--refresh: Makefile - @: -$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) - @for dep in $?; do \ - case '$(am__configure_deps)' in \ - *$$dep*) \ - echo ' cd $(srcdir) && $(AUTOMAKE) --foreign --ignore-deps'; \ - $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign --ignore-deps \ - && exit 0; \ - exit 1;; \ - esac; \ - done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign --ignore-deps Makefile'; \ - $(am__cd) $(top_srcdir) && \ - $(AUTOMAKE) --foreign --ignore-deps Makefile -.PRECIOUS: Makefile -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - @case '$?' in \ - *config.status*) \ - echo ' $(SHELL) ./config.status'; \ - $(SHELL) ./config.status;; \ - *) \ - echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ - cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ - esac; - -$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) - $(SHELL) ./config.status --recheck - -$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - $(am__cd) $(srcdir) && $(AUTOCONF) -$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) - $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) -$(am__aclocal_m4_deps): - -config.h: stamp-h1 - @if test ! -f $@; then rm -f stamp-h1; else :; fi - @if test ! 
-f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi - -stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status - @rm -f stamp-h1 - cd $(top_builddir) && $(SHELL) ./config.status config.h -$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) - ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) - rm -f stamp-h1 - touch $@ - -distclean-hdr: - -rm -f config.h stamp-h1 -backtrace-supported.h: $(top_builddir)/config.status $(srcdir)/backtrace-supported.h.in - cd $(top_builddir) && $(SHELL) ./config.status $@ - -clean-noinstLTLIBRARIES: - -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) - @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ - dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ - test "$$dir" != "$$p" || dir=.; \ - echo "rm -f \"$${dir}/so_locations\""; \ - rm -f "$${dir}/so_locations"; \ - done -libbacktrace.la: $(libbacktrace_la_OBJECTS) $(libbacktrace_la_DEPENDENCIES) $(EXTRA_libbacktrace_la_DEPENDENCIES) - $(LINK) $(libbacktrace_la_OBJECTS) $(libbacktrace_la_LIBADD) $(LIBS) - -clean-checkPROGRAMS: - @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ - echo " rm -f" $$list; \ - rm -f $$list || exit $$?; \ - test -n "$(EXEEXT)" || exit 0; \ - list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ - echo " rm -f" $$list; \ - rm -f $$list -btest$(EXEEXT): $(btest_OBJECTS) $(btest_DEPENDENCIES) $(EXTRA_btest_DEPENDENCIES) - @rm -f btest$(EXEEXT) - $(btest_LINK) $(btest_OBJECTS) $(btest_LDADD) $(LIBS) -stest$(EXEEXT): $(stest_OBJECTS) $(stest_DEPENDENCIES) $(EXTRA_stest_DEPENDENCIES) - @rm -f stest$(EXEEXT) - $(LINK) $(stest_OBJECTS) $(stest_LDADD) $(LIBS) - -mostlyclean-compile: - -rm -f *.$(OBJEXT) - -distclean-compile: - -rm -f *.tab.c - -.c.o: - $(COMPILE) -c $< - -.c.obj: - $(COMPILE) -c `$(CYGPATH_W) '$<'` - -.c.lo: - $(LTCOMPILE) -c -o $@ $< - -btest-btest.o: btest.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(btest_CFLAGS) $(CFLAGS) -c -o btest-btest.o `test -f 'btest.c' 
|| echo '$(srcdir)/'`btest.c - -btest-btest.obj: btest.c - $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(btest_CFLAGS) $(CFLAGS) -c -o btest-btest.obj `if test -f 'btest.c'; then $(CYGPATH_W) 'btest.c'; else $(CYGPATH_W) '$(srcdir)/btest.c'; fi` - -mostlyclean-libtool: - -rm -f *.lo - -clean-libtool: - -rm -rf .libs _libs - -distclean-libtool: - -rm -f libtool config.lt - -# GNU Make needs to see an explicit $(MAKE) variable in the command it -# runs to enable its job server during parallel builds. Hence the -# comments below. -all-multi: - $(MULTIDO) $(AM_MAKEFLAGS) DO=all multi-do # $(MAKE) -install-multi: - $(MULTIDO) $(AM_MAKEFLAGS) DO=install multi-do # $(MAKE) - -mostlyclean-multi: - $(MULTICLEAN) $(AM_MAKEFLAGS) DO=mostlyclean multi-clean # $(MAKE) -clean-multi: - $(MULTICLEAN) $(AM_MAKEFLAGS) DO=clean multi-clean # $(MAKE) -distclean-multi: - $(MULTICLEAN) $(AM_MAKEFLAGS) DO=distclean multi-clean # $(MAKE) -maintainer-clean-multi: - $(MULTICLEAN) $(AM_MAKEFLAGS) DO=maintainer-clean multi-clean # $(MAKE) - -ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) - list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - mkid -fID $$unique -tags: TAGS - -TAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - set x; \ - here=`pwd`; \ - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - shift; \ - if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ - test -n "$$unique" || unique=$$empty_fix; \ - if test $$# -gt 0; then \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) 
$(ETAGS_ARGS) \ - "$$@" $$unique; \ - else \ - $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ - $$unique; \ - fi; \ - fi -ctags: CTAGS -CTAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ - $(TAGS_FILES) $(LISP) - list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ - unique=`for i in $$list; do \ - if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ - done | \ - $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ - END { if (nonempty) { for (i in files) print i; }; }'`; \ - test -z "$(CTAGS_ARGS)$$unique" \ - || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ - $$unique - -GTAGS: - here=`$(am__cd) $(top_builddir) && pwd` \ - && $(am__cd) $(top_srcdir) \ - && gtags -i $(GTAGS_ARGS) "$$here" - -distclean-tags: - -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags - -check-TESTS: $(TESTS) - @failed=0; all=0; xfail=0; xpass=0; skip=0; \ - srcdir=$(srcdir); export srcdir; \ - list=' $(TESTS) '; \ - $(am__tty_colors); \ - if test -n "$$list"; then \ - for tst in $$list; do \ - if test -f ./$$tst; then dir=./; \ - elif test -f $$tst; then dir=; \ - else dir="$(srcdir)/"; fi; \ - if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *[\ \ ]$$tst[\ \ ]*) \ - xpass=`expr $$xpass + 1`; \ - failed=`expr $$failed + 1`; \ - col=$$red; res=XPASS; \ - ;; \ - *) \ - col=$$grn; res=PASS; \ - ;; \ - esac; \ - elif test $$? 
-ne 77; then \ - all=`expr $$all + 1`; \ - case " $(XFAIL_TESTS) " in \ - *[\ \ ]$$tst[\ \ ]*) \ - xfail=`expr $$xfail + 1`; \ - col=$$lgn; res=XFAIL; \ - ;; \ - *) \ - failed=`expr $$failed + 1`; \ - col=$$red; res=FAIL; \ - ;; \ - esac; \ - else \ - skip=`expr $$skip + 1`; \ - col=$$blu; res=SKIP; \ - fi; \ - echo "$${col}$$res$${std}: $$tst"; \ - done; \ - if test "$$all" -eq 1; then \ - tests="test"; \ - All=""; \ - else \ - tests="tests"; \ - All="All "; \ - fi; \ - if test "$$failed" -eq 0; then \ - if test "$$xfail" -eq 0; then \ - banner="$$All$$all $$tests passed"; \ - else \ - if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ - banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ - fi; \ - else \ - if test "$$xpass" -eq 0; then \ - banner="$$failed of $$all $$tests failed"; \ - else \ - if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ - banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ - fi; \ - fi; \ - dashes="$$banner"; \ - skipped=""; \ - if test "$$skip" -ne 0; then \ - if test "$$skip" -eq 1; then \ - skipped="($$skip test was not run)"; \ - else \ - skipped="($$skip tests were not run)"; \ - fi; \ - test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$skipped"; \ - fi; \ - report=""; \ - if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ - report="Please report to $(PACKAGE_BUGREPORT)"; \ - test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ - dashes="$$report"; \ - fi; \ - dashes=`echo "$$dashes" | sed s/./=/g`; \ - if test "$$failed" -eq 0; then \ - col="$$grn"; \ - else \ - col="$$red"; \ - fi; \ - echo "$${col}$$dashes$${std}"; \ - echo "$${col}$$banner$${std}"; \ - test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ - test -z "$$report" || echo "$${col}$$report$${std}"; \ - echo "$${col}$$dashes$${std}"; \ - test "$$failed" -eq 0; \ - else :; fi -check-am: all-am - 
$(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) - $(MAKE) $(AM_MAKEFLAGS) check-TESTS -check: check-am -all-am: Makefile $(LTLIBRARIES) all-multi config.h -installdirs: -install: install-am -install-exec: install-exec-am -install-data: install-data-am -uninstall: uninstall-am - -install-am: all-am - @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am - -installcheck: installcheck-am -install-strip: - if test -z '$(STRIP)'; then \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - install; \ - else \ - $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ - install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ - "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ - fi -mostlyclean-generic: - -clean-generic: - -distclean-generic: - -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) - -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) - -maintainer-clean-generic: - @echo "This command is intended for maintainers to use" - @echo "it deletes files that may require special tools to rebuild." 
-clean: clean-am clean-multi - -clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ - clean-noinstLTLIBRARIES mostlyclean-am - -distclean: distclean-am distclean-multi - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -f Makefile -distclean-am: clean-am distclean-compile distclean-generic \ - distclean-hdr distclean-libtool distclean-tags - -dvi: dvi-am - -dvi-am: - -html: html-am - -html-am: - -info: info-am - -info-am: - -install-data-am: - -install-dvi: install-dvi-am - -install-dvi-am: - -install-exec-am: install-multi - -install-html: install-html-am - -install-html-am: - -install-info: install-info-am - -install-info-am: - -install-man: - -install-pdf: install-pdf-am - -install-pdf-am: - -install-ps: install-ps-am - -install-ps-am: - -installcheck-am: - -maintainer-clean: maintainer-clean-am maintainer-clean-multi - -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf $(top_srcdir)/autom4te.cache - -rm -f Makefile -maintainer-clean-am: distclean-am maintainer-clean-generic - -mostlyclean: mostlyclean-am mostlyclean-multi - -mostlyclean-am: mostlyclean-compile mostlyclean-generic \ - mostlyclean-libtool - -pdf: pdf-am - -pdf-am: - -ps: ps-am - -ps-am: - -uninstall-am: - -.MAKE: all all-multi check-am clean-multi distclean-multi install-am \ - install-multi install-strip maintainer-clean-multi \ - mostlyclean-multi - -.PHONY: CTAGS GTAGS all all-am all-multi am--refresh check check-TESTS \ - check-am clean clean-checkPROGRAMS clean-generic clean-libtool \ - clean-multi clean-noinstLTLIBRARIES ctags distclean \ - distclean-compile distclean-generic distclean-hdr \ - distclean-libtool distclean-multi distclean-tags dvi dvi-am \ - html html-am info info-am install install-am install-data \ - install-data-am install-dvi install-dvi-am install-exec \ - install-exec-am install-html install-html-am install-info \ - install-info-am install-man install-multi install-pdf \ - install-pdf-am install-ps install-ps-am install-strip \ - installcheck installcheck-am installdirs 
maintainer-clean \ - maintainer-clean-generic maintainer-clean-multi mostlyclean \ - mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ - mostlyclean-multi pdf pdf-am ps ps-am tags uninstall \ - uninstall-am - -alloc.lo: config.h backtrace.h internal.h -backtrace.lo: config.h backtrace.h internal.h -btest.lo: (INCDIR)/filenames.h backtrace.h backtrace-supported.h -dwarf.lo: config.h $(INCDIR)/dwarf2.h $(INCDIR)/dwarf2.def \ - $(INCDIR)/filenames.h backtrace.h internal.h -elf.lo: config.h backtrace.h internal.h -fileline.lo: config.h backtrace.h internal.h -mmap.lo: config.h backtrace.h internal.h -mmapio.lo: config.h backtrace.h internal.h -nounwind.lo: config.h internal.h -pecoff.lo: config.h backtrace.h internal.h -posix.lo: config.h backtrace.h internal.h -print.lo: config.h backtrace.h internal.h -read.lo: config.h backtrace.h internal.h -simple.lo: config.h backtrace.h internal.h -sort.lo: config.h backtrace.h internal.h -stest.lo: config.h backtrace.h internal.h -state.lo: config.h backtrace.h backtrace-supported.h internal.h -unknown.lo: config.h backtrace.h internal.h - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/src/libbacktrace/README b/src/libbacktrace/README deleted file mode 100644 index e8b225745c9c..000000000000 --- a/src/libbacktrace/README +++ /dev/null @@ -1,23 +0,0 @@ -The libbacktrace library -Initially written by Ian Lance Taylor - -The libbacktrace library may be linked into a program or library and -used to produce symbolic backtraces. Sample uses would be to print a -detailed backtrace when an error occurs or to gather detailed -profiling information. - -The libbacktrace library is provided under a BSD license. See the -source files for the exact license text. - -The public functions are declared and documented in the header file -backtrace.h, which should be #include'd by a user of the library. 
- -Building libbacktrace will generate a file backtrace-supported.h, -which a user of the library may use to determine whether backtraces -will work. See the source file backtrace-supported.h.in for the -macros that it defines. - -As of September 2012, libbacktrace only supports ELF executables with -DWARF debugging information. The library is written to make it -straightforward to add support for other object file and debugging -formats. diff --git a/src/libbacktrace/aclocal.m4 b/src/libbacktrace/aclocal.m4 deleted file mode 100644 index 8e84ddd1f101..000000000000 --- a/src/libbacktrace/aclocal.m4 +++ /dev/null @@ -1,683 +0,0 @@ -# generated automatically by aclocal 1.11.6 -*- Autoconf -*- - -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, -# Inc. -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -m4_ifndef([AC_AUTOCONF_VERSION], - [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl -m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.64],, -[m4_warning([this file was generated for autoconf 2.64. -You have another version of autoconf. It may work, but is not guaranteed to. -If you have problems, you may need to regenerate the build system entirely. -To do so, use the procedure documented by the package, typically `autoreconf'.])]) - -# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software -# Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# serial 1 - -# AM_AUTOMAKE_VERSION(VERSION) -# ---------------------------- -# Automake X.Y traces this macro to ensure aclocal.m4 has been -# generated from the m4 files accompanying Automake X.Y. -# (This private macro should not be called outside this file.) -AC_DEFUN([AM_AUTOMAKE_VERSION], -[am__api_version='1.11' -dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to -dnl require some minimum version. Point them to the right macro. -m4_if([$1], [1.11.6], [], - [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl -]) - -# _AM_AUTOCONF_VERSION(VERSION) -# ----------------------------- -# aclocal traces this macro to find the Autoconf version. -# This is a private macro too. Using m4_define simplifies -# the logic in aclocal, which can simply ignore this definition. -m4_define([_AM_AUTOCONF_VERSION], []) - -# AM_SET_CURRENT_AUTOMAKE_VERSION -# ------------------------------- -# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. -# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. -AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], -[AM_AUTOMAKE_VERSION([1.11.6])dnl -m4_ifndef([AC_AUTOCONF_VERSION], - [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl -_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) - -# AM_AUX_DIR_EXPAND -*- Autoconf -*- - -# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 1 - -# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets -# $ac_aux_dir to `$srcdir/foo'. In other projects, it is set to -# `$srcdir', `$srcdir/..', or `$srcdir/../..'. -# -# Of course, Automake must honor this variable whenever it calls a -# tool from the auxiliary directory. 
The problem is that $srcdir (and -# therefore $ac_aux_dir as well) can be either absolute or relative, -# depending on how configure is run. This is pretty annoying, since -# it makes $ac_aux_dir quite unusable in subdirectories: in the top -# source directory, any form will work fine, but in subdirectories a -# relative path needs to be adjusted first. -# -# $ac_aux_dir/missing -# fails when called from a subdirectory if $ac_aux_dir is relative -# $top_srcdir/$ac_aux_dir/missing -# fails if $ac_aux_dir is absolute, -# fails when called from a subdirectory in a VPATH build with -# a relative $ac_aux_dir -# -# The reason of the latter failure is that $top_srcdir and $ac_aux_dir -# are both prefixed by $srcdir. In an in-source build this is usually -# harmless because $srcdir is `.', but things will broke when you -# start a VPATH build or use an absolute $srcdir. -# -# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, -# iff we strip the leading $srcdir from $ac_aux_dir. That would be: -# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` -# and then we would define $MISSING as -# MISSING="\${SHELL} $am_aux_dir/missing" -# This will work as long as MISSING is not called from configure, because -# unfortunately $(top_srcdir) has no meaning in configure. -# However there are other variables, like CC, which are often used in -# configure, and could therefore not use this "fixed" $ac_aux_dir. -# -# Another solution, used here, is to always expand $ac_aux_dir to an -# absolute PATH. The drawback is that using absolute paths prevent a -# configured tree to be moved without reconfiguration. - -AC_DEFUN([AM_AUX_DIR_EXPAND], -[dnl Rely on autoconf to set up CDPATH properly. -AC_PREREQ([2.50])dnl -# expand $ac_aux_dir to an absolute path -am_aux_dir=`cd $ac_aux_dir && pwd` -]) - -# AM_CONDITIONAL -*- Autoconf -*- - -# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008 -# Free Software Foundation, Inc. 
-# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 9 - -# AM_CONDITIONAL(NAME, SHELL-CONDITION) -# ------------------------------------- -# Define a conditional. -AC_DEFUN([AM_CONDITIONAL], -[AC_PREREQ(2.52)dnl - ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], - [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl -AC_SUBST([$1_TRUE])dnl -AC_SUBST([$1_FALSE])dnl -_AM_SUBST_NOTMAKE([$1_TRUE])dnl -_AM_SUBST_NOTMAKE([$1_FALSE])dnl -m4_define([_AM_COND_VALUE_$1], [$2])dnl -if $2; then - $1_TRUE= - $1_FALSE='#' -else - $1_TRUE='#' - $1_FALSE= -fi -AC_CONFIG_COMMANDS_PRE( -[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then - AC_MSG_ERROR([[conditional "$1" was never defined. -Usually this means the macro was only invoked conditionally.]]) -fi])]) - -# Do all the work for Automake. -*- Autoconf -*- - -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006, 2008, 2009 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 16 - -# This macro actually does too much. Some checks are only needed if -# your package does certain things. But this isn't really a big deal. - -# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) -# AM_INIT_AUTOMAKE([OPTIONS]) -# ----------------------------------------------- -# The call with PACKAGE and VERSION arguments is the old style -# call (pre autoconf-2.50), which is being phased out. PACKAGE -# and VERSION should now be passed to AC_INIT and removed from -# the call to AM_INIT_AUTOMAKE. -# We support both call styles for the transition. 
After -# the next Automake release, Autoconf can make the AC_INIT -# arguments mandatory, and then we can depend on a new Autoconf -# release and drop the old call support. -AC_DEFUN([AM_INIT_AUTOMAKE], -[AC_PREREQ([2.62])dnl -dnl Autoconf wants to disallow AM_ names. We explicitly allow -dnl the ones we care about. -m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl -AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl -AC_REQUIRE([AC_PROG_INSTALL])dnl -if test "`cd $srcdir && pwd`" != "`pwd`"; then - # Use -I$(srcdir) only when $(srcdir) != ., so that make's output - # is not polluted with repeated "-I." - AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl - # test to see if srcdir already configured - if test -f $srcdir/config.status; then - AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) - fi -fi - -# test whether we have cygpath -if test -z "$CYGPATH_W"; then - if (cygpath --version) >/dev/null 2>/dev/null; then - CYGPATH_W='cygpath -w' - else - CYGPATH_W=echo - fi -fi -AC_SUBST([CYGPATH_W]) - -# Define the identity of the package. -dnl Distinguish between old-style and new-style calls. -m4_ifval([$2], -[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl - AC_SUBST([PACKAGE], [$1])dnl - AC_SUBST([VERSION], [$2])], -[_AM_SET_OPTIONS([$1])dnl -dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. -m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, - [m4_fatal([AC_INIT should be called with package and version arguments])])dnl - AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl - AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl - -_AM_IF_OPTION([no-define],, -[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) - AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl - -# Some tools Automake needs. 
-AC_REQUIRE([AM_SANITY_CHECK])dnl -AC_REQUIRE([AC_ARG_PROGRAM])dnl -AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) -AM_MISSING_PROG(AUTOCONF, autoconf) -AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) -AM_MISSING_PROG(AUTOHEADER, autoheader) -AM_MISSING_PROG(MAKEINFO, makeinfo) -AC_REQUIRE([AM_PROG_INSTALL_SH])dnl -AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl -AC_REQUIRE([AM_PROG_MKDIR_P])dnl -# We need awk for the "check" target. The system "awk" is bad on -# some platforms. -AC_REQUIRE([AC_PROG_AWK])dnl -AC_REQUIRE([AC_PROG_MAKE_SET])dnl -AC_REQUIRE([AM_SET_LEADING_DOT])dnl -_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], - [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], - [_AM_PROG_TAR([v7])])]) -_AM_IF_OPTION([no-dependencies],, -[AC_PROVIDE_IFELSE([AC_PROG_CC], - [_AM_DEPENDENCIES(CC)], - [define([AC_PROG_CC], - defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl -AC_PROVIDE_IFELSE([AC_PROG_CXX], - [_AM_DEPENDENCIES(CXX)], - [define([AC_PROG_CXX], - defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl -AC_PROVIDE_IFELSE([AC_PROG_OBJC], - [_AM_DEPENDENCIES(OBJC)], - [define([AC_PROG_OBJC], - defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl -]) -_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl -dnl The `parallel-tests' driver may need to know about EXEEXT, so add the -dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro -dnl is hooked onto _AC_COMPILER_EXEEXT early, see below. -AC_CONFIG_COMMANDS_PRE(dnl -[m4_provide_if([_AM_COMPILER_EXEEXT], - [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl -]) - -dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not -dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further -dnl mangled by Autoconf and run in a shell conditional statement. -m4_define([_AC_COMPILER_EXEEXT], -m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) - - -# When config.status generates a header, we must update the stamp-h file. 
-# This file resides in the same directory as the config header -# that is generated. The stamp files are numbered to have different names. - -# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the -# loop where config.status creates the headers, so we can generate -# our stamp files there. -AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], -[# Compute $1's index in $config_headers. -_am_arg=$1 -_am_stamp_count=1 -for _am_header in $config_headers :; do - case $_am_header in - $_am_arg | $_am_arg:* ) - break ;; - * ) - _am_stamp_count=`expr $_am_stamp_count + 1` ;; - esac -done -echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) - -# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation, -# Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 1 - -# AM_PROG_INSTALL_SH -# ------------------ -# Define $install_sh. -AC_DEFUN([AM_PROG_INSTALL_SH], -[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -if test x"${install_sh}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; - *) - install_sh="\${SHELL} $am_aux_dir/install-sh" - esac -fi -AC_SUBST(install_sh)]) - -# Add --enable-maintainer-mode option to configure. -*- Autoconf -*- -# From Jim Meyering - -# Copyright (C) 1996, 1998, 2000, 2001, 2002, 2003, 2004, 2005, 2008, -# 2011 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 5 - -# AM_MAINTAINER_MODE([DEFAULT-MODE]) -# ---------------------------------- -# Control maintainer-specific portions of Makefiles. -# Default is to disable them, unless `enable' is passed literally. -# For symmetry, `disable' may be passed as well. 
Anyway, the user -# can override the default with the --enable/--disable switch. -AC_DEFUN([AM_MAINTAINER_MODE], -[m4_case(m4_default([$1], [disable]), - [enable], [m4_define([am_maintainer_other], [disable])], - [disable], [m4_define([am_maintainer_other], [enable])], - [m4_define([am_maintainer_other], [enable]) - m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) -AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) - dnl maintainer-mode's default is 'disable' unless 'enable' is passed - AC_ARG_ENABLE([maintainer-mode], -[ --][am_maintainer_other][-maintainer-mode am_maintainer_other make rules and dependencies not useful - (and sometimes confusing) to the casual installer], - [USE_MAINTAINER_MODE=$enableval], - [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) - AC_MSG_RESULT([$USE_MAINTAINER_MODE]) - AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) - MAINT=$MAINTAINER_MODE_TRUE - AC_SUBST([MAINT])dnl -] -) - -AU_DEFUN([jm_MAINTAINER_MODE], [AM_MAINTAINER_MODE]) - -# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- - -# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008 -# Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 6 - -# AM_MISSING_PROG(NAME, PROGRAM) -# ------------------------------ -AC_DEFUN([AM_MISSING_PROG], -[AC_REQUIRE([AM_MISSING_HAS_RUN]) -$1=${$1-"${am_missing_run}$2"} -AC_SUBST($1)]) - - -# AM_MISSING_HAS_RUN -# ------------------ -# Define MISSING if not defined so far and test if it supports --run. -# If it does, set am_missing_run to use it, otherwise, to nothing. 
-AC_DEFUN([AM_MISSING_HAS_RUN], -[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -AC_REQUIRE_AUX_FILE([missing])dnl -if test x"${MISSING+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; - *) - MISSING="\${SHELL} $am_aux_dir/missing" ;; - esac -fi -# Use eval to expand $SHELL -if eval "$MISSING --run true"; then - am_missing_run="$MISSING --run " -else - am_missing_run= - AC_MSG_WARN([`missing' script is too old or missing]) -fi -]) - -# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation, -# Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 1 - -# AM_PROG_MKDIR_P -# --------------- -# Check for `mkdir -p'. -AC_DEFUN([AM_PROG_MKDIR_P], -[AC_PREREQ([2.60])dnl -AC_REQUIRE([AC_PROG_MKDIR_P])dnl -dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, -dnl while keeping a definition of mkdir_p for backward compatibility. -dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. -dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of -dnl Makefile.ins that do not define MKDIR_P, so we do our own -dnl adjustment using top_builddir (which is defined more often than -dnl MKDIR_P). -AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl -case $mkdir_p in - [[\\/$]]* | ?:[[\\/]]*) ;; - */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; -esac -]) - -# Helper functions for option handling. -*- Autoconf -*- - -# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software -# Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. 
- -# serial 5 - -# _AM_MANGLE_OPTION(NAME) -# ----------------------- -AC_DEFUN([_AM_MANGLE_OPTION], -[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) - -# _AM_SET_OPTION(NAME) -# -------------------- -# Set option NAME. Presently that only means defining a flag for this option. -AC_DEFUN([_AM_SET_OPTION], -[m4_define(_AM_MANGLE_OPTION([$1]), 1)]) - -# _AM_SET_OPTIONS(OPTIONS) -# ------------------------ -# OPTIONS is a space-separated list of Automake options. -AC_DEFUN([_AM_SET_OPTIONS], -[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) - -# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) -# ------------------------------------------- -# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. -AC_DEFUN([_AM_IF_OPTION], -[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) - -# Check to make sure that the build environment is sane. -*- Autoconf -*- - -# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008 -# Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 5 - -# AM_SANITY_CHECK -# --------------- -AC_DEFUN([AM_SANITY_CHECK], -[AC_MSG_CHECKING([whether build environment is sane]) -# Just in case -sleep 1 -echo timestamp > conftest.file -# Reject unsafe characters in $srcdir or the absolute working directory -# name. Accept space and tab only in the latter. -am_lf=' -' -case `pwd` in - *[[\\\"\#\$\&\'\`$am_lf]]*) - AC_MSG_ERROR([unsafe absolute working directory name]);; -esac -case $srcdir in - *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) - AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);; -esac - -# Do `set' in a subshell so we don't clobber the current shell's -# arguments. Must try -L first in case configure is actually a -# symlink; some systems play weird games with the mod time of symlinks -# (eg FreeBSD returns the mod time of the symlink's containing -# directory). 
-if ( - set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` - if test "$[*]" = "X"; then - # -L didn't work. - set X `ls -t "$srcdir/configure" conftest.file` - fi - rm -f conftest.file - if test "$[*]" != "X $srcdir/configure conftest.file" \ - && test "$[*]" != "X conftest.file $srcdir/configure"; then - - # If neither matched, then we have a broken ls. This can happen - # if, for instance, CONFIG_SHELL is bash and it inherits a - # broken ls alias from the environment. This has actually - # happened. Such a system could not be considered "sane". - AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken -alias in your environment]) - fi - - test "$[2]" = conftest.file - ) -then - # Ok. - : -else - AC_MSG_ERROR([newly created file is older than distributed files! -Check your system clock]) -fi -AC_MSG_RESULT(yes)]) - -# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 1 - -# AM_PROG_INSTALL_STRIP -# --------------------- -# One issue with vendor `install' (even GNU) is that you can't -# specify the program used to strip binaries. This is especially -# annoying in cross-compiling environments, where the build's strip -# is unlikely to handle the host's binaries. -# Fortunately install-sh will honor a STRIPPROG variable, so we -# always use install-sh in `make install-strip', and initialize -# STRIPPROG with the value of the STRIP variable (set by the user). -AC_DEFUN([AM_PROG_INSTALL_STRIP], -[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl -# Installed binaries are usually stripped using `strip' when the user -# run `make install-strip'. However `strip' might not be the right -# tool to use in cross-compilation environments, therefore Automake -# will honor the `STRIP' environment variable to overrule this program. 
-dnl Don't test for $cross_compiling = yes, because it might be `maybe'. -if test "$cross_compiling" != no; then - AC_CHECK_TOOL([STRIP], [strip], :) -fi -INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" -AC_SUBST([INSTALL_STRIP_PROGRAM])]) - -# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 3 - -# _AM_SUBST_NOTMAKE(VARIABLE) -# --------------------------- -# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. -# This macro is traced by Automake. -AC_DEFUN([_AM_SUBST_NOTMAKE]) - -# AM_SUBST_NOTMAKE(VARIABLE) -# -------------------------- -# Public sister of _AM_SUBST_NOTMAKE. -AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) - -# Check how to create a tarball. -*- Autoconf -*- - -# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# serial 2 - -# _AM_PROG_TAR(FORMAT) -# -------------------- -# Check how to create a tarball in format FORMAT. -# FORMAT should be one of `v7', `ustar', or `pax'. -# -# Substitute a variable $(am__tar) that is a command -# writing to stdout a FORMAT-tarball containing the directory -# $tardir. -# tardir=directory && $(am__tar) > result.tar -# -# Substitute a variable $(am__untar) that extract such -# a tarball read from stdin. -# $(am__untar) < result.tar -AC_DEFUN([_AM_PROG_TAR], -[# Always define AMTAR for backward compatibility. Yes, it's still used -# in the wild :-( We should find a proper way to deprecate it ... 
-AC_SUBST([AMTAR], ['$${TAR-tar}']) -m4_if([$1], [v7], - [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], - [m4_case([$1], [ustar],, [pax],, - [m4_fatal([Unknown tar format])]) -AC_MSG_CHECKING([how to create a $1 tar archive]) -# Loop over all known methods to create a tar archive until one works. -_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' -_am_tools=${am_cv_prog_tar_$1-$_am_tools} -# Do not fold the above two line into one, because Tru64 sh and -# Solaris sh will not grok spaces in the rhs of `-'. -for _am_tool in $_am_tools -do - case $_am_tool in - gnutar) - for _am_tar in tar gnutar gtar; - do - AM_RUN_LOG([$_am_tar --version]) && break - done - am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' - am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' - am__untar="$_am_tar -xf -" - ;; - plaintar) - # Must skip GNU tar: if it does not support --format= it doesn't create - # ustar tarball either. - (tar --version) >/dev/null 2>&1 && continue - am__tar='tar chf - "$$tardir"' - am__tar_='tar chf - "$tardir"' - am__untar='tar xf -' - ;; - pax) - am__tar='pax -L -x $1 -w "$$tardir"' - am__tar_='pax -L -x $1 -w "$tardir"' - am__untar='pax -r' - ;; - cpio) - am__tar='find "$$tardir" -print | cpio -o -H $1 -L' - am__tar_='find "$tardir" -print | cpio -o -H $1 -L' - am__untar='cpio -i -H $1 -d' - ;; - none) - am__tar=false - am__tar_=false - am__untar=false - ;; - esac - - # If the value was cached, stop now. We just wanted to have am__tar - # and am__untar set. 
- test -n "${am_cv_prog_tar_$1}" && break - - # tar/untar a dummy directory, and stop if the command works - rm -rf conftest.dir - mkdir conftest.dir - echo GrepMe > conftest.dir/file - AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) - rm -rf conftest.dir - if test -s conftest.tar; then - AM_RUN_LOG([$am__untar /dev/null 2>&1 && break - fi -done -rm -rf conftest.dir - -AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) -AC_MSG_RESULT([$am_cv_prog_tar_$1])]) -AC_SUBST([am__tar]) -AC_SUBST([am__untar]) -]) # _AM_PROG_TAR - -m4_include([../config/lead-dot.m4]) -m4_include([../config/multi.m4]) -m4_include([../config/override.m4]) -m4_include([../config/stdint.m4]) -m4_include([../config/unwind_ipinfo.m4]) -m4_include([../config/warnings.m4]) -m4_include([../libtool.m4]) -m4_include([../ltoptions.m4]) -m4_include([../ltsugar.m4]) -m4_include([../ltversion.m4]) -m4_include([../lt~obsolete.m4]) diff --git a/src/libbacktrace/alloc.c b/src/libbacktrace/alloc.c deleted file mode 100644 index a9f07a013f8f..000000000000 --- a/src/libbacktrace/alloc.c +++ /dev/null @@ -1,156 +0,0 @@ -/* alloc.c -- Memory allocation without mmap. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -/* Allocation routines to use on systems that do not support anonymous - mmap. This implementation just uses malloc, which means that the - backtrace functions may not be safely invoked from a signal - handler. */ - -/* Allocate memory like malloc. If ERROR_CALLBACK is NULL, don't - report an error. */ - -void * -backtrace_alloc (struct backtrace_state *state ATTRIBUTE_UNUSED, - size_t size, backtrace_error_callback error_callback, - void *data) -{ - void *ret; - - ret = malloc (size); - if (ret == NULL) - { - if (error_callback) - error_callback (data, "malloc", errno); - } - return ret; -} - -/* Free memory. */ - -void -backtrace_free (struct backtrace_state *state ATTRIBUTE_UNUSED, - void *p, size_t size ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data ATTRIBUTE_UNUSED) -{ - free (p); -} - -/* Grow VEC by SIZE bytes. 
*/ - -void * -backtrace_vector_grow (struct backtrace_state *state ATTRIBUTE_UNUSED, - size_t size, backtrace_error_callback error_callback, - void *data, struct backtrace_vector *vec) -{ - void *ret; - - if (size > vec->alc) - { - size_t alc; - void *base; - - if (vec->size == 0) - alc = 32 * size; - else if (vec->size >= 4096) - alc = vec->size + 4096; - else - alc = 2 * vec->size; - - if (alc < vec->size + size) - alc = vec->size + size; - - base = realloc (vec->base, alc); - if (base == NULL) - { - error_callback (data, "realloc", errno); - return NULL; - } - - vec->base = base; - vec->alc = alc - vec->size; - } - - ret = (char *) vec->base + vec->size; - vec->size += size; - vec->alc -= size; - return ret; -} - -/* Finish the current allocation on VEC. */ - -void * -backtrace_vector_finish (struct backtrace_state *state, - struct backtrace_vector *vec, - backtrace_error_callback error_callback, - void *data) -{ - void *ret; - - /* With this allocator we call realloc in backtrace_vector_grow, - which means we can't easily reuse the memory here. So just - release it. */ - if (!backtrace_vector_release (state, vec, error_callback, data)) - return NULL; - ret = vec->base; - vec->base = NULL; - vec->size = 0; - vec->alc = 0; - return ret; -} - -/* Release any extra space allocated for VEC. */ - -int -backtrace_vector_release (struct backtrace_state *state ATTRIBUTE_UNUSED, - struct backtrace_vector *vec, - backtrace_error_callback error_callback, - void *data) -{ - vec->base = realloc (vec->base, vec->size); - if (vec->base == NULL) - { - error_callback (data, "realloc", errno); - return 0; - } - vec->alc = 0; - return 1; -} diff --git a/src/libbacktrace/ansidecl.h b/src/libbacktrace/ansidecl.h deleted file mode 100644 index 4087dd729175..000000000000 --- a/src/libbacktrace/ansidecl.h +++ /dev/null @@ -1,329 +0,0 @@ -/* ANSI and traditional C compatibility macros - Copyright (C) 1991-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. 
- -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ - -/* ANSI and traditional C compatibility macros - - ANSI C is assumed if __STDC__ is #defined. - - Macro ANSI C definition Traditional C definition - ----- ---- - ---------- ----------- - ---------- - PTR `void *' `char *' - const not defined `' - volatile not defined `' - signed not defined `' - - For ease of writing code which uses GCC extensions but needs to be - portable to other compilers, we provide the GCC_VERSION macro that - simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various - wrappers around __attribute__. Also, __extension__ will be #defined - to nothing if it doesn't work. See below. */ - -#ifndef _ANSIDECL_H -#define _ANSIDECL_H 1 - -#ifdef __cplusplus -extern "C" { -#endif - -/* Every source file includes this file, - so they will all get the switch for lint. */ -/* LINTLIBRARY */ - -/* Using MACRO(x,y) in cpp #if conditionals does not work with some - older preprocessors. Thus we can't define something like this: - -#define HAVE_GCC_VERSION(MAJOR, MINOR) \ - (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR))) - -and then test "#if HAVE_GCC_VERSION(2,7)". - -So instead we use the macro below and test it against specific values. 
*/ - -/* This macro simplifies testing whether we are using gcc, and if it - is of a particular minimum version. (Both major & minor numbers are - significant.) This macro will evaluate to 0 if we are not using - gcc at all. */ -#ifndef GCC_VERSION -#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) -#endif /* GCC_VERSION */ - -#if defined (__STDC__) || defined(__cplusplus) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) -/* All known AIX compilers implement these things (but don't always - define __STDC__). The RISC/OS MIPS compiler defines these things - in SVR4 mode, but does not define __STDC__. */ -/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other - C++ compilers, does not define __STDC__, though it acts as if this - was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */ - -#define PTR void * - -#undef const -#undef volatile -#undef signed - -/* inline requires special treatment; it's in C99, and GCC >=2.7 supports - it too, but it's not in C89. */ -#undef inline -#if __STDC_VERSION__ >= 199901L || defined(__cplusplus) || (defined(__SUNPRO_C) && defined(__C99FEATURES__)) -/* it's a keyword */ -#else -# if GCC_VERSION >= 2007 -# define inline __inline__ /* __inline__ prevents -pedantic warnings */ -# else -# define inline /* nothing */ -# endif -#endif - -#else /* Not ANSI C. */ - -#define PTR char * - -/* some systems define these in header files for non-ansi mode */ -#undef const -#undef volatile -#undef signed -#undef inline -#define const -#define volatile -#define signed -#define inline - -#endif /* ANSI C. */ - -/* Define macros for some gcc attributes. This permits us to use the - macros freely, and know that they will come into play for the - version of gcc in which they are supported. */ - -#if (GCC_VERSION < 2007) -# define __attribute__(x) -#endif - -/* Attribute __malloc__ on functions was valid as of gcc 2.96. 
*/ -#ifndef ATTRIBUTE_MALLOC -# if (GCC_VERSION >= 2096) -# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) -# else -# define ATTRIBUTE_MALLOC -# endif /* GNUC >= 2.96 */ -#endif /* ATTRIBUTE_MALLOC */ - -/* Attributes on labels were valid as of gcc 2.93 and g++ 4.5. For - g++ an attribute on a label must be followed by a semicolon. */ -#ifndef ATTRIBUTE_UNUSED_LABEL -# ifndef __cplusplus -# if GCC_VERSION >= 2093 -# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED -# else -# define ATTRIBUTE_UNUSED_LABEL -# endif -# else -# if GCC_VERSION >= 4005 -# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED ; -# else -# define ATTRIBUTE_UNUSED_LABEL -# endif -# endif -#endif - -/* Similarly to ARG_UNUSED below. Prior to GCC 3.4, the C++ frontend - couldn't parse attributes placed after the identifier name, and now - the entire compiler is built with C++. */ -#ifndef ATTRIBUTE_UNUSED -#if GCC_VERSION >= 3004 -# define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) -#else -#define ATTRIBUTE_UNUSED -#endif -#endif /* ATTRIBUTE_UNUSED */ - -/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the - identifier name. */ -#if ! defined(__cplusplus) || (GCC_VERSION >= 3004) -# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED -#else /* !__cplusplus || GNUC >= 3.4 */ -# define ARG_UNUSED(NAME) NAME -#endif /* !__cplusplus || GNUC >= 3.4 */ - -#ifndef ATTRIBUTE_NORETURN -#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__)) -#endif /* ATTRIBUTE_NORETURN */ - -/* Attribute `nonnull' was valid as of gcc 3.3. */ -#ifndef ATTRIBUTE_NONNULL -# if (GCC_VERSION >= 3003) -# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m))) -# else -# define ATTRIBUTE_NONNULL(m) -# endif /* GNUC >= 3.3 */ -#endif /* ATTRIBUTE_NONNULL */ - -/* Attribute `returns_nonnull' was valid as of gcc 4.9. 
*/ -#ifndef ATTRIBUTE_RETURNS_NONNULL -# if (GCC_VERSION >= 4009) -# define ATTRIBUTE_RETURNS_NONNULL __attribute__ ((__returns_nonnull__)) -# else -# define ATTRIBUTE_RETURNS_NONNULL -# endif /* GNUC >= 4.9 */ -#endif /* ATTRIBUTE_RETURNS_NONNULL */ - -/* Attribute `pure' was valid as of gcc 3.0. */ -#ifndef ATTRIBUTE_PURE -# if (GCC_VERSION >= 3000) -# define ATTRIBUTE_PURE __attribute__ ((__pure__)) -# else -# define ATTRIBUTE_PURE -# endif /* GNUC >= 3.0 */ -#endif /* ATTRIBUTE_PURE */ - -/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL. - This was the case for the `printf' format attribute by itself - before GCC 3.3, but as of 3.3 we need to add the `nonnull' - attribute to retain this behavior. */ -#ifndef ATTRIBUTE_PRINTF -#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m) -#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2) -#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3) -#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4) -#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5) -#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6) -#endif /* ATTRIBUTE_PRINTF */ - -/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on - a function pointer. Format attributes were allowed on function - pointers as of gcc 3.1. */ -#ifndef ATTRIBUTE_FPTR_PRINTF -# if (GCC_VERSION >= 3001) -# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n) -# else -# define ATTRIBUTE_FPTR_PRINTF(m, n) -# endif /* GNUC >= 3.1 */ -# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2) -# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3) -# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4) -# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5) -# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6) -#endif /* ATTRIBUTE_FPTR_PRINTF */ - -/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A - NULL format specifier was allowed as of gcc 3.3. 
*/ -#ifndef ATTRIBUTE_NULL_PRINTF -# if (GCC_VERSION >= 3003) -# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) -# else -# define ATTRIBUTE_NULL_PRINTF(m, n) -# endif /* GNUC >= 3.3 */ -# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2) -# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3) -# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4) -# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5) -# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6) -#endif /* ATTRIBUTE_NULL_PRINTF */ - -/* Attribute `sentinel' was valid as of gcc 3.5. */ -#ifndef ATTRIBUTE_SENTINEL -# if (GCC_VERSION >= 3005) -# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__)) -# else -# define ATTRIBUTE_SENTINEL -# endif /* GNUC >= 3.5 */ -#endif /* ATTRIBUTE_SENTINEL */ - - -#ifndef ATTRIBUTE_ALIGNED_ALIGNOF -# if (GCC_VERSION >= 3000) -# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m)))) -# else -# define ATTRIBUTE_ALIGNED_ALIGNOF(m) -# endif /* GNUC >= 3.0 */ -#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */ - -/* Useful for structures whose layout must much some binary specification - regardless of the alignment and padding qualities of the compiler. */ -#ifndef ATTRIBUTE_PACKED -# define ATTRIBUTE_PACKED __attribute__ ((packed)) -#endif - -/* Attribute `hot' and `cold' was valid as of gcc 4.3. */ -#ifndef ATTRIBUTE_COLD -# if (GCC_VERSION >= 4003) -# define ATTRIBUTE_COLD __attribute__ ((__cold__)) -# else -# define ATTRIBUTE_COLD -# endif /* GNUC >= 4.3 */ -#endif /* ATTRIBUTE_COLD */ -#ifndef ATTRIBUTE_HOT -# if (GCC_VERSION >= 4003) -# define ATTRIBUTE_HOT __attribute__ ((__hot__)) -# else -# define ATTRIBUTE_HOT -# endif /* GNUC >= 4.3 */ -#endif /* ATTRIBUTE_HOT */ - -/* Attribute 'no_sanitize_undefined' was valid as of gcc 4.9. 
*/ -#ifndef ATTRIBUTE_NO_SANITIZE_UNDEFINED -# if (GCC_VERSION >= 4009) -# define ATTRIBUTE_NO_SANITIZE_UNDEFINED __attribute__ ((no_sanitize_undefined)) -# else -# define ATTRIBUTE_NO_SANITIZE_UNDEFINED -# endif /* GNUC >= 4.9 */ -#endif /* ATTRIBUTE_NO_SANITIZE_UNDEFINED */ - -/* We use __extension__ in some places to suppress -pedantic warnings - about GCC extensions. This feature didn't work properly before - gcc 2.8. */ -#if GCC_VERSION < 2008 -#define __extension__ -#endif - -/* This is used to declare a const variable which should be visible - outside of the current compilation unit. Use it as - EXPORTED_CONST int i = 1; - This is because the semantics of const are different in C and C++. - "extern const" is permitted in C but it looks strange, and gcc - warns about it when -Wc++-compat is not used. */ -#ifdef __cplusplus -#define EXPORTED_CONST extern const -#else -#define EXPORTED_CONST const -#endif - -/* Be conservative and only use enum bitfields with C++ or GCC. - FIXME: provide a complete autoconf test for buggy enum bitfields. */ - -#ifdef __cplusplus -#define ENUM_BITFIELD(TYPE) enum TYPE -#elif (GCC_VERSION > 2000) -#define ENUM_BITFIELD(TYPE) __extension__ enum TYPE -#else -#define ENUM_BITFIELD(TYPE) unsigned int -#endif - - /* This is used to mark a class or virtual function as final. */ -#if __cplusplus >= 201103L -#define GCC_FINAL final -#elif GCC_VERSION >= 4007 -#define GCC_FINAL __final -#else -#define GCC_FINAL -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* ansidecl.h */ diff --git a/src/libbacktrace/atomic.c b/src/libbacktrace/atomic.c deleted file mode 100644 index cb0ad0298e68..000000000000 --- a/src/libbacktrace/atomic.c +++ /dev/null @@ -1,113 +0,0 @@ -/* atomic.c -- Support for atomic functions if not present. - Copyright (C) 2013-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include - -#include "backtrace.h" -#include "backtrace-supported.h" -#include "internal.h" - -/* This file holds implementations of the atomic functions that are - used if the host compiler has the sync functions but not the atomic - functions, as is true of versions of GCC before 4.7. */ - -#if !defined (HAVE_ATOMIC_FUNCTIONS) && defined (HAVE_SYNC_FUNCTIONS) - -/* Do an atomic load of a pointer. 
*/ - -void * -backtrace_atomic_load_pointer (void *arg) -{ - void **pp; - void *p; - - pp = (void **) arg; - p = *pp; - while (!__sync_bool_compare_and_swap (pp, p, p)) - p = *pp; - return p; -} - -/* Do an atomic load of an int. */ - -int -backtrace_atomic_load_int (int *p) -{ - int i; - - i = *p; - while (!__sync_bool_compare_and_swap (p, i, i)) - i = *p; - return i; -} - -/* Do an atomic store of a pointer. */ - -void -backtrace_atomic_store_pointer (void *arg, void *p) -{ - void **pp; - void *old; - - pp = (void **) arg; - old = *pp; - while (!__sync_bool_compare_and_swap (pp, old, p)) - old = *pp; -} - -/* Do an atomic store of a size_t value. */ - -void -backtrace_atomic_store_size_t (size_t *p, size_t v) -{ - size_t old; - - old = *p; - while (!__sync_bool_compare_and_swap (p, old, v)) - old = *p; -} - -/* Do an atomic store of a int value. */ - -void -backtrace_atomic_store_int (int *p, int v) -{ - size_t old; - - old = *p; - while (!__sync_bool_compare_and_swap (p, old, v)) - old = *p; -} - -#endif diff --git a/src/libbacktrace/backtrace-supported.h.in b/src/libbacktrace/backtrace-supported.h.in deleted file mode 100644 index ab34199fcd36..000000000000 --- a/src/libbacktrace/backtrace-supported.h.in +++ /dev/null @@ -1,66 +0,0 @@ -/* backtrace-supported.h.in -- Whether stack backtrace is supported. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -/* The file backtrace-supported.h.in is used by configure to generate - the file backtrace-supported.h. The file backtrace-supported.h may - be #include'd to see whether the backtrace library will be able to - get a backtrace and produce symbolic information. */ - - -/* BACKTRACE_SUPPORTED will be #define'd as 1 if the backtrace library - should work, 0 if it will not. Libraries may #include this to make - other arrangements. */ - -#define BACKTRACE_SUPPORTED @BACKTRACE_SUPPORTED@ - -/* BACKTRACE_USES_MALLOC will be #define'd as 1 if the backtrace - library will call malloc as it works, 0 if it will call mmap - instead. This may be used to determine whether it is safe to call - the backtrace functions from a signal handler. In general this - only applies to calls like backtrace and backtrace_pcinfo. It does - not apply to backtrace_simple, which never calls malloc. It does - not apply to backtrace_print, which always calls fprintf and - therefore malloc. 
*/ - -#define BACKTRACE_USES_MALLOC @BACKTRACE_USES_MALLOC@ - -/* BACKTRACE_SUPPORTS_THREADS will be #define'd as 1 if the backtrace - library is configured with threading support, 0 if not. If this is - 0, the threaded parameter to backtrace_create_state must be passed - as 0. */ - -#define BACKTRACE_SUPPORTS_THREADS @BACKTRACE_SUPPORTS_THREADS@ - -/* BACKTRACE_SUPPORTS_DATA will be #defined'd as 1 if the backtrace_syminfo - will work for variables. It will always work for functions. */ - -#define BACKTRACE_SUPPORTS_DATA @BACKTRACE_SUPPORTS_DATA@ diff --git a/src/libbacktrace/backtrace.c b/src/libbacktrace/backtrace.c deleted file mode 100644 index b89bf554ac54..000000000000 --- a/src/libbacktrace/backtrace.c +++ /dev/null @@ -1,129 +0,0 @@ -/* backtrace.c -- Entry point for stack backtrace library. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include - -#include "unwind.h" -#include "backtrace.h" -#include "internal.h" - -/* The main backtrace_full routine. */ - -/* Data passed through _Unwind_Backtrace. */ - -struct backtrace_data -{ - /* Number of frames to skip. */ - int skip; - /* Library state. */ - struct backtrace_state *state; - /* Callback routine. */ - backtrace_full_callback callback; - /* Error callback routine. */ - backtrace_error_callback error_callback; - /* Data to pass to callback routines. */ - void *data; - /* Value to return from backtrace_full. */ - int ret; - /* Whether there is any memory available. */ - int can_alloc; -}; - -/* Unwind library callback routine. This is passed to - _Unwind_Backtrace. */ - -static _Unwind_Reason_Code -unwind (struct _Unwind_Context *context, void *vdata) -{ - struct backtrace_data *bdata = (struct backtrace_data *) vdata; - uintptr_t pc; - int ip_before_insn = 0; - -#ifdef HAVE_GETIPINFO - pc = _Unwind_GetIPInfo (context, &ip_before_insn); -#else - pc = _Unwind_GetIP (context); -#endif - - if (bdata->skip > 0) - { - --bdata->skip; - return _URC_NO_REASON; - } - - if (!ip_before_insn) - --pc; - - if (!bdata->can_alloc) - bdata->ret = bdata->callback (bdata->data, pc, NULL, 0, NULL); - else - bdata->ret = backtrace_pcinfo (bdata->state, pc, bdata->callback, - bdata->error_callback, bdata->data); - if (bdata->ret != 0) - return _URC_END_OF_STACK; - - return _URC_NO_REASON; -} - -/* Get a stack backtrace. 
*/ - -int -backtrace_full (struct backtrace_state *state, int skip, - backtrace_full_callback callback, - backtrace_error_callback error_callback, void *data) -{ - struct backtrace_data bdata; - void *p; - - bdata.skip = skip + 1; - bdata.state = state; - bdata.callback = callback; - bdata.error_callback = error_callback; - bdata.data = data; - bdata.ret = 0; - - /* If we can't allocate any memory at all, don't try to produce - file/line information. */ - p = backtrace_alloc (state, 4096, NULL, NULL); - if (p == NULL) - bdata.can_alloc = 0; - else - { - backtrace_free (state, p, 4096, NULL, NULL); - bdata.can_alloc = 1; - } - - _Unwind_Backtrace (unwind, &bdata); - return bdata.ret; -} diff --git a/src/libbacktrace/backtrace.h b/src/libbacktrace/backtrace.h deleted file mode 100644 index d209219d9a41..000000000000 --- a/src/libbacktrace/backtrace.h +++ /dev/null @@ -1,199 +0,0 @@ -/* backtrace.h -- Public header file for stack backtrace library. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#ifndef BACKTRACE_H -#define BACKTRACE_H - -#include -#include - -/* We want to get a definition for uintptr_t, but we still care about - systems that don't have . */ -#if defined(__GLIBC__) && __GLIBC__ >= 2 - -#include - -#elif defined(HAVE_STDINT_H) - -#include - -#else - -/* Systems that don't have must provide gstdint.h, e.g., - from GCC_HEADER_STDINT in configure.ac. */ -#include "gstdint.h" - -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -/* The backtrace state. This struct is intentionally not defined in - the public interface. */ - -struct backtrace_state; - -/* The type of the error callback argument to backtrace functions. - This function, if not NULL, will be called for certain error cases. - The DATA argument is passed to the function that calls this one. - The MSG argument is an error message. The ERRNUM argument, if - greater than 0, holds an errno value. The MSG buffer may become - invalid after this function returns. - - As a special case, the ERRNUM argument will be passed as -1 if no - debug info can be found for the executable, but the function - requires debug info (e.g., backtrace_full, backtrace_pcinfo). The - MSG in this case will be something along the lines of "no debug - info". Similarly, ERRNUM will be passed as -1 if there is no - symbol table, but the function requires a symbol table (e.g., - backtrace_syminfo). This may be used as a signal that some other - approach should be tried. 
*/ - -typedef void (*backtrace_error_callback) (void *data, const char *msg, - int errnum); - -/* Create state information for the backtrace routines. This must be - called before any of the other routines, and its return value must - be passed to all of the other routines. FILENAME is the path name - of the executable file; if it is NULL the library will try - system-specific path names. If not NULL, FILENAME must point to a - permanent buffer. If THREADED is non-zero the state may be - accessed by multiple threads simultaneously, and the library will - use appropriate atomic operations. If THREADED is zero the state - may only be accessed by one thread at a time. This returns a state - pointer on success, NULL on error. If an error occurs, this will - call the ERROR_CALLBACK routine. */ - -extern struct backtrace_state *backtrace_create_state ( - const char *filename, int threaded, - backtrace_error_callback error_callback, void *data); - -/* The type of the callback argument to the backtrace_full function. - DATA is the argument passed to backtrace_full. PC is the program - counter. FILENAME is the name of the file containing PC, or NULL - if not available. LINENO is the line number in FILENAME containing - PC, or 0 if not available. FUNCTION is the name of the function - containing PC, or NULL if not available. This should return 0 to - continuing tracing. The FILENAME and FUNCTION buffers may become - invalid after this function returns. */ - -typedef int (*backtrace_full_callback) (void *data, uintptr_t pc, - const char *filename, int lineno, - const char *function); - -/* Get a full stack backtrace. SKIP is the number of frames to skip; - passing 0 will start the trace with the function calling - backtrace_full. DATA is passed to the callback routine. If any - call to CALLBACK returns a non-zero value, the stack backtrace - stops, and backtrace returns that value; this may be used to limit - the number of stack frames desired. 
If all calls to CALLBACK - return 0, backtrace returns 0. The backtrace_full function will - make at least one call to either CALLBACK or ERROR_CALLBACK. This - function requires debug info for the executable. */ - -extern int backtrace_full (struct backtrace_state *state, int skip, - backtrace_full_callback callback, - backtrace_error_callback error_callback, - void *data); - -/* The type of the callback argument to the backtrace_simple function. - DATA is the argument passed to simple_backtrace. PC is the program - counter. This should return 0 to continue tracing. */ - -typedef int (*backtrace_simple_callback) (void *data, uintptr_t pc); - -/* Get a simple backtrace. SKIP is the number of frames to skip, as - in backtrace. DATA is passed to the callback routine. If any call - to CALLBACK returns a non-zero value, the stack backtrace stops, - and backtrace_simple returns that value. Otherwise - backtrace_simple returns 0. The backtrace_simple function will - make at least one call to either CALLBACK or ERROR_CALLBACK. This - function does not require any debug info for the executable. */ - -extern int backtrace_simple (struct backtrace_state *state, int skip, - backtrace_simple_callback callback, - backtrace_error_callback error_callback, - void *data); - -/* Print the current backtrace in a user readable format to a FILE. - SKIP is the number of frames to skip, as in backtrace_full. Any - error messages are printed to stderr. This function requires debug - info for the executable. */ - -extern void backtrace_print (struct backtrace_state *state, int skip, FILE *); - -/* Given PC, a program counter in the current program, call the - callback function with filename, line number, and function name - information. This will normally call the callback function exactly - once. However, if the PC happens to describe an inlined call, and - the debugging information contains the necessary information, then - this may call the callback function multiple times. 
This will make - at least one call to either CALLBACK or ERROR_CALLBACK. This - returns the first non-zero value returned by CALLBACK, or 0. */ - -extern int backtrace_pcinfo (struct backtrace_state *state, uintptr_t pc, - backtrace_full_callback callback, - backtrace_error_callback error_callback, - void *data); - -/* The type of the callback argument to backtrace_syminfo. DATA and - PC are the arguments passed to backtrace_syminfo. SYMNAME is the - name of the symbol for the corresponding code. SYMVAL is the - value and SYMSIZE is the size of the symbol. SYMNAME will be NULL - if no error occurred but the symbol could not be found. */ - -typedef void (*backtrace_syminfo_callback) (void *data, uintptr_t pc, - const char *symname, - uintptr_t symval, - uintptr_t symsize); - -/* Given ADDR, an address or program counter in the current program, - call the callback information with the symbol name and value - describing the function or variable in which ADDR may be found. - This will call either CALLBACK or ERROR_CALLBACK exactly once. - This returns 1 on success, 0 on failure. This function requires - the symbol table but does not require the debug info. Note that if - the symbol table is present but ADDR could not be found in the - table, CALLBACK will be called with a NULL SYMNAME argument. - Returns 1 on success, 0 on error. */ - -extern int backtrace_syminfo (struct backtrace_state *state, uintptr_t addr, - backtrace_syminfo_callback callback, - backtrace_error_callback error_callback, - void *data); - -#ifdef __cplusplus -} /* End extern "C". */ -#endif - -#endif diff --git a/src/libbacktrace/btest.c b/src/libbacktrace/btest.c deleted file mode 100644 index 0506d2b11218..000000000000 --- a/src/libbacktrace/btest.c +++ /dev/null @@ -1,721 +0,0 @@ -/* btest.c -- Test for libbacktrace library - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -/* This program tests the externally visible interfaces of the - libbacktrace library. */ - -#include -#include -#include -#include - -#include "filenames.h" - -#include "backtrace.h" -#include "backtrace-supported.h" - -/* Portable attribute syntax. Actually some of these tests probably - won't work if the attributes are not recognized. */ - -#ifndef GCC_VERSION -# define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) -#endif - -#if (GCC_VERSION < 2007) -# define __attribute__(x) -#endif - -#ifndef ATTRIBUTE_UNUSED -# define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) -#endif - -/* Used to collect backtrace info. 
*/ - -struct info -{ - char *filename; - int lineno; - char *function; -}; - -/* Passed to backtrace callback function. */ - -struct bdata -{ - struct info *all; - size_t index; - size_t max; - int failed; -}; - -/* Passed to backtrace_simple callback function. */ - -struct sdata -{ - uintptr_t *addrs; - size_t index; - size_t max; - int failed; -}; - -/* Passed to backtrace_syminfo callback function. */ - -struct symdata -{ - const char *name; - uintptr_t val, size; - int failed; -}; - -/* The backtrace state. */ - -static void *state; - -/* The number of failures. */ - -static int failures; - -/* Return the base name in a path. */ - -static const char * -base (const char *p) -{ - const char *last; - const char *s; - - last = NULL; - for (s = p; *s != '\0'; ++s) - { - if (IS_DIR_SEPARATOR (*s)) - last = s + 1; - } - return last != NULL ? last : p; -} - -/* Check an entry in a struct info array. */ - -static void -check (const char *name, int index, const struct info *all, int want_lineno, - const char *want_function, int *failed) -{ - if (*failed) - return; - if (all[index].filename == NULL || all[index].function == NULL) - { - fprintf (stderr, "%s: [%d]: missing file name or function name\n", - name, index); - *failed = 1; - return; - } - if (strcmp (base (all[index].filename), "btest.c") != 0) - { - fprintf (stderr, "%s: [%d]: got %s expected test.c\n", name, index, - all[index].filename); - *failed = 1; - } - if (all[index].lineno != want_lineno) - { - fprintf (stderr, "%s: [%d]: got %d expected %d\n", name, index, - all[index].lineno, want_lineno); - *failed = 1; - } - if (strcmp (all[index].function, want_function) != 0) - { - fprintf (stderr, "%s: [%d]: got %s expected %s\n", name, index, - all[index].function, want_function); - *failed = 1; - } -} - -/* The backtrace callback function. 
*/ - -static int -callback_one (void *vdata, uintptr_t pc ATTRIBUTE_UNUSED, - const char *filename, int lineno, const char *function) -{ - struct bdata *data = (struct bdata *) vdata; - struct info *p; - - if (data->index >= data->max) - { - fprintf (stderr, "callback_one: callback called too many times\n"); - data->failed = 1; - return 1; - } - - p = &data->all[data->index]; - if (filename == NULL) - p->filename = NULL; - else - { - p->filename = strdup (filename); - assert (p->filename != NULL); - } - p->lineno = lineno; - if (function == NULL) - p->function = NULL; - else - { - p->function = strdup (function); - assert (p->function != NULL); - } - ++data->index; - - return 0; -} - -/* An error callback passed to backtrace. */ - -static void -error_callback_one (void *vdata, const char *msg, int errnum) -{ - struct bdata *data = (struct bdata *) vdata; - - fprintf (stderr, "%s", msg); - if (errnum > 0) - fprintf (stderr, ": %s", strerror (errnum)); - fprintf (stderr, "\n"); - data->failed = 1; -} - -/* The backtrace_simple callback function. */ - -static int -callback_two (void *vdata, uintptr_t pc) -{ - struct sdata *data = (struct sdata *) vdata; - - if (data->index >= data->max) - { - fprintf (stderr, "callback_two: callback called too many times\n"); - data->failed = 1; - return 1; - } - - data->addrs[data->index] = pc; - ++data->index; - - return 0; -} - -/* An error callback passed to backtrace_simple. */ - -static void -error_callback_two (void *vdata, const char *msg, int errnum) -{ - struct sdata *data = (struct sdata *) vdata; - - fprintf (stderr, "%s", msg); - if (errnum > 0) - fprintf (stderr, ": %s", strerror (errnum)); - fprintf (stderr, "\n"); - data->failed = 1; -} - -/* The backtrace_syminfo callback function. 
*/ - -static void -callback_three (void *vdata, uintptr_t pc ATTRIBUTE_UNUSED, - const char *symname, uintptr_t symval, - uintptr_t symsize) -{ - struct symdata *data = (struct symdata *) vdata; - - if (symname == NULL) - data->name = NULL; - else - { - data->name = strdup (symname); - assert (data->name != NULL); - } - data->val = symval; - data->size = symsize; -} - -/* The backtrace_syminfo error callback function. */ - -static void -error_callback_three (void *vdata, const char *msg, int errnum) -{ - struct symdata *data = (struct symdata *) vdata; - - fprintf (stderr, "%s", msg); - if (errnum > 0) - fprintf (stderr, ": %s", strerror (errnum)); - fprintf (stderr, "\n"); - data->failed = 1; -} - -/* Test the backtrace function with non-inlined functions. */ - -static int test1 (void) __attribute__ ((noinline, unused)); -static int f2 (int) __attribute__ ((noinline)); -static int f3 (int, int) __attribute__ ((noinline)); - -static int -test1 (void) -{ - /* Returning a value here and elsewhere avoids a tailcall which - would mess up the backtrace. */ - return f2 (__LINE__) + 1; -} - -static int -f2 (int f1line) -{ - return f3 (f1line, __LINE__) + 2; -} - -static int -f3 (int f1line, int f2line) -{ - struct info all[20]; - struct bdata data; - int f3line; - int i; - - data.all = &all[0]; - data.index = 0; - data.max = 20; - data.failed = 0; - - f3line = __LINE__ + 1; - i = backtrace_full (state, 0, callback_one, error_callback_one, &data); - - if (i != 0) - { - fprintf (stderr, "test1: unexpected return value %d\n", i); - data.failed = 1; - } - - if (data.index < 3) - { - fprintf (stderr, - "test1: not enough frames; got %zu, expected at least 3\n", - data.index); - data.failed = 1; - } - - check ("test1", 0, all, f3line, "f3", &data.failed); - check ("test1", 1, all, f2line, "f2", &data.failed); - check ("test1", 2, all, f1line, "test1", &data.failed); - - printf ("%s: backtrace_full noinline\n", data.failed ? 
"FAIL" : "PASS"); - - if (data.failed) - ++failures; - - return failures; -} - -/* Test the backtrace function with inlined functions. */ - -static inline int test2 (void) __attribute__ ((always_inline, unused)); -static inline int f12 (int) __attribute__ ((always_inline)); -static inline int f13 (int, int) __attribute__ ((always_inline)); - -static inline int -test2 (void) -{ - return f12 (__LINE__) + 1; -} - -static inline int -f12 (int f1line) -{ - return f13 (f1line, __LINE__) + 2; -} - -static inline int -f13 (int f1line, int f2line) -{ - struct info all[20]; - struct bdata data; - int f3line; - int i; - - data.all = &all[0]; - data.index = 0; - data.max = 20; - data.failed = 0; - - f3line = __LINE__ + 1; - i = backtrace_full (state, 0, callback_one, error_callback_one, &data); - - if (i != 0) - { - fprintf (stderr, "test2: unexpected return value %d\n", i); - data.failed = 1; - } - - check ("test2", 0, all, f3line, "f13", &data.failed); - check ("test2", 1, all, f2line, "f12", &data.failed); - check ("test2", 2, all, f1line, "test2", &data.failed); - - printf ("%s: backtrace_full inline\n", data.failed ? "FAIL" : "PASS"); - - if (data.failed) - ++failures; - - return failures; -} - -/* Test the backtrace_simple function with non-inlined functions. 
*/ - -static int test3 (void) __attribute__ ((noinline, unused)); -static int f22 (int) __attribute__ ((noinline)); -static int f23 (int, int) __attribute__ ((noinline)); - -static int -test3 (void) -{ - return f22 (__LINE__) + 1; -} - -static int -f22 (int f1line) -{ - return f23 (f1line, __LINE__) + 2; -} - -static int -f23 (int f1line, int f2line) -{ - uintptr_t addrs[20]; - struct sdata data; - int f3line; - int i; - - data.addrs = &addrs[0]; - data.index = 0; - data.max = 20; - data.failed = 0; - - f3line = __LINE__ + 1; - i = backtrace_simple (state, 0, callback_two, error_callback_two, &data); - - if (i != 0) - { - fprintf (stderr, "test3: unexpected return value %d\n", i); - data.failed = 1; - } - - if (!data.failed) - { - struct info all[20]; - struct bdata bdata; - int j; - - bdata.all = &all[0]; - bdata.index = 0; - bdata.max = 20; - bdata.failed = 0; - - for (j = 0; j < 3; ++j) - { - i = backtrace_pcinfo (state, addrs[j], callback_one, - error_callback_one, &bdata); - if (i != 0) - { - fprintf (stderr, - ("test3: unexpected return value " - "from backtrace_pcinfo %d\n"), - i); - bdata.failed = 1; - } - if (!bdata.failed && bdata.index != (size_t) (j + 1)) - { - fprintf (stderr, - ("wrong number of calls from backtrace_pcinfo " - "got %u expected %d\n"), - (unsigned int) bdata.index, j + 1); - bdata.failed = 1; - } - } - - check ("test3", 0, all, f3line, "f23", &bdata.failed); - check ("test3", 1, all, f2line, "f22", &bdata.failed); - check ("test3", 2, all, f1line, "test3", &bdata.failed); - - if (bdata.failed) - data.failed = 1; - - for (j = 0; j < 3; ++j) - { - struct symdata symdata; - - symdata.name = NULL; - symdata.val = 0; - symdata.size = 0; - symdata.failed = 0; - - i = backtrace_syminfo (state, addrs[j], callback_three, - error_callback_three, &symdata); - if (i == 0) - { - fprintf (stderr, - ("test3: [%d]: unexpected return value " - "from backtrace_syminfo %d\n"), - j, i); - symdata.failed = 1; - } - - if (!symdata.failed) - { - const char 
*expected; - - switch (j) - { - case 0: - expected = "f23"; - break; - case 1: - expected = "f22"; - break; - case 2: - expected = "test3"; - break; - default: - assert (0); - } - - if (symdata.name == NULL) - { - fprintf (stderr, "test3: [%d]: NULL syminfo name\n", j); - symdata.failed = 1; - } - /* Use strncmp, not strcmp, because GCC might create a - clone. */ - else if (strncmp (symdata.name, expected, strlen (expected)) - != 0) - { - fprintf (stderr, - ("test3: [%d]: unexpected syminfo name " - "got %s expected %s\n"), - j, symdata.name, expected); - symdata.failed = 1; - } - } - - if (symdata.failed) - data.failed = 1; - } - } - - printf ("%s: backtrace_simple noinline\n", data.failed ? "FAIL" : "PASS"); - - if (data.failed) - ++failures; - - return failures; -} - -/* Test the backtrace_simple function with inlined functions. */ - -static inline int test4 (void) __attribute__ ((always_inline, unused)); -static inline int f32 (int) __attribute__ ((always_inline)); -static inline int f33 (int, int) __attribute__ ((always_inline)); - -static inline int -test4 (void) -{ - return f32 (__LINE__) + 1; -} - -static inline int -f32 (int f1line) -{ - return f33 (f1line, __LINE__) + 2; -} - -static inline int -f33 (int f1line, int f2line) -{ - uintptr_t addrs[20]; - struct sdata data; - int f3line; - int i; - - data.addrs = &addrs[0]; - data.index = 0; - data.max = 20; - data.failed = 0; - - f3line = __LINE__ + 1; - i = backtrace_simple (state, 0, callback_two, error_callback_two, &data); - - if (i != 0) - { - fprintf (stderr, "test3: unexpected return value %d\n", i); - data.failed = 1; - } - - if (!data.failed) - { - struct info all[20]; - struct bdata bdata; - - bdata.all = &all[0]; - bdata.index = 0; - bdata.max = 20; - bdata.failed = 0; - - i = backtrace_pcinfo (state, addrs[0], callback_one, error_callback_one, - &bdata); - if (i != 0) - { - fprintf (stderr, - ("test4: unexpected return value " - "from backtrace_pcinfo %d\n"), - i); - bdata.failed = 1; - } - - 
check ("test4", 0, all, f3line, "f33", &bdata.failed); - check ("test4", 1, all, f2line, "f32", &bdata.failed); - check ("test4", 2, all, f1line, "test4", &bdata.failed); - - if (bdata.failed) - data.failed = 1; - } - - printf ("%s: backtrace_simple inline\n", data.failed ? "FAIL" : "PASS"); - - if (data.failed) - ++failures; - - return failures; -} - -#if BACKTRACE_SUPPORTS_DATA - -int global = 1; - -static int -test5 (void) -{ - struct symdata symdata; - int i; - uintptr_t addr = (uintptr_t) &global; - - if (sizeof (global) > 1) - addr += 1; - - symdata.name = NULL; - symdata.val = 0; - symdata.size = 0; - symdata.failed = 0; - - i = backtrace_syminfo (state, addr, callback_three, - error_callback_three, &symdata); - if (i == 0) - { - fprintf (stderr, - "test5: unexpected return value from backtrace_syminfo %d\n", - i); - symdata.failed = 1; - } - - if (!symdata.failed) - { - if (symdata.name == NULL) - { - fprintf (stderr, "test5: NULL syminfo name\n"); - symdata.failed = 1; - } - else if (strcmp (symdata.name, "global") != 0) - { - fprintf (stderr, - "test5: unexpected syminfo name got %s expected %s\n", - symdata.name, "global"); - symdata.failed = 1; - } - else if (symdata.val != (uintptr_t) &global) - { - fprintf (stderr, - "test5: unexpected syminfo value got %lx expected %lx\n", - (unsigned long) symdata.val, - (unsigned long) (uintptr_t) &global); - symdata.failed = 1; - } - else if (symdata.size != sizeof (global)) - { - fprintf (stderr, - "test5: unexpected syminfo size got %lx expected %lx\n", - (unsigned long) symdata.size, - (unsigned long) sizeof (global)); - symdata.failed = 1; - } - } - - printf ("%s: backtrace_syminfo variable\n", - symdata.failed ? 
"FAIL" : "PASS"); - - if (symdata.failed) - ++failures; - - return failures; -} - -#endif /* BACKTRACE_SUPPORTS_DATA */ - -static void -error_callback_create (void *data ATTRIBUTE_UNUSED, const char *msg, - int errnum) -{ - fprintf (stderr, "%s", msg); - if (errnum > 0) - fprintf (stderr, ": %s", strerror (errnum)); - fprintf (stderr, "\n"); - exit (EXIT_FAILURE); -} - -/* Run all the tests. */ - -int -main (int argc ATTRIBUTE_UNUSED, char **argv) -{ - state = backtrace_create_state (argv[0], BACKTRACE_SUPPORTS_THREADS, - error_callback_create, NULL); - -#if BACKTRACE_SUPPORTED - test1 (); - test2 (); - test3 (); - test4 (); -#if BACKTRACE_SUPPORTS_DATA - test5 (); -#endif -#endif - - exit (failures ? EXIT_FAILURE : EXIT_SUCCESS); -} diff --git a/src/libbacktrace/config.h.in b/src/libbacktrace/config.h.in deleted file mode 100644 index 87cb805984d4..000000000000 --- a/src/libbacktrace/config.h.in +++ /dev/null @@ -1,134 +0,0 @@ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* ELF size: 32 or 64 */ -#undef BACKTRACE_ELF_SIZE - -/* Define to 1 if you have the __atomic functions */ -#undef HAVE_ATOMIC_FUNCTIONS - -/* Define to 1 if you have the declaration of `strnlen', and to 0 if you - don't. */ -#undef HAVE_DECL_STRNLEN - -/* Define to 1 if you have the header file. */ -#undef HAVE_DLFCN_H - -/* Define if dl_iterate_phdr is available. */ -#undef HAVE_DL_ITERATE_PHDR - -/* Define to 1 if you have the fcntl function */ -#undef HAVE_FCNTL - -/* Define if getexecname is available. */ -#undef HAVE_GETEXECNAME - -/* Define if _Unwind_GetIPInfo is available. */ -#undef HAVE_GETIPINFO - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_LINK_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. 
*/ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the __sync functions */ -#undef HAVE_SYNC_FUNCTIONS - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_MMAN_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ -#undef LT_OBJDIR - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* The size of `char', as computed by sizeof. */ -#undef SIZEOF_CHAR - -/* The size of `int', as computed by sizeof. */ -#undef SIZEOF_INT - -/* The size of `long', as computed by sizeof. */ -#undef SIZEOF_LONG - -/* The size of `short', as computed by sizeof. */ -#undef SIZEOF_SHORT - -/* The size of `void *', as computed by sizeof. */ -#undef SIZEOF_VOID_P - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Enable extensions on AIX 3, Interix. */ -#ifndef _ALL_SOURCE -# undef _ALL_SOURCE -#endif -/* Enable GNU extensions on systems that have them. */ -#ifndef _GNU_SOURCE -# undef _GNU_SOURCE -#endif -/* Enable threading extensions on Solaris. */ -#ifndef _POSIX_PTHREAD_SEMANTICS -# undef _POSIX_PTHREAD_SEMANTICS -#endif -/* Enable extensions on HP NonStop. 
*/ -#ifndef _TANDEM_SOURCE -# undef _TANDEM_SOURCE -#endif -/* Enable general extensions on Solaris. */ -#ifndef __EXTENSIONS__ -# undef __EXTENSIONS__ -#endif - - -/* Define to 1 if on MINIX. */ -#undef _MINIX - -/* Define to 2 if the system does not provide POSIX.1 features except with - this defined. */ -#undef _POSIX_1_SOURCE - -/* Define to 1 if you need to in order for `stat' and other things to work. */ -#undef _POSIX_SOURCE diff --git a/src/libbacktrace/config.sub b/src/libbacktrace/config.sub deleted file mode 100644 index 40ea5dfe1152..000000000000 --- a/src/libbacktrace/config.sub +++ /dev/null @@ -1,1836 +0,0 @@ -#! /bin/sh -# Configuration validation subroutine script. -# Copyright 1992-2017 Free Software Foundation, Inc. - -timestamp='2017-04-02' - -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that -# program. This Exception is an additional permission under section 7 -# of the GNU General Public License, version 3 ("GPLv3"). - - -# Please send patches to . -# -# Configuration subroutine to validate and canonicalize a configuration type. -# Supply the specified configuration type as an argument. 
-# If it is invalid, we print an error message on stderr and exit with code 1. -# Otherwise, we print the canonical config type on stdout and succeed. - -# You can get the latest version of this script from: -# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub - -# This file is supposed to be the same for all GNU packages -# and recognize all the CPU types, system types and aliases -# that are meaningful with *any* GNU software. -# Each package is responsible for reporting which valid configurations -# it does not support. The user should be able to distinguish -# a failure to support a valid configuration from a meaningless -# configuration. - -# The goal of this file is to map all the various variations of a given -# machine specification into a single specification in the form: -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or in some cases, the newer four-part form: -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# It is wrong to echo any other type of specification. - -me=`echo "$0" | sed -e 's,.*/,,'` - -usage="\ -Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS - -Canonicalize a configuration name. - -Operation modes: - -h, --help print this help, then exit - -t, --time-stamp print date of last modification, then exit - -v, --version print version number, then exit - -Report bugs and patches to ." - -version="\ -GNU config.sub ($timestamp) - -Copyright 1992-2017 Free Software Foundation, Inc. - -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." - -help=" -Try \`$me --help' for more information." - -# Parse command line -while test $# -gt 0 ; do - case $1 in - --time-stamp | --time* | -t ) - echo "$timestamp" ; exit ;; - --version | -v ) - echo "$version" ; exit ;; - --help | --h* | -h ) - echo "$usage"; exit ;; - -- ) # Stop option processing - shift; break ;; - - ) # Use stdin as input. 
- break ;; - -* ) - echo "$me: invalid option $1$help" - exit 1 ;; - - *local*) - # First pass through any local machine types. - echo $1 - exit ;; - - * ) - break ;; - esac -done - -case $# in - 0) echo "$me: missing argument$help" >&2 - exit 1;; - 1) ;; - *) echo "$me: too many arguments$help" >&2 - exit 1;; -esac - -# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). -# Here we must recognize all the valid KERNEL-OS combinations. -maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` -case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ - linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ - knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ - kopensolaris*-gnu* | cloudabi*-eabi* | \ - storm-chaos* | os2-emx* | rtmk-nova*) - os=-$maybe_os - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` - ;; - android-linux) - os=-linux-android - basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown - ;; - *) - basic_machine=`echo $1 | sed 's/-[^-]*$//'` - if [ $basic_machine != $1 ] - then os=`echo $1 | sed 's/.*-/-/'` - else os=; fi - ;; -esac - -### Let's recognize common machines as not being operating systems so -### that things like config.sub decstation-3100 work. We also -### recognize some manufacturers as not being operating systems, so we -### can provide default operating systems below. -case $os in - -sun*os*) - # Prevent following clause from handling this invalid input. 
- ;; - -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ - -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ - -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ - -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ - -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ - -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ - -apple | -axis | -knuth | -cray | -microblaze*) - os= - basic_machine=$1 - ;; - -bluegene*) - os=-cnk - ;; - -sim | -cisco | -oki | -wec | -winbond) - os= - basic_machine=$1 - ;; - -scout) - ;; - -wrs) - os=-vxworks - basic_machine=$1 - ;; - -chorusos*) - os=-chorusos - basic_machine=$1 - ;; - -chorusrdb) - os=-chorusrdb - basic_machine=$1 - ;; - -hiux*) - os=-hiuxwe2 - ;; - -sco6) - os=-sco5v6 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5) - os=-sco3.2v5 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco4) - os=-sco3.2v4 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2.[4-9]*) - os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco3.2v[4-9]*) - # Don't forget version if it is 3.2v4 or newer. - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco5v6*) - # Don't forget version if it is 3.2v4 or newer. 
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -sco*) - os=-sco3.2v2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -udk*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -isc) - os=-isc2.2 - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -clix*) - basic_machine=clipper-intergraph - ;; - -isc*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` - ;; - -lynx*178) - os=-lynxos178 - ;; - -lynx*5) - os=-lynxos5 - ;; - -lynx*) - os=-lynxos - ;; - -ptx*) - basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` - ;; - -windowsnt*) - os=`echo $os | sed -e 's/windowsnt/winnt/'` - ;; - -psos*) - os=-psos - ;; - -mint | -mint[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; -esac - -# Decode aliases for certain CPU-COMPANY combinations. -case $basic_machine in - # Recognize the basic CPU types without company name. - # Some are omitted here because they have special meanings below. - 1750a | 580 \ - | a29k \ - | aarch64 | aarch64_be \ - | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ - | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ - | am33_2.0 \ - | arc | arceb \ - | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ - | avr | avr32 \ - | ba \ - | be32 | be64 \ - | bfin \ - | c4x | c8051 | clipper \ - | d10v | d30v | dlx | dsp16xx \ - | e2k | epiphany \ - | fido | fr30 | frv | ft32 \ - | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ - | hexagon \ - | i370 | i860 | i960 | ia16 | ia64 \ - | ip2k | iq2000 \ - | k1om \ - | le32 | le64 \ - | lm32 \ - | m32c | m32r | m32rle | m68000 | m68k | m88k \ - | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el 
\ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r6 | mipsisa32r6el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64r6 | mipsisa64r6el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ - | mn10200 | mn10300 \ - | moxie \ - | mt \ - | msp430 \ - | nds32 | nds32le | nds32be \ - | nios | nios2 | nios2eb | nios2el \ - | ns16k | ns32k \ - | open8 | or1k | or1knd | or32 \ - | pdp10 | pdp11 | pj | pjl \ - | powerpc | powerpc64 | powerpc64le | powerpcle \ - | pru \ - | pyramid \ - | riscv32 | riscv64 \ - | rl78 | rx \ - | score \ - | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ - | sh64 | sh64le \ - | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ - | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ - | spu \ - | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ - | ubicom32 \ - | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | visium \ - | wasm32 \ - | we32k \ - | x86 | xc16x | xstormy16 | xtensa \ - | z8k | z80) - basic_machine=$basic_machine-unknown - ;; - c54x) - basic_machine=tic54x-unknown - ;; - c55x) - basic_machine=tic55x-unknown - ;; - c6x) - basic_machine=tic6x-unknown - ;; - leon|leon[3-9]) - basic_machine=sparc-$basic_machine - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) - basic_machine=$basic_machine-unknown - os=-none - ;; - m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) - ;; - ms1) - basic_machine=mt-unknown - ;; - - strongarm | thumb | xscale) - basic_machine=arm-unknown - ;; - xgate) - basic_machine=$basic_machine-unknown - os=-none - ;; - xscaleeb) - basic_machine=armeb-unknown - ;; - - xscaleel) - basic_machine=armel-unknown - ;; - - # We use `pc' rather than `unknown' - # because (1) that's what 
they normally are, and - # (2) the word "unknown" tends to confuse beginning users. - i*86 | x86_64) - basic_machine=$basic_machine-pc - ;; - # Object if more than one company name word. - *-*-*) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; - # Recognize the basic CPU types with company name. - 580-* \ - | a29k-* \ - | aarch64-* | aarch64_be-* \ - | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ - | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ - | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ - | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ - | avr-* | avr32-* \ - | ba-* \ - | be32-* | be64-* \ - | bfin-* | bs2000-* \ - | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ - | d10v-* | d30v-* | dlx-* \ - | e2k-* | elxsi-* \ - | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ - | h8300-* | h8500-* \ - | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ - | hexagon-* \ - | i*86-* | i860-* | i960-* | ia16-* | ia64-* \ - | ip2k-* | iq2000-* \ - | k1om-* \ - | le32-* | le64-* \ - | lm32-* \ - | m32c-* | m32r-* | m32rle-* \ - | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ - | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ - | microblaze-* | microblazeel-* \ - | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ - | mips16-* \ - | mips64-* | mips64el-* \ - | mips64octeon-* | mips64octeonel-* \ - | mips64orion-* | mips64orionel-* \ - | mips64r5900-* | mips64r5900el-* \ - | mips64vr-* | mips64vrel-* \ - | mips64vr4100-* | mips64vr4100el-* \ - | mips64vr4300-* | mips64vr4300el-* \ - | mips64vr5000-* | mips64vr5000el-* \ - | mips64vr5900-* | mips64vr5900el-* \ - | mipsisa32-* | mipsisa32el-* \ - | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa32r6-* | mipsisa32r6el-* \ - | mipsisa64-* | mipsisa64el-* \ - | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64r6-* | mipsisa64r6el-* \ - | mipsisa64sb1-* | 
mipsisa64sb1el-* \ - | mipsisa64sr71k-* | mipsisa64sr71kel-* \ - | mipsr5900-* | mipsr5900el-* \ - | mipstx39-* | mipstx39el-* \ - | mmix-* \ - | mt-* \ - | msp430-* \ - | nds32-* | nds32le-* | nds32be-* \ - | nios-* | nios2-* | nios2eb-* | nios2el-* \ - | none-* | np1-* | ns16k-* | ns32k-* \ - | open8-* \ - | or1k*-* \ - | orion-* \ - | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ - | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ - | pru-* \ - | pyramid-* \ - | riscv32-* | riscv64-* \ - | rl78-* | romp-* | rs6000-* | rx-* \ - | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ - | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ - | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ - | sparclite-* \ - | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ - | tahoe-* \ - | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ - | tile*-* \ - | tron-* \ - | ubicom32-* \ - | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ - | vax-* \ - | visium-* \ - | wasm32-* \ - | we32k-* \ - | x86-* | x86_64-* | xc16x-* | xps100-* \ - | xstormy16-* | xtensa*-* \ - | ymp-* \ - | z8k-* | z80-*) - ;; - # Recognize the basic CPU types without company name, with glob match. - xtensa*) - basic_machine=$basic_machine-unknown - ;; - # Recognize the various machine names and aliases which stand - # for a CPU type and a company and sometimes even an OS. 
- 386bsd) - basic_machine=i386-unknown - os=-bsd - ;; - 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) - basic_machine=m68000-att - ;; - 3b*) - basic_machine=we32k-att - ;; - a29khif) - basic_machine=a29k-amd - os=-udi - ;; - abacus) - basic_machine=abacus-unknown - ;; - adobe68k) - basic_machine=m68010-adobe - os=-scout - ;; - alliant | fx80) - basic_machine=fx80-alliant - ;; - altos | altos3068) - basic_machine=m68k-altos - ;; - am29k) - basic_machine=a29k-none - os=-bsd - ;; - amd64) - basic_machine=x86_64-pc - ;; - amd64-*) - basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - amdahl) - basic_machine=580-amdahl - os=-sysv - ;; - amiga | amiga-*) - basic_machine=m68k-unknown - ;; - amigaos | amigados) - basic_machine=m68k-unknown - os=-amigaos - ;; - amigaunix | amix) - basic_machine=m68k-unknown - os=-sysv4 - ;; - apollo68) - basic_machine=m68k-apollo - os=-sysv - ;; - apollo68bsd) - basic_machine=m68k-apollo - os=-bsd - ;; - aros) - basic_machine=i386-pc - os=-aros - ;; - asmjs) - basic_machine=asmjs-unknown - ;; - aux) - basic_machine=m68k-apple - os=-aux - ;; - balance) - basic_machine=ns32k-sequent - os=-dynix - ;; - blackfin) - basic_machine=bfin-unknown - os=-linux - ;; - blackfin-*) - basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - bluegene*) - basic_machine=powerpc-ibm - os=-cnk - ;; - c54x-*) - basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c55x-*) - basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c6x-*) - basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - c90) - basic_machine=c90-cray - os=-unicos - ;; - cegcc) - basic_machine=arm-unknown - os=-cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=-bsd - ;; - convex-c2) - basic_machine=c2-convex - os=-bsd - ;; - convex-c32) - basic_machine=c32-convex - os=-bsd - ;; - convex-c34) - basic_machine=c34-convex - os=-bsd - ;; - convex-c38) - basic_machine=c38-convex - os=-bsd - ;; 
- cray | j90) - basic_machine=j90-cray - os=-unicos - ;; - craynv) - basic_machine=craynv-cray - os=-unicosmp - ;; - cr16 | cr16-*) - basic_machine=cr16-unknown - os=-elf - ;; - crds | unos) - basic_machine=m68k-crds - ;; - crisv32 | crisv32-* | etraxfs*) - basic_machine=crisv32-axis - ;; - cris | cris-* | etrax*) - basic_machine=cris-axis - ;; - crx) - basic_machine=crx-unknown - os=-elf - ;; - da30 | da30-*) - basic_machine=m68k-da30 - ;; - decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) - basic_machine=mips-dec - ;; - decsystem10* | dec10*) - basic_machine=pdp10-dec - os=-tops10 - ;; - decsystem20* | dec20*) - basic_machine=pdp10-dec - os=-tops20 - ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) - basic_machine=m68k-motorola - ;; - delta88) - basic_machine=m88k-motorola - os=-sysv3 - ;; - dicos) - basic_machine=i686-pc - os=-dicos - ;; - djgpp) - basic_machine=i586-pc - os=-msdosdjgpp - ;; - dpx20 | dpx20-*) - basic_machine=rs6000-bull - os=-bosx - ;; - dpx2* | dpx2*-bull) - basic_machine=m68k-bull - os=-sysv3 - ;; - e500v[12]) - basic_machine=powerpc-unknown - os=$os"spe" - ;; - e500v[12]-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - os=$os"spe" - ;; - ebmon29k) - basic_machine=a29k-amd - os=-ebmon - ;; - elxsi) - basic_machine=elxsi-elxsi - os=-bsd - ;; - encore | umax | mmax) - basic_machine=ns32k-encore - ;; - es1800 | OSE68k | ose68k | ose | OSE) - basic_machine=m68k-ericsson - os=-ose - ;; - fx2800) - basic_machine=i860-alliant - ;; - genix) - basic_machine=ns32k-ns - ;; - gmicro) - basic_machine=tron-gmicro - os=-sysv - ;; - go32) - basic_machine=i386-pc - os=-go32 - ;; - h3050r* | hiux*) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - h8300hms) - basic_machine=h8300-hitachi - os=-hms - ;; - h8300xray) - basic_machine=h8300-hitachi - os=-xray - ;; - h8500hms) - basic_machine=h8500-hitachi - os=-hms - ;; - harris) - basic_machine=m88k-harris - os=-sysv3 - ;; - 
hp300-*) - basic_machine=m68k-hp - ;; - hp300bsd) - basic_machine=m68k-hp - os=-bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=-hpux - ;; - hp3k9[0-9][0-9] | hp9[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k2[0-9][0-9] | hp9k31[0-9]) - basic_machine=m68000-hp - ;; - hp9k3[2-9][0-9]) - basic_machine=m68k-hp - ;; - hp9k6[0-9][0-9] | hp6[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hp9k7[0-79][0-9] | hp7[0-79][0-9]) - basic_machine=hppa1.1-hp - ;; - hp9k78[0-9] | hp78[0-9]) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) - # FIXME: really hppa2.0-hp - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][13679] | hp8[0-9][13679]) - basic_machine=hppa1.1-hp - ;; - hp9k8[0-9][0-9] | hp8[0-9][0-9]) - basic_machine=hppa1.0-hp - ;; - hppa-next) - os=-nextstep3 - ;; - hppaosf) - basic_machine=hppa1.1-hp - os=-osf - ;; - hppro) - basic_machine=hppa1.1-hp - os=-proelf - ;; - i370-ibm* | ibm*) - basic_machine=i370-ibm - ;; - i*86v32) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv32 - ;; - i*86v4*) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv4 - ;; - i*86v) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-sysv - ;; - i*86sol2) - basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` - os=-solaris2 - ;; - i386mach) - basic_machine=i386-mach - os=-mach - ;; - i386-vsta | vsta) - basic_machine=i386-unknown - os=-vsta - ;; - iris | iris4d) - basic_machine=mips-sgi - case $os in - -irix*) - ;; - *) - os=-irix4 - ;; - esac - ;; - isi68 | isi) - basic_machine=m68k-isi - os=-sysv - ;; - leon-*|leon[3-9]-*) - basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` - ;; - m68knommu) - basic_machine=m68k-unknown - os=-linux - ;; - m68knommu-*) - basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - m88k-omron*) - basic_machine=m88k-omron - ;; - magnum | m3230) - basic_machine=mips-mips - os=-sysv - ;; - merlin) - 
basic_machine=ns32k-utek - os=-sysv - ;; - microblaze*) - basic_machine=microblaze-xilinx - ;; - mingw64) - basic_machine=x86_64-pc - os=-mingw64 - ;; - mingw32) - basic_machine=i686-pc - os=-mingw32 - ;; - mingw32ce) - basic_machine=arm-unknown - os=-mingw32ce - ;; - miniframe) - basic_machine=m68000-convergent - ;; - *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) - basic_machine=m68k-atari - os=-mint - ;; - mips3*-*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` - ;; - mips3*) - basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown - ;; - monitor) - basic_machine=m68k-rom68k - os=-coff - ;; - morphos) - basic_machine=powerpc-unknown - os=-morphos - ;; - moxiebox) - basic_machine=moxie-unknown - os=-moxiebox - ;; - msdos) - basic_machine=i386-pc - os=-msdos - ;; - ms1-*) - basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` - ;; - msys) - basic_machine=i686-pc - os=-msys - ;; - mvs) - basic_machine=i370-ibm - os=-mvs - ;; - nacl) - basic_machine=le32-unknown - os=-nacl - ;; - ncr3000) - basic_machine=i486-ncr - os=-sysv4 - ;; - netbsd386) - basic_machine=i386-unknown - os=-netbsd - ;; - netwinder) - basic_machine=armv4l-rebel - os=-linux - ;; - news | news700 | news800 | news900) - basic_machine=m68k-sony - os=-newsos - ;; - news1000) - basic_machine=m68030-sony - os=-newsos - ;; - news-3600 | risc-news) - basic_machine=mips-sony - os=-newsos - ;; - necv70) - basic_machine=v70-nec - os=-sysv - ;; - next | m*-next ) - basic_machine=m68k-next - case $os in - -nextstep* ) - ;; - -ns2*) - os=-nextstep2 - ;; - *) - os=-nextstep3 - ;; - esac - ;; - nh3000) - basic_machine=m68k-harris - os=-cxux - ;; - nh[45]000) - basic_machine=m88k-harris - os=-cxux - ;; - nindy960) - basic_machine=i960-intel - os=-nindy - ;; - mon960) - basic_machine=i960-intel - os=-mon960 - ;; - nonstopux) - basic_machine=mips-compaq - os=-nonstopux - ;; - np1) - basic_machine=np1-gould - ;; - neo-tandem) - basic_machine=neo-tandem - ;; - nse-tandem) - 
basic_machine=nse-tandem - ;; - nsr-tandem) - basic_machine=nsr-tandem - ;; - nsx-tandem) - basic_machine=nsx-tandem - ;; - op50n-* | op60c-*) - basic_machine=hppa1.1-oki - os=-proelf - ;; - openrisc | openrisc-*) - basic_machine=or32-unknown - ;; - os400) - basic_machine=powerpc-ibm - os=-os400 - ;; - OSE68000 | ose68000) - basic_machine=m68000-ericsson - os=-ose - ;; - os68k) - basic_machine=m68k-none - os=-os68k - ;; - pa-hitachi) - basic_machine=hppa1.1-hitachi - os=-hiuxwe2 - ;; - paragon) - basic_machine=i860-intel - os=-osf - ;; - parisc) - basic_machine=hppa-unknown - os=-linux - ;; - parisc-*) - basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` - os=-linux - ;; - pbd) - basic_machine=sparc-tti - ;; - pbb) - basic_machine=m68k-tti - ;; - pc532 | pc532-*) - basic_machine=ns32k-pc532 - ;; - pc98) - basic_machine=i386-pc - ;; - pc98-*) - basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium | p5 | k5 | k6 | nexgen | viac3) - basic_machine=i586-pc - ;; - pentiumpro | p6 | 6x86 | athlon | athlon_*) - basic_machine=i686-pc - ;; - pentiumii | pentium2 | pentiumiii | pentium3) - basic_machine=i686-pc - ;; - pentium4) - basic_machine=i786-pc - ;; - pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) - basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumpro-* | p6-* | 6x86-* | athlon-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) - basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pentium4-*) - basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - pn) - basic_machine=pn-gould - ;; - power) basic_machine=power-ibm - ;; - ppc | ppcbe) basic_machine=powerpc-unknown - ;; - ppc-* | ppcbe-*) - basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppcle | powerpclittle) - basic_machine=powerpcle-unknown - ;; - ppcle-* | powerpclittle-*) - basic_machine=powerpcle-`echo $basic_machine | sed 
's/^[^-]*-//'` - ;; - ppc64) basic_machine=powerpc64-unknown - ;; - ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ppc64le | powerpc64little) - basic_machine=powerpc64le-unknown - ;; - ppc64le-* | powerpc64little-*) - basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - ps2) - basic_machine=i386-ibm - ;; - pw32) - basic_machine=i586-unknown - os=-pw32 - ;; - rdos | rdos64) - basic_machine=x86_64-pc - os=-rdos - ;; - rdos32) - basic_machine=i386-pc - os=-rdos - ;; - rom68k) - basic_machine=m68k-rom68k - os=-coff - ;; - rm[46]00) - basic_machine=mips-siemens - ;; - rtpc | rtpc-*) - basic_machine=romp-ibm - ;; - s390 | s390-*) - basic_machine=s390-ibm - ;; - s390x | s390x-*) - basic_machine=s390x-ibm - ;; - sa29200) - basic_machine=a29k-amd - os=-udi - ;; - sb1) - basic_machine=mipsisa64sb1-unknown - ;; - sb1el) - basic_machine=mipsisa64sb1el-unknown - ;; - sde) - basic_machine=mipsisa32-sde - os=-elf - ;; - sei) - basic_machine=mips-sei - os=-seiux - ;; - sequent) - basic_machine=i386-sequent - ;; - sh) - basic_machine=sh-hitachi - os=-hms - ;; - sh5el) - basic_machine=sh5le-unknown - ;; - sh64) - basic_machine=sh64-unknown - ;; - sparclite-wrs | simso-wrs) - basic_machine=sparclite-wrs - os=-vxworks - ;; - sps7) - basic_machine=m68k-bull - os=-sysv2 - ;; - spur) - basic_machine=spur-unknown - ;; - st2000) - basic_machine=m68k-tandem - ;; - stratus) - basic_machine=i860-stratus - os=-sysv4 - ;; - strongarm-* | thumb-*) - basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` - ;; - sun2) - basic_machine=m68000-sun - ;; - sun2os3) - basic_machine=m68000-sun - os=-sunos3 - ;; - sun2os4) - basic_machine=m68000-sun - os=-sunos4 - ;; - sun3os3) - basic_machine=m68k-sun - os=-sunos3 - ;; - sun3os4) - basic_machine=m68k-sun - os=-sunos4 - ;; - sun4os3) - basic_machine=sparc-sun - os=-sunos3 - ;; - sun4os4) - basic_machine=sparc-sun - os=-sunos4 - ;; - sun4sol2) - basic_machine=sparc-sun - os=-solaris2 - ;; - sun3 
| sun3-*) - basic_machine=m68k-sun - ;; - sun4) - basic_machine=sparc-sun - ;; - sun386 | sun386i | roadrunner) - basic_machine=i386-sun - ;; - sv1) - basic_machine=sv1-cray - os=-unicos - ;; - symmetry) - basic_machine=i386-sequent - os=-dynix - ;; - t3e) - basic_machine=alphaev5-cray - os=-unicos - ;; - t90) - basic_machine=t90-cray - os=-unicos - ;; - tile*) - basic_machine=$basic_machine-unknown - os=-linux-gnu - ;; - tx39) - basic_machine=mipstx39-unknown - ;; - tx39el) - basic_machine=mipstx39el-unknown - ;; - toad1) - basic_machine=pdp10-xkl - os=-tops20 - ;; - tower | tower-32) - basic_machine=m68k-ncr - ;; - tpf) - basic_machine=s390x-ibm - os=-tpf - ;; - udi29k) - basic_machine=a29k-amd - os=-udi - ;; - ultra3) - basic_machine=a29k-nyu - os=-sym1 - ;; - v810 | necv810) - basic_machine=v810-nec - os=-none - ;; - vaxv) - basic_machine=vax-dec - os=-sysv - ;; - vms) - basic_machine=vax-dec - os=-vms - ;; - vpp*|vx|vx-*) - basic_machine=f301-fujitsu - ;; - vxworks960) - basic_machine=i960-wrs - os=-vxworks - ;; - vxworks68) - basic_machine=m68k-wrs - os=-vxworks - ;; - vxworks29k) - basic_machine=a29k-wrs - os=-vxworks - ;; - wasm32) - basic_machine=wasm32-unknown - ;; - w65*) - basic_machine=w65-wdc - os=-none - ;; - w89k-*) - basic_machine=hppa1.1-winbond - os=-proelf - ;; - xbox) - basic_machine=i686-pc - os=-mingw32 - ;; - xps | xps100) - basic_machine=xps100-honeywell - ;; - xscale-* | xscalee[bl]-*) - basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` - ;; - ymp) - basic_machine=ymp-cray - os=-unicos - ;; - z8k-*-coff) - basic_machine=z8k-unknown - os=-sim - ;; - z80-*-coff) - basic_machine=z80-unknown - os=-sim - ;; - none) - basic_machine=none-none - os=-none - ;; - -# Here we handle the default manufacturer of certain CPU types. It is in -# some cases the only manufacturer, in others, it is the most popular. 
- w89k) - basic_machine=hppa1.1-winbond - ;; - op50n) - basic_machine=hppa1.1-oki - ;; - op60c) - basic_machine=hppa1.1-oki - ;; - romp) - basic_machine=romp-ibm - ;; - mmix) - basic_machine=mmix-knuth - ;; - rs6000) - basic_machine=rs6000-ibm - ;; - vax) - basic_machine=vax-dec - ;; - pdp10) - # there are many clones, so DEC is not a safe bet - basic_machine=pdp10-unknown - ;; - pdp11) - basic_machine=pdp11-dec - ;; - we32k) - basic_machine=we32k-att - ;; - sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) - basic_machine=sh-unknown - ;; - sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) - basic_machine=sparc-sun - ;; - cydra) - basic_machine=cydra-cydrome - ;; - orion) - basic_machine=orion-highlevel - ;; - orion105) - basic_machine=clipper-highlevel - ;; - mac | mpw | mac-mpw) - basic_machine=m68k-apple - ;; - pmac | pmac-mpw) - basic_machine=powerpc-apple - ;; - *-unknown) - # Make sure to match an already-canonicalized machine name. - ;; - *) - echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 - exit 1 - ;; -esac - -# Here we canonicalize certain aliases for manufacturers. -case $basic_machine in - *-digital*) - basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` - ;; - *-commodore*) - basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` - ;; - *) - ;; -esac - -# Decode manufacturer-specific aliases for certain operating systems. - -if [ x"$os" != x"" ] -then -case $os in - # First match some system type aliases - # that might get confused with valid system types. - # -solaris* is a basic system type, with this one exception. - -auroraux) - os=-auroraux - ;; - -solaris1 | -solaris1.*) - os=`echo $os | sed -e 's|solaris1|sunos4|'` - ;; - -solaris) - os=-solaris2 - ;; - -svr4*) - os=-sysv4 - ;; - -unixware*) - os=-sysv4.2uw - ;; - -gnu/linux*) - os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` - ;; - # First accept the basic system types. - # The portable systems comes first. 
- # Each alternative MUST END IN A *, to match a version number. - # -sysv* is not here because it comes later, after sysvr4. - -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ - | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ - | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ - | -sym* | -kopensolaris* | -plan9* \ - | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ - | -aos* | -aros* | -cloudabi* | -sortix* \ - | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ - | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ - | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ - | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ - | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ - | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ - | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ - | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ - | -chorusos* | -chorusrdb* | -cegcc* | -glidix* \ - | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ - | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ - | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ - | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ - | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ - | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ - | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ - | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ - | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ - | -onefs* | -tirtos* | -phoenix* | -fuchsia* | -redox*) - # Remember, each alternative MUST END IN *, to match a version number. 
- ;; - -qnx*) - case $basic_machine in - x86-* | i*86-*) - ;; - *) - os=-nto$os - ;; - esac - ;; - -nto-qnx*) - ;; - -nto*) - os=`echo $os | sed -e 's|nto|nto-qnx|'` - ;; - -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ - | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ - | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) - ;; - -mac*) - os=`echo $os | sed -e 's|mac|macos|'` - ;; - -linux-dietlibc) - os=-linux-dietlibc - ;; - -linux*) - os=`echo $os | sed -e 's|linux|linux-gnu|'` - ;; - -sunos5*) - os=`echo $os | sed -e 's|sunos5|solaris2|'` - ;; - -sunos6*) - os=`echo $os | sed -e 's|sunos6|solaris3|'` - ;; - -opened*) - os=-openedition - ;; - -os400*) - os=-os400 - ;; - -wince*) - os=-wince - ;; - -osfrose*) - os=-osfrose - ;; - -osf*) - os=-osf - ;; - -utek*) - os=-bsd - ;; - -dynix*) - os=-bsd - ;; - -acis*) - os=-aos - ;; - -atheos*) - os=-atheos - ;; - -syllable*) - os=-syllable - ;; - -386bsd) - os=-bsd - ;; - -ctix* | -uts*) - os=-sysv - ;; - -nova*) - os=-rtmk-nova - ;; - -ns2 ) - os=-nextstep2 - ;; - -nsk*) - os=-nsk - ;; - # Preserve the version number of sinix5. - -sinix5.*) - os=`echo $os | sed -e 's|sinix|sysv|'` - ;; - -sinix*) - os=-sysv4 - ;; - -tpf*) - os=-tpf - ;; - -triton*) - os=-sysv3 - ;; - -oss*) - os=-sysv3 - ;; - -svr4) - os=-sysv4 - ;; - -svr3) - os=-sysv3 - ;; - -sysvr4) - os=-sysv4 - ;; - # This must come after -sysvr4. - -sysv*) - ;; - -ose*) - os=-ose - ;; - -es1800*) - os=-ose - ;; - -xenix) - os=-xenix - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - os=-mint - ;; - -aros*) - os=-aros - ;; - -zvmoe) - os=-zvmoe - ;; - -dicos*) - os=-dicos - ;; - -nacl*) - ;; - -ios) - ;; - -none) - ;; - *) - # Get rid of the `-' at the beginning of $os. - os=`echo $os | sed 's/[^-]*-//'` - echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 - exit 1 - ;; -esac -else - -# Here we handle the default operating systems that come with various machines. 
-# The value should be what the vendor currently ships out the door with their -# machine or put another way, the most popular os provided with the machine. - -# Note that if you're going to try to match "-MANUFACTURER" here (say, -# "-sun"), then you have to tell the case statement up towards the top -# that MANUFACTURER isn't an operating system. Otherwise, code above -# will signal an error saying that MANUFACTURER isn't an operating -# system, and we'll never get to this point. - -case $basic_machine in - score-*) - os=-elf - ;; - spu-*) - os=-elf - ;; - *-acorn) - os=-riscix1.2 - ;; - arm*-rebel) - os=-linux - ;; - arm*-semi) - os=-aout - ;; - c4x-* | tic4x-*) - os=-coff - ;; - c8051-*) - os=-elf - ;; - hexagon-*) - os=-elf - ;; - tic54x-*) - os=-coff - ;; - tic55x-*) - os=-coff - ;; - tic6x-*) - os=-coff - ;; - # This must come before the *-dec entry. - pdp10-*) - os=-tops20 - ;; - pdp11-*) - os=-none - ;; - *-dec | vax-*) - os=-ultrix4.2 - ;; - m68*-apollo) - os=-domain - ;; - i386-sun) - os=-sunos4.0.2 - ;; - m68000-sun) - os=-sunos3 - ;; - m68*-cisco) - os=-aout - ;; - mep-*) - os=-elf - ;; - mips*-cisco) - os=-elf - ;; - mips*-*) - os=-elf - ;; - or32-*) - os=-coff - ;; - *-tti) # must be before sparc entry or we get the wrong os. 
- os=-sysv3 - ;; - sparc-* | *-sun) - os=-sunos4.1.1 - ;; - pru-*) - os=-elf - ;; - *-be) - os=-beos - ;; - *-haiku) - os=-haiku - ;; - *-ibm) - os=-aix - ;; - *-knuth) - os=-mmixware - ;; - *-wec) - os=-proelf - ;; - *-winbond) - os=-proelf - ;; - *-oki) - os=-proelf - ;; - *-hp) - os=-hpux - ;; - *-hitachi) - os=-hiux - ;; - i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) - os=-sysv - ;; - *-cbm) - os=-amigaos - ;; - *-dg) - os=-dgux - ;; - *-dolphin) - os=-sysv3 - ;; - m68k-ccur) - os=-rtu - ;; - m88k-omron*) - os=-luna - ;; - *-next ) - os=-nextstep - ;; - *-sequent) - os=-ptx - ;; - *-crds) - os=-unos - ;; - *-ns) - os=-genix - ;; - i370-*) - os=-mvs - ;; - *-next) - os=-nextstep3 - ;; - *-gould) - os=-sysv - ;; - *-highlevel) - os=-bsd - ;; - *-encore) - os=-bsd - ;; - *-sgi) - os=-irix - ;; - *-siemens) - os=-sysv4 - ;; - *-masscomp) - os=-rtu - ;; - f30[01]-fujitsu | f700-fujitsu) - os=-uxpv - ;; - *-rom68k) - os=-coff - ;; - *-*bug) - os=-coff - ;; - *-apple) - os=-macos - ;; - *-atari*) - os=-mint - ;; - *) - os=-none - ;; -esac -fi - -# Here we handle the case where we know the os, and the CPU type, but not the -# manufacturer. We pick the logical manufacturer. 
-vendor=unknown -case $basic_machine in - *-unknown) - case $os in - -riscix*) - vendor=acorn - ;; - -sunos*) - vendor=sun - ;; - -cnk*|-aix*) - vendor=ibm - ;; - -beos*) - vendor=be - ;; - -hpux*) - vendor=hp - ;; - -mpeix*) - vendor=hp - ;; - -hiux*) - vendor=hitachi - ;; - -unos*) - vendor=crds - ;; - -dgux*) - vendor=dg - ;; - -luna*) - vendor=omron - ;; - -genix*) - vendor=ns - ;; - -mvs* | -opened*) - vendor=ibm - ;; - -os400*) - vendor=ibm - ;; - -ptx*) - vendor=sequent - ;; - -tpf*) - vendor=ibm - ;; - -vxsim* | -vxworks* | -windiss*) - vendor=wrs - ;; - -aux*) - vendor=apple - ;; - -hms*) - vendor=hitachi - ;; - -mpw* | -macos*) - vendor=apple - ;; - -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) - vendor=atari - ;; - -vos*) - vendor=stratus - ;; - esac - basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` - ;; -esac - -echo $basic_machine$os -exit - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "timestamp='" -# time-stamp-format: "%:y-%02m-%02d" -# time-stamp-end: "'" -# End: diff --git a/src/libbacktrace/configure b/src/libbacktrace/configure deleted file mode 100755 index 8bdb29d25606..000000000000 --- a/src/libbacktrace/configure +++ /dev/null @@ -1,15199 +0,0 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.64 for package-unused version-unused. -# -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software -# Foundation, Inc. -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. 
## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. 
-if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. 
- alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1 - - test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( - ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' - ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO - ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO - PATH=/empty FPATH=/empty; export PATH FPATH - test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ - || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for 
as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - # We cannot yet assume a decent shell, so we have to provide a - # neutralization value for shells without unset; and this also - # works around shells that cannot unset nonexistent variables. - BASH_ENV=/dev/null - ENV=/dev/null - (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV - export CONFIG_SHELL - exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. 
-CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. 
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error ERROR [LINENO LOG_FD] -# --------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with status $?, using 1 if that was 0. -as_fn_error () -{ - as_status=$?; test $as_status -eq 0 && as_status=1 - if test "$3"; then - as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 - fi - $as_echo "$as_me: error: $1" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. 
-as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - -SHELL=${CONFIG_SHELL-/bin/sh} - - -exec 7<&0 &1 - -# Name of the host. -# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='package-unused' -PACKAGE_TARNAME='libbacktrace' -PACKAGE_VERSION='version-unused' -PACKAGE_STRING='package-unused version-unused' -PACKAGE_BUGREPORT='' -PACKAGE_URL='' - -ac_unique_file="backtrace.h" -# Factoring default headers for most tests. 
-ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef STDC_HEADERS -# include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif -# include -#endif -#ifdef HAVE_STRINGS_H -# include -#endif -#ifdef HAVE_INTTYPES_H -# include -#endif -#ifdef HAVE_STDINT_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif" - -ac_subst_vars='am__EXEEXT_FALSE -am__EXEEXT_TRUE -LTLIBOBJS -LIBOBJS -NATIVE_FALSE -NATIVE_TRUE -BACKTRACE_USES_MALLOC -ALLOC_FILE -VIEW_FILE -BACKTRACE_SUPPORTS_DATA -BACKTRACE_SUPPORTED -FORMAT_FILE -BACKTRACE_SUPPORTS_THREADS -PIC_FLAG -WARN_FLAGS -EXTRA_FLAGS -BACKTRACE_FILE -OTOOL64 -OTOOL -LIPO -NMEDIT -DSYMUTIL -AR -OBJDUMP -LN_S -NM -ac_ct_DUMPBIN -DUMPBIN -LD -FGREP -SED -LIBTOOL -RANLIB -MAINT -MAINTAINER_MODE_FALSE -MAINTAINER_MODE_TRUE -am__untar -am__tar -AMTAR -am__leading_dot -SET_MAKE -AWK -mkdir_p -MKDIR_P -INSTALL_STRIP_PROGRAM -STRIP -install_sh -MAKEINFO -AUTOHEADER -AUTOMAKE -AUTOCONF -ACLOCAL -VERSION -PACKAGE -CYGPATH_W -am__isrc -INSTALL_DATA -INSTALL_SCRIPT -INSTALL_PROGRAM -libtool_VERSION -EGREP -GREP -CPP -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -target_os -target_vendor -target_cpu -target -host_os -host_vendor -host_cpu -host -build_os -build_vendor -build_cpu -build -multi_basedir -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -enable_multilib -enable_maintainer_mode 
-with_target_subdir -enable_shared -enable_static -with_pic -enable_fast_install -with_gnu_ld -enable_libtool_lock -with_system_libunwind -enable_host_shared -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -CPP' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. 
- - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. 
- expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. 
- with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - 
-mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. - with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | 
--program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. 
- with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information." - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. - $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error "missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. 
-for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. - If a cross compiler is detected then cross compile mode will be used." >&2 - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. 
- ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures package-unused version-unused to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. 
- -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/libbacktrace] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF - -Program names: - --program-prefix=PREFIX prepend PREFIX to installed program names - --program-suffix=SUFFIX append SUFFIX to installed program names - --program-transform-name=PROGRAM run sed PROGRAM on installed program names - -System types: - --build=BUILD configure for building on BUILD [guessed] - --host=HOST cross-compile to build programs to run on HOST [BUILD] - --target=TARGET configure for building compilers for TARGET [HOST] -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of package-unused version-unused:";; - esac - cat <<\_ACEOF - -Optional Features: - --disable-option-checking ignore unrecognized --enable/--with options - --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) - --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - 
--enable-multilib build many library versions (default) - --enable-maintainer-mode enable make rules and dependencies not useful - (and sometimes confusing) to the casual installer - --enable-shared[=PKGS] build shared libraries [default=yes] - --enable-static[=PKGS] build static libraries [default=yes] - --enable-fast-install[=PKGS] - optimize for fast installation [default=yes] - --disable-libtool-lock avoid locking (might break parallel builds) - --enable-host-shared build host code as shared libraries - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-target-subdir=SUBDIR Configuring in a subdirectory for target - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] - --with-system-libunwind use installed libunwind - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I if - you have headers in a nonstandard directory - CPP C preprocessor - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. 
- ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -package-unused configure version-unused -generated by GNU Autoconf 2.64 - -Copyright (C) 2009 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - return $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! 
-s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - return $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... 
" >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - return $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_header_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - $as_test_x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - return $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -/* Define $2 to an innocuous variant, in case declares $2. - For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_func - -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -if (sizeof ($2)) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof (($2))) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - eval "$3=yes" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_type - -# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES -# -------------------------------------------- -# Tries to find the compile-time value of EXPR in a program that includes -# INCLUDES, setting VAR accordingly. Returns whether the value could be -# computed -ac_fn_c_compute_int () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if test "$cross_compiling" = yes; then - # Depending upon the size, compute the lo and hi bounds. -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) >= 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=0 ac_mid=0 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid; break -else - as_fn_arith $ac_mid + 1 && ac_lo=$as_val - if test $ac_lo -le $ac_mid; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) < 0)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=-1 ac_mid=-1 - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) >= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_lo=$ac_mid; break -else - as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val - if test $ac_mid -le $ac_hi; then - ac_lo= ac_hi= - break - fi - as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done -else - ac_lo= ac_hi= -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -# Binary search between lo and hi bounds. -while test "x$ac_lo" != "x$ac_hi"; do - as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -static int test_array [1 - 2 * !(($2) <= $ac_mid)]; -test_array [0] = 0 - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_hi=$ac_mid -else - as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -done -case $ac_lo in #(( -?*) eval "$3=\$ac_lo"; ac_retval=0 ;; -'') ac_retval=1 ;; -esac - else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -static long int longval () { return $2; } -static unsigned long int ulongval () { return $2; } -#include -#include -int -main () -{ - - FILE *f = fopen ("conftest.val", "w"); - if (! 
f) - return 1; - if (($2) < 0) - { - long int i = longval (); - if (i != ($2)) - return 1; - fprintf (f, "%ld", i); - } - else - { - unsigned long int i = ulongval (); - if (i != ($2)) - return 1; - fprintf (f, "%lu", i); - } - /* Do not output a trailing newline, as this causes \r\n confusion - on some platforms. */ - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - echo >>conftest.val; read $3 &5 -$as_echo_n "checking whether $as_decl_name is declared... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -#ifndef $as_decl_name -#ifdef __cplusplus - (void) $as_decl_use; -#else - (void) $as_decl_name; -#endif -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_decl -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by package-unused $as_me version-unused, which was -generated by GNU Autoconf 2.64. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. 
-ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - cat <<\_ASBOX -## ---------------- ## -## Cache variables. 
## -## ---------------- ## -_ASBOX - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - cat <<\_ASBOX -## ----------------- ## -## Output variables. ## -## ----------------- ## -_ASBOX - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - cat <<\_ASBOX -## ------------------- ## -## File substitutions. ## -## ------------------- ## -_ASBOX - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - cat <<\_ASBOX -## ----------- ## -## confdefs.h. 
## -## ----------- ## -_ASBOX - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - ac_site_file1=$CONFIG_SITE -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . 
"$ac_site_file" - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special - # files actually), so we avoid doing that. - if test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. 
- ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. - if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. 
## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - - - - -ac_config_headers="$ac_config_headers config.h" - - -if test -n "${with_target_subdir}"; then - # Default to --enable-multilib -# Check whether --enable-multilib was given. -if test "${enable_multilib+set}" = set; then : - enableval=$enable_multilib; case "$enableval" in - yes) multilib=yes ;; - no) multilib=no ;; - *) as_fn_error "bad value $enableval for multilib option" "$LINENO" 5 ;; - esac -else - multilib=yes -fi - - -# We may get other options which we leave undocumented: -# --with-target-subdir, --with-multisrctop, --with-multisubdir -# See config-ml.in if you want the gory details. - -if test "$srcdir" = "."; then - if test "$with_target_subdir" != "."; then - multi_basedir="$srcdir/$with_multisrctop../.." - else - multi_basedir="$srcdir/$with_multisrctop.." - fi -else - multi_basedir="$srcdir/.." -fi - - -# Even if the default multilib is not a cross compilation, -# it may be that some of the other multilibs are. -if test $cross_compiling = no && test $multilib = yes \ - && test "x${with_multisubdir}" != x ; then - cross_compiling=maybe -fi - -ac_config_commands="$ac_config_commands default-1" - -fi - -ac_aux_dir= -for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do - for ac_t in install-sh install.sh shtool; do - if test -f "$ac_dir/$ac_t"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/$ac_t -c" - break 2 - fi - done -done -if test -z "$ac_aux_dir"; then - as_fn_error "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. 
-# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. - - -# Make sure we can run config.sub. -$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 -$as_echo_n "checking build system type... " >&6; } -if test "${ac_cv_build+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_build_alias=$build_alias -test "x$ac_build_alias" = x && - ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` -test "x$ac_build_alias" = x && - as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5 -ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 -$as_echo "$ac_cv_build" >&6; } -case $ac_cv_build in -*-*-*) ;; -*) as_fn_error "invalid value of canonical build" "$LINENO" 5;; -esac -build=$ac_cv_build -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_build -shift -build_cpu=$1 -build_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -build_os=$* -IFS=$ac_save_IFS -case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 -$as_echo_n "checking host system type... 
" >&6; } -if test "${ac_cv_host+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "x$host_alias" = x; then - ac_cv_host=$ac_cv_build -else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 -$as_echo "$ac_cv_host" >&6; } -case $ac_cv_host in -*-*-*) ;; -*) as_fn_error "invalid value of canonical host" "$LINENO" 5;; -esac -host=$ac_cv_host -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_host -shift -host_cpu=$1 -host_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -host_os=$* -IFS=$ac_save_IFS -case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 -$as_echo_n "checking target system type... " >&6; } -if test "${ac_cv_target+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "x$target_alias" = x; then - ac_cv_target=$ac_cv_host -else - ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` || - as_fn_error "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5 -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 -$as_echo "$ac_cv_target" >&6; } -case $ac_cv_target in -*-*-*) ;; -*) as_fn_error "invalid value of canonical target" "$LINENO" 5;; -esac -target=$ac_cv_target -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_target -shift -target_cpu=$1 -target_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -target_os=$* -IFS=$ac_save_IFS -case $target_os in *\ *) target_os=`echo "$target_os" | sed 's/ /-/g'`;; esac - - -# The aliases save the names the user supplied, while $host etc. -# will get canonicalized. 
-test -n "$target_alias" && - test "$program_prefix$program_suffix$program_transform_name" = \ - NONENONEs,x,x, && - program_prefix=${target_alias}- - -target_alias=${target_alias-$host_alias} - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "no acceptable C compiler found in \$PATH -See \`config.log' for more details." "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... 
- 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - rm -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out conftest.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. 
-for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -if test -z "$ac_file"; then : - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "C compiler cannot create executables -See \`config.log' for more details." "$LINENO" 5; }; } -fi -ac_exeext=$ac_cv_exeext - -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -# If not cross compiling, check that we can run a simple program. -if test "$cross_compiling" != yes; then - if { ac_try='./$ac_file' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? 
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details." "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out conftest.out -ac_clean_files=$ac_clean_files_save -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. 
-for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details." "$LINENO" 5; } -fi -rm -f conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if test "${ac_cv_objext+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot compute suffix of object files: cannot compile -See \`config.log' for more details." "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if test "${ac_cv_c_compiler_gnu+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... 
" >&6; } -if test "${ac_cv_prog_cc_g+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if test "${ac_cv_prog_cc_c89+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. 
-if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if test "${ac_cv_prog_CPP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. 
"Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details." "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if test "${ac_cv_path_GREP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue -# Check for GNU ac_path_GREP and select it if it is found. - # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... 
" >&6; } -if test "${ac_cv_path_EGREP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue -# Check for GNU ac_path_EGREP and select it if it is found. - # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - 
EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if test "${ac_cv_header_stdc+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -eval as_val=\$$as_ac_Header - if test "x$as_val" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - - - ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default" -if test "x$ac_cv_header_minix_config_h" = x""yes; then : - MINIX=yes -else - MINIX= -fi - - - if test "$MINIX" = yes; then - -$as_echo "#define _POSIX_SOURCE 1" >>confdefs.h - - -$as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h - - -$as_echo "#define _MINIX 1" >>confdefs.h - - fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5 -$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; } -if test "${ac_cv_safe_to_define___extensions__+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -# define __EXTENSIONS__ 1 - $ac_includes_default -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_safe_to_define___extensions__=yes -else - ac_cv_safe_to_define___extensions__=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5 -$as_echo "$ac_cv_safe_to_define___extensions__" >&6; } - test $ac_cv_safe_to_define___extensions__ = yes && - $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h - - $as_echo "#define _ALL_SOURCE 1" >>confdefs.h - - $as_echo "#define _GNU_SOURCE 1" >>confdefs.h - - $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h - - $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h - - - -libtool_VERSION=1:0:0 - - -# 1.11.1: Require that version of automake. -# foreign: Don't require README, INSTALL, NEWS, etc. -# no-define: Don't define PACKAGE and VERSION. -# no-dependencies: Don't generate automatic dependencies. -# (because it breaks when using bootstrap-lean, since some of the -# headers are gone at "make install" time). -# -Wall: Issue all automake warnings. -# -Wno-portability: Don't warn about constructs supported by GNU make. -# (because GCC requires GNU make anyhow). -am__api_version='1.11' - -# Find a good install program. We prefer a C program (faster), -# so one script is as good as another. But avoid the broken or -# incompatible versions: -# SysV /etc/install, /usr/sbin/install -# SunOS /usr/etc/install -# IRIX /sbin/install -# AIX /bin/install -# AmigaOS /C/install, which installs bootblocks on floppy discs -# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag -# AFS /usr/afsws/bin/install, which mishandles nonexistent args -# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" -# OS/2's system install, which has a completely different semantic -# ./install, which can be erroneously created by make from ./install.sh. 
-# Reject install programs that cannot install multiple files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 -$as_echo_n "checking for a BSD-compatible install... " >&6; } -if test -z "$INSTALL"; then -if test "${ac_cv_path_install+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in #(( - ./ | .// | /[cC]/* | \ - /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ - ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ - /usr/ucb/* ) ;; - *) - # OSF1 and SCO ODT 3.0 have their own names for install. - # Don't use installbsd from OSF since it installs stuff as root - # by default. - for ac_prog in ginstall scoinst install; do - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then - if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # AIX install. It has an incompatible calling convention. - : - elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # program-specific install script used by HP pwplus--don't use. 
- : - else - rm -rf conftest.one conftest.two conftest.dir - echo one > conftest.one - echo two > conftest.two - mkdir conftest.dir - if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && - test -s conftest.one && test -s conftest.two && - test -s conftest.dir/conftest.one && - test -s conftest.dir/conftest.two - then - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" - break 3 - fi - fi - fi - done - done - ;; -esac - - done -IFS=$as_save_IFS - -rm -rf conftest.one conftest.two conftest.dir - -fi - if test "${ac_cv_path_install+set}" = set; then - INSTALL=$ac_cv_path_install - else - # As a last resort, use the slow shell script. Don't cache a - # value for INSTALL within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. - INSTALL=$ac_install_sh - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 -$as_echo "$INSTALL" >&6; } - -# Use test -z because SunOS4 sh mishandles braces in ${var-val}. -# It thinks the first close brace ends the variable substitution. -test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' - -test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' - -test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 -$as_echo_n "checking whether build environment is sane... " >&6; } -# Just in case -sleep 1 -echo timestamp > conftest.file -# Reject unsafe characters in $srcdir or the absolute working directory -# name. Accept space and tab only in the latter. -am_lf=' -' -case `pwd` in - *[\\\"\#\$\&\'\`$am_lf]*) - as_fn_error "unsafe absolute working directory name" "$LINENO" 5;; -esac -case $srcdir in - *[\\\"\#\$\&\'\`$am_lf\ \ ]*) - as_fn_error "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;; -esac - -# Do `set' in a subshell so we don't clobber the current shell's -# arguments. 
Must try -L first in case configure is actually a -# symlink; some systems play weird games with the mod time of symlinks -# (eg FreeBSD returns the mod time of the symlink's containing -# directory). -if ( - set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` - if test "$*" = "X"; then - # -L didn't work. - set X `ls -t "$srcdir/configure" conftest.file` - fi - rm -f conftest.file - if test "$*" != "X $srcdir/configure conftest.file" \ - && test "$*" != "X conftest.file $srcdir/configure"; then - - # If neither matched, then we have a broken ls. This can happen - # if, for instance, CONFIG_SHELL is bash and it inherits a - # broken ls alias from the environment. This has actually - # happened. Such a system could not be considered "sane". - as_fn_error "ls -t appears to fail. Make sure there is not a broken -alias in your environment" "$LINENO" 5 - fi - - test "$2" = conftest.file - ) -then - # Ok. - : -else - as_fn_error "newly created file is older than distributed files! -Check your system clock" "$LINENO" 5 -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -test "$program_prefix" != NONE && - program_transform_name="s&^&$program_prefix&;$program_transform_name" -# Use a double $ so make ignores it. -test "$program_suffix" != NONE && - program_transform_name="s&\$&$program_suffix&;$program_transform_name" -# Double any \ or $. -# By default was `s,x,x', remove it if useless. 
-ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' -program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` - -# expand $ac_aux_dir to an absolute path -am_aux_dir=`cd $ac_aux_dir && pwd` - -if test x"${MISSING+set}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; - *) - MISSING="\${SHELL} $am_aux_dir/missing" ;; - esac -fi -# Use eval to expand $SHELL -if eval "$MISSING --run true"; then - am_missing_run="$MISSING --run " -else - am_missing_run= - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5 -$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} -fi - -if test x"${install_sh}" != xset; then - case $am_aux_dir in - *\ * | *\ *) - install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; - *) - install_sh="\${SHELL} $am_aux_dir/install-sh" - esac -fi - -# Installed binaries are usually stripped using `strip' when the user -# run `make install-strip'. However `strip' might not be the right -# tool to use in cross-compilation environments, therefore Automake -# will honor the `STRIP' environment variable to overrule this program. -if test "$cross_compiling" != no; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. -set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_STRIP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$STRIP"; then - ac_cv_prog_STRIP="$STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_STRIP="${ac_tool_prefix}strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -STRIP=$ac_cv_prog_STRIP -if test -n "$STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 -$as_echo "$STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_STRIP"; then - ac_ct_STRIP=$STRIP - # Extract the first word of "strip", so it can be a program name with args. -set dummy strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_STRIP"; then - ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_STRIP="strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP -if test -n "$ac_ct_STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 -$as_echo "$ac_ct_STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_STRIP" = x; then - STRIP=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - STRIP=$ac_ct_STRIP - fi -else - STRIP="$ac_cv_prog_STRIP" -fi - -fi -INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 -$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } -if test -z "$MKDIR_P"; then - if test "${ac_cv_path_mkdir+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in mkdir gmkdir; do - for ac_exec_ext in '' $ac_executable_extensions; do - { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue - case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( - 'mkdir (GNU coreutils) '* | \ - 'mkdir (coreutils) '* | \ - 'mkdir (fileutils) '4.1*) - ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext - break 3;; - esac - done - done - done -IFS=$as_save_IFS - -fi - - if test "${ac_cv_path_mkdir+set}" = set; then - MKDIR_P="$ac_cv_path_mkdir -p" - else - # As a last resort, use the slow shell script. Don't cache a - # value for MKDIR_P within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. - test -d ./--version && rmdir ./--version - MKDIR_P="$ac_install_sh -d" - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 -$as_echo "$MKDIR_P" >&6; } - -mkdir_p="$MKDIR_P" -case $mkdir_p in - [\\/$]* | ?:[\\/]*) ;; - */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; -esac - -for ac_prog in gawk mawk nawk awk -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_AWK+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AWK"; then - ac_cv_prog_AWK="$AWK" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_AWK="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -AWK=$ac_cv_prog_AWK -if test -n "$AWK"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 -$as_echo "$AWK" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$AWK" && break -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 -$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } -set x ${MAKE-make} -ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` -if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat >conftest.make <<\_ACEOF -SHELL = /bin/sh -all: - @echo '@@@%%%=$(MAKE)=@@@%%%' -_ACEOF -# GNU make sometimes prints "make[1]: Entering...", which would confuse us. -case `${MAKE-make} -f conftest.make 2>/dev/null` in - *@@@%%%=?*=@@@%%%*) - eval ac_cv_prog_make_${ac_make}_set=yes;; - *) - eval ac_cv_prog_make_${ac_make}_set=no;; -esac -rm -f conftest.make -fi -if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - SET_MAKE= -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - SET_MAKE="MAKE=${MAKE-make}" -fi - -rm -rf .tst 2>/dev/null -mkdir .tst 2>/dev/null -if test -d .tst; then - am__leading_dot=. -else - am__leading_dot=_ -fi -rmdir .tst 2>/dev/null - -if test "`cd $srcdir && pwd`" != "`pwd`"; then - # Use -I$(srcdir) only when $(srcdir) != ., so that make's output - # is not polluted with repeated "-I." 
- am__isrc=' -I$(srcdir)' - # test to see if srcdir already configured - if test -f $srcdir/config.status; then - as_fn_error "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 - fi -fi - -# test whether we have cygpath -if test -z "$CYGPATH_W"; then - if (cygpath --version) >/dev/null 2>/dev/null; then - CYGPATH_W='cygpath -w' - else - CYGPATH_W=echo - fi -fi - - -# Define the identity of the package. - PACKAGE='libbacktrace' - VERSION='version-unused' - - -# Some tools Automake needs. - -ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} - - -AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} - - -AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} - - -AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} - - -MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} - -# We need awk for the "check" target. The system "awk" is bad on -# some platforms. -# Always define AMTAR for backward compatibility. Yes, it's still used -# in the wild :-( We should find a proper way to deprecate it ... -AMTAR='$${TAR-tar}' - -am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 -$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } - # Check whether --enable-maintainer-mode was given. -if test "${enable_maintainer_mode+set}" = set; then : - enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval -else - USE_MAINTAINER_MODE=no -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 -$as_echo "$USE_MAINTAINER_MODE" >&6; } - if test $USE_MAINTAINER_MODE = yes; then - MAINTAINER_MODE_TRUE= - MAINTAINER_MODE_FALSE='#' -else - MAINTAINER_MODE_TRUE='#' - MAINTAINER_MODE_FALSE= -fi - - MAINT=$MAINTAINER_MODE_TRUE - - - - -# Check whether --with-target-subdir was given. 
-if test "${with_target_subdir+set}" = set; then : - withval=$with_target_subdir; -fi - - -# We must force CC to /not/ be precious variables; otherwise -# the wrong, non-multilib-adjusted value will be used in multilibs. -# As a side effect, we have to subst CFLAGS ourselves. - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. 
- shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "no acceptable C compiler found in \$PATH -See \`config.log' for more details." "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... 
- 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - rm -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if test "${ac_cv_c_compiler_gnu+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if test "${ac_cv_prog_cc_g+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if test "${ac_cv_prog_cc_c89+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 
1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if test "${ac_cv_prog_RANLIB+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB - fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - - -for ac_prog in gawk mawk nawk awk -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_AWK+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AWK"; then - ac_cv_prog_AWK="$AWK" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_AWK="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -AWK=$ac_cv_prog_AWK -if test -n "$AWK"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 -$as_echo "$AWK" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$AWK" && break -done - -case "$AWK" in -"") as_fn_error "can't build without awk" "$LINENO" 5 ;; -esac - -case `pwd` in - *\ * | *\ *) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 -$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; -esac - - - -macro_version='2.2.7a' -macro_revision='1.3134' - - - - - - - - - - - - - -ltmain="$ac_aux_dir/ltmain.sh" - -# Backslashify metacharacters that are still active within -# double-quoted strings. -sed_quote_subst='s/\(["`$\\]\)/\\\1/g' - -# Same as above, but do not quote variable references. -double_quote_subst='s/\(["`\\]\)/\\\1/g' - -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. -delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' - -# Sed substitution to delay expansion of an escaped single quote. -delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' - -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' - -ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 -$as_echo_n "checking how to print strings... " >&6; } -# Test print first, because it will be a builtin if present. 
-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' -elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='printf %s\n' -else - # Use this function as a fallback that always works. - func_fallback_echo () - { - eval 'cat <<_LTECHO_EOF -$1 -_LTECHO_EOF' - } - ECHO='func_fallback_echo' -fi - -# func_echo_all arg... -# Invoke $ECHO with all args, space-separated. -func_echo_all () -{ - $ECHO "" -} - -case "$ECHO" in - printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 -$as_echo "printf" >&6; } ;; - print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 -$as_echo "print -r" >&6; } ;; - *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 -$as_echo "cat" >&6; } ;; -esac - - - - - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 -$as_echo_n "checking for a sed that does not truncate output... " >&6; } -if test "${ac_cv_path_SED+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ - for ac_i in 1 2 3 4 5 6 7; do - ac_script="$ac_script$as_nl$ac_script" - done - echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed - { ac_script=; unset ac_script;} - if test -z "$SED"; then - ac_path_SED_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in sed gsed; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue -# Check for GNU ac_path_SED and select it if it is found. 
- # Check for GNU $ac_path_SED -case `"$ac_path_SED" --version 2>&1` in -*GNU*) - ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo '' >> "conftest.nl" - "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_SED_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_SED="$ac_path_SED" - ac_path_SED_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_SED_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_SED"; then - as_fn_error "no acceptable sed could be found in \$PATH" "$LINENO" 5 - fi -else - ac_cv_path_SED=$SED -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 -$as_echo "$ac_cv_path_SED" >&6; } - SED="$ac_cv_path_SED" - rm -f conftest.sed - -test -z "$SED" && SED=sed -Xsed="$SED -e 1s/^X//" - - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 -$as_echo_n "checking for fgrep... " >&6; } -if test "${ac_cv_path_FGREP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 - then ac_cv_path_FGREP="$GREP -F" - else - if test -z "$FGREP"; then - ac_path_FGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_prog in fgrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue -# Check for GNU ac_path_FGREP and select it if it is found. - # Check for GNU $ac_path_FGREP -case `"$ac_path_FGREP" --version 2>&1` in -*GNU*) - ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'FGREP' >> "conftest.nl" - "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_FGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_FGREP="$ac_path_FGREP" - ac_path_FGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_FGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_FGREP"; then - as_fn_error "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_FGREP=$FGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 -$as_echo "$ac_cv_path_FGREP" >&6; } - FGREP="$ac_cv_path_FGREP" - - -test -z "$GREP" && GREP=grep - - - - - - - - - - - - - - - - - - - -# Check whether --with-gnu-ld was given. -if test "${with_gnu_ld+set}" = set; then : - withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes -else - with_gnu_ld=no -fi - -ac_prog=ld -if test "$GCC" = yes; then - # Check if gcc -print-prog-name=ld gives a path. 
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 -$as_echo_n "checking for ld used by $CC... " >&6; } - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. - [\\/]* | ?:[\\/]*) - re_direlt='/[^/][^/]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` - while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do - ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD="$ac_prog" - ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test "$with_gnu_ld" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 -$as_echo_n "checking for GNU ld... " >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 -$as_echo_n "checking for non-GNU ld... " >&6; } -fi -if test "${lt_cv_path_LD+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$LD"; then - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD="$ac_dir/$ac_prog" - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. 
- case `"$lt_cv_path_LD" -v 2>&1 &5 -$as_echo "$LD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -test -z "$LD" && as_fn_error "no acceptable ld found in \$PATH" "$LINENO" 5 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 -$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } -if test "${lt_cv_prog_gnu_ld+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - # I'd rather use --version here, but apparently some GNU lds only accept -v. -case `$LD -v 2>&1 &5 -$as_echo "$lt_cv_prog_gnu_ld" >&6; } -with_gnu_ld=$lt_cv_prog_gnu_ld - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 -$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } -if test "${lt_cv_path_NM+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$NM"; then - # Let the user override the test. - lt_cv_path_NM="$NM" -else - lt_nm_to_check="${ac_tool_prefix}nm" - if test -n "$ac_tool_prefix" && test "$build" = "$host"; then - lt_nm_to_check="$lt_nm_to_check nm" - fi - for lt_tmp_nm in $lt_nm_to_check; do - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - tmp_nm="$ac_dir/$lt_tmp_nm" - if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then - # Check to see if the nm accepts a BSD-compat flag. 
- # Adding the `sed 1q' prevents false positives on HP-UX, which says: - # nm: unknown option "B" ignored - # Tru64's nm complains that /dev/null is an invalid object file - case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in - */dev/null* | *'Invalid file or object type'*) - lt_cv_path_NM="$tmp_nm -B" - break - ;; - *) - case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in - */dev/null*) - lt_cv_path_NM="$tmp_nm -p" - break - ;; - *) - lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but - continue # so that we can try to find one that supports BSD flags - ;; - esac - ;; - esac - fi - done - IFS="$lt_save_ifs" - done - : ${lt_cv_path_NM=no} -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 -$as_echo "$lt_cv_path_NM" >&6; } -if test "$lt_cv_path_NM" != "no"; then - NM="$lt_cv_path_NM" -else - # Didn't find any BSD compatible name lister, look for dumpbin. - if test -n "$DUMPBIN"; then : - # Let the user override the test. - else - if test -n "$ac_tool_prefix"; then - for ac_prog in dumpbin "link -dump" - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_DUMPBIN+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DUMPBIN"; then - ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -DUMPBIN=$ac_cv_prog_DUMPBIN -if test -n "$DUMPBIN"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 -$as_echo "$DUMPBIN" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$DUMPBIN" && break - done -fi -if test -z "$DUMPBIN"; then - ac_ct_DUMPBIN=$DUMPBIN - for ac_prog in dumpbin "link -dump" -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_DUMPBIN+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DUMPBIN"; then - ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN -if test -n "$ac_ct_DUMPBIN"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 -$as_echo "$ac_ct_DUMPBIN" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_DUMPBIN" && break -done - - if test "x$ac_ct_DUMPBIN" = x; then - DUMPBIN=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DUMPBIN=$ac_ct_DUMPBIN - fi -fi - - case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in - *COFF*) - DUMPBIN="$DUMPBIN -symbols" - ;; - *) - DUMPBIN=: - ;; - esac - fi - - if test "$DUMPBIN" != ":"; then - NM="$DUMPBIN" - fi -fi -test -z "$NM" && NM=nm - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 -$as_echo_n "checking the name lister ($NM) interface... 
" >&6; } -if test "${lt_cv_nm_interface+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_nm_interface="BSD nm" - echo "int some_variable = 0;" > conftest.$ac_ext - (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) - (eval "$ac_compile" 2>conftest.err) - cat conftest.err >&5 - (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) - (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) - cat conftest.err >&5 - (eval echo "\"\$as_me:$LINENO: output\"" >&5) - cat conftest.out >&5 - if $GREP 'External.*some_variable' conftest.out > /dev/null; then - lt_cv_nm_interface="MS dumpbin" - fi - rm -f conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 -$as_echo "$lt_cv_nm_interface" >&6; } - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 -$as_echo_n "checking whether ln -s works... " >&6; } -LN_S=$as_ln_s -if test "$LN_S" = "ln -s"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 -$as_echo "no, using $LN_S" >&6; } -fi - -# find the maximum length of command line arguments -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 -$as_echo_n "checking the maximum length of command line arguments... " >&6; } -if test "${lt_cv_sys_max_cmd_len+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - i=0 - teststring="ABCD" - - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. 
- # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - - cygwin* | mingw* | cegcc*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. - # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; - - mint*) - # On MiNT this can take a long time and run out of memory. - lt_cv_sys_max_cmd_len=8192; - ;; - - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - - netbsd* | freebsd* | openbsd* | darwin* | dragonfly* | bitrig*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; - - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. 
- lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - # Make teststring a little bigger before we do anything with it. - # a 1K string should be a reasonable start. - for i in 1 2 3 4 5 6 7 8 ; do - teststring=$teststring$teststring - done - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - # If test is not a shell built-in, we'll probably end up computing a - # maximum length that is only half of the actual maximum length, but - # we can't tell. - while { test "X"`func_fallback_echo "$teststring$teststring" 2>/dev/null` \ - = "X$teststring$teststring"; } >/dev/null 2>&1 && - test $i != 17 # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - # Only check the string length outside the loop. - lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` - teststring= - # Add a significant safety factor because C++ compilers can tack on - # massive amounts of additional arguments before passing them to the - # linker. It appears as though 1/2 is a usable value. 
- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi - ;; - esac - -fi - -if test -n $lt_cv_sys_max_cmd_len ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 -$as_echo "$lt_cv_sys_max_cmd_len" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 -$as_echo "none" >&6; } -fi -max_cmd_len=$lt_cv_sys_max_cmd_len - - - - - - -: ${CP="cp -f"} -: ${MV="mv -f"} -: ${RM="rm -f"} - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 -$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } -# Try some XSI features -xsi_shell=no -( _lt_dummy="a/b/c" - test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ - = c,a/b,, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 -$as_echo "$xsi_shell" >&6; } - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 -$as_echo_n "checking whether the shell understands \"+=\"... " >&6; } -lt_shell_append=no -( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ - >/dev/null 2>&1 \ - && lt_shell_append=yes -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 -$as_echo "$lt_shell_append" >&6; } - - -if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then - lt_unset=unset -else - lt_unset=false -fi - - - - - -# test EBCDIC or ASCII -case `echo X|tr X '\101'` in - A) # ASCII based system - # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr - lt_SP2NL='tr \040 \012' - lt_NL2SP='tr \015\012 \040\040' - ;; - *) # EBCDIC based system - lt_SP2NL='tr \100 \n' - lt_NL2SP='tr \r\n \100\100' - ;; -esac - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 -$as_echo_n "checking for $LD option to reload object files... 
" >&6; } -if test "${lt_cv_ld_reload_flag+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_reload_flag='-r' -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 -$as_echo "$lt_cv_ld_reload_flag" >&6; } -reload_flag=$lt_cv_ld_reload_flag -case $reload_flag in -"" | " "*) ;; -*) reload_flag=" $reload_flag" ;; -esac -reload_cmds='$LD$reload_flag -o $output$reload_objs' -case $host_os in - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' - else - reload_cmds='$LD$reload_flag -o $output$reload_objs' - fi - ;; -esac - - - - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. -set dummy ${ac_tool_prefix}objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_OBJDUMP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OBJDUMP"; then - ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -OBJDUMP=$ac_cv_prog_OBJDUMP -if test -n "$OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 -$as_echo "$OBJDUMP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OBJDUMP"; then - ac_ct_OBJDUMP=$OBJDUMP - # Extract the first word of "objdump", so it can be a program name with args. 
-set dummy objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_OBJDUMP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OBJDUMP"; then - ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_OBJDUMP="objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP -if test -n "$ac_ct_OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 -$as_echo "$ac_ct_OBJDUMP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OBJDUMP" = x; then - OBJDUMP="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OBJDUMP=$ac_ct_OBJDUMP - fi -else - OBJDUMP="$ac_cv_prog_OBJDUMP" -fi - -test -z "$OBJDUMP" && OBJDUMP=objdump - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 -$as_echo_n "checking how to recognize dependent libraries... " >&6; } -if test "${lt_cv_deplibs_check_method+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_file_magic_cmd='$MAGIC_CMD' -lt_cv_file_magic_test_file= -lt_cv_deplibs_check_method='unknown' -# Need to set the preceding variable on all platforms that support -# interlibrary dependencies. 
-# 'none' -- dependencies not supported. -# `unknown' -- same as none, but documents that we really don't know. -# 'pass_all' -- all dependencies passed with no checks. -# 'test_compile' -- check by making test program. -# 'file_magic [[regex]]' -- check by looking for files in library path -# which responds to the $file_magic_cmd with a given extended regex. -# If you have `file' or equivalent on your system and you're not sure -# whether `pass_all' will *always* work, you probably want this one. - -case $host_os in -aix[4-9]*) - lt_cv_deplibs_check_method=pass_all - ;; - -beos*) - lt_cv_deplibs_check_method=pass_all - ;; - -bsdi[45]*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' - lt_cv_file_magic_cmd='/usr/bin/file -L' - lt_cv_file_magic_test_file=/shlib/libc.so - ;; - -cygwin*) - # func_win32_libid is a shell function defined in ltmain.sh - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - ;; - -mingw* | pw32*) - # Base MSYS/MinGW do not provide the 'file' command needed by - # func_win32_libid shell function, so use a weaker test based on 'objdump', - # unless we find 'file', for example because we are cross-compiling. - # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. - if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else - lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; - -cegcc*) - # use the weaker test based on 'objdump'. See mingw*. - lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
- lt_cv_file_magic_cmd='$OBJDUMP -f' - ;; - -darwin* | rhapsody*) - lt_cv_deplibs_check_method=pass_all - ;; - -freebsd* | dragonfly*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. - lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all - fi - ;; - -gnu*) - lt_cv_deplibs_check_method=pass_all - ;; - -haiku*) - lt_cv_deplibs_check_method=pass_all - ;; - -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; - -interix[3-9]*) - # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' - ;; - -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; - -# This must be Linux ELF. 
-linux* | k*bsd*-gnu | kopensolaris*-gnu) - lt_cv_deplibs_check_method=pass_all - ;; - -netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' - fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; - -*nto* | *qnx*) - lt_cv_deplibs_check_method=pass_all - ;; - -openbsd*) - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - fi - ;; - -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; - -rdos*) - lt_cv_deplibs_check_method=pass_all - ;; - -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv4 | sysv4.3*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` - ;; - ncr) - lt_cv_deplibs_check_method=pass_all - ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' - ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" - lt_cv_file_magic_test_file=/lib/libc.so - ;; - siemens) - lt_cv_deplibs_check_method=pass_all - ;; - pc) - lt_cv_deplibs_check_method=pass_all - ;; - esac - ;; - -tpf*) - lt_cv_deplibs_check_method=pass_all - ;; -esac - -fi -{ 
$as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 -$as_echo "$lt_cv_deplibs_check_method" >&6; } -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - - - - - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. -set dummy ${ac_tool_prefix}ar; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_AR+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AR"; then - ac_cv_prog_AR="$AR" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_AR="${ac_tool_prefix}ar" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -AR=$ac_cv_prog_AR -if test -n "$AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 -$as_echo "$AR" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_AR"; then - ac_ct_AR=$AR - # Extract the first word of "ar", so it can be a program name with args. -set dummy ar; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_AR+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_AR"; then - ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_AR="ar" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_AR=$ac_cv_prog_ac_ct_AR -if test -n "$ac_ct_AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 -$as_echo "$ac_ct_AR" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_AR" = x; then - AR="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - AR=$ac_ct_AR - fi -else - AR="$ac_cv_prog_AR" -fi - -test -z "$AR" && AR=ar -test -z "$AR_FLAGS" && AR_FLAGS=cru - - - - - - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. -set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_STRIP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$STRIP"; then - ac_cv_prog_STRIP="$STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_STRIP="${ac_tool_prefix}strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -STRIP=$ac_cv_prog_STRIP -if test -n "$STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 -$as_echo "$STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_STRIP"; then - ac_ct_STRIP=$STRIP - # Extract the first word of "strip", so it can be a program name with args. -set dummy strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_STRIP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_STRIP"; then - ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_STRIP="strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP -if test -n "$ac_ct_STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 -$as_echo "$ac_ct_STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_STRIP" = x; then - STRIP=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - STRIP=$ac_ct_STRIP - fi -else - STRIP="$ac_cv_prog_STRIP" -fi - -test -z "$STRIP" && STRIP=: - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_RANLIB+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_RANLIB+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB - fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - -test -z "$RANLIB" && RANLIB=: - - - - - - -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" - ;; - *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" -fi - -case $host_os in - darwin*) - lock_old_archive_extraction=yes ;; - *) - lock_old_archive_extraction=no ;; -esac - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - -# Allow CC to be a program name with arguments. -compiler=$CC - - -# Check for command to grab the raw symbol name followed by C symbol from nm. 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 -$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } -if test "${lt_cv_sys_global_symbol_pipe+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! ;)] - -# Character class describing NM global symbol codes. -symcode='[BCDEGRST]' - -# Regexp to match symbols that can be accessed directly from C. -sympat='\([_A-Za-z][_A-Za-z0-9]*\)' - -# Define system-specific variables. -case $host_os in -aix*) - symcode='[BCDT]' - ;; -cygwin* | mingw* | pw32* | cegcc*) - symcode='[ABCDGISTW]' - ;; -hpux*) - if test "$host_cpu" = ia64; then - symcode='[ABCDEGRST]' - fi - ;; -irix* | nonstopux*) - symcode='[BCDEGRST]' - ;; -osf*) - symcode='[BCDEGQRST]' - ;; -solaris*) - symcode='[BDRT]' - ;; -sco3.2v5*) - symcode='[DT]' - ;; -sysv4.2uw2*) - symcode='[DT]' - ;; -sysv5* | sco5v6* | unixware* | OpenUNIX*) - symcode='[ABDT]' - ;; -sysv4) - symcode='[DFNSTU]' - ;; -esac - -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[ABCDGIRSTW]' ;; -esac - -# Transform an extracted symbol line into a proper C declaration. -# Some systems (esp. on ia64) link data and code symbols differently, -# so use this general approach. 
-lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp - ;; -esac - -# Try without a prefix underscore, then with it. -for ac_symprfx in "" "_"; do - - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" - - # Write the raw and C identifiers. - if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Fake it for dumpbin and say T for any non-static function - # and D for any global variable. - # Also find C++ and __fastcall symbols from MSVC++, - # which start with @ or ?. - lt_cv_sys_global_symbol_pipe="$AWK '"\ -" {last_section=section; section=\$ 3};"\ -" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ -" \$ 0!~/External *\|/{next};"\ -" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ -" {if(hide[section]) next};"\ -" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ -" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ -" s[1]~/^[@?]/{print s[1], s[1]; next};"\ -" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ -" ' prfx=^$ac_symprfx" - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi - - # Check to see that the pipe works correctly. 
- pipe_works=no - - rm -f conftest* - cat > conftest.$ac_ext <<_LT_EOF -#ifdef __cplusplus -extern "C" { -#endif -char nm_test_var; -void nm_test_func(void); -void nm_test_func(void){} -#ifdef __cplusplus -} -#endif -int main(){nm_test_var='a';nm_test_func();return(0);} -_LT_EOF - - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - # Now try to grab the symbols. - nlist=conftest.nm - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 - (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" - else - rm -f "$nlist"T - fi - - # Make sure that we snagged all the symbols we need. - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -#ifdef __cplusplus -extern "C" { -#endif - -_LT_EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - - cat <<_LT_EOF >> conftest.$ac_ext - -/* The mapping between symbol names and symbols. 
*/ -const struct { - const char *name; - void *address; -} -lt__PROGRAM__LTX_preloaded_symbols[] = -{ - { "@PROGRAM@", (void *) 0 }, -_LT_EOF - $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext - cat <<\_LT_EOF >> conftest.$ac_ext - {0, (void *) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt__PROGRAM__LTX_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif -_LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext - lt_save_LIBS="$LIBS" - lt_save_CFLAGS="$CFLAGS" - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi - LIBS="$lt_save_LIBS" - CFLAGS="$lt_save_CFLAGS" - else - echo "cannot find nm_test_func in $nlist" >&5 - fi - else - echo "cannot find nm_test_var in $nlist" >&5 - fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 - fi - else - echo "$progname: failed program was:" >&5 - cat conftest.$ac_ext >&5 - fi - rm -rf conftest* conftst* - - # Do not use the global_symbol_pipe unless it works. - if test "$pipe_works" = yes; then - break - else - lt_cv_sys_global_symbol_pipe= - fi -done - -fi - -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= -fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 -$as_echo "failed" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 -$as_echo "ok" >&6; } -fi - - - - - - - - - - - - - - - - - - - - - - -# Check whether --enable-libtool-lock was given. 
-if test "${enable_libtool_lock+set}" = set; then : - enableval=$enable_libtool_lock; -fi - -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes - -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE="32" - ;; - *ELF-64*) - HPUX_IA64_MODE="64" - ;; - esac - fi - rm -rf conftest* - ;; -*-*-irix6*) - # Find out which ABI we are using. - echo '#line '$LINENO' "configure"' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - if test "$lt_cv_prog_gnu_ld" = yes; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi - fi - rm -rf conftest* - ;; - -x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ -s390*-*linux*|s390*-*tpf*|sparc*-*linux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - case `/usr/bin/file conftest.o` in - *x86-64*) - LD="${LD-ld} -m elf32_x86_64" - ;; - *) - LD="${LD-ld} -m elf_i386" - ;; - esac - ;; - powerpc64le-*linux*) - LD="${LD-ld} -m elf32lppclinux" - ;; - powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - powerpcle-*linux*) - LD="${LD-ld} -m elf64lppc" - ;; - powerpc-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*|s390*-*tpf*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; - -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -belf" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 -$as_echo_n "checking whether the C compiler needs -belf... " >&6; } -if test "${lt_cv_cc_needs_belf+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_cc_needs_belf=yes -else - lt_cv_cc_needs_belf=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 -$as_echo "$lt_cv_cc_needs_belf" >&6; } - if test x"$lt_cv_cc_needs_belf" != x"yes"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS="$SAVE_CFLAGS" - fi - ;; -sparc*-*solaris*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) LD="${LD-ld} -m elf64_sparc" ;; - *) - if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then - LD="${LD-ld} -64" - fi - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; -esac - -need_locks="$enable_libtool_lock" - - - case $host_os in - rhapsody* | darwin*) - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. -set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_DSYMUTIL+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DSYMUTIL"; then - ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -DSYMUTIL=$ac_cv_prog_DSYMUTIL -if test -n "$DSYMUTIL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 -$as_echo "$DSYMUTIL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_DSYMUTIL"; then - ac_ct_DSYMUTIL=$DSYMUTIL - # Extract the first word of "dsymutil", so it can be a program name with args. -set dummy dsymutil; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DSYMUTIL"; then - ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL -if test -n "$ac_ct_DSYMUTIL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 -$as_echo "$ac_ct_DSYMUTIL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_DSYMUTIL" = x; then - DSYMUTIL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DSYMUTIL=$ac_ct_DSYMUTIL - fi -else - DSYMUTIL="$ac_cv_prog_DSYMUTIL" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. -set dummy ${ac_tool_prefix}nmedit; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_NMEDIT+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$NMEDIT"; then - ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -NMEDIT=$ac_cv_prog_NMEDIT -if test -n "$NMEDIT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 -$as_echo "$NMEDIT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_NMEDIT"; then - ac_ct_NMEDIT=$NMEDIT - # Extract the first word of "nmedit", so it can be a program name with args. -set dummy nmedit; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_NMEDIT"; then - ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_NMEDIT="nmedit" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT -if test -n "$ac_ct_NMEDIT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 -$as_echo "$ac_ct_NMEDIT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_NMEDIT" = x; then - NMEDIT=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - NMEDIT=$ac_ct_NMEDIT - fi -else - NMEDIT="$ac_cv_prog_NMEDIT" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. -set dummy ${ac_tool_prefix}lipo; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_LIPO+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$LIPO"; then - ac_cv_prog_LIPO="$LIPO" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_LIPO="${ac_tool_prefix}lipo" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -LIPO=$ac_cv_prog_LIPO -if test -n "$LIPO"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 -$as_echo "$LIPO" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_LIPO"; then - ac_ct_LIPO=$LIPO - # Extract the first word of "lipo", so it can be a program name with args. -set dummy lipo; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_LIPO+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_LIPO"; then - ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_LIPO="lipo" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO -if test -n "$ac_ct_LIPO"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 -$as_echo "$ac_ct_LIPO" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_LIPO" = x; then - LIPO=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - LIPO=$ac_ct_LIPO - fi -else - LIPO="$ac_cv_prog_LIPO" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. -set dummy ${ac_tool_prefix}otool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_OTOOL+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OTOOL"; then - ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_OTOOL="${ac_tool_prefix}otool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -OTOOL=$ac_cv_prog_OTOOL -if test -n "$OTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 -$as_echo "$OTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OTOOL"; then - ac_ct_OTOOL=$OTOOL - # Extract the first word of "otool", so it can be a program name with args. -set dummy otool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_OTOOL+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OTOOL"; then - ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_OTOOL="otool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL -if test -n "$ac_ct_OTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 -$as_echo "$ac_ct_OTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OTOOL" = x; then - OTOOL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OTOOL=$ac_ct_OTOOL - fi -else - OTOOL="$ac_cv_prog_OTOOL" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. -set dummy ${ac_tool_prefix}otool64; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_OTOOL64+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OTOOL64"; then - ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -OTOOL64=$ac_cv_prog_OTOOL64 -if test -n "$OTOOL64"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 -$as_echo "$OTOOL64" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OTOOL64"; then - ac_ct_OTOOL64=$OTOOL64 - # Extract the first word of "otool64", so it can be a program name with args. -set dummy otool64; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_OTOOL64+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OTOOL64"; then - ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_OTOOL64="otool64" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 -if test -n "$ac_ct_OTOOL64"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 -$as_echo "$ac_ct_OTOOL64" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OTOOL64" = x; then - OTOOL64=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OTOOL64=$ac_ct_OTOOL64 - fi -else - OTOOL64="$ac_cv_prog_OTOOL64" -fi - - - - - - - - - - - - - - - - - - - - - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 -$as_echo_n "checking for -single_module linker flag... " >&6; } -if test "${lt_cv_apple_cc_single_mod+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_apple_cc_single_mod=no - if test -z "${LT_MULTI_MODULE}"; then - # By default we will add the -single_module flag. You can override - # by either setting the environment variable LT_MULTI_MODULE - # non-empty at configure time, or by adding -multi_module to the - # link flags. - rm -rf libconftest.dylib* - echo "int foo(void){return 1;}" > conftest.c - echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ --dynamiclib -Wl,-single_module conftest.c" >&5 - $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ - -dynamiclib -Wl,-single_module conftest.c 2>conftest.err - _lt_result=$? - if test -f libconftest.dylib && test ! 
-s conftest.err && test $_lt_result = 0; then - lt_cv_apple_cc_single_mod=yes - else - cat conftest.err >&5 - fi - rm -rf libconftest.dylib* - rm -f conftest.* - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 -$as_echo "$lt_cv_apple_cc_single_mod" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 -$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } -if test "${lt_cv_ld_exported_symbols_list+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_exported_symbols_list=no - save_LDFLAGS=$LDFLAGS - echo "_main" > conftest.sym - LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_ld_exported_symbols_list=yes -else - lt_cv_ld_exported_symbols_list=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS="$save_LDFLAGS" - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 -$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 -$as_echo_n "checking for -force_load linker flag... 
" >&6; } -if test "${lt_cv_ld_force_load+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_force_load=no - cat > conftest.c << _LT_EOF -int forced_loaded() { return 2;} -_LT_EOF - echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 - cat > conftest.c << _LT_EOF -int main() { return 0;} -_LT_EOF - echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 - $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err - _lt_result=$? - if test -f conftest && test ! -s conftest.err && test $_lt_result = 0 && $GREP forced_load conftest 2>&1 >/dev/null; then - lt_cv_ld_force_load=yes - else - cat conftest.err >&5 - fi - rm -f conftest.err libconftest.a conftest conftest.c - rm -rf conftest.dSYM - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 -$as_echo "$lt_cv_ld_force_load" >&6; } - case $host_os in - rhapsody* | darwin1.[012]) - _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; - darwin1.*) - _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; - darwin*) # darwin 5.x on - # if running on 10.5 or later, the deployment target defaults - # to the OS version, if on x86, and 10.4, the deployment - # target defaults to 10.4. Don't you love it? 
- case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in - 10.0,*86*-darwin8*|10.0,*-darwin[91]*) - _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; - 10.[012][,.]*) - _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; - 10.*) - _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; - esac - ;; - esac - if test "$lt_cv_apple_cc_single_mod" = "yes"; then - _lt_dar_single_mod='$single_module' - fi - if test "$lt_cv_ld_exported_symbols_list" = "yes"; then - _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' - else - _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then - _lt_dsymutil='~$DSYMUTIL $lib || :' - else - _lt_dsymutil= - fi - ;; - esac - -for ac_header in dlfcn.h -do : - ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default -" -if test "x$ac_cv_header_dlfcn_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_DLFCN_H 1 -_ACEOF - -fi - -done - - - - - -# Set options - - - - enable_dlopen=no - - - enable_win32_dll=no - - - # Check whether --enable-shared was given. -if test "${enable_shared+set}" = set; then : - enableval=$enable_shared; p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac -else - enable_shared=yes -fi - - - - - - - - - - # Check whether --enable-static was given. 
-if test "${enable_static+set}" = set; then : - enableval=$enable_static; p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac -else - enable_static=yes -fi - - - - - - - - - - -# Check whether --with-pic was given. -if test "${with_pic+set}" = set; then : - withval=$with_pic; pic_mode="$withval" -else - pic_mode=default -fi - - -test -z "$pic_mode" && pic_mode=default - - - - - - - - # Check whether --enable-fast-install was given. -if test "${enable_fast_install+set}" = set; then : - enableval=$enable_fast_install; p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac -else - enable_fast_install=yes -fi - - - - - - - - - - - -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS="$ltmain" - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' - - - - - - - - - - - - - - - - - - - - - - - - - - -test -z "$LN_S" && LN_S="ln -s" - - - - - - - - - - - - - - -if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 -$as_echo_n "checking for objdir... 
" >&6; } -if test "${lt_cv_objdir+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. - lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 -$as_echo "$lt_cv_objdir" >&6; } -objdir=$lt_cv_objdir - - - - - -cat >>confdefs.h <<_ACEOF -#define LT_OBJDIR "$lt_cv_objdir/" -_ACEOF - - - - -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; -esac - -# Global variables: -ofile=libtool -can_build_shared=yes - -# All known linkers require a `.a' archive for static linking (except MSVC, -# which needs '.lib'). -libext=a - -with_gnu_ld="$lt_cv_prog_gnu_ld" - -old_CC="$CC" -old_CFLAGS="$CFLAGS" - -# Set sane defaults for various variables -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$LD" && LD=ld -test -z "$ac_objext" && ac_objext=o - -for cc_temp in $compiler""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` - - -# Only perform the check for file, if the check method requires it -test -z "$MAGIC_CMD" && MAGIC_CMD=file -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 -$as_echo_n "checking for ${ac_tool_prefix}file... 
" >&6; } -if test "${lt_cv_path_MAGIC_CMD+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD="$MAGIC_CMD" - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f $ac_dir/${ac_tool_prefix}file; then - lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD="$lt_cv_path_MAGIC_CMD" - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi - done - IFS="$lt_save_ifs" - MAGIC_CMD="$lt_save_MAGIC_CMD" - ;; -esac -fi - -MAGIC_CMD="$lt_cv_path_MAGIC_CMD" -if test -n "$MAGIC_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 -$as_echo "$MAGIC_CMD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - - - -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 -$as_echo_n "checking for file... 
" >&6; } -if test "${lt_cv_path_MAGIC_CMD+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD="$MAGIC_CMD" - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f $ac_dir/file; then - lt_cv_path_MAGIC_CMD="$ac_dir/file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD="$lt_cv_path_MAGIC_CMD" - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. 
Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi - done - IFS="$lt_save_ifs" - MAGIC_CMD="$lt_save_MAGIC_CMD" - ;; -esac -fi - -MAGIC_CMD="$lt_cv_path_MAGIC_CMD" -if test -n "$MAGIC_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 -$as_echo "$MAGIC_CMD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - else - MAGIC_CMD=: - fi -fi - - fi - ;; -esac - -# Use C for the default configuration in the libtool script - -lt_save_CC="$CC" -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -objext=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' - - - - - - - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - -# Allow CC to be a program name with arguments. -compiler=$CC - -# Save the default compiler, since it gets overwritten when the other -# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
-compiler_DEFAULT=$CC - -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$RM conftest* - -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$RM -r conftest* - - -## CAVEAT EMPTOR: -## There is no encapsulation within the following macros, do not change -## the running order or otherwise move them around unless you know exactly -## what you are doing... -if test -n "$compiler"; then - -lt_prog_compiler_no_builtin_flag= - -if test "$GCC" = yes; then - case $cc_basename in - nvcc*) - lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; - *) - lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; - esac - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 -$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } -if test "${lt_cv_prog_compiler_rtti_exceptions+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_rtti_exceptions=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-fno-rtti -fno-exceptions" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. 
- lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_rtti_exceptions=yes - fi - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 -$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } - -if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then - lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" -else - : -fi - -fi - - - - - - - lt_prog_compiler_wl= -lt_prog_compiler_pic= -lt_prog_compiler_static= - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_static='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - fi - lt_prog_compiler_pic='-fPIC' - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - lt_prog_compiler_pic='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. 
- lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic='-DDLL_EXPORT' - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic='-fno-common' - ;; - - haiku*) - # PIC is the default for Haiku. - # The "-static" flag exists, but is broken. - lt_prog_compiler_static= - ;; - - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - ;; - - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - lt_prog_compiler_can_build_shared=no - enable_shared=no - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - lt_prog_compiler_pic='-fPIC -shared' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic=-Kconform_pic - fi - ;; - - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - - case $cc_basename in - nvcc*) # Cuda Compiler Driver 2.2 - lt_prog_compiler_wl='-Xlinker ' - lt_prog_compiler_pic='-Xcompiler -fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. 
- case $host_os in - aix*) - lt_prog_compiler_wl='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - else - lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' - fi - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic='-DDLL_EXPORT' - ;; - - hpux9* | hpux10* | hpux11*) - lt_prog_compiler_wl='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - lt_prog_compiler_static='${wl}-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - lt_prog_compiler_wl='-Wl,' - # PIC (with -KPIC) is the default. - lt_prog_compiler_static='-non_shared' - ;; - - linux* | k*bsd*-gnu | kopensolaris*-gnu) - case $cc_basename in - # old Intel for x86_64 which still supported -KPIC. - ecc*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-static' - ;; - # icc used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - icc* | ifort*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - # Lahey Fortran 8.1. - lf95*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fpic' - lt_prog_compiler_static='-Bstatic' - ;; - ccc*) - lt_prog_compiler_wl='-Wl,' - # All Alpha code is PIC. 
- lt_prog_compiler_static='-non_shared' - ;; - xl* | bgxl* | bgf* | mpixl*) - # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-qpic' - lt_prog_compiler_static='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ F* | *Sun*Fortran*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='' - ;; - *Sun\ C*) - # Sun C 5.9 - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='-Wl,' - ;; - esac - ;; - esac - ;; - - newsos6) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - lt_prog_compiler_pic='-fPIC -shared' - ;; - - osf3* | osf4* | osf5*) - lt_prog_compiler_wl='-Wl,' - # All OSF/1 code is PIC. - lt_prog_compiler_static='-non_shared' - ;; - - rdos*) - lt_prog_compiler_static='-non_shared' - ;; - - solaris*) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; - esac - ;; - - sunos4*) - lt_prog_compiler_wl='-Qoption ld ' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec ;then - lt_prog_compiler_pic='-Kconform_pic' - lt_prog_compiler_static='-Bstatic' - fi - ;; - - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - - unicos*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_can_build_shared=no - ;; - - uts4*) - lt_prog_compiler_pic='-pic' - lt_prog_compiler_static='-Bstatic' - ;; - - *) - 
lt_prog_compiler_can_build_shared=no - ;; - esac - fi - -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic= - ;; - *) - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 -$as_echo "$lt_prog_compiler_pic" >&6; } - - - - - - -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 -$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; } -if test "${lt_cv_prog_compiler_pic_works+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_pic_works=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic -DPIC" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_pic_works=yes - fi - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 -$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } - -if test x"$lt_cv_prog_compiler_pic_works" = xyes; then - case $lt_prog_compiler_pic in - "" | " "*) ;; - *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; - esac -else - lt_prog_compiler_pic= - lt_prog_compiler_can_build_shared=no -fi - -fi - - - - - - -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } -if test "${lt_cv_prog_compiler_static_works+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_static_works=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. 
- cat conftest.err 1>&5 - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_static_works=yes - fi - else - lt_cv_prog_compiler_static_works=yes - fi - fi - $RM -r conftest* - LDFLAGS="$save_LDFLAGS" - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 -$as_echo "$lt_cv_prog_compiler_static_works" >&6; } - -if test x"$lt_cv_prog_compiler_static_works" = xyes; then - : -else - lt_prog_compiler_static= -fi - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 -$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } -if test "${lt_cv_prog_compiler_c_o+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_c_o=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 2>&5 - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. - $RM -r conftest - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 -$as_echo "$lt_cv_prog_compiler_c_o" >&6; } - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 -$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } -if test "${lt_cv_prog_compiler_c_o+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_c_o=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 2>&5 - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. - $RM -r conftest - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 -$as_echo "$lt_cv_prog_compiler_c_o" >&6; } - - - - -hard_links="nottested" -if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 -$as_echo_n "checking if we can lock with hard links... " >&6; } - hard_links=yes - $RM conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 -$as_echo "$hard_links" >&6; } - if test "$hard_links" = no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 -$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} - need_locks=warn - fi -else - need_locks=no -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } - - runpath_var= - allow_undefined_flag= - always_export_symbols=no - archive_cmds= - archive_expsym_cmds= - compiler_needs_object=no - enable_shared_with_static_runtimes=no - export_dynamic_flag_spec= - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - hardcode_automatic=no - hardcode_direct=no - hardcode_direct_absolute=no - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld= - hardcode_libdir_separator= - hardcode_minus_L=no - hardcode_shlibpath_var=unsupported - inherit_rpath=no - link_all_deplibs=unknown - module_cmds= - module_expsym_cmds= - old_archive_from_new_cmds= - old_archive_from_expsyms_cmds= - thread_safe_flag_spec= - whole_archive_flag_spec= - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - include_expsyms= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - # Exclude shared library initialization/finalization symbols. - extract_expsyms_cmds= - - case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. 
- if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no - ;; - esac - - ld_shlibs=yes - - # On some targets, GNU ld is compatible enough with the native linker - # that we're better off using the native interface for both. - lt_use_gnu_ld_interface=no - if test "$with_gnu_ld" = yes; then - case $host_os in - aix*) - # The AIX port of GNU ld has always aspired to compatibility - # with the native linker. However, as the warning in the GNU ld - # block says, versions before 2.19.5* couldn't really create working - # shared libraries, regardless of the interface used. - case `$LD -v 2>&1` in - *\ \(GNU\ Binutils\)\ 2.19.5*) ;; - *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; - *\ \(GNU\ Binutils\)\ [3-9]*) ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - fi - - if test "$lt_use_gnu_ld_interface" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' - - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - export_dynamic_flag_spec='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - whole_archive_flag_spec= - fi - supports_anon_versioning=no - case `$LD -v 2>&1` in - *GNU\ gold*) supports_anon_versioning=yes ;; - *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... 
- *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - - # See if GNU ld supports shared libraries. - case $host_os in - aix[3-9]*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - ld_shlibs=no - cat <<_LT_EOF 1>&2 - -*** Warning: the GNU linker, at least up to release 2.19, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to install binutils -*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. -*** You will then need to restart the configuration process. - -_LT_EOF - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='' - ;; - m68k) - archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - ld_shlibs=no - fi - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, - # as there is no search path for DLLs. 
- hardcode_libdir_flag_spec='-L$libdir' - export_dynamic_flag_spec='${wl}--export-all-symbols' - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs=no - fi - ;; - - haiku*) - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - link_all_deplibs=yes - ;; - - interix[3-9]*) - hardcode_direct=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - export_dynamic_flag_spec='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
- archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - - gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) - tmp_diet=no - if test "$host_os" = linux-dietlibc; then - case $cc_basename in - diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) - esac - fi - if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ - && test "$tmp_diet" = no - then - tmp_addflag=' $pic_flag' - tmp_sharedflag='-shared' - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group f77 and f90 compilers - whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - lf95*) # Lahey Fortran 8.1 - whole_archive_flag_spec= - tmp_sharedflag='--shared' ;; - xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) - tmp_sharedflag='-qmkshrobj' - tmp_addflag= ;; - nvcc*) # Cuda 
Compiler Driver 2.2 - whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' - compiler_needs_object=yes - ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' - compiler_needs_object=yes - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - esac - archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - - case $cc_basename in - xlf* | bgf* | bgxlf* | mpixlf*) - # IBM XL Fortran 10.1 on PPC cannot create shared libs itself - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' - archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac - else - ld_shlibs=no - fi 
- ;; - - netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - - solaris*) - if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then - ld_shlibs=no - cat <<_LT_EOF 1>&2 - -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs=no - cat <<_LT_EOF 1>&2 - -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. 
- -_LT_EOF - ;; - *) - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. But doing so - # requires that you compile everything twice, which is a pain. - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - ;; - - sunos4*) - archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - - if test "$ld_shlibs" = no; then - runpath_var= - hardcode_libdir_flag_spec= - export_dynamic_flag_spec= - whole_archive_flag_spec= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - allow_undefined_flag=unsupported - always_export_symbols=yes - archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L=yes - if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. 
- hardcode_direct=unsupported - fi - ;; - - aix[4-9]*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to AIX nm, but means don't demangle with GNU nm - # Also, AIX nm treats weak defined symbols like other global - # defined symbols, whereas GNU nm marks them as "W". - if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
- - archive_cmds='' - hardcode_direct=yes - hardcode_direct_absolute=yes - hardcode_libdir_separator=':' - link_all_deplibs=yes - file_list_spec='${wl}-f,' - - if test "$GCC" = yes; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L=yes - hardcode_libdir_flag_spec='-L$libdir' - hardcode_libdir_separator= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - - export_dynamic_flag_spec='${wl}-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. -if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' - allow_undefined_flag="-z nodefs" - archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. 
-if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - no_undefined_flag=' ${wl}-bernotok' - allow_undefined_flag=' ${wl}-berok' - if test "$with_gnu_ld" = yes; then - # We only use this code for GNU lds that support --whole-archive. - whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' - else - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec='$convenience' - fi - archive_cmds_need_lc=yes - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - archive_expsym_cmds='' - ;; - m68k) - archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - - bsdi[45]*) - export_dynamic_flag_spec=-rdynamic - ;; - - cygwin* | 
mingw* | pw32* | cegcc*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - hardcode_libdir_flag_spec=' ' - allow_undefined_flag=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - old_archive_from_new_cmds='true' - # FIXME: Should let the user specify the lib program. - old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' - fix_srcfile_path='`cygpath -w "$srcfile"`' - enable_shared_with_static_runtimes=yes - ;; - - darwin* | rhapsody*) - - - archive_cmds_need_lc=no - hardcode_direct=no - hardcode_automatic=yes - hardcode_shlibpath_var=unsupported - if test "$lt_cv_ld_force_load" = "yes"; then - whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' - else - whole_archive_flag_spec='' - fi - link_all_deplibs=yes - allow_undefined_flag="$_lt_dar_allow_undefined" - case $cc_basename in - ifort*) _lt_dar_can_shared=yes ;; - *) _lt_dar_can_shared=$GCC ;; - esac - if test "$_lt_dar_can_shared" = "yes"; then - output_verbose_link_cmd=func_echo_all - archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" - module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" - archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib 
\$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" - module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" - - else - ld_shlibs=no - fi - - ;; - - dgux*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; - - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2.*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly* | openbsd* | bitrig*) - archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - hpux9*) - if test "$GCC" = yes; then - archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - export_dynamic_flag_spec='${wl}-E' - ;; - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then - archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_flag_spec_ld='+b $libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - hardcode_direct_absolute=yes - export_dynamic_flag_spec='${wl}-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L=yes - fi - ;; - - hpux11*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - - # Older versions of the 11.00 compiler do not understand -b yet - # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 -$as_echo_n "checking if $CC understands -b... " >&6; } -if test "${lt_cv_prog_compiler__b+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler__b=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -b" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. 
- cat conftest.err 1>&5 - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler__b=yes - fi - else - lt_cv_prog_compiler__b=yes - fi - fi - $RM -r conftest* - LDFLAGS="$save_LDFLAGS" - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 -$as_echo "$lt_cv_prog_compiler__b" >&6; } - -if test x"$lt_cv_prog_compiler__b" = xyes; then - archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' -fi - - ;; - esac - fi - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - - case $host_cpu in - hppa*64*|ia64*) - hardcode_direct=no - hardcode_shlibpath_var=no - ;; - *) - hardcode_direct=yes - hardcode_direct_absolute=yes - export_dynamic_flag_spec='${wl}-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - ;; - esac - fi - ;; - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -int foo(void) {} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' - -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS="$save_LDFLAGS" - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' - fi - archive_cmds_need_lc='no' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - inherit_rpath=yes - link_all_deplibs=yes - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - newsos6) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - hardcode_shlibpath_var=no - ;; - - *nto* | *qnx*) - ;; - - openbsd*) - if test -f /usr/libexec/ld.so; then - hardcode_direct=yes - hardcode_shlibpath_var=no - hardcode_direct_absolute=yes - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - 
archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - export_dynamic_flag_spec='${wl}-E' - else - case $host_os in - openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-R$libdir' - ;; - *) - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - ;; - esac - fi - else - ld_shlibs=no - fi - ;; - - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - allow_undefined_flag=unsupported - archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' - old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; - - osf3*) - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - fi - archive_cmds_need_lc='no' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - ;; - - osf4* | 
osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' - archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ - $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' - - # Both c and cxx compiler support -rpath directly - hardcode_libdir_flag_spec='-rpath $libdir' - fi - archive_cmds_need_lc='no' - hardcode_libdir_separator=: - ;; - - solaris*) - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' - archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) - wlarc='' - archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat 
$export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' - ;; - *) - wlarc='${wl}' - archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - ;; - esac - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_shlibpath_var=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. GCC discards it without `$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test "$GCC" = yes; then - whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - else - whole_archive_flag_spec='-z allextract$convenience -z defaultextract' - fi - ;; - esac - link_all_deplibs=yes - ;; - - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - hardcode_libdir_flag_spec='-L$libdir' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - - sysv4) - case $host_vendor in - sni) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' - reload_cmds='$CC -r -o $output$reload_objs' - hardcode_direct=no - ;; - motorola) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - hardcode_shlibpath_var=no - ;; - - sysv4.3*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - export_dynamic_flag_spec='-Bexport' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ld_shlibs=yes - fi - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag='${wl}-z,text' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - runpath_var='LD_RUN_PATH' - - if test "$GCC" = yes; then - archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. 
- no_undefined_flag='${wl}-z,text' - allow_undefined_flag='${wl}-z,nodefs' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='${wl}-R,$libdir' - hardcode_libdir_separator=':' - link_all_deplibs=yes - export_dynamic_flag_spec='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - - if test "$GCC" = yes; then - archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - uts4*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; - - *) - ld_shlibs=no - ;; - esac - - if test x$host_vendor = xsni; then - case $host in - sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - export_dynamic_flag_spec='${wl}-Blargedynsym' - ;; - esac - fi - fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 -$as_echo "$ld_shlibs" >&6; } -test "$ld_shlibs" = no && can_build_shared=no - -with_gnu_ld=$with_gnu_ld - - - - - - - - - - - - - - - -# -# Do we need to explicitly link libc? -# -case "x$archive_cmds_need_lc" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc=yes - - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $archive_cmds in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 -$as_echo_n "checking whether -lc should be explicitly linked in... 
" >&6; } -if test "${lt_cv_archive_cmds_need_lc+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - $RM conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl - pic_flag=$lt_prog_compiler_pic - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag - allow_undefined_flag= - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 - (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - then - lt_cv_archive_cmds_need_lc=no - else - lt_cv_archive_cmds_need_lc=yes - fi - allow_undefined_flag=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 -$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } - archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc - ;; - esac - fi - ;; -esac - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 -$as_echo_n "checking dynamic linker characteristics... 
" >&6; } - -if test "$GCC" = yes; then - case $host_os in - darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; - *) lt_awk_arg="/^libraries:/" ;; - esac - case $host_os in - mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; - *) lt_sed_strip_eq="s,=/,/,g" ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` - case $lt_search_path_spec in - *\;*) - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` - ;; - *) - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` - ;; - esac - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary. - lt_tmp_lt_search_path_spec= - lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path/$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" - else - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' -BEGIN {RS=" "; FS="/|\n";} { - lt_foo=""; - lt_count=0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo="/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[lt_foo]++; } - if (lt_freq[lt_foo] == 1) { print lt_foo; } -}'` - # AWK program above erroneously prepends '/' to C:/dos/paths - # for these hosts. 
- case $host_os in - mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ - $SED 's,/\([A-Za-z]:\),\1,g'` ;; - esac - sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -need_lib_prefix=unknown -hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown - -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH - - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='${libname}${release}${shared_ext}$major' - ;; - -aix[4-9]*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. 
- case $host_os in - aix4 | aix4.[01] | aix4.[01].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' - fi - shlibpath_var=LIBPATH - fi - ;; - -amigaos*) - case $host_cpu in - powerpc) - # Since July 2007 AmigaOS4 officially supports .so libraries. - # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - ;; - m68k) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. 
- finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - esac - ;; - -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; - -bsdi[45]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; - -cygwin* | mingw* | pw32* | cegcc*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" - ;; - mingw* | cegcc*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac - ;; - - *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; - -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. 
- if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[23].*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2.*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[01]* | freebsdelf3.[01]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ - freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; - -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; - -haiku*) - version_type=linux - need_lib_prefix=no - need_version=no - dynamic_linker="$host_os runtime_loader" - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes - sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' - hardcode_into_libs=yes - ;; - -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. 
- version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
- postinstall_cmds='chmod 555 $lib' - # or fails outright, so override atomically: - install_override_mode=555 - ;; - -interix[3-9]*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test "$lt_cv_prog_gnu_ld" = yes; then - version_type=linux - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" - sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" - hardcode_into_libs=yes - ;; - -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; - -# This must be Linux ELF. 
-linux* | k*bsd*-gnu | kopensolaris*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - - # Some binutils ld are patched to set DT_RUNPATH - if test "${lt_cv_shlibpath_overrides_runpath+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_shlibpath_overrides_runpath=no - save_LDFLAGS=$LDFLAGS - save_libdir=$libdir - eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ - LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : - lt_cv_shlibpath_overrides_runpath=yes -fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS - libdir=$save_libdir - -fi - - shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath - - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. 
- hardcode_into_libs=yes - - # Append ld.so.conf contents to the search path - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" - fi - - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. - dynamic_linker='GNU/Linux ld.so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -*nto* | *qnx*) - version_type=qnx - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - 
soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='ldqnx.so' - ;; - -openbsd*) - version_type=sunos - sys_lib_dlsearch_path_spec="/usr/lib" - need_lib_prefix=no - # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. - case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; - esac - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - case $host_os in - openbsd2.[89] | openbsd2.[89].*) - shlibpath_overrides_runpath=no - ;; - *) - shlibpath_overrides_runpath=yes - ;; - esac - else - shlibpath_overrides_runpath=yes - fi - ;; - -os2*) - libname_spec='$name' - shrext_cmds=".dll" - need_lib_prefix=no - library_names_spec='$libname${shared_ext} $libname.a' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=LIBPATH - ;; - -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" - ;; - -rdos*) - dynamic_linker=no - ;; - -solaris*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd 
complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; - -sunos4*) - version_type=sunos - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test "$with_gnu_ld" = yes; then - need_lib_prefix=no - fi - need_version=yes - ;; - -sysv4 | sysv4.3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; - -sysv4*MP*) - if test -d /usr/nec ;then - version_type=linux - library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' - soname_spec='$libname${shared_ext}.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=freebsd-elf - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - if test "$with_gnu_ld" = yes; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; - -tpf*) - 
# TPF is a cross-target only. Preferred cross-host = GNU/Linux. - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -uts4*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -*) - dynamic_linker=no - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 -$as_echo "$dynamic_linker" >&6; } -test "$dynamic_linker" = no && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi - -if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then - sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" -fi -if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then - sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" -fi - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 -$as_echo_n "checking how to hardcode library paths into programs... " >&6; } -hardcode_action= -if test -n "$hardcode_libdir_flag_spec" || - test -n "$runpath_var" || - test "X$hardcode_automatic" = "Xyes" ; then - - # We can hardcode non-existent directories. 
- if test "$hardcode_direct" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && - test "$hardcode_minus_L" != no; then - # Linking always hardcodes the temporary library directory. - hardcode_action=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - hardcode_action=unsupported -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 -$as_echo "$hardcode_action" >&6; } - -if test "$hardcode_action" = relink || - test "$inherit_rpath" = yes; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi - - - - - - - if test "x$enable_dlopen" != xyes; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - - case $host_os in - beos*) - lt_cv_dlopen="load_add_on" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; - - mingw* | pw32* | cegcc*) - lt_cv_dlopen="LoadLibrary" - lt_cv_dlopen_libs= - ;; - - cygwin*) - lt_cv_dlopen="dlopen" - lt_cv_dlopen_libs= - ;; - - darwin*) - # if libdl is installed we need to link against it - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 -$as_echo_n "checking for dlopen in -ldl... " >&6; } -if test "${ac_cv_lib_dl_dlopen+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. 
- Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlopen=yes -else - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 -$as_echo "$ac_cv_lib_dl_dlopen" >&6; } -if test "x$ac_cv_lib_dl_dlopen" = x""yes; then : - lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" -else - - lt_cv_dlopen="dyld" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - -fi - - ;; - - *) - ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" -if test "x$ac_cv_func_shl_load" = x""yes; then : - lt_cv_dlopen="shl_load" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 -$as_echo_n "checking for shl_load in -ldld... " >&6; } -if test "${ac_cv_lib_dld_shl_load+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char shl_load (); -int -main () -{ -return shl_load (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dld_shl_load=yes -else - ac_cv_lib_dld_shl_load=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 -$as_echo "$ac_cv_lib_dld_shl_load" >&6; } -if test "x$ac_cv_lib_dld_shl_load" = x""yes; then : - lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" -else - ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" -if test "x$ac_cv_func_dlopen" = x""yes; then : - lt_cv_dlopen="dlopen" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 -$as_echo_n "checking for dlopen in -ldl... " >&6; } -if test "${ac_cv_lib_dl_dlopen+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlopen=yes -else - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 -$as_echo "$ac_cv_lib_dl_dlopen" >&6; } -if test "x$ac_cv_lib_dl_dlopen" = x""yes; then : - lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 -$as_echo_n "checking for dlopen in -lsvld... 
" >&6; } -if test "${ac_cv_lib_svld_dlopen+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lsvld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_svld_dlopen=yes -else - ac_cv_lib_svld_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 -$as_echo "$ac_cv_lib_svld_dlopen" >&6; } -if test "x$ac_cv_lib_svld_dlopen" = x""yes; then : - lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 -$as_echo_n "checking for dld_link in -ldld... " >&6; } -if test "${ac_cv_lib_dld_dld_link+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char dld_link (); -int -main () -{ -return dld_link (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dld_dld_link=yes -else - ac_cv_lib_dld_dld_link=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 -$as_echo "$ac_cv_lib_dld_dld_link" >&6; } -if test "x$ac_cv_lib_dld_dld_link" = x""yes; then : - lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" -fi - - -fi - - -fi - - -fi - - -fi - - -fi - - ;; - esac - - if test "x$lt_cv_dlopen" != xno; then - enable_dlopen=yes - else - enable_dlopen=no - fi - - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS="$CPPFLAGS" - test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" - - save_LDFLAGS="$LDFLAGS" - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - - save_LIBS="$LIBS" - LIBS="$lt_cv_dlopen_libs $LIBS" - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 -$as_echo_n "checking whether a program can dlopen itself... " >&6; } -if test "${lt_cv_dlopen_self+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - lt_cv_dlopen_self=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -#line 11134 "configure" -#include "confdefs.h" - -#if HAVE_DLFCN_H -#include -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. 
*/ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -/* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ -#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -void fnord () __attribute__((visibility("default"))); -#endif - -void fnord () { int i=42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } - else - puts (dlerror ()); - - return status; -} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? - case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self=no - fi -fi -rm -fr conftest* - - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 -$as_echo "$lt_cv_dlopen_self" >&6; } - - if test "x$lt_cv_dlopen_self" = xyes; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 -$as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } -if test "${lt_cv_dlopen_self_static+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - lt_cv_dlopen_self_static=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -#line 11240 "configure" -#include "confdefs.h" - -#if HAVE_DLFCN_H -#include -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -/* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ -#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -void fnord () __attribute__((visibility("default"))); -#endif - -void fnord () { int i=42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } - else - puts (dlerror ()); - - return status; -} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self_static=no - fi -fi -rm -fr conftest* - - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 -$as_echo "$lt_cv_dlopen_self_static" >&6; } - fi - - CPPFLAGS="$save_CPPFLAGS" - LDFLAGS="$save_LDFLAGS" - LIBS="$save_LIBS" - ;; - esac - - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi - - - - - - - - - - - - - - - - - -striplib= -old_striplib= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 -$as_echo_n "checking whether stripping libraries is possible... " >&6; } -if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - fi - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - ;; - esac -fi - - - - - - - - - - - - - # Report which library types will actually be built - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 -$as_echo_n "checking if libtool supports shared libraries... 
" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 -$as_echo "$can_build_shared" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 -$as_echo_n "checking whether to build shared libraries... " >&6; } - test "$can_build_shared" = "no" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. - case $host_os in - aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - - aix[4-9]*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 -$as_echo "$enable_shared" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 -$as_echo_n "checking whether to build static libraries... " >&6; } - # Make sure either enable_shared or enable_static is yes. - test "$enable_shared" = yes || enable_static=yes - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 -$as_echo "$enable_static" >&6; } - - - - -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -CC="$lt_save_CC" - - - - - - - - - - - - - - ac_config_commands="$ac_config_commands libtool" - - - - -# Only expand once: - - - - -backtrace_supported=yes - -if test -n "${with_target_subdir}"; then - # We are compiling a GCC library. We can assume that the unwind - # library exists. 
- BACKTRACE_FILE="backtrace.lo simple.lo" -else - ac_fn_c_check_header_mongrel "$LINENO" "unwind.h" "ac_cv_header_unwind_h" "$ac_includes_default" -if test "x$ac_cv_header_unwind_h" = x""yes; then : - ac_fn_c_check_func "$LINENO" "_Unwind_Backtrace" "ac_cv_func__Unwind_Backtrace" -if test "x$ac_cv_func__Unwind_Backtrace" = x""yes; then : - BACKTRACE_FILE="backtrace.lo simple.lo" -else - BACKTRACE_FILE="nounwind.lo" - backtrace_supported=no -fi - -else - BACKTRACE_FILE="nounwind.lo" - backtrace_supported=no -fi - - -fi - - -EXTRA_FLAGS= -if test -n "${with_target_subdir}"; then - EXTRA_FLAGS="-funwind-tables -frandom-seed=\$@" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -funwind-tables option" >&5 -$as_echo_n "checking for -funwind-tables option... " >&6; } -if test "${libbacktrace_cv_c_unwind_tables+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - CFLAGS_hold="$CFLAGS" - CFLAGS="$CFLAGS -funwind-tables" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -static int f() { return 0; } -int -main () -{ -return f(); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - libbacktrace_cv_c_unwind_tables=yes -else - libbacktrace_cv_c_unwind_tables=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - CFLAGS="$CFLAGS_hold" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_c_unwind_tables" >&5 -$as_echo "$libbacktrace_cv_c_unwind_tables" >&6; } - if test "$libbacktrace_cv_c_unwind_tables" = "yes"; then - EXTRA_FLAGS=-funwind-tables - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -frandom-seed=string option" >&5 -$as_echo_n "checking for -frandom-seed=string option... " >&6; } -if test "${libbacktrace_cv_c_random_seed_string+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - CFLAGS_hold="$CFLAGS" - CFLAGS="$CFLAGS -frandom-seed=conftest.lo" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - libbacktrace_cv_c_random_seed_string=yes -else - libbacktrace_cv_c_random_seed_string=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - CFLAGS="$CFLAGS_hold" -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_c_random_seed_string" >&5 -$as_echo "$libbacktrace_cv_c_random_seed_string" >&6; } - if test "$libbacktrace_cv_c_random_seed_string" = "yes"; then - EXTRA_FLAGS="$EXTRA_FLAGS -frandom-seed=\$@" - fi -fi - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -WARN_FLAGS= -save_CFLAGS="$CFLAGS" -for real_option in -W -Wall -Wwrite-strings -Wstrict-prototypes \ - -Wmissing-prototypes -Wold-style-definition \ - -Wmissing-format-attribute -Wcast-qual; do - # Do the check with the no- prefix removed since gcc silently - # accepts any -Wno-* option on purpose - case $real_option in - -Wno-*) option=-W`expr x$real_option : 'x-Wno-\(.*\)'` ;; - *) option=$real_option ;; - esac - as_acx_Woption=`$as_echo "acx_cv_prog_cc_warning_$option" | $as_tr_sh` - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports $option" >&5 -$as_echo_n "checking whether $CC supports $option... " >&6; } -if { as_var=$as_acx_Woption; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - CFLAGS="$option" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$as_acx_Woption=yes" -else - eval "$as_acx_Woption=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -eval ac_res=\$$as_acx_Woption - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - if test `eval 'as_val=${'$as_acx_Woption'};$as_echo "$as_val"'` = yes; then : - WARN_FLAGS="$WARN_FLAGS${WARN_FLAGS:+ }$real_option" -fi - done -CFLAGS="$save_CFLAGS" -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -if test -n "${with_target_subdir}"; then - WARN_FLAGS="$WARN_FLAGS -Werror" -fi - - - -if test -n "${with_target_subdir}"; then - - -# Check whether --with-system-libunwind was given. -if test "${with_system_libunwind+set}" = set; then : - withval=$with_system_libunwind; -fi - - # If system-libunwind was not specifically set, pick a default setting. - if test x$with_system_libunwind = x; then - case ${target} in - ia64-*-hpux*) with_system_libunwind=yes ;; - *) with_system_libunwind=no ;; - esac - fi - # Based on system-libunwind and target, do we have ipinfo? - if test x$with_system_libunwind = xyes; then - case ${target} in - ia64-*-*) have_unwind_getipinfo=no ;; - *) have_unwind_getipinfo=yes ;; - esac - else - # Darwin before version 9 does not have _Unwind_GetIPInfo. - - case ${target} in - *-*-darwin[3-8]|*-*-darwin[3-8].*) have_unwind_getipinfo=no ;; - *) have_unwind_getipinfo=yes ;; - esac - - fi - - if test x$have_unwind_getipinfo = xyes; then - -$as_echo "#define HAVE_GETIPINFO 1" >>confdefs.h - - fi - -else - ac_save_CFFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -Werror-implicit-function-declaration" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Unwind_GetIPInfo" >&5 -$as_echo_n "checking for _Unwind_GetIPInfo... 
" >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include "unwind.h" - struct _Unwind_Context *context; - int ip_before_insn = 0; -int -main () -{ -return _Unwind_GetIPInfo (context, &ip_before_insn); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - have_unwind_getipinfo=yes -else - have_unwind_getipinfo=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - CFLAGS="$ac_save_CFLAGS" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_unwind_getipinfo" >&5 -$as_echo "$have_unwind_getipinfo" >&6; } - if test "$have_unwind_getipinfo" = "yes"; then - -$as_echo "#define HAVE_GETIPINFO 1" >>confdefs.h - - fi -fi - -# Enable --enable-host-shared. -# Check whether --enable-host-shared was given. -if test "${enable_host_shared+set}" = set; then : - enableval=$enable_host_shared; PIC_FLAG=-fPIC -else - PIC_FLAG= -fi - - - -# Test for __sync support. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking __sync extensions" >&5 -$as_echo_n "checking __sync extensions... " >&6; } -if test "${libbacktrace_cv_sys_sync+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "${with_target_subdir}"; then - case "${host}" in - hppa*-*-hpux*) libbacktrace_cv_sys_sync=no ;; - *) libbacktrace_cv_sys_sync=yes ;; - esac - else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -int i; -int -main () -{ -__sync_bool_compare_and_swap (&i, i, i); - __sync_lock_test_and_set (&i, 1); - __sync_lock_release (&i); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - libbacktrace_cv_sys_sync=yes -else - libbacktrace_cv_sys_sync=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_sys_sync" >&5 -$as_echo "$libbacktrace_cv_sys_sync" >&6; } -BACKTRACE_SUPPORTS_THREADS=0 -if test "$libbacktrace_cv_sys_sync" = "yes"; then - BACKTRACE_SUPPORTS_THREADS=1 - -$as_echo "#define HAVE_SYNC_FUNCTIONS 1" >>confdefs.h - -fi - - -# Test for __atomic support. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking __atomic extensions" >&5 -$as_echo_n "checking __atomic extensions... " >&6; } -if test "${libbacktrace_cv_sys_atomic+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "${with_target_subdir}"; then - libbacktrace_cv_sys_atomic=yes - else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int i; -int -main () -{ -__atomic_load_n (&i, __ATOMIC_ACQUIRE); - __atomic_store_n (&i, 1, __ATOMIC_RELEASE); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - libbacktrace_cv_sys_atomic=yes -else - libbacktrace_cv_sys_atomic=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_sys_atomic" >&5 -$as_echo "$libbacktrace_cv_sys_atomic" >&6; } -if test "$libbacktrace_cv_sys_atomic" = "yes"; then - -$as_echo "#define HAVE_ATOMIC_FUNCTIONS 1" >>confdefs.h - -fi - -# The library needs to be able to read the executable itself. Compile -# a file to determine the executable format. The awk script -# filetype.awk prints out the file type. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking output filetype" >&5 -$as_echo_n "checking output filetype... 
" >&6; } -if test "${libbacktrace_cv_sys_filetype+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - filetype= -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int i; -int -main () -{ -int j; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - filetype=`${AWK} -f $srcdir/filetype.awk conftest.$ac_objext` -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "compiler failed -See \`config.log' for more details." "$LINENO" 5; } -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -libbacktrace_cv_sys_filetype=$filetype -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_sys_filetype" >&5 -$as_echo "$libbacktrace_cv_sys_filetype" >&6; } - -# Match the file type to decide what files to compile. -FORMAT_FILE= -backtrace_supports_data=yes -case "$libbacktrace_cv_sys_filetype" in -elf*) FORMAT_FILE="elf.lo" ;; -pecoff) FORMAT_FILE="pecoff.lo" - backtrace_supports_data=no - ;; -macho*) FORMAT_FILE="macho.lo" - backtrace_supports_data=no - ;; -*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: could not determine output file type" >&5 -$as_echo "$as_me: WARNING: could not determine output file type" >&2;} - FORMAT_FILE="unknown.lo" - backtrace_supported=no - ;; -esac - - -# ELF defines. 
-elfsize= -case "$libbacktrace_cv_sys_filetype" in -elf32) elfsize=32 ;; -elf64) elfsize=64 ;; -*) elfsize=unused -esac - -cat >>confdefs.h <<_ACEOF -#define BACKTRACE_ELF_SIZE $elfsize -_ACEOF - - -BACKTRACE_SUPPORTED=0 -if test "$backtrace_supported" = "yes"; then - BACKTRACE_SUPPORTED=1 -fi - - -BACKTRACE_SUPPORTS_DATA=0 -if test "$backtrace_supports_data" = "yes"; then - BACKTRACE_SUPPORTS_DATA=1 -fi - - - - -inttype_headers=`echo inttypes.h sys/inttypes.h | sed -e 's/,/ /g'` - -acx_cv_header_stdint=stddef.h -acx_cv_header_stdint_kind="(already complete)" -for i in stdint.h $inttype_headers; do - unset ac_cv_type_uintptr_t - unset ac_cv_type_uintmax_t - unset ac_cv_type_int_least32_t - unset ac_cv_type_int_fast32_t - unset ac_cv_type_uint64_t - $as_echo_n "looking for a compliant stdint.h in $i, " >&6 - ac_fn_c_check_type "$LINENO" "uintmax_t" "ac_cv_type_uintmax_t" "#include -#include <$i> -" -if test "x$ac_cv_type_uintmax_t" = x""yes; then : - acx_cv_header_stdint=$i -else - continue -fi - - ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "#include -#include <$i> -" -if test "x$ac_cv_type_uintptr_t" = x""yes; then : - -else - acx_cv_header_stdint_kind="(mostly complete)" -fi - - ac_fn_c_check_type "$LINENO" "int_least32_t" "ac_cv_type_int_least32_t" "#include -#include <$i> -" -if test "x$ac_cv_type_int_least32_t" = x""yes; then : - -else - acx_cv_header_stdint_kind="(mostly complete)" -fi - - ac_fn_c_check_type "$LINENO" "int_fast32_t" "ac_cv_type_int_fast32_t" "#include -#include <$i> -" -if test "x$ac_cv_type_int_fast32_t" = x""yes; then : - -else - acx_cv_header_stdint_kind="(mostly complete)" -fi - - ac_fn_c_check_type "$LINENO" "uint64_t" "ac_cv_type_uint64_t" "#include -#include <$i> -" -if test "x$ac_cv_type_uint64_t" = x""yes; then : - -else - acx_cv_header_stdint_kind="(lacks uint64_t)" -fi - - break -done -if test "$acx_cv_header_stdint" = stddef.h; then - acx_cv_header_stdint_kind="(lacks uintmax_t)" - for i in stdint.h 
$inttype_headers; do - unset ac_cv_type_uintptr_t - unset ac_cv_type_uint32_t - unset ac_cv_type_uint64_t - $as_echo_n "looking for an incomplete stdint.h in $i, " >&6 - ac_fn_c_check_type "$LINENO" "uint32_t" "ac_cv_type_uint32_t" "#include -#include <$i> -" -if test "x$ac_cv_type_uint32_t" = x""yes; then : - acx_cv_header_stdint=$i -else - continue -fi - - ac_fn_c_check_type "$LINENO" "uint64_t" "ac_cv_type_uint64_t" "#include -#include <$i> -" -if test "x$ac_cv_type_uint64_t" = x""yes; then : - -fi - - ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "#include -#include <$i> -" -if test "x$ac_cv_type_uintptr_t" = x""yes; then : - -fi - - break - done -fi -if test "$acx_cv_header_stdint" = stddef.h; then - acx_cv_header_stdint_kind="(u_intXX_t style)" - for i in sys/types.h $inttype_headers; do - unset ac_cv_type_u_int32_t - unset ac_cv_type_u_int64_t - $as_echo_n "looking for u_intXX_t types in $i, " >&6 - ac_fn_c_check_type "$LINENO" "u_int32_t" "ac_cv_type_u_int32_t" "#include -#include <$i> -" -if test "x$ac_cv_type_u_int32_t" = x""yes; then : - acx_cv_header_stdint=$i -else - continue -fi - - ac_fn_c_check_type "$LINENO" "u_int64_t" "ac_cv_type_u_int64_t" "#include -#include <$i> -" -if test "x$ac_cv_type_u_int64_t" = x""yes; then : - -fi - - break - done -fi -if test "$acx_cv_header_stdint" = stddef.h; then - acx_cv_header_stdint_kind="(using manual detection)" -fi - -test -z "$ac_cv_type_uintptr_t" && ac_cv_type_uintptr_t=no -test -z "$ac_cv_type_uint64_t" && ac_cv_type_uint64_t=no -test -z "$ac_cv_type_u_int64_t" && ac_cv_type_u_int64_t=no -test -z "$ac_cv_type_int_least32_t" && ac_cv_type_int_least32_t=no -test -z "$ac_cv_type_int_fast32_t" && ac_cv_type_int_fast32_t=no - -# ----------------- Summarize what we found so far - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking what to include in gstdint.h" >&5 -$as_echo_n "checking what to include in gstdint.h... 
" >&6; } - -case `$as_basename -- gstdint.h || -$as_expr X/gstdint.h : '.*/\([^/][^/]*\)/*$' \| \ - Xgstdint.h : 'X\(//\)$' \| \ - Xgstdint.h : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/gstdint.h | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` in - stdint.h) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: are you sure you want it there?" >&5 -$as_echo "$as_me: WARNING: are you sure you want it there?" >&2;} ;; - inttypes.h) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: are you sure you want it there?" >&5 -$as_echo "$as_me: WARNING: are you sure you want it there?" >&2;} ;; - *) ;; -esac - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_cv_header_stdint $acx_cv_header_stdint_kind" >&5 -$as_echo "$acx_cv_header_stdint $acx_cv_header_stdint_kind" >&6; } - -# ----------------- done included file, check C basic types -------- - -# Lacking an uintptr_t? Test size of void * -case "$acx_cv_header_stdint:$ac_cv_type_uintptr_t" in - stddef.h:* | *:no) # The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5 -$as_echo_n "checking size of void *... " >&6; } -if test "${ac_cv_sizeof_void_p+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_void_p" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "cannot compute sizeof (void *) -See \`config.log' for more details." 
"$LINENO" 5; }; } - else - ac_cv_sizeof_void_p=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5 -$as_echo "$ac_cv_sizeof_void_p" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_VOID_P $ac_cv_sizeof_void_p -_ACEOF - - ;; -esac - -# Lacking an uint64_t? Test size of long -case "$acx_cv_header_stdint:$ac_cv_type_uint64_t:$ac_cv_type_u_int64_t" in - stddef.h:*:* | *:no:no) # The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5 -$as_echo_n "checking size of long... " >&6; } -if test "${ac_cv_sizeof_long+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_long" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "cannot compute sizeof (long) -See \`config.log' for more details." "$LINENO" 5; }; } - else - ac_cv_sizeof_long=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5 -$as_echo "$ac_cv_sizeof_long" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_LONG $ac_cv_sizeof_long -_ACEOF - - ;; -esac - -if test $acx_cv_header_stdint = stddef.h; then - # Lacking a good header? Test size of everything and deduce all types. - # The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5 -$as_echo_n "checking size of int... 
" >&6; } -if test "${ac_cv_sizeof_int+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_int" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "cannot compute sizeof (int) -See \`config.log' for more details." "$LINENO" 5; }; } - else - ac_cv_sizeof_int=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5 -$as_echo "$ac_cv_sizeof_int" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_INT $ac_cv_sizeof_int -_ACEOF - - - # The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of short" >&5 -$as_echo_n "checking size of short... " >&6; } -if test "${ac_cv_sizeof_short+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (short))" "ac_cv_sizeof_short" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_short" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "cannot compute sizeof (short) -See \`config.log' for more details." 
"$LINENO" 5; }; } - else - ac_cv_sizeof_short=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_short" >&5 -$as_echo "$ac_cv_sizeof_short" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_SHORT $ac_cv_sizeof_short -_ACEOF - - - # The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of char" >&5 -$as_echo_n "checking size of char... " >&6; } -if test "${ac_cv_sizeof_char+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (char))" "ac_cv_sizeof_char" "$ac_includes_default"; then : - -else - if test "$ac_cv_type_char" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "cannot compute sizeof (char) -See \`config.log' for more details." "$LINENO" 5; }; } - else - ac_cv_sizeof_char=0 - fi -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_char" >&5 -$as_echo "$ac_cv_sizeof_char" >&6; } - - - -cat >>confdefs.h <<_ACEOF -#define SIZEOF_CHAR $ac_cv_sizeof_char -_ACEOF - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type equivalent to int8_t" >&5 -$as_echo_n "checking for type equivalent to int8_t... " >&6; } - case "$ac_cv_sizeof_char" in - 1) acx_cv_type_int8_t=char ;; - *) as_fn_error "no 8-bit type, please report a bug" "$LINENO" 5 - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_cv_type_int8_t" >&5 -$as_echo "$acx_cv_type_int8_t" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type equivalent to int16_t" >&5 -$as_echo_n "checking for type equivalent to int16_t... 
" >&6; } - case "$ac_cv_sizeof_int:$ac_cv_sizeof_short" in - 2:*) acx_cv_type_int16_t=int ;; - *:2) acx_cv_type_int16_t=short ;; - *) as_fn_error "no 16-bit type, please report a bug" "$LINENO" 5 - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_cv_type_int16_t" >&5 -$as_echo "$acx_cv_type_int16_t" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type equivalent to int32_t" >&5 -$as_echo_n "checking for type equivalent to int32_t... " >&6; } - case "$ac_cv_sizeof_int:$ac_cv_sizeof_long" in - 4:*) acx_cv_type_int32_t=int ;; - *:4) acx_cv_type_int32_t=long ;; - *) as_fn_error "no 32-bit type, please report a bug" "$LINENO" 5 - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_cv_type_int32_t" >&5 -$as_echo "$acx_cv_type_int32_t" >&6; } -fi - -# These tests are here to make the output prettier - -if test "$ac_cv_type_uint64_t" != yes && test "$ac_cv_type_u_int64_t" != yes; then - case "$ac_cv_sizeof_long" in - 8) acx_cv_type_int64_t=long ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type equivalent to int64_t" >&5 -$as_echo_n "checking for type equivalent to int64_t... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${acx_cv_type_int64_t-'using preprocessor symbols'}" >&5 -$as_echo "${acx_cv_type_int64_t-'using preprocessor symbols'}" >&6; } -fi - -# Now we can use the above types - -if test "$ac_cv_type_uintptr_t" != yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type equivalent to intptr_t" >&5 -$as_echo_n "checking for type equivalent to intptr_t... 
" >&6; } - case $ac_cv_sizeof_void_p in - 2) acx_cv_type_intptr_t=int16_t ;; - 4) acx_cv_type_intptr_t=int32_t ;; - 8) acx_cv_type_intptr_t=int64_t ;; - *) as_fn_error "no equivalent for intptr_t, please report a bug" "$LINENO" 5 - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_cv_type_intptr_t" >&5 -$as_echo "$acx_cv_type_intptr_t" >&6; } -fi - -# ----------------- done all checks, emit header ------------- -ac_config_commands="$ac_config_commands gstdint.h" - - - - -for ac_header in sys/mman.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default" -if test "x$ac_cv_header_sys_mman_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SYS_MMAN_H 1 -_ACEOF - -fi - -done - -if test "$ac_cv_header_sys_mman_h" = "no"; then - have_mmap=no -else - if test -n "${with_target_subdir}"; then - # When built as a GCC target library, we can't do a link test. We - # simply assume that if we have mman.h, we have mmap. - have_mmap=yes - case "${host}" in - spu-*-*|*-*-msdosdjgpp) - # The SPU does not have mmap, but it has a sys/mman.h header file - # containing "mmap_eaddr" and the mmap flags, confusing the test. - # DJGPP also has sys/man.h, but no mmap - have_mmap=no ;; - esac - else - ac_fn_c_check_func "$LINENO" "mmap" "ac_cv_func_mmap" -if test "x$ac_cv_func_mmap" = x""yes; then : - have_mmap=yes -else - have_mmap=no -fi - - fi -fi - -case "${host_os}" in -darwin*) - have_mmap=no ;; -esac - -if test "$have_mmap" = "no"; then - VIEW_FILE=read.lo - ALLOC_FILE=alloc.lo -else - VIEW_FILE=mmapio.lo - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -#include -#if !defined(MAP_ANONYMOUS) && !defined(MAP_ANON) - #error no MAP_ANONYMOUS -#endif - -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ALLOC_FILE=alloc.lo -else - ALLOC_FILE=alloc.lo -fi -rm -f conftest.err conftest.$ac_ext -fi - - - -BACKTRACE_USES_MALLOC=0 -if test "$ALLOC_FILE" = "alloc.lo"; then - BACKTRACE_USES_MALLOC=1 -fi - - -# Check for dl_iterate_phdr. -for ac_header in link.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "link.h" "ac_cv_header_link_h" "$ac_includes_default" -if test "x$ac_cv_header_link_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LINK_H 1 -_ACEOF - -fi - -done - -if test "$ac_cv_header_link_h" = "no"; then - have_dl_iterate_phdr=no -else - if test -n "${with_target_subdir}"; then - # When built as a GCC target library, we can't do a link test. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "dl_iterate_phdr" >/dev/null 2>&1; then : - have_dl_iterate_phdr=yes -else - have_dl_iterate_phdr=no -fi -rm -f conftest* - - case "${host}" in - *-*-solaris2.10*) - # Avoid dl_iterate_phdr on Solaris 10, where it is in the - # header file but is only in -ldl. - have_dl_iterate_phdr=no ;; - esac - else - ac_fn_c_check_func "$LINENO" "dl_iterate_phdr" "ac_cv_func_dl_iterate_phdr" -if test "x$ac_cv_func_dl_iterate_phdr" = x""yes; then : - have_dl_iterate_phdr=yes -else - have_dl_iterate_phdr=no -fi - - fi -fi -if test "$have_dl_iterate_phdr" = "yes"; then - -$as_echo "#define HAVE_DL_ITERATE_PHDR 1" >>confdefs.h - -fi - -# Check for the fcntl function. 
-if test -n "${with_target_subdir}"; then - case "${host}" in - *-*-mingw*) have_fcntl=no ;; - spu-*-*) have_fcntl=no ;; - *) have_fcntl=yes ;; - esac -else - ac_fn_c_check_func "$LINENO" "fcntl" "ac_cv_func_fcntl" -if test "x$ac_cv_func_fcntl" = x""yes; then : - have_fcntl=yes -else - have_fcntl=no -fi - -fi -if test "$have_fcntl" = "yes"; then - -$as_echo "#define HAVE_FCNTL 1" >>confdefs.h - -fi - -ac_fn_c_check_decl "$LINENO" "strnlen" "ac_cv_have_decl_strnlen" "$ac_includes_default" -if test "x$ac_cv_have_decl_strnlen" = x""yes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi - -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_STRNLEN $ac_have_decl -_ACEOF - - -# Check for getexecname function. -if test -n "${with_target_subdir}"; then - case "${host}" in - *-*-solaris2*) have_getexecname=yes ;; - *) have_getexecname=no ;; - esac -else - ac_fn_c_check_func "$LINENO" "getexecname" "ac_cv_func_getexecname" -if test "x$ac_cv_func_getexecname" = x""yes; then : - have_getexecname=yes -else - have_getexecname=no -fi - -fi -if test "$have_getexecname" = "yes"; then - -$as_echo "#define HAVE_GETEXECNAME 1" >>confdefs.h - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether tests can run" >&5 -$as_echo_n "checking whether tests can run... " >&6; } -if test "${libbacktrace_cv_sys_native+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - libbacktrace_cv_sys_native=no -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ -return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - libbacktrace_cv_sys_native=yes -else - libbacktrace_cv_sys_native=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libbacktrace_cv_sys_native" >&5 -$as_echo "$libbacktrace_cv_sys_native" >&6; } - if test "$libbacktrace_cv_sys_native" = "yes"; then - NATIVE_TRUE= - NATIVE_FALSE='#' -else - NATIVE_TRUE='#' - NATIVE_FALSE= -fi - - -if test "${multilib}" = "yes"; then - multilib_arg="--enable-multilib" -else - multilib_arg= -fi - -ac_config_files="$ac_config_files Makefile backtrace-supported.h" - - -# We need multilib support, but only if configuring for the target. -ac_config_commands="$ac_config_commands default" - - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. 
-( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - test "x$cache_file" != "x/dev/null" && - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - cat confcache >$cache_file - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. 
- ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - if test -n "$EXEEXT"; then - am__EXEEXT_TRUE= - am__EXEEXT_FALSE='#' -else - am__EXEEXT_TRUE='#' - am__EXEEXT_FALSE= -fi - -if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then - as_fn_error "conditional \"MAINTAINER_MODE\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi -if test -z "${NATIVE_TRUE}" && test -z "${NATIVE_FALSE}"; then - as_fn_error "conditional \"NATIVE\" was never defined. -Usually this means the macro was only invoked conditionally." "$LINENO" 5 -fi - -: ${CONFIG_STATUS=./config.status} -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. 
Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) 
-IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error ERROR [LINENO LOG_FD] -# --------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with status $?, using 1 if that was 0. -as_fn_error () -{ - as_status=$?; test $as_status -eq 0 && as_status=1 - if test "$3"; then - as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 - fi - $as_echo "$as_me: error: $1" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. 
-as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. 
-as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. 
## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by package-unused $as_me version-unused, which was -generated by GNU Autoconf 2.64. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" -config_commands="$ac_config_commands" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... 
- - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Configuration commands: -$config_commands - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_version="\\ -package-unused config.status version-unused -configured by $0, generated by GNU Autoconf 2.64, - with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" - -Copyright (C) 2009 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -INSTALL='$INSTALL' -MKDIR_P='$MKDIR_P' -AWK='$AWK' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. 
- -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. 
## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# -# INIT-COMMANDS -# - -srcdir="$srcdir" -host="$host" -target="$target" -with_multisubdir="$with_multisubdir" -with_multisrctop="$with_multisrctop" -with_target_subdir="$with_target_subdir" -ac_configure_args="${multilib_arg} ${ac_configure_args}" -multi_basedir="$multi_basedir" -CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} -CC="$CC" -CXX="$CXX" -GFORTRAN="$GFORTRAN" -GCJ="$GCJ" - - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -sed_quote_subst='$sed_quote_subst' -double_quote_subst='$double_quote_subst' -delay_variable_subst='$delay_variable_subst' -macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' -macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' -enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' -enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' -pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' -enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' -SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' -ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' -host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' -host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' -host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' -build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' -build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' -build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' -SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' -Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' -GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' -EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' -FGREP='`$ECHO "$FGREP" | $SED 
"$delay_single_quote_subst"`' -LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' -NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' -LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' -max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' -ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' -exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' -lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' -lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' -lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' -reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' -OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' -deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' -file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' -AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' -RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' -old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' -old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' -lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' -CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' -CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' -compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' -GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_cdecl='`$ECHO 
"$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' -MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' -lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' -need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' -NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' -LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' -OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' -libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' -shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' -extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' -archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' -enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' -export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' -whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED 
"$delay_single_quote_subst"`' -compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' -old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' -old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' -archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' -archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' -module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' -module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' -with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' -allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' -no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' -hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' -hardcode_libdir_flag_spec_ld='`$ECHO "$hardcode_libdir_flag_spec_ld" | $SED "$delay_single_quote_subst"`' -hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' -hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' -hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' -hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' -hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' -hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' -inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' -link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' -fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' -always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' 
-export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' -exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' -include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' -prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' -variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' -need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' -version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' -runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' -shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' -shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' -libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' -library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' -soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' -install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' -postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' -postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' -finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' -finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' -hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' -sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' -sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' -hardcode_action='`$ECHO "$hardcode_action" | $SED 
"$delay_single_quote_subst"`' -enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' -enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' -enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' -old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' -striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' - -LTCC='$LTCC' -LTCFLAGS='$LTCFLAGS' -compiler='$compiler_DEFAULT' - -# A function that is used when there is no print builtin or printf. -func_fallback_echo () -{ - eval 'cat <<_LTECHO_EOF -\$1 -_LTECHO_EOF' -} - -# Quote evaled strings. -for var in SHELL \ -ECHO \ -SED \ -GREP \ -EGREP \ -FGREP \ -LD \ -NM \ -LN_S \ -lt_SP2NL \ -lt_NL2SP \ -reload_flag \ -OBJDUMP \ -deplibs_check_method \ -file_magic_cmd \ -AR \ -AR_FLAGS \ -STRIP \ -RANLIB \ -CC \ -CFLAGS \ -compiler \ -lt_cv_sys_global_symbol_pipe \ -lt_cv_sys_global_symbol_to_cdecl \ -lt_cv_sys_global_symbol_to_c_name_address \ -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -lt_prog_compiler_no_builtin_flag \ -lt_prog_compiler_wl \ -lt_prog_compiler_pic \ -lt_prog_compiler_static \ -lt_cv_prog_compiler_c_o \ -need_locks \ -DSYMUTIL \ -NMEDIT \ -LIPO \ -OTOOL \ -OTOOL64 \ -shrext_cmds \ -export_dynamic_flag_spec \ -whole_archive_flag_spec \ -compiler_needs_object \ -with_gnu_ld \ -allow_undefined_flag \ -no_undefined_flag \ -hardcode_libdir_flag_spec \ -hardcode_libdir_flag_spec_ld \ -hardcode_libdir_separator \ -fix_srcfile_path \ -exclude_expsyms \ -include_expsyms \ -file_list_spec \ -variables_saved_for_relink \ -libname_spec \ -library_names_spec \ -soname_spec \ -install_override_mode \ -finish_eval \ -old_striplib \ -striplib; do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[\\\\\\\`\\"\\\$]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - 
esac -done - -# Double-quote double-evaled strings. -for var in reload_cmds \ -old_postinstall_cmds \ -old_postuninstall_cmds \ -old_archive_cmds \ -extract_expsyms_cmds \ -old_archive_from_new_cmds \ -old_archive_from_expsyms_cmds \ -archive_cmds \ -archive_expsym_cmds \ -module_cmds \ -module_expsym_cmds \ -export_symbols_cmds \ -prelink_cmds \ -postinstall_cmds \ -postuninstall_cmds \ -finish_cmds \ -sys_lib_search_path_spec \ -sys_lib_dlsearch_path_spec; do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[\\\\\\\`\\"\\\$]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -ac_aux_dir='$ac_aux_dir' -xsi_shell='$xsi_shell' -lt_shell_append='$lt_shell_append' - -# See if we are running on zsh, and set the options which allow our -# commands through without removal of \ escapes INIT. -if test -n "\${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST -fi - - - PACKAGE='$PACKAGE' - VERSION='$VERSION' - TIMESTAMP='$TIMESTAMP' - RM='$RM' - ofile='$ofile' - - - - -GCC="$GCC" -CC="$CC" -acx_cv_header_stdint="$acx_cv_header_stdint" -acx_cv_type_int8_t="$acx_cv_type_int8_t" -acx_cv_type_int16_t="$acx_cv_type_int16_t" -acx_cv_type_int32_t="$acx_cv_type_int32_t" -acx_cv_type_int64_t="$acx_cv_type_int64_t" -acx_cv_type_intptr_t="$acx_cv_type_intptr_t" -ac_cv_type_uintmax_t="$ac_cv_type_uintmax_t" -ac_cv_type_uintptr_t="$ac_cv_type_uintptr_t" -ac_cv_type_uint64_t="$ac_cv_type_uint64_t" -ac_cv_type_u_int64_t="$ac_cv_type_u_int64_t" -ac_cv_type_u_int32_t="$ac_cv_type_u_int32_t" -ac_cv_type_int_least32_t="$ac_cv_type_int_least32_t" -ac_cv_type_int_fast32_t="$ac_cv_type_int_fast32_t" -ac_cv_sizeof_void_p="$ac_cv_sizeof_void_p" - - -# Variables needed in config.status (file generation) which aren't already -# passed by autoconf. 
-SUBDIRS="$SUBDIRS" - - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; - "default-1") CONFIG_COMMANDS="$CONFIG_COMMANDS default-1" ;; - "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; - "gstdint.h") CONFIG_COMMANDS="$CONFIG_COMMANDS gstdint.h" ;; - "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; - "backtrace-supported.h") CONFIG_FILES="$CONFIG_FILES backtrace-supported.h" ;; - "default") CONFIG_COMMANDS="$CONFIG_COMMANDS default" ;; - - *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers - test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= - trap 'exit_status=$? - { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. 
- -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -n "$tmp" && test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5 - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\).*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\).*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ - || as_fn_error "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove $(srcdir), -# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). 
-if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=/{ -s/:*\$(srcdir):*/:/ -s/:*\${srcdir}:*/:/ -s/:*@srcdir@:*/:/ -s/^\([^=]*=[ ]*\):*/\1/ -s/:*$// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_t=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_t"; then - break - elif $ac_last_try; then - as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. 
- -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. 
- if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. 
- case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$tmp/stdin" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. 
- ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - - case $INSTALL in - [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; - *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; - esac - ac_MKDIR_P=$MKDIR_P - case $MKDIR_P in - [\\/$]* | ?:[\\/]* ) ;; - */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; - esac -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -s&@INSTALL@&$ac_INSTALL&;t t -s&@MKDIR_P@&$ac_MKDIR_P&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined." >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined." 
>&2;} - - rm -f "$tmp/stdin" - case $ac_file in - -) cat "$tmp/out" && rm -f "$tmp/out";; - *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; - esac \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" - } >"$tmp/config.h" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$tmp/config.h" "$ac_file" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error "could not create -" "$LINENO" 5 - fi -# Compute "$ac_file"'s index in $config_headers. -_am_arg="$ac_file" -_am_stamp_count=1 -for _am_header in $config_headers :; do - case $_am_header in - $_am_arg | $_am_arg:* ) - break ;; - * ) - _am_stamp_count=`expr $_am_stamp_count + 1` ;; - esac -done -echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || -$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$_am_arg" : 'X\(//\)[^/]' \| \ - X"$_am_arg" : 'X\(//\)$' \| \ - X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$_am_arg" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'`/stamp-h$_am_stamp_count - ;; - - :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 -$as_echo "$as_me: executing $ac_file commands" >&6;} - ;; - esac - - - case $ac_file$ac_mode in - "default-1":C) -# Only add multilib support code if we just rebuilt the top-level -# Makefile. -case " $CONFIG_FILES " in - *" Makefile "*) - ac_file=Makefile . 
${multi_basedir}/config-ml.in - ;; -esac ;; - "libtool":C) - - # See if we are running on zsh, and set the options which allow our - # commands through without removal of \ escapes. - if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - - cfgfile="${ofile}T" - trap "$RM \"$cfgfile\"; exit 1" 1 2 15 - $RM "$cfgfile" - - cat <<_LT_EOF >> "$cfgfile" -#! $SHELL - -# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. -# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: -# NOTE: Changes made to this file will be lost: look at ltmain.sh. -# -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, -# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -# Written by Gordon Matzigkeit, 1996 -# -# This file is part of GNU Libtool. -# -# GNU Libtool is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 2 of -# the License, or (at your option) any later version. -# -# As a special exception to the GNU General Public License, -# if you distribute this file as part of a program or library that -# is built using GNU Libtool, you may include this file under the -# same distribution terms that you use for the rest of that program. -# -# GNU Libtool is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Libtool; see the file COPYING. If not, a copy -# can be downloaded from http://www.gnu.org/licenses/gpl.html, or -# obtained by writing to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- - -# The names of the tagged configurations supported by this script. -available_tags="" - -# ### BEGIN LIBTOOL CONFIG - -# Which release of libtool.m4 was used? -macro_version=$macro_version -macro_revision=$macro_revision - -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared - -# Whether or not to build static libraries. -build_old_libs=$enable_static - -# What type of objects to build. -pic_mode=$pic_mode - -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install - -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL - -# An echo program that protects backslashes. -ECHO=$lt_ECHO - -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os - -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os - -# A sed program that does not truncate output. -SED=$lt_SED - -# Sed that helps us avoid accidentally triggering echo(1) options like -n. -Xsed="\$SED -e 1s/^X//" - -# A grep program that handles long lines. -GREP=$lt_GREP - -# An ERE matcher. -EGREP=$lt_EGREP - -# A literal string matcher. -FGREP=$lt_FGREP - -# A BSD- or MS-compatible name lister. -NM=$lt_NM - -# Whether we need soft or hard links. -LN_S=$lt_LN_S - -# What is the maximum length of a command? -max_cmd_len=$max_cmd_len - -# Object file suffix (normally "o"). -objext=$ac_objext - -# Executable file suffix (normally ""). -exeext=$exeext - -# whether the shell understands "unset". -lt_unset=$lt_unset - -# turn spaces into newlines. -SP2NL=$lt_lt_SP2NL - -# turn newlines into spaces. -NL2SP=$lt_lt_NL2SP - -# An object symbol dumper. -OBJDUMP=$lt_OBJDUMP - -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method - -# Command to use when deplibs_check_method == "file_magic". -file_magic_cmd=$lt_file_magic_cmd - -# The archiver. -AR=$lt_AR -AR_FLAGS=$lt_AR_FLAGS - -# A symbol stripping program. 
-STRIP=$lt_STRIP - -# Commands used to install an old-style archive. -RANLIB=$lt_RANLIB -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds - -# Whether to use a lock for old archive extraction. -lock_old_archive_extraction=$lock_old_archive_extraction - -# A C compiler. -LTCC=$lt_CC - -# LTCC compiler flags. -LTCFLAGS=$lt_CFLAGS - -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe - -# Transform the output of nm in a proper C declaration. -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl - -# Transform the output of nm in a C name address pair. -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - -# Transform the output of nm in a C name address pair when lib prefix is needed. -global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -# The name of the directory that contains temporary libtool files. -objdir=$objdir - -# Used to examine libraries when file_magic_cmd begins with "file". -MAGIC_CMD=$MAGIC_CMD - -# Must we lock files when doing compilation? -need_locks=$lt_need_locks - -# Tool to manipulate archived DWARF debug symbol files on Mac OS X. -DSYMUTIL=$lt_DSYMUTIL - -# Tool to change global to local symbols on Mac OS X. -NMEDIT=$lt_NMEDIT - -# Tool to manipulate fat objects and archives on Mac OS X. -LIPO=$lt_LIPO - -# ldd/readelf like tool for Mach-O binaries on Mac OS X. -OTOOL=$lt_OTOOL - -# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. -OTOOL64=$lt_OTOOL64 - -# Old archive suffix (normally "a"). -libext=$libext - -# Shared library suffix (normally ".so"). -shrext_cmds=$lt_shrext_cmds - -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds - -# Variables whose values should be saved in libtool wrapper scripts and -# restored at link time. 
-variables_saved_for_relink=$lt_variables_saved_for_relink - -# Do we need the "lib" prefix for modules? -need_lib_prefix=$need_lib_prefix - -# Do we need a version for libraries? -need_version=$need_version - -# Library versioning type. -version_type=$version_type - -# Shared library runtime path variable. -runpath_var=$runpath_var - -# Shared library path variable. -shlibpath_var=$shlibpath_var - -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath - -# Format of library name prefix. -libname_spec=$lt_libname_spec - -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME -library_names_spec=$lt_library_names_spec - -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec - -# Permission mode override for installation of shared libraries. -install_override_mode=$lt_install_override_mode - -# Command to use after installation of a shared archive. -postinstall_cmds=$lt_postinstall_cmds - -# Command to use after uninstallation of a shared archive. -postuninstall_cmds=$lt_postuninstall_cmds - -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds - -# As "finish_cmds", except a single script fragment to be evaled but -# not shown. -finish_eval=$lt_finish_eval - -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs - -# Compile-time system search path for libraries. -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec - -# Run-time system search path for libraries. -sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec - -# Whether dlopen is supported. -dlopen_support=$enable_dlopen - -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self - -# Whether dlopen of statically linked programs is supported. 
-dlopen_self_static=$enable_dlopen_self_static - -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib - - -# The linker used to build libraries. -LD=$lt_LD - -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds - -# Commands used to build an old-style archive. -old_archive_cmds=$lt_old_archive_cmds - -# A language specific compiler. -CC=$lt_compiler - -# Is the compiler the GNU compiler? -with_gcc=$GCC - -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl - -# Additional compiler flags for building library objects. -pic_flag=$lt_lt_prog_compiler_pic - -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static - -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o - -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$archive_cmds_need_lc - -# Whether or not to disallow shared libs when runtime libs are static. -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes - -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_export_dynamic_flag_spec - -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec - -# Whether the compiler copes with passing no objects directly. -compiler_needs_object=$lt_compiler_needs_object - -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_old_archive_from_new_cmds - -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds - -# Commands used to build a shared archive. 
-archive_cmds=$lt_archive_cmds -archive_expsym_cmds=$lt_archive_expsym_cmds - -# Commands used to build a loadable module if different from building -# a shared archive. -module_cmds=$lt_module_cmds -module_expsym_cmds=$lt_module_expsym_cmds - -# Whether we are building with GNU ld or not. -with_gnu_ld=$lt_with_gnu_ld - -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag - -# Flag that enforces no undefined symbols. -no_undefined_flag=$lt_no_undefined_flag - -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec - -# If ld is used when linking, flag to hardcode \$libdir into a binary -# during linking. This must work even if \$libdir does not exist. -hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld - -# Whether we need a single "-rpath" flag with a separated argument. -hardcode_libdir_separator=$lt_hardcode_libdir_separator - -# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes -# DIR into the resulting binary. -hardcode_direct=$hardcode_direct - -# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes -# DIR into the resulting binary and the resulting library dependency is -# "absolute",i.e impossible to change by setting \${shlibpath_var} if the -# library is relocated. -hardcode_direct_absolute=$hardcode_direct_absolute - -# Set to "yes" if using the -LDIR flag during linking hardcodes DIR -# into the resulting binary. -hardcode_minus_L=$hardcode_minus_L - -# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR -# into the resulting binary. -hardcode_shlibpath_var=$hardcode_shlibpath_var - -# Set to "yes" if building a shared library automatically hardcodes DIR -# into the library and all subsequent libraries and executables linked -# against it. 
-hardcode_automatic=$hardcode_automatic - -# Set to yes if linker adds runtime paths of dependent libraries -# to runtime path list. -inherit_rpath=$inherit_rpath - -# Whether libtool must link a program against all its dependency libraries. -link_all_deplibs=$link_all_deplibs - -# Fix the shell variable \$srcfile for the compiler. -fix_srcfile_path=$lt_fix_srcfile_path - -# Set to "yes" if exported symbols are required. -always_export_symbols=$always_export_symbols - -# The commands to list exported symbols. -export_symbols_cmds=$lt_export_symbols_cmds - -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_exclude_expsyms - -# Symbols that must always be exported. -include_expsyms=$lt_include_expsyms - -# Commands necessary for linking programs (against libraries) with templates. -prelink_cmds=$lt_prelink_cmds - -# Specify filename containing input files. -file_list_spec=$lt_file_list_spec - -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action - -# ### END LIBTOOL CONFIG - -_LT_EOF - - case $host_os in - aix3*) - cat <<\_LT_EOF >> "$cfgfile" -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -_LT_EOF - ;; - esac - - -ltmain="$ac_aux_dir/ltmain.sh" - - - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? - sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ - || (rm -f "$cfgfile"; exit 1) - - case $xsi_shell in - yes) - cat << \_LT_EOF >> "$cfgfile" - -# func_dirname file append nondir_replacement -# Compute the dirname of FILE. 
If nonempty, add APPEND to the result, -# otherwise set result to NONDIR_REPLACEMENT. -func_dirname () -{ - case ${1} in - */*) func_dirname_result="${1%/*}${2}" ;; - * ) func_dirname_result="${3}" ;; - esac -} - -# func_basename file -func_basename () -{ - func_basename_result="${1##*/}" -} - -# func_dirname_and_basename file append nondir_replacement -# perform func_basename and func_dirname in a single function -# call: -# dirname: Compute the dirname of FILE. If nonempty, -# add APPEND to the result, otherwise set result -# to NONDIR_REPLACEMENT. -# value returned in "$func_dirname_result" -# basename: Compute filename of FILE. -# value returned in "$func_basename_result" -# Implementation must be kept synchronized with func_dirname -# and func_basename. For efficiency, we do not delegate to -# those functions but instead duplicate the functionality here. -func_dirname_and_basename () -{ - case ${1} in - */*) func_dirname_result="${1%/*}${2}" ;; - * ) func_dirname_result="${3}" ;; - esac - func_basename_result="${1##*/}" -} - -# func_stripname prefix suffix name -# strip PREFIX and SUFFIX off of NAME. -# PREFIX and SUFFIX must not contain globbing or regex special -# characters, hashes, percent signs, but SUFFIX may contain a leading -# dot (in which case that matches only a dot). -func_stripname () -{ - # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are - # positional parameters, so assign one to ordinary parameter first. - func_stripname_result=${3} - func_stripname_result=${func_stripname_result#"${1}"} - func_stripname_result=${func_stripname_result%"${2}"} -} - -# func_opt_split -func_opt_split () -{ - func_opt_split_opt=${1%%=*} - func_opt_split_arg=${1#*=} -} - -# func_lo2o object -func_lo2o () -{ - case ${1} in - *.lo) func_lo2o_result=${1%.lo}.${objext} ;; - *) func_lo2o_result=${1} ;; - esac -} - -# func_xform libobj-or-source -func_xform () -{ - func_xform_result=${1%.*}.lo -} - -# func_arith arithmetic-term... 
-func_arith () -{ - func_arith_result=$(( $* )) -} - -# func_len string -# STRING may not start with a hyphen. -func_len () -{ - func_len_result=${#1} -} - -_LT_EOF - ;; - *) # Bourne compatible functions. - cat << \_LT_EOF >> "$cfgfile" - -# func_dirname file append nondir_replacement -# Compute the dirname of FILE. If nonempty, add APPEND to the result, -# otherwise set result to NONDIR_REPLACEMENT. -func_dirname () -{ - # Extract subdirectory from the argument. - func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` - if test "X$func_dirname_result" = "X${1}"; then - func_dirname_result="${3}" - else - func_dirname_result="$func_dirname_result${2}" - fi -} - -# func_basename file -func_basename () -{ - func_basename_result=`$ECHO "${1}" | $SED "$basename"` -} - - -# func_stripname prefix suffix name -# strip PREFIX and SUFFIX off of NAME. -# PREFIX and SUFFIX must not contain globbing or regex special -# characters, hashes, percent signs, but SUFFIX may contain a leading -# dot (in which case that matches only a dot). -# func_strip_suffix prefix name -func_stripname () -{ - case ${2} in - .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; - *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; - esac -} - -# sed scripts: -my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' -my_sed_long_arg='1s/^-[^=]*=//' - -# func_opt_split -func_opt_split () -{ - func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` - func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` -} - -# func_lo2o object -func_lo2o () -{ - func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` -} - -# func_xform libobj-or-source -func_xform () -{ - func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` -} - -# func_arith arithmetic-term... -func_arith () -{ - func_arith_result=`expr "$@"` -} - -# func_len string -# STRING may not start with a hyphen. 
-func_len () -{ - func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` -} - -_LT_EOF -esac - -case $lt_shell_append in - yes) - cat << \_LT_EOF >> "$cfgfile" - -# func_append var value -# Append VALUE to the end of shell variable VAR. -func_append () -{ - eval "$1+=\$2" -} -_LT_EOF - ;; - *) - cat << \_LT_EOF >> "$cfgfile" - -# func_append var value -# Append VALUE to the end of shell variable VAR. -func_append () -{ - eval "$1=\$$1\$2" -} - -_LT_EOF - ;; - esac - - - sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ - || (rm -f "$cfgfile"; exit 1) - - mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - - ;; - "gstdint.h":C) -if test "$GCC" = yes; then - echo "/* generated for " `$CC --version | sed 1q` "*/" > tmp-stdint.h -else - echo "/* generated for $CC */" > tmp-stdint.h -fi - -sed 's/^ *//' >> tmp-stdint.h < -EOF - -if test "$acx_cv_header_stdint" != stdint.h; then - echo "#include " >> tmp-stdint.h -fi -if test "$acx_cv_header_stdint" != stddef.h; then - echo "#include <$acx_cv_header_stdint>" >> tmp-stdint.h -fi - -sed 's/^ *//' >> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <= 199901L - #ifndef _INT64_T - #define _INT64_T - #ifndef __int64_t_defined - #ifndef int64_t - typedef long long int64_t; - #endif - #endif - #endif - #ifndef _UINT64_T - #define _UINT64_T - #ifndef uint64_t - typedef unsigned long long uint64_t; - #endif - #endif - - #elif defined __GNUC__ && defined (__STDC__) && __STDC__-0 - /* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and - does not implement __extension__. But that compiler doesn't define - __GNUC_MINOR__. 
*/ - # if __GNUC__ < 2 || (__NeXT__ && !__GNUC_MINOR__) - # define __extension__ - # endif - - # ifndef _INT64_T - # define _INT64_T - # ifndef int64_t - __extension__ typedef long long int64_t; - # endif - # endif - # ifndef _UINT64_T - # define _UINT64_T - # ifndef uint64_t - __extension__ typedef unsigned long long uint64_t; - # endif - # endif - - #elif !defined __STRICT_ANSI__ - # if defined _MSC_VER || defined __WATCOMC__ || defined __BORLANDC__ - - # ifndef _INT64_T - # define _INT64_T - # ifndef int64_t - typedef __int64 int64_t; - # endif - # endif - # ifndef _UINT64_T - # define _UINT64_T - # ifndef uint64_t - typedef unsigned __int64 uint64_t; - # endif - # endif - # endif /* compiler */ - - #endif /* ANSI version */ -EOF -fi - -# ------------- done int64_t types, emit intptr types ------------ -if test "$ac_cv_type_uintptr_t" != yes; then - sed 's/^ *//' >> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h <> tmp-stdint.h < - # srcdir/Makefile.am -> srcdir/{src,libsupc++,...}/Makefile.am, manually - # append it here. Only modify Makefiles that have just been created. - # - # Also, get rid of this simulated-VPATH thing that automake does. - cat > vpsed << \_EOF - s!`test -f '$<' || echo '$(srcdir)/'`!! -_EOF - for i in $SUBDIRS; do - case $CONFIG_FILES in - *${i}/Makefile*) - #echo "Adding MULTISUBDIR to $i/Makefile" - sed -f vpsed $i/Makefile > tmp - grep '^MULTISUBDIR =' Makefile >> tmp - mv tmp $i/Makefile - ;; - esac - done - rm vpsed - fi - fi - ;; - - esac -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. 
So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit $? -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - diff --git a/src/libbacktrace/configure.ac b/src/libbacktrace/configure.ac deleted file mode 100644 index ea1b27d807e1..000000000000 --- a/src/libbacktrace/configure.ac +++ /dev/null @@ -1,418 +0,0 @@ -# configure.ac -- Backtrace configure script. -# Copyright (C) 2012-2016 Free Software Foundation, Inc. - -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: - -# (1) Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. - -# (2) Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. - -# (3) The name of the author may not be used to -# endorse or promote products derived from this software without -# specific prior written permission. 
- -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -AC_PREREQ(2.64) -AC_INIT(package-unused, version-unused,, libbacktrace) -AC_CONFIG_SRCDIR(backtrace.h) -AC_CONFIG_HEADER(config.h) - -if test -n "${with_target_subdir}"; then - AM_ENABLE_MULTILIB(, ..) -fi - -AC_CANONICAL_SYSTEM -target_alias=${target_alias-$host_alias} - -AC_USE_SYSTEM_EXTENSIONS - -libtool_VERSION=1:0:0 -AC_SUBST(libtool_VERSION) - -# 1.11.1: Require that version of automake. -# foreign: Don't require README, INSTALL, NEWS, etc. -# no-define: Don't define PACKAGE and VERSION. -# no-dependencies: Don't generate automatic dependencies. -# (because it breaks when using bootstrap-lean, since some of the -# headers are gone at "make install" time). -# -Wall: Issue all automake warnings. -# -Wno-portability: Don't warn about constructs supported by GNU make. -# (because GCC requires GNU make anyhow). -AM_INIT_AUTOMAKE([1.11.1 foreign no-dist no-define no-dependencies -Wall -Wno-portability]) - -AM_MAINTAINER_MODE - -AC_ARG_WITH(target-subdir, -[ --with-target-subdir=SUBDIR Configuring in a subdirectory for target]) - -# We must force CC to /not/ be precious variables; otherwise -# the wrong, non-multilib-adjusted value will be used in multilibs. -# As a side effect, we have to subst CFLAGS ourselves. 
-m4_rename([_AC_ARG_VAR_PRECIOUS],[backtrace_PRECIOUS]) -m4_define([_AC_ARG_VAR_PRECIOUS],[]) -AC_PROG_CC -m4_rename_force([backtrace_PRECIOUS],[_AC_ARG_VAR_PRECIOUS]) - -AC_SUBST(CFLAGS) - -AC_PROG_RANLIB - -AC_PROG_AWK -case "$AWK" in -"") AC_MSG_ERROR([can't build without awk]) ;; -esac - -LT_INIT -AM_PROG_LIBTOOL - -backtrace_supported=yes - -if test -n "${with_target_subdir}"; then - # We are compiling a GCC library. We can assume that the unwind - # library exists. - BACKTRACE_FILE="backtrace.lo simple.lo" -else - AC_CHECK_HEADER([unwind.h], - [AC_CHECK_FUNC([_Unwind_Backtrace], - [BACKTRACE_FILE="backtrace.lo simple.lo"], - [BACKTRACE_FILE="nounwind.lo" - backtrace_supported=no])], - [BACKTRACE_FILE="nounwind.lo" - backtrace_supported=no]) -fi -AC_SUBST(BACKTRACE_FILE) - -EXTRA_FLAGS= -if test -n "${with_target_subdir}"; then - EXTRA_FLAGS="-funwind-tables -frandom-seed=\$@" -else - AC_CACHE_CHECK([for -funwind-tables option], - [libbacktrace_cv_c_unwind_tables], - [CFLAGS_hold="$CFLAGS" - CFLAGS="$CFLAGS -funwind-tables" - AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM([static int f() { return 0; }], [return f();])], - [libbacktrace_cv_c_unwind_tables=yes], - [libbacktrace_cv_c_unwind_tables=no]) - CFLAGS="$CFLAGS_hold"]) - if test "$libbacktrace_cv_c_unwind_tables" = "yes"; then - EXTRA_FLAGS=-funwind-tables - fi - AC_CACHE_CHECK([for -frandom-seed=string option], - [libbacktrace_cv_c_random_seed_string], - [CFLAGS_hold="$CFLAGS" - CFLAGS="$CFLAGS -frandom-seed=conftest.lo" - AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM([], [return 0;])], - [libbacktrace_cv_c_random_seed_string=yes], - [libbacktrace_cv_c_random_seed_string=no]) - CFLAGS="$CFLAGS_hold"]) - if test "$libbacktrace_cv_c_random_seed_string" = "yes"; then - EXTRA_FLAGS="$EXTRA_FLAGS -frandom-seed=\$@" - fi -fi -AC_SUBST(EXTRA_FLAGS) - -ACX_PROG_CC_WARNING_OPTS([-W -Wall -Wwrite-strings -Wstrict-prototypes \ - -Wmissing-prototypes -Wold-style-definition \ - -Wmissing-format-attribute -Wcast-qual], - 
[WARN_FLAGS]) - -if test -n "${with_target_subdir}"; then - WARN_FLAGS="$WARN_FLAGS -Werror" -fi - -AC_SUBST(WARN_FLAGS) - -if test -n "${with_target_subdir}"; then - GCC_CHECK_UNWIND_GETIPINFO -else - ac_save_CFFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -Werror-implicit-function-declaration" - AC_MSG_CHECKING([for _Unwind_GetIPInfo]) - AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [#include "unwind.h" - struct _Unwind_Context *context; - int ip_before_insn = 0;], - [return _Unwind_GetIPInfo (context, &ip_before_insn);])], - [have_unwind_getipinfo=yes], [have_unwind_getipinfo=no]) - CFLAGS="$ac_save_CFLAGS" - AC_MSG_RESULT([$have_unwind_getipinfo]) - if test "$have_unwind_getipinfo" = "yes"; then - AC_DEFINE(HAVE_GETIPINFO, 1, [Define if _Unwind_GetIPInfo is available.]) - fi -fi - -# Enable --enable-host-shared. -AC_ARG_ENABLE(host-shared, -[AS_HELP_STRING([--enable-host-shared], - [build host code as shared libraries])], -[PIC_FLAG=-fPIC], [PIC_FLAG=]) -AC_SUBST(PIC_FLAG) - -# Test for __sync support. -AC_CACHE_CHECK([__sync extensions], -[libbacktrace_cv_sys_sync], -[if test -n "${with_target_subdir}"; then - case "${host}" in - hppa*-*-hpux*) libbacktrace_cv_sys_sync=no ;; - *) libbacktrace_cv_sys_sync=yes ;; - esac - else - AC_LINK_IFELSE( - [AC_LANG_PROGRAM([int i;], - [__sync_bool_compare_and_swap (&i, i, i); - __sync_lock_test_and_set (&i, 1); - __sync_lock_release (&i);])], - [libbacktrace_cv_sys_sync=yes], - [libbacktrace_cv_sys_sync=no]) - fi]) -BACKTRACE_SUPPORTS_THREADS=0 -if test "$libbacktrace_cv_sys_sync" = "yes"; then - BACKTRACE_SUPPORTS_THREADS=1 - AC_DEFINE([HAVE_SYNC_FUNCTIONS], 1, - [Define to 1 if you have the __sync functions]) -fi -AC_SUBST(BACKTRACE_SUPPORTS_THREADS) - -# Test for __atomic support. 
-AC_CACHE_CHECK([__atomic extensions], -[libbacktrace_cv_sys_atomic], -[if test -n "${with_target_subdir}"; then - libbacktrace_cv_sys_atomic=yes - else - AC_LINK_IFELSE( - [AC_LANG_PROGRAM([int i;], - [__atomic_load_n (&i, __ATOMIC_ACQUIRE); - __atomic_store_n (&i, 1, __ATOMIC_RELEASE);])], - [libbacktrace_cv_sys_atomic=yes], - [libbacktrace_cv_sys_atomic=no]) - fi]) -if test "$libbacktrace_cv_sys_atomic" = "yes"; then - AC_DEFINE([HAVE_ATOMIC_FUNCTIONS], 1, - [Define to 1 if you have the __atomic functions]) -fi - -# The library needs to be able to read the executable itself. Compile -# a file to determine the executable format. The awk script -# filetype.awk prints out the file type. -AC_CACHE_CHECK([output filetype], -[libbacktrace_cv_sys_filetype], -[filetype= -AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM([int i;], [int j;])], - [filetype=`${AWK} -f $srcdir/filetype.awk conftest.$ac_objext`], - [AC_MSG_FAILURE([compiler failed])]) -libbacktrace_cv_sys_filetype=$filetype]) - -# Match the file type to decide what files to compile. -FORMAT_FILE= -backtrace_supports_data=yes -case "$libbacktrace_cv_sys_filetype" in -elf*) FORMAT_FILE="elf.lo" ;; -pecoff) FORMAT_FILE="pecoff.lo" - backtrace_supports_data=no - ;; -macho*) FORMAT_FILE="macho.lo" - backtrace_supports_data=no - ;; -*) AC_MSG_WARN([could not determine output file type]) - FORMAT_FILE="unknown.lo" - backtrace_supported=no - ;; -esac -AC_SUBST(FORMAT_FILE) - -# ELF defines. 
-elfsize= -case "$libbacktrace_cv_sys_filetype" in -elf32) elfsize=32 ;; -elf64) elfsize=64 ;; -*) elfsize=unused -esac -AC_DEFINE_UNQUOTED([BACKTRACE_ELF_SIZE], [$elfsize], [ELF size: 32 or 64]) - -BACKTRACE_SUPPORTED=0 -if test "$backtrace_supported" = "yes"; then - BACKTRACE_SUPPORTED=1 -fi -AC_SUBST(BACKTRACE_SUPPORTED) - -BACKTRACE_SUPPORTS_DATA=0 -if test "$backtrace_supports_data" = "yes"; then - BACKTRACE_SUPPORTS_DATA=1 -fi -AC_SUBST(BACKTRACE_SUPPORTS_DATA) - -GCC_HEADER_STDINT(gstdint.h) - -AC_CHECK_HEADERS(sys/mman.h) -if test "$ac_cv_header_sys_mman_h" = "no"; then - have_mmap=no -else - if test -n "${with_target_subdir}"; then - # When built as a GCC target library, we can't do a link test. We - # simply assume that if we have mman.h, we have mmap. - have_mmap=yes - case "${host}" in - spu-*-*|*-*-msdosdjgpp) - # The SPU does not have mmap, but it has a sys/mman.h header file - # containing "mmap_eaddr" and the mmap flags, confusing the test. - # DJGPP also has sys/man.h, but no mmap - have_mmap=no ;; - esac - else - AC_CHECK_FUNC(mmap, [have_mmap=yes], [have_mmap=no]) - fi -fi - -case "${host_os}" in -darwin*) - have_mmap=no ;; -esac - -if test "$have_mmap" = "no"; then - VIEW_FILE=read.lo - ALLOC_FILE=alloc.lo -else - VIEW_FILE=mmapio.lo - AC_PREPROC_IFELSE([ -#include -#if !defined(MAP_ANONYMOUS) && !defined(MAP_ANON) - #error no MAP_ANONYMOUS -#endif -], [ALLOC_FILE=mmap.lo], [ALLOC_FILE=alloc.lo]) -fi -AC_SUBST(VIEW_FILE) -AC_SUBST(ALLOC_FILE) - -BACKTRACE_USES_MALLOC=0 -if test "$ALLOC_FILE" = "alloc.lo"; then - BACKTRACE_USES_MALLOC=1 -fi -AC_SUBST(BACKTRACE_USES_MALLOC) - -# Check for dl_iterate_phdr. -AC_CHECK_HEADERS(link.h) -if test "$ac_cv_header_link_h" = "no"; then - have_dl_iterate_phdr=no -else - if test -n "${with_target_subdir}"; then - # When built as a GCC target library, we can't do a link test. 
- AC_EGREP_HEADER([dl_iterate_phdr], [link.h], [have_dl_iterate_phdr=yes], - [have_dl_iterate_phdr=no]) - case "${host}" in - *-*-solaris2.10*) - # Avoid dl_iterate_phdr on Solaris 10, where it is in the - # header file but is only in -ldl. - have_dl_iterate_phdr=no ;; - esac - else - AC_CHECK_FUNC([dl_iterate_phdr], [have_dl_iterate_phdr=yes], - [have_dl_iterate_phdr=no]) - fi -fi -if test "$have_dl_iterate_phdr" = "yes"; then - AC_DEFINE(HAVE_DL_ITERATE_PHDR, 1, [Define if dl_iterate_phdr is available.]) -fi - -# Check for the fcntl function. -if test -n "${with_target_subdir}"; then - case "${host}" in - *-*-mingw*) have_fcntl=no ;; - spu-*-*) have_fcntl=no ;; - *) have_fcntl=yes ;; - esac -else - AC_CHECK_FUNC(fcntl, [have_fcntl=yes], [have_fcntl=no]) -fi -if test "$have_fcntl" = "yes"; then - AC_DEFINE([HAVE_FCNTL], 1, - [Define to 1 if you have the fcntl function]) -fi - -AC_CHECK_DECLS(strnlen) - -# Check for getexecname function. -if test -n "${with_target_subdir}"; then - case "${host}" in - *-*-solaris2*) have_getexecname=yes ;; - *) have_getexecname=no ;; - esac -else - AC_CHECK_FUNC(getexecname, [have_getexecname=yes], [have_getexecname=no]) -fi -if test "$have_getexecname" = "yes"; then - AC_DEFINE(HAVE_GETEXECNAME, 1, [Define if getexecname is available.]) -fi - -AC_CACHE_CHECK([whether tests can run], - [libbacktrace_cv_sys_native], - [AC_RUN_IFELSE([AC_LANG_PROGRAM([], [return 0;])], - [libbacktrace_cv_sys_native=yes], - [libbacktrace_cv_sys_native=no], - [libbacktrace_cv_sys_native=no])]) -AM_CONDITIONAL(NATIVE, test "$libbacktrace_cv_sys_native" = "yes") - -if test "${multilib}" = "yes"; then - multilib_arg="--enable-multilib" -else - multilib_arg= -fi - -AC_CONFIG_FILES(Makefile backtrace-supported.h) - -# We need multilib support, but only if configuring for the target. 
-AC_CONFIG_COMMANDS([default], -[if test -n "$CONFIG_FILES"; then - if test -n "${with_target_subdir}"; then - # Multilibs need MULTISUBDIR defined correctly in certain makefiles so - # that multilib installs will end up installed in the correct place. - # The testsuite needs it for multilib-aware ABI baseline files. - # To work around this not being passed down from config-ml.in -> - # srcdir/Makefile.am -> srcdir/{src,libsupc++,...}/Makefile.am, manually - # append it here. Only modify Makefiles that have just been created. - # - # Also, get rid of this simulated-VPATH thing that automake does. - cat > vpsed << \_EOF - s!`test -f '$<' || echo '$(srcdir)/'`!! -_EOF - for i in $SUBDIRS; do - case $CONFIG_FILES in - *${i}/Makefile*) - #echo "Adding MULTISUBDIR to $i/Makefile" - sed -f vpsed $i/Makefile > tmp - grep '^MULTISUBDIR =' Makefile >> tmp - mv tmp $i/Makefile - ;; - esac - done - rm vpsed - fi - fi -], -[ -# Variables needed in config.status (file generation) which aren't already -# passed by autoconf. -SUBDIRS="$SUBDIRS" -]) - -AC_OUTPUT diff --git a/src/libbacktrace/dwarf.c b/src/libbacktrace/dwarf.c deleted file mode 100644 index 55b8d7dc2a56..000000000000 --- a/src/libbacktrace/dwarf.c +++ /dev/null @@ -1,3038 +0,0 @@ -/* dwarf.c -- Get file/line information from DWARF for backtraces. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include -#include - -#include "dwarf2.h" -#include "filenames.h" - -#include "backtrace.h" -#include "internal.h" - -#if !defined(HAVE_DECL_STRNLEN) || !HAVE_DECL_STRNLEN - -/* If strnlen is not declared, provide our own version. */ - -static size_t -xstrnlen (const char *s, size_t maxlen) -{ - size_t i; - - for (i = 0; i < maxlen; ++i) - if (s[i] == '\0') - break; - return i; -} - -#define strnlen xstrnlen - -#endif - -/* A buffer to read DWARF info. */ - -struct dwarf_buf -{ - /* Buffer name for error messages. */ - const char *name; - /* Start of the buffer. */ - const unsigned char *start; - /* Next byte to read. */ - const unsigned char *buf; - /* The number of bytes remaining. */ - size_t left; - /* Whether the data is big-endian. */ - int is_bigendian; - /* Error callback routine. */ - backtrace_error_callback error_callback; - /* Data for error_callback. */ - void *data; - /* Non-zero if we've reported an underflow error. */ - int reported_underflow; -}; - -/* A single attribute in a DWARF abbreviation. */ - -struct attr -{ - /* The attribute name. 
*/ - enum dwarf_attribute name; - /* The attribute form. */ - enum dwarf_form form; -}; - -/* A single DWARF abbreviation. */ - -struct abbrev -{ - /* The abbrev code--the number used to refer to the abbrev. */ - uint64_t code; - /* The entry tag. */ - enum dwarf_tag tag; - /* Non-zero if this abbrev has child entries. */ - int has_children; - /* The number of attributes. */ - size_t num_attrs; - /* The attributes. */ - struct attr *attrs; -}; - -/* The DWARF abbreviations for a compilation unit. This structure - only exists while reading the compilation unit. Most DWARF readers - seem to a hash table to map abbrev ID's to abbrev entries. - However, we primarily care about GCC, and GCC simply issues ID's in - numerical order starting at 1. So we simply keep a sorted vector, - and try to just look up the code. */ - -struct abbrevs -{ - /* The number of abbrevs in the vector. */ - size_t num_abbrevs; - /* The abbrevs, sorted by the code field. */ - struct abbrev *abbrevs; -}; - -/* The different kinds of attribute values. */ - -enum attr_val_encoding -{ - /* An address. */ - ATTR_VAL_ADDRESS, - /* A unsigned integer. */ - ATTR_VAL_UINT, - /* A sigd integer. */ - ATTR_VAL_SINT, - /* A string. */ - ATTR_VAL_STRING, - /* An offset to other data in the containing unit. */ - ATTR_VAL_REF_UNIT, - /* An offset to other data within the .dwarf_info section. */ - ATTR_VAL_REF_INFO, - /* An offset to data in some other section. */ - ATTR_VAL_REF_SECTION, - /* A type signature. */ - ATTR_VAL_REF_TYPE, - /* A block of data (not represented). */ - ATTR_VAL_BLOCK, - /* An expression (not represented). */ - ATTR_VAL_EXPR, -}; - -/* An attribute value. */ - -struct attr_val -{ - /* How the value is stored in the field u. */ - enum attr_val_encoding encoding; - union - { - /* ATTR_VAL_ADDRESS, ATTR_VAL_UINT, ATTR_VAL_REF*. */ - uint64_t uint; - /* ATTR_VAL_SINT. */ - int64_t sint; - /* ATTR_VAL_STRING. */ - const char *string; - /* ATTR_VAL_BLOCK not stored. 
*/ - } u; -}; - -/* The line number program header. */ - -struct line_header -{ - /* The version of the line number information. */ - int version; - /* The minimum instruction length. */ - unsigned int min_insn_len; - /* The maximum number of ops per instruction. */ - unsigned int max_ops_per_insn; - /* The line base for special opcodes. */ - int line_base; - /* The line range for special opcodes. */ - unsigned int line_range; - /* The opcode base--the first special opcode. */ - unsigned int opcode_base; - /* Opcode lengths, indexed by opcode - 1. */ - const unsigned char *opcode_lengths; - /* The number of directory entries. */ - size_t dirs_count; - /* The directory entries. */ - const char **dirs; - /* The number of filenames. */ - size_t filenames_count; - /* The filenames. */ - const char **filenames; -}; - -/* Map a single PC value to a file/line. We will keep a vector of - these sorted by PC value. Each file/line will be correct from the - PC up to the PC of the next entry if there is one. We allocate one - extra entry at the end so that we can use bsearch. */ - -struct line -{ - /* PC. */ - uintptr_t pc; - /* File name. Many entries in the array are expected to point to - the same file name. */ - const char *filename; - /* Line number. */ - int lineno; - /* Index of the object in the original array read from the DWARF - section, before it has been sorted. The index makes it possible - to use Quicksort and maintain stability. */ - int idx; -}; - -/* A growable vector of line number information. This is used while - reading the line numbers. */ - -struct line_vector -{ - /* Memory. This is an array of struct line. */ - struct backtrace_vector vec; - /* Number of valid mappings. */ - size_t count; -}; - -/* A function described in the debug info. */ - -struct function -{ - /* The name of the function. */ - const char *name; - /* If this is an inlined function, the filename of the call - site. 
*/ - const char *caller_filename; - /* If this is an inlined function, the line number of the call - site. */ - int caller_lineno; - /* Map PC ranges to inlined functions. */ - struct function_addrs *function_addrs; - size_t function_addrs_count; -}; - -/* An address range for a function. This maps a PC value to a - specific function. */ - -struct function_addrs -{ - /* Range is LOW <= PC < HIGH. */ - uint64_t low; - uint64_t high; - /* Function for this address range. */ - struct function *function; -}; - -/* A growable vector of function address ranges. */ - -struct function_vector -{ - /* Memory. This is an array of struct function_addrs. */ - struct backtrace_vector vec; - /* Number of address ranges present. */ - size_t count; -}; - -/* A DWARF compilation unit. This only holds the information we need - to map a PC to a file and line. */ - -struct unit -{ - /* The first entry for this compilation unit. */ - const unsigned char *unit_data; - /* The length of the data for this compilation unit. */ - size_t unit_data_len; - /* The offset of UNIT_DATA from the start of the information for - this compilation unit. */ - size_t unit_data_offset; - /* DWARF version. */ - int version; - /* Whether unit is DWARF64. */ - int is_dwarf64; - /* Address size. */ - int addrsize; - /* Offset into line number information. */ - off_t lineoff; - /* Primary source file. */ - const char *filename; - /* Compilation command working directory. */ - const char *comp_dir; - /* Absolute file name, only set if needed. */ - const char *abs_filename; - /* The abbreviations for this unit. */ - struct abbrevs abbrevs; - - /* The fields above this point are read in during initialization and - may be accessed freely. The fields below this point are read in - as needed, and therefore require care, as different threads may - try to initialize them simultaneously. */ - - /* PC to line number mapping. This is NULL if the values have not - been read. 
This is (struct line *) -1 if there was an error - reading the values. */ - struct line *lines; - /* Number of entries in lines. */ - size_t lines_count; - /* PC ranges to function. */ - struct function_addrs *function_addrs; - size_t function_addrs_count; -}; - -/* An address range for a compilation unit. This maps a PC value to a - specific compilation unit. Note that we invert the representation - in DWARF: instead of listing the units and attaching a list of - ranges, we list the ranges and have each one point to the unit. - This lets us do a binary search to find the unit. */ - -struct unit_addrs -{ - /* Range is LOW <= PC < HIGH. */ - uint64_t low; - uint64_t high; - /* Compilation unit for this address range. */ - struct unit *u; -}; - -/* A growable vector of compilation unit address ranges. */ - -struct unit_addrs_vector -{ - /* Memory. This is an array of struct unit_addrs. */ - struct backtrace_vector vec; - /* Number of address ranges present. */ - size_t count; -}; - -/* The information we need to map a PC to a file and line. */ - -struct dwarf_data -{ - /* The data for the next file we know about. */ - struct dwarf_data *next; - /* The base address for this file. */ - uintptr_t base_address; - /* A sorted list of address ranges. */ - struct unit_addrs *addrs; - /* Number of address ranges in list. */ - size_t addrs_count; - /* The unparsed .debug_info section. */ - const unsigned char *dwarf_info; - size_t dwarf_info_size; - /* The unparsed .debug_line section. */ - const unsigned char *dwarf_line; - size_t dwarf_line_size; - /* The unparsed .debug_ranges section. */ - const unsigned char *dwarf_ranges; - size_t dwarf_ranges_size; - /* The unparsed .debug_str section. */ - const unsigned char *dwarf_str; - size_t dwarf_str_size; - /* Whether the data is big-endian or not. */ - int is_bigendian; - /* A vector used for function addresses. We keep this here so that - we can grow the vector as we read more functions. 
*/ - struct function_vector fvec; -}; - -/* Report an error for a DWARF buffer. */ - -static void -dwarf_buf_error (struct dwarf_buf *buf, const char *msg) -{ - char b[200]; - - snprintf (b, sizeof b, "%s in %s at %d", - msg, buf->name, (int) (buf->buf - buf->start)); - buf->error_callback (buf->data, b, 0); -} - -/* Require at least COUNT bytes in BUF. Return 1 if all is well, 0 on - error. */ - -static int -require (struct dwarf_buf *buf, size_t count) -{ - if (buf->left >= count) - return 1; - - if (!buf->reported_underflow) - { - dwarf_buf_error (buf, "DWARF underflow"); - buf->reported_underflow = 1; - } - - return 0; -} - -/* Advance COUNT bytes in BUF. Return 1 if all is well, 0 on - error. */ - -static int -advance (struct dwarf_buf *buf, size_t count) -{ - if (!require (buf, count)) - return 0; - buf->buf += count; - buf->left -= count; - return 1; -} - -/* Read one byte from BUF and advance 1 byte. */ - -static unsigned char -read_byte (struct dwarf_buf *buf) -{ - const unsigned char *p = buf->buf; - - if (!advance (buf, 1)) - return 0; - return p[0]; -} - -/* Read a signed char from BUF and advance 1 byte. */ - -static signed char -read_sbyte (struct dwarf_buf *buf) -{ - const unsigned char *p = buf->buf; - - if (!advance (buf, 1)) - return 0; - return (*p ^ 0x80) - 0x80; -} - -/* Read a uint16 from BUF and advance 2 bytes. */ - -static uint16_t -read_uint16 (struct dwarf_buf *buf) -{ - const unsigned char *p = buf->buf; - - if (!advance (buf, 2)) - return 0; - if (buf->is_bigendian) - return ((uint16_t) p[0] << 8) | (uint16_t) p[1]; - else - return ((uint16_t) p[1] << 8) | (uint16_t) p[0]; -} - -/* Read a uint32 from BUF and advance 4 bytes. 
*/ - -static uint32_t -read_uint32 (struct dwarf_buf *buf) -{ - const unsigned char *p = buf->buf; - - if (!advance (buf, 4)) - return 0; - if (buf->is_bigendian) - return (((uint32_t) p[0] << 24) | ((uint32_t) p[1] << 16) - | ((uint32_t) p[2] << 8) | (uint32_t) p[3]); - else - return (((uint32_t) p[3] << 24) | ((uint32_t) p[2] << 16) - | ((uint32_t) p[1] << 8) | (uint32_t) p[0]); -} - -/* Read a uint64 from BUF and advance 8 bytes. */ - -static uint64_t -read_uint64 (struct dwarf_buf *buf) -{ - const unsigned char *p = buf->buf; - - if (!advance (buf, 8)) - return 0; - if (buf->is_bigendian) - return (((uint64_t) p[0] << 56) | ((uint64_t) p[1] << 48) - | ((uint64_t) p[2] << 40) | ((uint64_t) p[3] << 32) - | ((uint64_t) p[4] << 24) | ((uint64_t) p[5] << 16) - | ((uint64_t) p[6] << 8) | (uint64_t) p[7]); - else - return (((uint64_t) p[7] << 56) | ((uint64_t) p[6] << 48) - | ((uint64_t) p[5] << 40) | ((uint64_t) p[4] << 32) - | ((uint64_t) p[3] << 24) | ((uint64_t) p[2] << 16) - | ((uint64_t) p[1] << 8) | (uint64_t) p[0]); -} - -/* Read an offset from BUF and advance the appropriate number of - bytes. */ - -static uint64_t -read_offset (struct dwarf_buf *buf, int is_dwarf64) -{ - if (is_dwarf64) - return read_uint64 (buf); - else - return read_uint32 (buf); -} - -/* Read an address from BUF and advance the appropriate number of - bytes. */ - -static uint64_t -read_address (struct dwarf_buf *buf, int addrsize) -{ - switch (addrsize) - { - case 1: - return read_byte (buf); - case 2: - return read_uint16 (buf); - case 4: - return read_uint32 (buf); - case 8: - return read_uint64 (buf); - default: - dwarf_buf_error (buf, "unrecognized address size"); - return 0; - } -} - -/* Return whether a value is the highest possible address, given the - address size. 
*/ - -static int -is_highest_address (uint64_t address, int addrsize) -{ - switch (addrsize) - { - case 1: - return address == (unsigned char) -1; - case 2: - return address == (uint16_t) -1; - case 4: - return address == (uint32_t) -1; - case 8: - return address == (uint64_t) -1; - default: - return 0; - } -} - -/* Read an unsigned LEB128 number. */ - -static uint64_t -read_uleb128 (struct dwarf_buf *buf) -{ - uint64_t ret; - unsigned int shift; - int overflow; - unsigned char b; - - ret = 0; - shift = 0; - overflow = 0; - do - { - const unsigned char *p; - - p = buf->buf; - if (!advance (buf, 1)) - return 0; - b = *p; - if (shift < 64) - ret |= ((uint64_t) (b & 0x7f)) << shift; - else if (!overflow) - { - dwarf_buf_error (buf, "LEB128 overflows uint64_t"); - overflow = 1; - } - shift += 7; - } - while ((b & 0x80) != 0); - - return ret; -} - -/* Read a signed LEB128 number. */ - -static int64_t -read_sleb128 (struct dwarf_buf *buf) -{ - uint64_t val; - unsigned int shift; - int overflow; - unsigned char b; - - val = 0; - shift = 0; - overflow = 0; - do - { - const unsigned char *p; - - p = buf->buf; - if (!advance (buf, 1)) - return 0; - b = *p; - if (shift < 64) - val |= ((uint64_t) (b & 0x7f)) << shift; - else if (!overflow) - { - dwarf_buf_error (buf, "signed LEB128 overflows uint64_t"); - overflow = 1; - } - shift += 7; - } - while ((b & 0x80) != 0); - - if ((b & 0x40) != 0 && shift < 64) - val |= ((uint64_t) -1) << shift; - - return (int64_t) val; -} - -/* Return the length of an LEB128 number. */ - -static size_t -leb128_len (const unsigned char *p) -{ - size_t ret; - - ret = 1; - while ((*p & 0x80) != 0) - { - ++p; - ++ret; - } - return ret; -} - -/* Free an abbreviations structure. 
*/ - -static void -free_abbrevs (struct backtrace_state *state, struct abbrevs *abbrevs, - backtrace_error_callback error_callback, void *data) -{ - size_t i; - - for (i = 0; i < abbrevs->num_abbrevs; ++i) - backtrace_free (state, abbrevs->abbrevs[i].attrs, - abbrevs->abbrevs[i].num_attrs * sizeof (struct attr), - error_callback, data); - backtrace_free (state, abbrevs->abbrevs, - abbrevs->num_abbrevs * sizeof (struct abbrev), - error_callback, data); - abbrevs->num_abbrevs = 0; - abbrevs->abbrevs = NULL; -} - -/* Read an attribute value. Returns 1 on success, 0 on failure. If - the value can be represented as a uint64_t, sets *VAL and sets - *IS_VALID to 1. We don't try to store the value of other attribute - forms, because we don't care about them. */ - -static int -read_attribute (enum dwarf_form form, struct dwarf_buf *buf, - int is_dwarf64, int version, int addrsize, - const unsigned char *dwarf_str, size_t dwarf_str_size, - struct attr_val *val) -{ - /* Avoid warnings about val.u.FIELD may be used uninitialized if - this function is inlined. The warnings aren't valid but can - occur because the different fields are set and used - conditionally. 
*/ - memset (val, 0, sizeof *val); - - switch (form) - { - case DW_FORM_addr: - val->encoding = ATTR_VAL_ADDRESS; - val->u.uint = read_address (buf, addrsize); - return 1; - case DW_FORM_block2: - val->encoding = ATTR_VAL_BLOCK; - return advance (buf, read_uint16 (buf)); - case DW_FORM_block4: - val->encoding = ATTR_VAL_BLOCK; - return advance (buf, read_uint32 (buf)); - case DW_FORM_data2: - val->encoding = ATTR_VAL_UINT; - val->u.uint = read_uint16 (buf); - return 1; - case DW_FORM_data4: - val->encoding = ATTR_VAL_UINT; - val->u.uint = read_uint32 (buf); - return 1; - case DW_FORM_data8: - val->encoding = ATTR_VAL_UINT; - val->u.uint = read_uint64 (buf); - return 1; - case DW_FORM_string: - val->encoding = ATTR_VAL_STRING; - val->u.string = (const char *) buf->buf; - return advance (buf, strnlen ((const char *) buf->buf, buf->left) + 1); - case DW_FORM_block: - val->encoding = ATTR_VAL_BLOCK; - return advance (buf, read_uleb128 (buf)); - case DW_FORM_block1: - val->encoding = ATTR_VAL_BLOCK; - return advance (buf, read_byte (buf)); - case DW_FORM_data1: - val->encoding = ATTR_VAL_UINT; - val->u.uint = read_byte (buf); - return 1; - case DW_FORM_flag: - val->encoding = ATTR_VAL_UINT; - val->u.uint = read_byte (buf); - return 1; - case DW_FORM_sdata: - val->encoding = ATTR_VAL_SINT; - val->u.sint = read_sleb128 (buf); - return 1; - case DW_FORM_strp: - { - uint64_t offset; - - offset = read_offset (buf, is_dwarf64); - if (offset >= dwarf_str_size) - { - dwarf_buf_error (buf, "DW_FORM_strp out of range"); - return 0; - } - val->encoding = ATTR_VAL_STRING; - val->u.string = (const char *) dwarf_str + offset; - return 1; - } - case DW_FORM_udata: - val->encoding = ATTR_VAL_UINT; - val->u.uint = read_uleb128 (buf); - return 1; - case DW_FORM_ref_addr: - val->encoding = ATTR_VAL_REF_INFO; - if (version == 2) - val->u.uint = read_address (buf, addrsize); - else - val->u.uint = read_offset (buf, is_dwarf64); - return 1; - case DW_FORM_ref1: - val->encoding = 
ATTR_VAL_REF_UNIT; - val->u.uint = read_byte (buf); - return 1; - case DW_FORM_ref2: - val->encoding = ATTR_VAL_REF_UNIT; - val->u.uint = read_uint16 (buf); - return 1; - case DW_FORM_ref4: - val->encoding = ATTR_VAL_REF_UNIT; - val->u.uint = read_uint32 (buf); - return 1; - case DW_FORM_ref8: - val->encoding = ATTR_VAL_REF_UNIT; - val->u.uint = read_uint64 (buf); - return 1; - case DW_FORM_ref_udata: - val->encoding = ATTR_VAL_REF_UNIT; - val->u.uint = read_uleb128 (buf); - return 1; - case DW_FORM_indirect: - { - uint64_t form; - - form = read_uleb128 (buf); - return read_attribute ((enum dwarf_form) form, buf, is_dwarf64, - version, addrsize, dwarf_str, dwarf_str_size, - val); - } - case DW_FORM_sec_offset: - val->encoding = ATTR_VAL_REF_SECTION; - val->u.uint = read_offset (buf, is_dwarf64); - return 1; - case DW_FORM_exprloc: - val->encoding = ATTR_VAL_EXPR; - return advance (buf, read_uleb128 (buf)); - case DW_FORM_flag_present: - val->encoding = ATTR_VAL_UINT; - val->u.uint = 1; - return 1; - case DW_FORM_ref_sig8: - val->encoding = ATTR_VAL_REF_TYPE; - val->u.uint = read_uint64 (buf); - return 1; - case DW_FORM_GNU_addr_index: - val->encoding = ATTR_VAL_REF_SECTION; - val->u.uint = read_uleb128 (buf); - return 1; - case DW_FORM_GNU_str_index: - val->encoding = ATTR_VAL_REF_SECTION; - val->u.uint = read_uleb128 (buf); - return 1; - case DW_FORM_GNU_ref_alt: - val->encoding = ATTR_VAL_REF_SECTION; - val->u.uint = read_offset (buf, is_dwarf64); - return 1; - case DW_FORM_GNU_strp_alt: - val->encoding = ATTR_VAL_REF_SECTION; - val->u.uint = read_offset (buf, is_dwarf64); - return 1; - default: - dwarf_buf_error (buf, "unrecognized DWARF form"); - return 0; - } -} - -/* Compare function_addrs for qsort. When ranges are nested, make the - smallest one sort last. 
*/ - -static int -function_addrs_compare (const void *v1, const void *v2) -{ - const struct function_addrs *a1 = (const struct function_addrs *) v1; - const struct function_addrs *a2 = (const struct function_addrs *) v2; - - if (a1->low < a2->low) - return -1; - if (a1->low > a2->low) - return 1; - if (a1->high < a2->high) - return 1; - if (a1->high > a2->high) - return -1; - return strcmp (a1->function->name, a2->function->name); -} - -/* Compare a PC against a function_addrs for bsearch. Note that if - there are multiple ranges containing PC, which one will be returned - is unpredictable. We compensate for that in dwarf_fileline. */ - -static int -function_addrs_search (const void *vkey, const void *ventry) -{ - const uintptr_t *key = (const uintptr_t *) vkey; - const struct function_addrs *entry = (const struct function_addrs *) ventry; - uintptr_t pc; - - pc = *key; - if (pc < entry->low) - return -1; - else if (pc >= entry->high) - return 1; - else - return 0; -} - -/* Add a new compilation unit address range to a vector. Returns 1 on - success, 0 on failure. */ - -static int -add_unit_addr (struct backtrace_state *state, uintptr_t base_address, - struct unit_addrs addrs, - backtrace_error_callback error_callback, void *data, - struct unit_addrs_vector *vec) -{ - struct unit_addrs *p; - - /* Add in the base address of the module here, so that we can look - up the PC directly. */ - addrs.low += base_address; - addrs.high += base_address; - - /* Try to merge with the last entry. */ - if (vec->count > 0) - { - p = (struct unit_addrs *) vec->vec.base + (vec->count - 1); - if ((addrs.low == p->high || addrs.low == p->high + 1) - && addrs.u == p->u) - { - if (addrs.high > p->high) - p->high = addrs.high; - return 1; - } - } - - p = ((struct unit_addrs *) - backtrace_vector_grow (state, sizeof (struct unit_addrs), - error_callback, data, &vec->vec)); - if (p == NULL) - return 0; - - *p = addrs; - ++vec->count; - return 1; -} - -/* Free a unit address vector. 
*/ - -static void -free_unit_addrs_vector (struct backtrace_state *state, - struct unit_addrs_vector *vec, - backtrace_error_callback error_callback, void *data) -{ - struct unit_addrs *addrs; - size_t i; - - addrs = (struct unit_addrs *) vec->vec.base; - for (i = 0; i < vec->count; ++i) - free_abbrevs (state, &addrs[i].u->abbrevs, error_callback, data); -} - -/* Compare unit_addrs for qsort. When ranges are nested, make the - smallest one sort last. */ - -static int -unit_addrs_compare (const void *v1, const void *v2) -{ - const struct unit_addrs *a1 = (const struct unit_addrs *) v1; - const struct unit_addrs *a2 = (const struct unit_addrs *) v2; - - if (a1->low < a2->low) - return -1; - if (a1->low > a2->low) - return 1; - if (a1->high < a2->high) - return 1; - if (a1->high > a2->high) - return -1; - if (a1->u->lineoff < a2->u->lineoff) - return -1; - if (a1->u->lineoff > a2->u->lineoff) - return 1; - return 0; -} - -/* Compare a PC against a unit_addrs for bsearch. Note that if there - are multiple ranges containing PC, which one will be returned is - unpredictable. We compensate for that in dwarf_fileline. */ - -static int -unit_addrs_search (const void *vkey, const void *ventry) -{ - const uintptr_t *key = (const uintptr_t *) vkey; - const struct unit_addrs *entry = (const struct unit_addrs *) ventry; - uintptr_t pc; - - pc = *key; - if (pc < entry->low) - return -1; - else if (pc >= entry->high) - return 1; - else - return 0; -} - -/* Sort the line vector by PC. We want a stable sort here to maintain - the order of lines for the same PC values. Since the sequence is - being sorted in place, their addresses cannot be relied on to - maintain stability. That is the purpose of the index member. 
*/ - -static int -line_compare (const void *v1, const void *v2) -{ - const struct line *ln1 = (const struct line *) v1; - const struct line *ln2 = (const struct line *) v2; - - if (ln1->pc < ln2->pc) - return -1; - else if (ln1->pc > ln2->pc) - return 1; - else if (ln1->idx < ln2->idx) - return -1; - else if (ln1->idx > ln2->idx) - return 1; - else - return 0; -} - -/* Find a PC in a line vector. We always allocate an extra entry at - the end of the lines vector, so that this routine can safely look - at the next entry. Note that when there are multiple mappings for - the same PC value, this will return the last one. */ - -static int -line_search (const void *vkey, const void *ventry) -{ - const uintptr_t *key = (const uintptr_t *) vkey; - const struct line *entry = (const struct line *) ventry; - uintptr_t pc; - - pc = *key; - if (pc < entry->pc) - return -1; - else if (pc >= (entry + 1)->pc) - return 1; - else - return 0; -} - -/* Sort the abbrevs by the abbrev code. This function is passed to - both qsort and bsearch. */ - -static int -abbrev_compare (const void *v1, const void *v2) -{ - const struct abbrev *a1 = (const struct abbrev *) v1; - const struct abbrev *a2 = (const struct abbrev *) v2; - - if (a1->code < a2->code) - return -1; - else if (a1->code > a2->code) - return 1; - else - { - /* This really shouldn't happen. It means there are two - different abbrevs with the same code, and that means we don't - know which one lookup_abbrev should return. */ - return 0; - } -} - -/* Read the abbreviation table for a compilation unit. Returns 1 on - success, 0 on failure. 
*/ - -static int -read_abbrevs (struct backtrace_state *state, uint64_t abbrev_offset, - const unsigned char *dwarf_abbrev, size_t dwarf_abbrev_size, - int is_bigendian, backtrace_error_callback error_callback, - void *data, struct abbrevs *abbrevs) -{ - struct dwarf_buf abbrev_buf; - struct dwarf_buf count_buf; - size_t num_abbrevs; - - abbrevs->num_abbrevs = 0; - abbrevs->abbrevs = NULL; - - if (abbrev_offset >= dwarf_abbrev_size) - { - error_callback (data, "abbrev offset out of range", 0); - return 0; - } - - abbrev_buf.name = ".debug_abbrev"; - abbrev_buf.start = dwarf_abbrev; - abbrev_buf.buf = dwarf_abbrev + abbrev_offset; - abbrev_buf.left = dwarf_abbrev_size - abbrev_offset; - abbrev_buf.is_bigendian = is_bigendian; - abbrev_buf.error_callback = error_callback; - abbrev_buf.data = data; - abbrev_buf.reported_underflow = 0; - - /* Count the number of abbrevs in this list. */ - - count_buf = abbrev_buf; - num_abbrevs = 0; - while (read_uleb128 (&count_buf) != 0) - { - if (count_buf.reported_underflow) - return 0; - ++num_abbrevs; - // Skip tag. - read_uleb128 (&count_buf); - // Skip has_children. - read_byte (&count_buf); - // Skip attributes. - while (read_uleb128 (&count_buf) != 0) - read_uleb128 (&count_buf); - // Skip form of last attribute. 
- read_uleb128 (&count_buf); - } - - if (count_buf.reported_underflow) - return 0; - - if (num_abbrevs == 0) - return 1; - - abbrevs->num_abbrevs = num_abbrevs; - abbrevs->abbrevs = ((struct abbrev *) - backtrace_alloc (state, - num_abbrevs * sizeof (struct abbrev), - error_callback, data)); - if (abbrevs->abbrevs == NULL) - return 0; - memset (abbrevs->abbrevs, 0, num_abbrevs * sizeof (struct abbrev)); - - num_abbrevs = 0; - while (1) - { - uint64_t code; - struct abbrev a; - size_t num_attrs; - struct attr *attrs; - - if (abbrev_buf.reported_underflow) - goto fail; - - code = read_uleb128 (&abbrev_buf); - if (code == 0) - break; - - a.code = code; - a.tag = (enum dwarf_tag) read_uleb128 (&abbrev_buf); - a.has_children = read_byte (&abbrev_buf); - - count_buf = abbrev_buf; - num_attrs = 0; - while (read_uleb128 (&count_buf) != 0) - { - ++num_attrs; - read_uleb128 (&count_buf); - } - - if (num_attrs == 0) - { - attrs = NULL; - read_uleb128 (&abbrev_buf); - read_uleb128 (&abbrev_buf); - } - else - { - attrs = ((struct attr *) - backtrace_alloc (state, num_attrs * sizeof *attrs, - error_callback, data)); - if (attrs == NULL) - goto fail; - num_attrs = 0; - while (1) - { - uint64_t name; - uint64_t form; - - name = read_uleb128 (&abbrev_buf); - form = read_uleb128 (&abbrev_buf); - if (name == 0) - break; - attrs[num_attrs].name = (enum dwarf_attribute) name; - attrs[num_attrs].form = (enum dwarf_form) form; - ++num_attrs; - } - } - - a.num_attrs = num_attrs; - a.attrs = attrs; - - abbrevs->abbrevs[num_abbrevs] = a; - ++num_abbrevs; - } - - backtrace_qsort (abbrevs->abbrevs, abbrevs->num_abbrevs, - sizeof (struct abbrev), abbrev_compare); - - return 1; - - fail: - free_abbrevs (state, abbrevs, error_callback, data); - return 0; -} - -/* Return the abbrev information for an abbrev code. 
*/ - -static const struct abbrev * -lookup_abbrev (struct abbrevs *abbrevs, uint64_t code, - backtrace_error_callback error_callback, void *data) -{ - struct abbrev key; - void *p; - - /* With GCC, where abbrevs are simply numbered in order, we should - be able to just look up the entry. */ - if (code - 1 < abbrevs->num_abbrevs - && abbrevs->abbrevs[code - 1].code == code) - return &abbrevs->abbrevs[code - 1]; - - /* Otherwise we have to search. */ - memset (&key, 0, sizeof key); - key.code = code; - p = bsearch (&key, abbrevs->abbrevs, abbrevs->num_abbrevs, - sizeof (struct abbrev), abbrev_compare); - if (p == NULL) - { - error_callback (data, "invalid abbreviation code", 0); - return NULL; - } - return (const struct abbrev *) p; -} - -/* Add non-contiguous address ranges for a compilation unit. Returns - 1 on success, 0 on failure. */ - -static int -add_unit_ranges (struct backtrace_state *state, uintptr_t base_address, - struct unit *u, uint64_t ranges, uint64_t base, - int is_bigendian, const unsigned char *dwarf_ranges, - size_t dwarf_ranges_size, - backtrace_error_callback error_callback, void *data, - struct unit_addrs_vector *addrs) -{ - struct dwarf_buf ranges_buf; - - if (ranges >= dwarf_ranges_size) - { - error_callback (data, "ranges offset out of range", 0); - return 0; - } - - ranges_buf.name = ".debug_ranges"; - ranges_buf.start = dwarf_ranges; - ranges_buf.buf = dwarf_ranges + ranges; - ranges_buf.left = dwarf_ranges_size - ranges; - ranges_buf.is_bigendian = is_bigendian; - ranges_buf.error_callback = error_callback; - ranges_buf.data = data; - ranges_buf.reported_underflow = 0; - - while (1) - { - uint64_t low; - uint64_t high; - - if (ranges_buf.reported_underflow) - return 0; - - low = read_address (&ranges_buf, u->addrsize); - high = read_address (&ranges_buf, u->addrsize); - - if (low == 0 && high == 0) - break; - - if (is_highest_address (low, u->addrsize)) - base = high; - else - { - struct unit_addrs a; - - a.low = low + base; - a.high = 
high + base; - a.u = u; - if (!add_unit_addr (state, base_address, a, error_callback, data, - addrs)) - return 0; - } - } - - if (ranges_buf.reported_underflow) - return 0; - - return 1; -} - -/* Find the address range covered by a compilation unit, reading from - UNIT_BUF and adding values to U. Returns 1 if all data could be - read, 0 if there is some error. */ - -static int -find_address_ranges (struct backtrace_state *state, uintptr_t base_address, - struct dwarf_buf *unit_buf, - const unsigned char *dwarf_str, size_t dwarf_str_size, - const unsigned char *dwarf_ranges, - size_t dwarf_ranges_size, - int is_bigendian, backtrace_error_callback error_callback, - void *data, struct unit *u, - struct unit_addrs_vector *addrs) -{ - while (unit_buf->left > 0) - { - uint64_t code; - const struct abbrev *abbrev; - uint64_t lowpc; - int have_lowpc; - uint64_t highpc; - int have_highpc; - int highpc_is_relative; - uint64_t ranges; - int have_ranges; - size_t i; - - code = read_uleb128 (unit_buf); - if (code == 0) - return 1; - - abbrev = lookup_abbrev (&u->abbrevs, code, error_callback, data); - if (abbrev == NULL) - return 0; - - lowpc = 0; - have_lowpc = 0; - highpc = 0; - have_highpc = 0; - highpc_is_relative = 0; - ranges = 0; - have_ranges = 0; - for (i = 0; i < abbrev->num_attrs; ++i) - { - struct attr_val val; - - if (!read_attribute (abbrev->attrs[i].form, unit_buf, - u->is_dwarf64, u->version, u->addrsize, - dwarf_str, dwarf_str_size, &val)) - return 0; - - switch (abbrev->attrs[i].name) - { - case DW_AT_low_pc: - if (val.encoding == ATTR_VAL_ADDRESS) - { - lowpc = val.u.uint; - have_lowpc = 1; - } - break; - - case DW_AT_high_pc: - if (val.encoding == ATTR_VAL_ADDRESS) - { - highpc = val.u.uint; - have_highpc = 1; - } - else if (val.encoding == ATTR_VAL_UINT) - { - highpc = val.u.uint; - have_highpc = 1; - highpc_is_relative = 1; - } - break; - - case DW_AT_ranges: - if (val.encoding == ATTR_VAL_UINT - || val.encoding == ATTR_VAL_REF_SECTION) - { - ranges = 
val.u.uint; - have_ranges = 1; - } - break; - - case DW_AT_stmt_list: - if (abbrev->tag == DW_TAG_compile_unit - && (val.encoding == ATTR_VAL_UINT - || val.encoding == ATTR_VAL_REF_SECTION)) - u->lineoff = val.u.uint; - break; - - case DW_AT_name: - if (abbrev->tag == DW_TAG_compile_unit - && val.encoding == ATTR_VAL_STRING) - u->filename = val.u.string; - break; - - case DW_AT_comp_dir: - if (abbrev->tag == DW_TAG_compile_unit - && val.encoding == ATTR_VAL_STRING) - u->comp_dir = val.u.string; - break; - - default: - break; - } - } - - if (abbrev->tag == DW_TAG_compile_unit - || abbrev->tag == DW_TAG_subprogram) - { - if (have_ranges) - { - if (!add_unit_ranges (state, base_address, u, ranges, lowpc, - is_bigendian, dwarf_ranges, - dwarf_ranges_size, error_callback, - data, addrs)) - return 0; - } - else if (have_lowpc && have_highpc) - { - struct unit_addrs a; - - if (highpc_is_relative) - highpc += lowpc; - a.low = lowpc; - a.high = highpc; - a.u = u; - - if (!add_unit_addr (state, base_address, a, error_callback, data, - addrs)) - return 0; - } - - /* If we found the PC range in the DW_TAG_compile_unit, we - can stop now. */ - if (abbrev->tag == DW_TAG_compile_unit - && (have_ranges || (have_lowpc && have_highpc))) - return 1; - } - - if (abbrev->has_children) - { - if (!find_address_ranges (state, base_address, unit_buf, - dwarf_str, dwarf_str_size, - dwarf_ranges, dwarf_ranges_size, - is_bigendian, error_callback, data, - u, addrs)) - return 0; - } - } - - return 1; -} - -/* Build a mapping from address ranges to the compilation units where - the line number information for that range can be found. Returns 1 - on success, 0 on failure. 
*/ - -static int -build_address_map (struct backtrace_state *state, uintptr_t base_address, - const unsigned char *dwarf_info, size_t dwarf_info_size, - const unsigned char *dwarf_abbrev, size_t dwarf_abbrev_size, - const unsigned char *dwarf_ranges, size_t dwarf_ranges_size, - const unsigned char *dwarf_str, size_t dwarf_str_size, - int is_bigendian, backtrace_error_callback error_callback, - void *data, struct unit_addrs_vector *addrs) -{ - struct dwarf_buf info; - struct abbrevs abbrevs; - - memset (&addrs->vec, 0, sizeof addrs->vec); - addrs->count = 0; - - /* Read through the .debug_info section. FIXME: Should we use the - .debug_aranges section? gdb and addr2line don't use it, but I'm - not sure why. */ - - info.name = ".debug_info"; - info.start = dwarf_info; - info.buf = dwarf_info; - info.left = dwarf_info_size; - info.is_bigendian = is_bigendian; - info.error_callback = error_callback; - info.data = data; - info.reported_underflow = 0; - - memset (&abbrevs, 0, sizeof abbrevs); - while (info.left > 0) - { - const unsigned char *unit_data_start; - uint64_t len; - int is_dwarf64; - struct dwarf_buf unit_buf; - int version; - uint64_t abbrev_offset; - int addrsize; - struct unit *u; - - if (info.reported_underflow) - goto fail; - - unit_data_start = info.buf; - - is_dwarf64 = 0; - len = read_uint32 (&info); - if (len == 0xffffffff) - { - len = read_uint64 (&info); - is_dwarf64 = 1; - } - - unit_buf = info; - unit_buf.left = len; - - if (!advance (&info, len)) - goto fail; - - version = read_uint16 (&unit_buf); - if (version < 2 || version > 4) - { - dwarf_buf_error (&unit_buf, "unrecognized DWARF version"); - goto fail; - } - - abbrev_offset = read_offset (&unit_buf, is_dwarf64); - if (!read_abbrevs (state, abbrev_offset, dwarf_abbrev, dwarf_abbrev_size, - is_bigendian, error_callback, data, &abbrevs)) - goto fail; - - addrsize = read_byte (&unit_buf); - - u = ((struct unit *) - backtrace_alloc (state, sizeof *u, error_callback, data)); - if (u == NULL) - 
goto fail; - u->unit_data = unit_buf.buf; - u->unit_data_len = unit_buf.left; - u->unit_data_offset = unit_buf.buf - unit_data_start; - u->version = version; - u->is_dwarf64 = is_dwarf64; - u->addrsize = addrsize; - u->filename = NULL; - u->comp_dir = NULL; - u->abs_filename = NULL; - u->lineoff = 0; - u->abbrevs = abbrevs; - memset (&abbrevs, 0, sizeof abbrevs); - - /* The actual line number mappings will be read as needed. */ - u->lines = NULL; - u->lines_count = 0; - u->function_addrs = NULL; - u->function_addrs_count = 0; - - if (!find_address_ranges (state, base_address, &unit_buf, - dwarf_str, dwarf_str_size, - dwarf_ranges, dwarf_ranges_size, - is_bigendian, error_callback, data, - u, addrs)) - { - free_abbrevs (state, &u->abbrevs, error_callback, data); - backtrace_free (state, u, sizeof *u, error_callback, data); - goto fail; - } - - if (unit_buf.reported_underflow) - { - free_abbrevs (state, &u->abbrevs, error_callback, data); - backtrace_free (state, u, sizeof *u, error_callback, data); - goto fail; - } - } - if (info.reported_underflow) - goto fail; - - return 1; - - fail: - free_abbrevs (state, &abbrevs, error_callback, data); - free_unit_addrs_vector (state, addrs, error_callback, data); - return 0; -} - -/* Add a new mapping to the vector of line mappings that we are - building. Returns 1 on success, 0 on failure. */ - -static int -add_line (struct backtrace_state *state, struct dwarf_data *ddata, - uintptr_t pc, const char *filename, int lineno, - backtrace_error_callback error_callback, void *data, - struct line_vector *vec) -{ - struct line *ln; - - /* If we are adding the same mapping, ignore it. This can happen - when using discriminators. 
*/ - if (vec->count > 0) - { - ln = (struct line *) vec->vec.base + (vec->count - 1); - if (pc == ln->pc && filename == ln->filename && lineno == ln->lineno) - return 1; - } - - ln = ((struct line *) - backtrace_vector_grow (state, sizeof (struct line), error_callback, - data, &vec->vec)); - if (ln == NULL) - return 0; - - /* Add in the base address here, so that we can look up the PC - directly. */ - ln->pc = pc + ddata->base_address; - - ln->filename = filename; - ln->lineno = lineno; - ln->idx = vec->count; - - ++vec->count; - - return 1; -} - -/* Free the line header information. If FREE_FILENAMES is true we - free the file names themselves, otherwise we leave them, as there - may be line structures pointing to them. */ - -static void -free_line_header (struct backtrace_state *state, struct line_header *hdr, - backtrace_error_callback error_callback, void *data) -{ - backtrace_free (state, hdr->dirs, hdr->dirs_count * sizeof (const char *), - error_callback, data); - backtrace_free (state, hdr->filenames, - hdr->filenames_count * sizeof (char *), - error_callback, data); -} - -/* Read the line header. Return 1 on success, 0 on failure. */ - -static int -read_line_header (struct backtrace_state *state, struct unit *u, - int is_dwarf64, struct dwarf_buf *line_buf, - struct line_header *hdr) -{ - uint64_t hdrlen; - struct dwarf_buf hdr_buf; - const unsigned char *p; - const unsigned char *pend; - size_t i; - - hdr->version = read_uint16 (line_buf); - if (hdr->version < 2 || hdr->version > 4) - { - dwarf_buf_error (line_buf, "unsupported line number version"); - return 0; - } - - hdrlen = read_offset (line_buf, is_dwarf64); - - hdr_buf = *line_buf; - hdr_buf.left = hdrlen; - - if (!advance (line_buf, hdrlen)) - return 0; - - hdr->min_insn_len = read_byte (&hdr_buf); - if (hdr->version < 4) - hdr->max_ops_per_insn = 1; - else - hdr->max_ops_per_insn = read_byte (&hdr_buf); - - /* We don't care about default_is_stmt. 
*/ - read_byte (&hdr_buf); - - hdr->line_base = read_sbyte (&hdr_buf); - hdr->line_range = read_byte (&hdr_buf); - - hdr->opcode_base = read_byte (&hdr_buf); - hdr->opcode_lengths = hdr_buf.buf; - if (!advance (&hdr_buf, hdr->opcode_base - 1)) - return 0; - - /* Count the number of directory entries. */ - hdr->dirs_count = 0; - p = hdr_buf.buf; - pend = p + hdr_buf.left; - while (p < pend && *p != '\0') - { - p += strnlen((const char *) p, pend - p) + 1; - ++hdr->dirs_count; - } - - hdr->dirs = ((const char **) - backtrace_alloc (state, - hdr->dirs_count * sizeof (const char *), - line_buf->error_callback, line_buf->data)); - if (hdr->dirs == NULL) - return 0; - - i = 0; - while (*hdr_buf.buf != '\0') - { - if (hdr_buf.reported_underflow) - return 0; - - hdr->dirs[i] = (const char *) hdr_buf.buf; - ++i; - if (!advance (&hdr_buf, - strnlen ((const char *) hdr_buf.buf, hdr_buf.left) + 1)) - return 0; - } - if (!advance (&hdr_buf, 1)) - return 0; - - /* Count the number of file entries. */ - hdr->filenames_count = 0; - p = hdr_buf.buf; - pend = p + hdr_buf.left; - while (p < pend && *p != '\0') - { - p += strnlen ((const char *) p, pend - p) + 1; - p += leb128_len (p); - p += leb128_len (p); - p += leb128_len (p); - ++hdr->filenames_count; - } - - hdr->filenames = ((const char **) - backtrace_alloc (state, - hdr->filenames_count * sizeof (char *), - line_buf->error_callback, - line_buf->data)); - if (hdr->filenames == NULL) - return 0; - i = 0; - while (*hdr_buf.buf != '\0') - { - const char *filename; - uint64_t dir_index; - - if (hdr_buf.reported_underflow) - return 0; - - filename = (const char *) hdr_buf.buf; - if (!advance (&hdr_buf, - strnlen ((const char *) hdr_buf.buf, hdr_buf.left) + 1)) - return 0; - dir_index = read_uleb128 (&hdr_buf); - if (IS_ABSOLUTE_PATH (filename) - || (dir_index == 0 && u->comp_dir == NULL)) - hdr->filenames[i] = filename; - else - { - const char *dir; - size_t dir_len; - size_t filename_len; - char *s; - - if (dir_index == 0) - dir = 
u->comp_dir; - else if (dir_index - 1 < hdr->dirs_count) - dir = hdr->dirs[dir_index - 1]; - else - { - dwarf_buf_error (line_buf, - ("invalid directory index in " - "line number program header")); - return 0; - } - dir_len = strlen (dir); - filename_len = strlen (filename); - s = ((char *) - backtrace_alloc (state, dir_len + filename_len + 2, - line_buf->error_callback, line_buf->data)); - if (s == NULL) - return 0; - memcpy (s, dir, dir_len); - /* FIXME: If we are on a DOS-based file system, and the - directory or the file name use backslashes, then we - should use a backslash here. */ - s[dir_len] = '/'; - memcpy (s + dir_len + 1, filename, filename_len + 1); - hdr->filenames[i] = s; - } - - /* Ignore the modification time and size. */ - read_uleb128 (&hdr_buf); - read_uleb128 (&hdr_buf); - - ++i; - } - - if (hdr_buf.reported_underflow) - return 0; - - return 1; -} - -/* Read the line program, adding line mappings to VEC. Return 1 on - success, 0 on failure. */ - -static int -read_line_program (struct backtrace_state *state, struct dwarf_data *ddata, - struct unit *u, const struct line_header *hdr, - struct dwarf_buf *line_buf, struct line_vector *vec) -{ - uint64_t address; - unsigned int op_index; - const char *reset_filename; - const char *filename; - int lineno; - - address = 0; - op_index = 0; - if (hdr->filenames_count > 0) - reset_filename = hdr->filenames[0]; - else - reset_filename = ""; - filename = reset_filename; - lineno = 1; - while (line_buf->left > 0) - { - unsigned int op; - - op = read_byte (line_buf); - if (op >= hdr->opcode_base) - { - unsigned int advance; - - /* Special opcode. 
*/ - op -= hdr->opcode_base; - advance = op / hdr->line_range; - address += (hdr->min_insn_len * (op_index + advance) - / hdr->max_ops_per_insn); - op_index = (op_index + advance) % hdr->max_ops_per_insn; - lineno += hdr->line_base + (int) (op % hdr->line_range); - add_line (state, ddata, address, filename, lineno, - line_buf->error_callback, line_buf->data, vec); - } - else if (op == DW_LNS_extended_op) - { - uint64_t len; - - len = read_uleb128 (line_buf); - op = read_byte (line_buf); - switch (op) - { - case DW_LNE_end_sequence: - /* FIXME: Should we mark the high PC here? It seems - that we already have that information from the - compilation unit. */ - address = 0; - op_index = 0; - filename = reset_filename; - lineno = 1; - break; - case DW_LNE_set_address: - address = read_address (line_buf, u->addrsize); - break; - case DW_LNE_define_file: - { - const char *f; - unsigned int dir_index; - - f = (const char *) line_buf->buf; - if (!advance (line_buf, strnlen (f, line_buf->left) + 1)) - return 0; - dir_index = read_uleb128 (line_buf); - /* Ignore that time and length. */ - read_uleb128 (line_buf); - read_uleb128 (line_buf); - if (IS_ABSOLUTE_PATH (f)) - filename = f; - else - { - const char *dir; - size_t dir_len; - size_t f_len; - char *p; - - if (dir_index == 0) - dir = u->comp_dir; - else if (dir_index - 1 < hdr->dirs_count) - dir = hdr->dirs[dir_index - 1]; - else - { - dwarf_buf_error (line_buf, - ("invalid directory index " - "in line number program")); - return 0; - } - dir_len = strlen (dir); - f_len = strlen (f); - p = ((char *) - backtrace_alloc (state, dir_len + f_len + 2, - line_buf->error_callback, - line_buf->data)); - if (p == NULL) - return 0; - memcpy (p, dir, dir_len); - /* FIXME: If we are on a DOS-based file system, - and the directory or the file name use - backslashes, then we should use a backslash - here. 
*/ - p[dir_len] = '/'; - memcpy (p + dir_len + 1, f, f_len + 1); - filename = p; - } - } - break; - case DW_LNE_set_discriminator: - /* We don't care about discriminators. */ - read_uleb128 (line_buf); - break; - default: - if (!advance (line_buf, len - 1)) - return 0; - break; - } - } - else - { - switch (op) - { - case DW_LNS_copy: - add_line (state, ddata, address, filename, lineno, - line_buf->error_callback, line_buf->data, vec); - break; - case DW_LNS_advance_pc: - { - uint64_t advance; - - advance = read_uleb128 (line_buf); - address += (hdr->min_insn_len * (op_index + advance) - / hdr->max_ops_per_insn); - op_index = (op_index + advance) % hdr->max_ops_per_insn; - } - break; - case DW_LNS_advance_line: - lineno += (int) read_sleb128 (line_buf); - break; - case DW_LNS_set_file: - { - uint64_t fileno; - - fileno = read_uleb128 (line_buf); - if (fileno == 0) - filename = ""; - else - { - if (fileno - 1 >= hdr->filenames_count) - { - dwarf_buf_error (line_buf, - ("invalid file number in " - "line number program")); - return 0; - } - filename = hdr->filenames[fileno - 1]; - } - } - break; - case DW_LNS_set_column: - read_uleb128 (line_buf); - break; - case DW_LNS_negate_stmt: - break; - case DW_LNS_set_basic_block: - break; - case DW_LNS_const_add_pc: - { - unsigned int advance; - - op = 255 - hdr->opcode_base; - advance = op / hdr->line_range; - address += (hdr->min_insn_len * (op_index + advance) - / hdr->max_ops_per_insn); - op_index = (op_index + advance) % hdr->max_ops_per_insn; - } - break; - case DW_LNS_fixed_advance_pc: - address += read_uint16 (line_buf); - op_index = 0; - break; - case DW_LNS_set_prologue_end: - break; - case DW_LNS_set_epilogue_begin: - break; - case DW_LNS_set_isa: - read_uleb128 (line_buf); - break; - default: - { - unsigned int i; - - for (i = hdr->opcode_lengths[op - 1]; i > 0; --i) - read_uleb128 (line_buf); - } - break; - } - } - } - - return 1; -} - -/* Read the line number information for a compilation unit. 
Returns 1 - on success, 0 on failure. */ - -static int -read_line_info (struct backtrace_state *state, struct dwarf_data *ddata, - backtrace_error_callback error_callback, void *data, - struct unit *u, struct line_header *hdr, struct line **lines, - size_t *lines_count) -{ - struct line_vector vec; - struct dwarf_buf line_buf; - uint64_t len; - int is_dwarf64; - struct line *ln; - - memset (&vec.vec, 0, sizeof vec.vec); - vec.count = 0; - - memset (hdr, 0, sizeof *hdr); - - if (u->lineoff != (off_t) (size_t) u->lineoff - || (size_t) u->lineoff >= ddata->dwarf_line_size) - { - error_callback (data, "unit line offset out of range", 0); - goto fail; - } - - line_buf.name = ".debug_line"; - line_buf.start = ddata->dwarf_line; - line_buf.buf = ddata->dwarf_line + u->lineoff; - line_buf.left = ddata->dwarf_line_size - u->lineoff; - line_buf.is_bigendian = ddata->is_bigendian; - line_buf.error_callback = error_callback; - line_buf.data = data; - line_buf.reported_underflow = 0; - - is_dwarf64 = 0; - len = read_uint32 (&line_buf); - if (len == 0xffffffff) - { - len = read_uint64 (&line_buf); - is_dwarf64 = 1; - } - line_buf.left = len; - - if (!read_line_header (state, u, is_dwarf64, &line_buf, hdr)) - goto fail; - - if (!read_line_program (state, ddata, u, hdr, &line_buf, &vec)) - goto fail; - - if (line_buf.reported_underflow) - goto fail; - - if (vec.count == 0) - { - /* This is not a failure in the sense of a generating an error, - but it is a failure in that sense that we have no useful - information. */ - goto fail; - } - - /* Allocate one extra entry at the end. 
*/ - ln = ((struct line *) - backtrace_vector_grow (state, sizeof (struct line), error_callback, - data, &vec.vec)); - if (ln == NULL) - goto fail; - ln->pc = (uintptr_t) -1; - ln->filename = NULL; - ln->lineno = 0; - ln->idx = 0; - - if (!backtrace_vector_release (state, &vec.vec, error_callback, data)) - goto fail; - - ln = (struct line *) vec.vec.base; - backtrace_qsort (ln, vec.count, sizeof (struct line), line_compare); - - *lines = ln; - *lines_count = vec.count; - - return 1; - - fail: - vec.vec.alc += vec.vec.size; - vec.vec.size = 0; - backtrace_vector_release (state, &vec.vec, error_callback, data); - free_line_header (state, hdr, error_callback, data); - *lines = (struct line *) (uintptr_t) -1; - *lines_count = 0; - return 0; -} - -/* Read the name of a function from a DIE referenced by a - DW_AT_abstract_origin or DW_AT_specification tag. OFFSET is within - the same compilation unit. */ - -static const char * -read_referenced_name (struct dwarf_data *ddata, struct unit *u, - uint64_t offset, backtrace_error_callback error_callback, - void *data) -{ - struct dwarf_buf unit_buf; - uint64_t code; - const struct abbrev *abbrev; - const char *ret; - size_t i; - - /* OFFSET is from the start of the data for this compilation unit. - U->unit_data is the data, but it starts U->unit_data_offset bytes - from the beginning. 
*/ - - if (offset < u->unit_data_offset - || offset - u->unit_data_offset >= u->unit_data_len) - { - error_callback (data, - "abstract origin or specification out of range", - 0); - return NULL; - } - - offset -= u->unit_data_offset; - - unit_buf.name = ".debug_info"; - unit_buf.start = ddata->dwarf_info; - unit_buf.buf = u->unit_data + offset; - unit_buf.left = u->unit_data_len - offset; - unit_buf.is_bigendian = ddata->is_bigendian; - unit_buf.error_callback = error_callback; - unit_buf.data = data; - unit_buf.reported_underflow = 0; - - code = read_uleb128 (&unit_buf); - if (code == 0) - { - dwarf_buf_error (&unit_buf, "invalid abstract origin or specification"); - return NULL; - } - - abbrev = lookup_abbrev (&u->abbrevs, code, error_callback, data); - if (abbrev == NULL) - return NULL; - - ret = NULL; - for (i = 0; i < abbrev->num_attrs; ++i) - { - struct attr_val val; - - if (!read_attribute (abbrev->attrs[i].form, &unit_buf, - u->is_dwarf64, u->version, u->addrsize, - ddata->dwarf_str, ddata->dwarf_str_size, - &val)) - return NULL; - - switch (abbrev->attrs[i].name) - { - case DW_AT_name: - /* We prefer the linkage name if get one. */ - if (val.encoding == ATTR_VAL_STRING) - ret = val.u.string; - break; - - case DW_AT_linkage_name: - case DW_AT_MIPS_linkage_name: - if (val.encoding == ATTR_VAL_STRING) - return val.u.string; - break; - - case DW_AT_specification: - if (abbrev->attrs[i].form == DW_FORM_ref_addr - || abbrev->attrs[i].form == DW_FORM_ref_sig8) - { - /* This refers to a specification defined in some other - compilation unit. We can handle this case if we - must, but it's harder. */ - break; - } - if (val.encoding == ATTR_VAL_UINT - || val.encoding == ATTR_VAL_REF_UNIT) - { - const char *name; - - name = read_referenced_name (ddata, u, val.u.uint, - error_callback, data); - if (name != NULL) - ret = name; - } - break; - - default: - break; - } - } - - return ret; -} - -/* Add a single range to U that maps to function. 
Returns 1 on - success, 0 on error. */ - -static int -add_function_range (struct backtrace_state *state, struct dwarf_data *ddata, - struct function *function, uint64_t lowpc, uint64_t highpc, - backtrace_error_callback error_callback, - void *data, struct function_vector *vec) -{ - struct function_addrs *p; - - /* Add in the base address here, so that we can look up the PC - directly. */ - lowpc += ddata->base_address; - highpc += ddata->base_address; - - if (vec->count > 0) - { - p = (struct function_addrs *) vec->vec.base + vec->count - 1; - if ((lowpc == p->high || lowpc == p->high + 1) - && function == p->function) - { - if (highpc > p->high) - p->high = highpc; - return 1; - } - } - - p = ((struct function_addrs *) - backtrace_vector_grow (state, sizeof (struct function_addrs), - error_callback, data, &vec->vec)); - if (p == NULL) - return 0; - - p->low = lowpc; - p->high = highpc; - p->function = function; - ++vec->count; - return 1; -} - -/* Add PC ranges to U that map to FUNCTION. Returns 1 on success, 0 - on error. 
*/ - -static int -add_function_ranges (struct backtrace_state *state, struct dwarf_data *ddata, - struct unit *u, struct function *function, - uint64_t ranges, uint64_t base, - backtrace_error_callback error_callback, void *data, - struct function_vector *vec) -{ - struct dwarf_buf ranges_buf; - - if (ranges >= ddata->dwarf_ranges_size) - { - error_callback (data, "function ranges offset out of range", 0); - return 0; - } - - ranges_buf.name = ".debug_ranges"; - ranges_buf.start = ddata->dwarf_ranges; - ranges_buf.buf = ddata->dwarf_ranges + ranges; - ranges_buf.left = ddata->dwarf_ranges_size - ranges; - ranges_buf.is_bigendian = ddata->is_bigendian; - ranges_buf.error_callback = error_callback; - ranges_buf.data = data; - ranges_buf.reported_underflow = 0; - - while (1) - { - uint64_t low; - uint64_t high; - - if (ranges_buf.reported_underflow) - return 0; - - low = read_address (&ranges_buf, u->addrsize); - high = read_address (&ranges_buf, u->addrsize); - - if (low == 0 && high == 0) - break; - - if (is_highest_address (low, u->addrsize)) - base = high; - else - { - if (!add_function_range (state, ddata, function, low + base, - high + base, error_callback, data, vec)) - return 0; - } - } - - if (ranges_buf.reported_underflow) - return 0; - - return 1; -} - -/* Read one entry plus all its children. Add function addresses to - VEC. Returns 1 on success, 0 on error. 
*/ - -static int -read_function_entry (struct backtrace_state *state, struct dwarf_data *ddata, - struct unit *u, uint64_t base, struct dwarf_buf *unit_buf, - const struct line_header *lhdr, - backtrace_error_callback error_callback, void *data, - struct function_vector *vec_function, - struct function_vector *vec_inlined) -{ - while (unit_buf->left > 0) - { - uint64_t code; - const struct abbrev *abbrev; - int is_function; - struct function *function; - struct function_vector *vec; - size_t i; - uint64_t lowpc; - int have_lowpc; - uint64_t highpc; - int have_highpc; - int highpc_is_relative; - uint64_t ranges; - int have_ranges; - - code = read_uleb128 (unit_buf); - if (code == 0) - return 1; - - abbrev = lookup_abbrev (&u->abbrevs, code, error_callback, data); - if (abbrev == NULL) - return 0; - - is_function = (abbrev->tag == DW_TAG_subprogram - || abbrev->tag == DW_TAG_entry_point - || abbrev->tag == DW_TAG_inlined_subroutine); - - if (abbrev->tag == DW_TAG_inlined_subroutine) - vec = vec_inlined; - else - vec = vec_function; - - function = NULL; - if (is_function) - { - function = ((struct function *) - backtrace_alloc (state, sizeof *function, - error_callback, data)); - if (function == NULL) - return 0; - memset (function, 0, sizeof *function); - } - - lowpc = 0; - have_lowpc = 0; - highpc = 0; - have_highpc = 0; - highpc_is_relative = 0; - ranges = 0; - have_ranges = 0; - for (i = 0; i < abbrev->num_attrs; ++i) - { - struct attr_val val; - - if (!read_attribute (abbrev->attrs[i].form, unit_buf, - u->is_dwarf64, u->version, u->addrsize, - ddata->dwarf_str, ddata->dwarf_str_size, - &val)) - return 0; - - /* The compile unit sets the base address for any address - ranges in the function entries. 
*/ - if (abbrev->tag == DW_TAG_compile_unit - && abbrev->attrs[i].name == DW_AT_low_pc - && val.encoding == ATTR_VAL_ADDRESS) - base = val.u.uint; - - if (is_function) - { - switch (abbrev->attrs[i].name) - { - case DW_AT_call_file: - if (val.encoding == ATTR_VAL_UINT) - { - if (val.u.uint == 0) - function->caller_filename = ""; - else - { - if (val.u.uint - 1 >= lhdr->filenames_count) - { - dwarf_buf_error (unit_buf, - ("invalid file number in " - "DW_AT_call_file attribute")); - return 0; - } - function->caller_filename = - lhdr->filenames[val.u.uint - 1]; - } - } - break; - - case DW_AT_call_line: - if (val.encoding == ATTR_VAL_UINT) - function->caller_lineno = val.u.uint; - break; - - case DW_AT_abstract_origin: - case DW_AT_specification: - if (abbrev->attrs[i].form == DW_FORM_ref_addr - || abbrev->attrs[i].form == DW_FORM_ref_sig8) - { - /* This refers to an abstract origin defined in - some other compilation unit. We can handle - this case if we must, but it's harder. */ - break; - } - if (val.encoding == ATTR_VAL_UINT - || val.encoding == ATTR_VAL_REF_UNIT) - { - const char *name; - - name = read_referenced_name (ddata, u, val.u.uint, - error_callback, data); - if (name != NULL) - function->name = name; - } - break; - - case DW_AT_name: - if (val.encoding == ATTR_VAL_STRING) - { - /* Don't override a name we found in some other - way, as it will normally be more - useful--e.g., this name is normally not - mangled. 
*/ - if (function->name == NULL) - function->name = val.u.string; - } - break; - - case DW_AT_linkage_name: - case DW_AT_MIPS_linkage_name: - if (val.encoding == ATTR_VAL_STRING) - function->name = val.u.string; - break; - - case DW_AT_low_pc: - if (val.encoding == ATTR_VAL_ADDRESS) - { - lowpc = val.u.uint; - have_lowpc = 1; - } - break; - - case DW_AT_high_pc: - if (val.encoding == ATTR_VAL_ADDRESS) - { - highpc = val.u.uint; - have_highpc = 1; - } - else if (val.encoding == ATTR_VAL_UINT) - { - highpc = val.u.uint; - have_highpc = 1; - highpc_is_relative = 1; - } - break; - - case DW_AT_ranges: - if (val.encoding == ATTR_VAL_UINT - || val.encoding == ATTR_VAL_REF_SECTION) - { - ranges = val.u.uint; - have_ranges = 1; - } - break; - - default: - break; - } - } - } - - /* If we couldn't find a name for the function, we have no use - for it. */ - if (is_function && function->name == NULL) - { - backtrace_free (state, function, sizeof *function, - error_callback, data); - is_function = 0; - } - - if (is_function) - { - if (have_ranges) - { - if (!add_function_ranges (state, ddata, u, function, ranges, - base, error_callback, data, vec)) - return 0; - } - else if (have_lowpc && have_highpc) - { - if (highpc_is_relative) - highpc += lowpc; - if (!add_function_range (state, ddata, function, lowpc, highpc, - error_callback, data, vec)) - return 0; - } - else - { - backtrace_free (state, function, sizeof *function, - error_callback, data); - is_function = 0; - } - } - - if (abbrev->has_children) - { - if (!is_function) - { - if (!read_function_entry (state, ddata, u, base, unit_buf, lhdr, - error_callback, data, vec_function, - vec_inlined)) - return 0; - } - else - { - struct function_vector fvec; - - /* Gather any information for inlined functions in - FVEC. 
*/ - - memset (&fvec, 0, sizeof fvec); - - if (!read_function_entry (state, ddata, u, base, unit_buf, lhdr, - error_callback, data, vec_function, - &fvec)) - return 0; - - if (fvec.count > 0) - { - struct function_addrs *faddrs; - - if (!backtrace_vector_release (state, &fvec.vec, - error_callback, data)) - return 0; - - faddrs = (struct function_addrs *) fvec.vec.base; - backtrace_qsort (faddrs, fvec.count, - sizeof (struct function_addrs), - function_addrs_compare); - - function->function_addrs = faddrs; - function->function_addrs_count = fvec.count; - } - } - } - } - - return 1; -} - -/* Read function name information for a compilation unit. We look - through the whole unit looking for function tags. */ - -static void -read_function_info (struct backtrace_state *state, struct dwarf_data *ddata, - const struct line_header *lhdr, - backtrace_error_callback error_callback, void *data, - struct unit *u, struct function_vector *fvec, - struct function_addrs **ret_addrs, - size_t *ret_addrs_count) -{ - struct function_vector lvec; - struct function_vector *pfvec; - struct dwarf_buf unit_buf; - struct function_addrs *addrs; - size_t addrs_count; - - /* Use FVEC if it is not NULL. Otherwise use our own vector. 
*/ - if (fvec != NULL) - pfvec = fvec; - else - { - memset (&lvec, 0, sizeof lvec); - pfvec = &lvec; - } - - unit_buf.name = ".debug_info"; - unit_buf.start = ddata->dwarf_info; - unit_buf.buf = u->unit_data; - unit_buf.left = u->unit_data_len; - unit_buf.is_bigendian = ddata->is_bigendian; - unit_buf.error_callback = error_callback; - unit_buf.data = data; - unit_buf.reported_underflow = 0; - - while (unit_buf.left > 0) - { - if (!read_function_entry (state, ddata, u, 0, &unit_buf, lhdr, - error_callback, data, pfvec, pfvec)) - return; - } - - if (pfvec->count == 0) - return; - - addrs_count = pfvec->count; - - if (fvec == NULL) - { - if (!backtrace_vector_release (state, &lvec.vec, error_callback, data)) - return; - addrs = (struct function_addrs *) pfvec->vec.base; - } - else - { - /* Finish this list of addresses, but leave the remaining space in - the vector available for the next function unit. */ - addrs = ((struct function_addrs *) - backtrace_vector_finish (state, &fvec->vec, - error_callback, data)); - if (addrs == NULL) - return; - fvec->count = 0; - } - - backtrace_qsort (addrs, addrs_count, sizeof (struct function_addrs), - function_addrs_compare); - - *ret_addrs = addrs; - *ret_addrs_count = addrs_count; -} - -/* See if PC is inlined in FUNCTION. If it is, print out the inlined - information, and update FILENAME and LINENO for the caller. - Returns whatever CALLBACK returns, or 0 to keep going. 
*/ - -static int -report_inlined_functions (uintptr_t pc, struct function *function, - backtrace_full_callback callback, void *data, - const char **filename, int *lineno) -{ - struct function_addrs *function_addrs; - struct function *inlined; - int ret; - - if (function->function_addrs_count == 0) - return 0; - - function_addrs = ((struct function_addrs *) - bsearch (&pc, function->function_addrs, - function->function_addrs_count, - sizeof (struct function_addrs), - function_addrs_search)); - if (function_addrs == NULL) - return 0; - - while (((size_t) (function_addrs - function->function_addrs) + 1 - < function->function_addrs_count) - && pc >= (function_addrs + 1)->low - && pc < (function_addrs + 1)->high) - ++function_addrs; - - /* We found an inlined call. */ - - inlined = function_addrs->function; - - /* Report any calls inlined into this one. */ - ret = report_inlined_functions (pc, inlined, callback, data, - filename, lineno); - if (ret != 0) - return ret; - - /* Report this inlined call. */ - ret = callback (data, pc, *filename, *lineno, inlined->name); - if (ret != 0) - return ret; - - /* Our caller will report the caller of the inlined function; tell - it the appropriate filename and line number. */ - *filename = inlined->caller_filename; - *lineno = inlined->caller_lineno; - - return 0; -} - -/* Look for a PC in the DWARF mapping for one module. On success, - call CALLBACK and return whatever it returns. On error, call - ERROR_CALLBACK and return 0. Sets *FOUND to 1 if the PC is found, - 0 if not. 
*/ - -static int -dwarf_lookup_pc (struct backtrace_state *state, struct dwarf_data *ddata, - uintptr_t pc, backtrace_full_callback callback, - backtrace_error_callback error_callback, void *data, - int *found) -{ - struct unit_addrs *entry; - struct unit *u; - int new_data; - struct line *lines; - struct line *ln; - struct function_addrs *function_addrs; - struct function *function; - const char *filename; - int lineno; - int ret; - - *found = 1; - - /* Find an address range that includes PC. */ - entry = bsearch (&pc, ddata->addrs, ddata->addrs_count, - sizeof (struct unit_addrs), unit_addrs_search); - - if (entry == NULL) - { - *found = 0; - return 0; - } - - /* If there are multiple ranges that contain PC, use the last one, - in order to produce predictable results. If we assume that all - ranges are properly nested, then the last range will be the - smallest one. */ - while ((size_t) (entry - ddata->addrs) + 1 < ddata->addrs_count - && pc >= (entry + 1)->low - && pc < (entry + 1)->high) - ++entry; - - /* We need the lines, lines_count, function_addrs, - function_addrs_count fields of u. If they are not set, we need - to set them. When running in threaded mode, we need to allow for - the possibility that some other thread is setting them - simultaneously. */ - - u = entry->u; - lines = u->lines; - - /* Skip units with no useful line number information by walking - backward. Useless line number information is marked by setting - lines == -1. 
*/ - while (entry > ddata->addrs - && pc >= (entry - 1)->low - && pc < (entry - 1)->high) - { - if (state->threaded) - lines = (struct line *) backtrace_atomic_load_pointer (&u->lines); - - if (lines != (struct line *) (uintptr_t) -1) - break; - - --entry; - - u = entry->u; - lines = u->lines; - } - - if (state->threaded) - lines = backtrace_atomic_load_pointer (&u->lines); - - new_data = 0; - if (lines == NULL) - { - size_t function_addrs_count; - struct line_header lhdr; - size_t count; - - /* We have never read the line information for this unit. Read - it now. */ - - function_addrs = NULL; - function_addrs_count = 0; - if (read_line_info (state, ddata, error_callback, data, entry->u, &lhdr, - &lines, &count)) - { - struct function_vector *pfvec; - - /* If not threaded, reuse DDATA->FVEC for better memory - consumption. */ - if (state->threaded) - pfvec = NULL; - else - pfvec = &ddata->fvec; - read_function_info (state, ddata, &lhdr, error_callback, data, - entry->u, pfvec, &function_addrs, - &function_addrs_count); - free_line_header (state, &lhdr, error_callback, data); - new_data = 1; - } - - /* Atomically store the information we just read into the unit. - If another thread is simultaneously writing, it presumably - read the same information, and we don't care which one we - wind up with; we just leak the other one. We do have to - write the lines field last, so that the acquire-loads above - ensure that the other fields are set. */ - - if (!state->threaded) - { - u->lines_count = count; - u->function_addrs = function_addrs; - u->function_addrs_count = function_addrs_count; - u->lines = lines; - } - else - { - backtrace_atomic_store_size_t (&u->lines_count, count); - backtrace_atomic_store_pointer (&u->function_addrs, function_addrs); - backtrace_atomic_store_size_t (&u->function_addrs_count, - function_addrs_count); - backtrace_atomic_store_pointer (&u->lines, lines); - } - } - - /* Now all fields of U have been initialized. 
*/ - - if (lines == (struct line *) (uintptr_t) -1) - { - /* If reading the line number information failed in some way, - try again to see if there is a better compilation unit for - this PC. */ - if (new_data) - return dwarf_lookup_pc (state, ddata, pc, callback, error_callback, - data, found); - return callback (data, pc, NULL, 0, NULL); - } - - /* Search for PC within this unit. */ - - ln = (struct line *) bsearch (&pc, lines, entry->u->lines_count, - sizeof (struct line), line_search); - if (ln == NULL) - { - /* The PC is between the low_pc and high_pc attributes of the - compilation unit, but no entry in the line table covers it. - This implies that the start of the compilation unit has no - line number information. */ - - if (entry->u->abs_filename == NULL) - { - const char *filename; - - filename = entry->u->filename; - if (filename != NULL - && !IS_ABSOLUTE_PATH (filename) - && entry->u->comp_dir != NULL) - { - size_t filename_len; - const char *dir; - size_t dir_len; - char *s; - - filename_len = strlen (filename); - dir = entry->u->comp_dir; - dir_len = strlen (dir); - s = (char *) backtrace_alloc (state, dir_len + filename_len + 2, - error_callback, data); - if (s == NULL) - { - *found = 0; - return 0; - } - memcpy (s, dir, dir_len); - /* FIXME: Should use backslash if DOS file system. */ - s[dir_len] = '/'; - memcpy (s + dir_len + 1, filename, filename_len + 1); - filename = s; - } - entry->u->abs_filename = filename; - } - - return callback (data, pc, entry->u->abs_filename, 0, NULL); - } - - /* Search for function name within this unit. 
*/ - - if (entry->u->function_addrs_count == 0) - return callback (data, pc, ln->filename, ln->lineno, NULL); - - function_addrs = ((struct function_addrs *) - bsearch (&pc, entry->u->function_addrs, - entry->u->function_addrs_count, - sizeof (struct function_addrs), - function_addrs_search)); - if (function_addrs == NULL) - return callback (data, pc, ln->filename, ln->lineno, NULL); - - /* If there are multiple function ranges that contain PC, use the - last one, in order to produce predictable results. */ - - while (((size_t) (function_addrs - entry->u->function_addrs + 1) - < entry->u->function_addrs_count) - && pc >= (function_addrs + 1)->low - && pc < (function_addrs + 1)->high) - ++function_addrs; - - function = function_addrs->function; - - filename = ln->filename; - lineno = ln->lineno; - - ret = report_inlined_functions (pc, function, callback, data, - &filename, &lineno); - if (ret != 0) - return ret; - - return callback (data, pc, filename, lineno, function->name); -} - - -/* Return the file/line information for a PC using the DWARF mapping - we built earlier. */ - -static int -dwarf_fileline (struct backtrace_state *state, uintptr_t pc, - backtrace_full_callback callback, - backtrace_error_callback error_callback, void *data) -{ - struct dwarf_data *ddata; - int found; - int ret; - - if (!state->threaded) - { - for (ddata = (struct dwarf_data *) state->fileline_data; - ddata != NULL; - ddata = ddata->next) - { - ret = dwarf_lookup_pc (state, ddata, pc, callback, error_callback, - data, &found); - if (ret != 0 || found) - return ret; - } - } - else - { - struct dwarf_data **pp; - - pp = (struct dwarf_data **) (void *) &state->fileline_data; - while (1) - { - ddata = backtrace_atomic_load_pointer (pp); - if (ddata == NULL) - break; - - ret = dwarf_lookup_pc (state, ddata, pc, callback, error_callback, - data, &found); - if (ret != 0 || found) - return ret; - - pp = &ddata->next; - } - } - - /* FIXME: See if any libraries have been dlopen'ed. 
*/ - - return callback (data, pc, NULL, 0, NULL); -} - -/* Initialize our data structures from the DWARF debug info for a - file. Return NULL on failure. */ - -static struct dwarf_data * -build_dwarf_data (struct backtrace_state *state, - uintptr_t base_address, - const unsigned char *dwarf_info, - size_t dwarf_info_size, - const unsigned char *dwarf_line, - size_t dwarf_line_size, - const unsigned char *dwarf_abbrev, - size_t dwarf_abbrev_size, - const unsigned char *dwarf_ranges, - size_t dwarf_ranges_size, - const unsigned char *dwarf_str, - size_t dwarf_str_size, - int is_bigendian, - backtrace_error_callback error_callback, - void *data) -{ - struct unit_addrs_vector addrs_vec; - struct unit_addrs *addrs; - size_t addrs_count; - struct dwarf_data *fdata; - - if (!build_address_map (state, base_address, dwarf_info, dwarf_info_size, - dwarf_abbrev, dwarf_abbrev_size, dwarf_ranges, - dwarf_ranges_size, dwarf_str, dwarf_str_size, - is_bigendian, error_callback, data, &addrs_vec)) - return NULL; - - if (!backtrace_vector_release (state, &addrs_vec.vec, error_callback, data)) - return NULL; - addrs = (struct unit_addrs *) addrs_vec.vec.base; - addrs_count = addrs_vec.count; - backtrace_qsort (addrs, addrs_count, sizeof (struct unit_addrs), - unit_addrs_compare); - - fdata = ((struct dwarf_data *) - backtrace_alloc (state, sizeof (struct dwarf_data), - error_callback, data)); - if (fdata == NULL) - return NULL; - - fdata->next = NULL; - fdata->base_address = base_address; - fdata->addrs = addrs; - fdata->addrs_count = addrs_count; - fdata->dwarf_info = dwarf_info; - fdata->dwarf_info_size = dwarf_info_size; - fdata->dwarf_line = dwarf_line; - fdata->dwarf_line_size = dwarf_line_size; - fdata->dwarf_ranges = dwarf_ranges; - fdata->dwarf_ranges_size = dwarf_ranges_size; - fdata->dwarf_str = dwarf_str; - fdata->dwarf_str_size = dwarf_str_size; - fdata->is_bigendian = is_bigendian; - memset (&fdata->fvec, 0, sizeof fdata->fvec); - - return fdata; -} - -/* Build our data 
structures from the DWARF sections for a module. - Set FILELINE_FN and STATE->FILELINE_DATA. Return 1 on success, 0 - on failure. */ - -int -backtrace_dwarf_add (struct backtrace_state *state, - uintptr_t base_address, - const unsigned char *dwarf_info, - size_t dwarf_info_size, - const unsigned char *dwarf_line, - size_t dwarf_line_size, - const unsigned char *dwarf_abbrev, - size_t dwarf_abbrev_size, - const unsigned char *dwarf_ranges, - size_t dwarf_ranges_size, - const unsigned char *dwarf_str, - size_t dwarf_str_size, - int is_bigendian, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn) -{ - struct dwarf_data *fdata; - - fdata = build_dwarf_data (state, base_address, dwarf_info, dwarf_info_size, - dwarf_line, dwarf_line_size, dwarf_abbrev, - dwarf_abbrev_size, dwarf_ranges, dwarf_ranges_size, - dwarf_str, dwarf_str_size, is_bigendian, - error_callback, data); - if (fdata == NULL) - return 0; - - if (!state->threaded) - { - struct dwarf_data **pp; - - for (pp = (struct dwarf_data **) (void *) &state->fileline_data; - *pp != NULL; - pp = &(*pp)->next) - ; - *pp = fdata; - } - else - { - while (1) - { - struct dwarf_data **pp; - - pp = (struct dwarf_data **) (void *) &state->fileline_data; - - while (1) - { - struct dwarf_data *p; - - p = backtrace_atomic_load_pointer (pp); - - if (p == NULL) - break; - - pp = &p->next; - } - - if (__sync_bool_compare_and_swap (pp, NULL, fdata)) - break; - } - } - - *fileline_fn = dwarf_fileline; - - return 1; -} diff --git a/src/libbacktrace/dwarf2.def b/src/libbacktrace/dwarf2.def deleted file mode 100644 index 2dfee5666dea..000000000000 --- a/src/libbacktrace/dwarf2.def +++ /dev/null @@ -1,713 +0,0 @@ -/* -*- c -*- - Declarations and definitions of codes relating to the DWARF2 and - DWARF3 symbolic debugging information formats. - Copyright (C) 1992-2015 Free Software Foundation, Inc. 
- - Written by Gary Funck (gary@intrepid.com) The Ada Joint Program - Office (AJPO), Florida State University and Silicon Graphics Inc. - provided support for this effort -- June 21, 1995. - - Derived from the DWARF 1 implementation written by Ron Guilmette - (rfg@netcom.com), November 1990. - - This file is part of GCC. - - GCC is free software; you can redistribute it and/or modify it under - the terms of the GNU General Public License as published by the Free - Software Foundation; either version 3, or (at your option) any later - version. - - GCC is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public - License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file is derived from the DWARF specification (a public document) - Revision 2.0.0 (July 27, 1993) developed by the UNIX International - Programming Languages Special Interest Group (UI/PLSIG) and distributed - by UNIX International. Copies of this specification are available from - UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. - - This file also now contains definitions from the DWARF 3 specification - published Dec 20, 2005, available from: http://dwarf.freestandards.org. - - This file also now contains definitions from the DWARF 4 - specification, available from: http://dwarfstd.org/ */ - -/* This file declares various DWARF-related constants using a set of - macros which can be redefined by the including file. - - The macros are in sections. 
Each section corresponds to a single - set of DWARF constants and has a corresponding key. The key is - used in all the macro names. - - The sections are TAG (for DW_TAG_ constants), FORM (DW_FORM_), AT - (DW_AT_), OP (DW_OP_), ATE (DW_ATE_), and CFA (DW_CFA_). - - Using TAG as an example, the following macros may be used for each - key: - - DW_FIRST_TAG(name, value) - Introduce the first DW_TAG constant. - - DW_TAG(name, value) - Define a subsequent constant. - - DW_TAG_DUP(name, value) - Define a subsequent constant whose value - is a duplicate of some other constant. Not all keys use the _DUP - macro form. If more than one name shares a value, then the base - (DW_TAG) form will be the preferred name and DW_TAG_DUP will hold - any alternate names. - - DW_END_TAG - Invoked at the end of the DW_TAG constants. */ - -DW_FIRST_TAG (DW_TAG_padding, 0x00) -DW_TAG (DW_TAG_array_type, 0x01) -DW_TAG (DW_TAG_class_type, 0x02) -DW_TAG (DW_TAG_entry_point, 0x03) -DW_TAG (DW_TAG_enumeration_type, 0x04) -DW_TAG (DW_TAG_formal_parameter, 0x05) -DW_TAG (DW_TAG_imported_declaration, 0x08) -DW_TAG (DW_TAG_label, 0x0a) -DW_TAG (DW_TAG_lexical_block, 0x0b) -DW_TAG (DW_TAG_member, 0x0d) -DW_TAG (DW_TAG_pointer_type, 0x0f) -DW_TAG (DW_TAG_reference_type, 0x10) -DW_TAG (DW_TAG_compile_unit, 0x11) -DW_TAG (DW_TAG_string_type, 0x12) -DW_TAG (DW_TAG_structure_type, 0x13) -DW_TAG (DW_TAG_subroutine_type, 0x15) -DW_TAG (DW_TAG_typedef, 0x16) -DW_TAG (DW_TAG_union_type, 0x17) -DW_TAG (DW_TAG_unspecified_parameters, 0x18) -DW_TAG (DW_TAG_variant, 0x19) -DW_TAG (DW_TAG_common_block, 0x1a) -DW_TAG (DW_TAG_common_inclusion, 0x1b) -DW_TAG (DW_TAG_inheritance, 0x1c) -DW_TAG (DW_TAG_inlined_subroutine, 0x1d) -DW_TAG (DW_TAG_module, 0x1e) -DW_TAG (DW_TAG_ptr_to_member_type, 0x1f) -DW_TAG (DW_TAG_set_type, 0x20) -DW_TAG (DW_TAG_subrange_type, 0x21) -DW_TAG (DW_TAG_with_stmt, 0x22) -DW_TAG (DW_TAG_access_declaration, 0x23) -DW_TAG (DW_TAG_base_type, 0x24) -DW_TAG (DW_TAG_catch_block, 0x25) -DW_TAG 
(DW_TAG_const_type, 0x26) -DW_TAG (DW_TAG_constant, 0x27) -DW_TAG (DW_TAG_enumerator, 0x28) -DW_TAG (DW_TAG_file_type, 0x29) -DW_TAG (DW_TAG_friend, 0x2a) -DW_TAG (DW_TAG_namelist, 0x2b) -DW_TAG (DW_TAG_namelist_item, 0x2c) -DW_TAG (DW_TAG_packed_type, 0x2d) -DW_TAG (DW_TAG_subprogram, 0x2e) -DW_TAG (DW_TAG_template_type_param, 0x2f) -DW_TAG (DW_TAG_template_value_param, 0x30) -DW_TAG (DW_TAG_thrown_type, 0x31) -DW_TAG (DW_TAG_try_block, 0x32) -DW_TAG (DW_TAG_variant_part, 0x33) -DW_TAG (DW_TAG_variable, 0x34) -DW_TAG (DW_TAG_volatile_type, 0x35) -/* DWARF 3. */ -DW_TAG (DW_TAG_dwarf_procedure, 0x36) -DW_TAG (DW_TAG_restrict_type, 0x37) -DW_TAG (DW_TAG_interface_type, 0x38) -DW_TAG (DW_TAG_namespace, 0x39) -DW_TAG (DW_TAG_imported_module, 0x3a) -DW_TAG (DW_TAG_unspecified_type, 0x3b) -DW_TAG (DW_TAG_partial_unit, 0x3c) -DW_TAG (DW_TAG_imported_unit, 0x3d) -DW_TAG (DW_TAG_condition, 0x3f) -DW_TAG (DW_TAG_shared_type, 0x40) -/* DWARF 4. */ -DW_TAG (DW_TAG_type_unit, 0x41) -DW_TAG (DW_TAG_rvalue_reference_type, 0x42) -DW_TAG (DW_TAG_template_alias, 0x43) -/* DWARF 5. */ -DW_TAG (DW_TAG_atomic_type, 0x47) - -DW_TAG_DUP (DW_TAG_lo_user, 0x4080) -DW_TAG_DUP (DW_TAG_hi_user, 0xffff) - -/* SGI/MIPS Extensions. */ -DW_TAG (DW_TAG_MIPS_loop, 0x4081) - -/* HP extensions. See: ftp://ftp.hp.com/pub/lang/tools/WDB/wdb-4.0.tar.gz . */ -DW_TAG (DW_TAG_HP_array_descriptor, 0x4090) -DW_TAG (DW_TAG_HP_Bliss_field, 0x4091) -DW_TAG (DW_TAG_HP_Bliss_field_set, 0x4092) - -/* GNU extensions. */ -DW_TAG (DW_TAG_format_label, 0x4101) /* For FORTRAN 77 and Fortran 90. */ -DW_TAG (DW_TAG_function_template, 0x4102) /* For C++. */ -DW_TAG (DW_TAG_class_template, 0x4103) /* For C++. */ -DW_TAG (DW_TAG_GNU_BINCL, 0x4104) -DW_TAG (DW_TAG_GNU_EINCL, 0x4105) -/* Template template parameter. - See http://gcc.gnu.org/wiki/TemplateParmsDwarf . 
*/ -DW_TAG (DW_TAG_GNU_template_template_param, 0x4106) - -/* Template parameter pack extension, specified at - http://wiki.dwarfstd.org/index.php?title=C%2B%2B0x:_Variadic_templates - The values of these two TAGS are in the DW_TAG_GNU_* space until the tags - are properly part of DWARF 5. */ -DW_TAG (DW_TAG_GNU_template_parameter_pack, 0x4107) -DW_TAG (DW_TAG_GNU_formal_parameter_pack, 0x4108) -/* The GNU call site extension, specified at - http://www.dwarfstd.org/ShowIssue.php?issue=100909.2&type=open . - The values of these two TAGS are in the DW_TAG_GNU_* space until the tags - are properly part of DWARF 5. */ -DW_TAG (DW_TAG_GNU_call_site, 0x4109) -DW_TAG (DW_TAG_GNU_call_site_parameter, 0x410a) -/* Extensions for UPC. See: http://dwarfstd.org/doc/DWARF4.pdf. */ -DW_TAG (DW_TAG_upc_shared_type, 0x8765) -DW_TAG (DW_TAG_upc_strict_type, 0x8766) -DW_TAG (DW_TAG_upc_relaxed_type, 0x8767) -/* PGI (STMicroelectronics) extensions. No documentation available. */ -DW_TAG (DW_TAG_PGI_kanji_type, 0xA000) -DW_TAG (DW_TAG_PGI_interface_block, 0xA020) -DW_END_TAG - -DW_FIRST_FORM (DW_FORM_addr, 0x01) -DW_FORM (DW_FORM_block2, 0x03) -DW_FORM (DW_FORM_block4, 0x04) -DW_FORM (DW_FORM_data2, 0x05) -DW_FORM (DW_FORM_data4, 0x06) -DW_FORM (DW_FORM_data8, 0x07) -DW_FORM (DW_FORM_string, 0x08) -DW_FORM (DW_FORM_block, 0x09) -DW_FORM (DW_FORM_block1, 0x0a) -DW_FORM (DW_FORM_data1, 0x0b) -DW_FORM (DW_FORM_flag, 0x0c) -DW_FORM (DW_FORM_sdata, 0x0d) -DW_FORM (DW_FORM_strp, 0x0e) -DW_FORM (DW_FORM_udata, 0x0f) -DW_FORM (DW_FORM_ref_addr, 0x10) -DW_FORM (DW_FORM_ref1, 0x11) -DW_FORM (DW_FORM_ref2, 0x12) -DW_FORM (DW_FORM_ref4, 0x13) -DW_FORM (DW_FORM_ref8, 0x14) -DW_FORM (DW_FORM_ref_udata, 0x15) -DW_FORM (DW_FORM_indirect, 0x16) -/* DWARF 4. */ -DW_FORM (DW_FORM_sec_offset, 0x17) -DW_FORM (DW_FORM_exprloc, 0x18) -DW_FORM (DW_FORM_flag_present, 0x19) -DW_FORM (DW_FORM_ref_sig8, 0x20) -/* Extensions for Fission. See http://gcc.gnu.org/wiki/DebugFission. 
*/ -DW_FORM (DW_FORM_GNU_addr_index, 0x1f01) -DW_FORM (DW_FORM_GNU_str_index, 0x1f02) -/* Extensions for DWZ multifile. - See http://www.dwarfstd.org/ShowIssue.php?issue=120604.1&type=open . */ -DW_FORM (DW_FORM_GNU_ref_alt, 0x1f20) -DW_FORM (DW_FORM_GNU_strp_alt, 0x1f21) -DW_END_FORM - -DW_FIRST_AT (DW_AT_sibling, 0x01) -DW_AT (DW_AT_location, 0x02) -DW_AT (DW_AT_name, 0x03) -DW_AT (DW_AT_ordering, 0x09) -DW_AT (DW_AT_subscr_data, 0x0a) -DW_AT (DW_AT_byte_size, 0x0b) -DW_AT (DW_AT_bit_offset, 0x0c) -DW_AT (DW_AT_bit_size, 0x0d) -DW_AT (DW_AT_element_list, 0x0f) -DW_AT (DW_AT_stmt_list, 0x10) -DW_AT (DW_AT_low_pc, 0x11) -DW_AT (DW_AT_high_pc, 0x12) -DW_AT (DW_AT_language, 0x13) -DW_AT (DW_AT_member, 0x14) -DW_AT (DW_AT_discr, 0x15) -DW_AT (DW_AT_discr_value, 0x16) -DW_AT (DW_AT_visibility, 0x17) -DW_AT (DW_AT_import, 0x18) -DW_AT (DW_AT_string_length, 0x19) -DW_AT (DW_AT_common_reference, 0x1a) -DW_AT (DW_AT_comp_dir, 0x1b) -DW_AT (DW_AT_const_value, 0x1c) -DW_AT (DW_AT_containing_type, 0x1d) -DW_AT (DW_AT_default_value, 0x1e) -DW_AT (DW_AT_inline, 0x20) -DW_AT (DW_AT_is_optional, 0x21) -DW_AT (DW_AT_lower_bound, 0x22) -DW_AT (DW_AT_producer, 0x25) -DW_AT (DW_AT_prototyped, 0x27) -DW_AT (DW_AT_return_addr, 0x2a) -DW_AT (DW_AT_start_scope, 0x2c) -DW_AT (DW_AT_bit_stride, 0x2e) -DW_AT (DW_AT_upper_bound, 0x2f) -DW_AT (DW_AT_abstract_origin, 0x31) -DW_AT (DW_AT_accessibility, 0x32) -DW_AT (DW_AT_address_class, 0x33) -DW_AT (DW_AT_artificial, 0x34) -DW_AT (DW_AT_base_types, 0x35) -DW_AT (DW_AT_calling_convention, 0x36) -DW_AT (DW_AT_count, 0x37) -DW_AT (DW_AT_data_member_location, 0x38) -DW_AT (DW_AT_decl_column, 0x39) -DW_AT (DW_AT_decl_file, 0x3a) -DW_AT (DW_AT_decl_line, 0x3b) -DW_AT (DW_AT_declaration, 0x3c) -DW_AT (DW_AT_discr_list, 0x3d) -DW_AT (DW_AT_encoding, 0x3e) -DW_AT (DW_AT_external, 0x3f) -DW_AT (DW_AT_frame_base, 0x40) -DW_AT (DW_AT_friend, 0x41) -DW_AT (DW_AT_identifier_case, 0x42) -DW_AT (DW_AT_macro_info, 0x43) -DW_AT (DW_AT_namelist_items, 0x44) 
-DW_AT (DW_AT_priority, 0x45) -DW_AT (DW_AT_segment, 0x46) -DW_AT (DW_AT_specification, 0x47) -DW_AT (DW_AT_static_link, 0x48) -DW_AT (DW_AT_type, 0x49) -DW_AT (DW_AT_use_location, 0x4a) -DW_AT (DW_AT_variable_parameter, 0x4b) -DW_AT (DW_AT_virtuality, 0x4c) -DW_AT (DW_AT_vtable_elem_location, 0x4d) -/* DWARF 3 values. */ -DW_AT (DW_AT_allocated, 0x4e) -DW_AT (DW_AT_associated, 0x4f) -DW_AT (DW_AT_data_location, 0x50) -DW_AT (DW_AT_byte_stride, 0x51) -DW_AT (DW_AT_entry_pc, 0x52) -DW_AT (DW_AT_use_UTF8, 0x53) -DW_AT (DW_AT_extension, 0x54) -DW_AT (DW_AT_ranges, 0x55) -DW_AT (DW_AT_trampoline, 0x56) -DW_AT (DW_AT_call_column, 0x57) -DW_AT (DW_AT_call_file, 0x58) -DW_AT (DW_AT_call_line, 0x59) -DW_AT (DW_AT_description, 0x5a) -DW_AT (DW_AT_binary_scale, 0x5b) -DW_AT (DW_AT_decimal_scale, 0x5c) -DW_AT (DW_AT_small, 0x5d) -DW_AT (DW_AT_decimal_sign, 0x5e) -DW_AT (DW_AT_digit_count, 0x5f) -DW_AT (DW_AT_picture_string, 0x60) -DW_AT (DW_AT_mutable, 0x61) -DW_AT (DW_AT_threads_scaled, 0x62) -DW_AT (DW_AT_explicit, 0x63) -DW_AT (DW_AT_object_pointer, 0x64) -DW_AT (DW_AT_endianity, 0x65) -DW_AT (DW_AT_elemental, 0x66) -DW_AT (DW_AT_pure, 0x67) -DW_AT (DW_AT_recursive, 0x68) -/* DWARF 4. */ -DW_AT (DW_AT_signature, 0x69) -DW_AT (DW_AT_main_subprogram, 0x6a) -DW_AT (DW_AT_data_bit_offset, 0x6b) -DW_AT (DW_AT_const_expr, 0x6c) -DW_AT (DW_AT_enum_class, 0x6d) -DW_AT (DW_AT_linkage_name, 0x6e) -/* DWARF 5. */ -DW_AT (DW_AT_noreturn, 0x87) - -DW_AT_DUP (DW_AT_lo_user, 0x2000) /* Implementation-defined range start. */ -DW_AT_DUP (DW_AT_hi_user, 0x3fff) /* Implementation-defined range end. */ - -/* SGI/MIPS extensions. 
*/ -DW_AT (DW_AT_MIPS_fde, 0x2001) -DW_AT (DW_AT_MIPS_loop_begin, 0x2002) -DW_AT (DW_AT_MIPS_tail_loop_begin, 0x2003) -DW_AT (DW_AT_MIPS_epilog_begin, 0x2004) -DW_AT (DW_AT_MIPS_loop_unroll_factor, 0x2005) -DW_AT (DW_AT_MIPS_software_pipeline_depth, 0x2006) -DW_AT (DW_AT_MIPS_linkage_name, 0x2007) -DW_AT (DW_AT_MIPS_stride, 0x2008) -DW_AT (DW_AT_MIPS_abstract_name, 0x2009) -DW_AT (DW_AT_MIPS_clone_origin, 0x200a) -DW_AT (DW_AT_MIPS_has_inlines, 0x200b) -/* HP extensions. */ -DW_AT (DW_AT_HP_block_index, 0x2000) -DW_AT_DUP (DW_AT_HP_unmodifiable, 0x2001) /* Same as DW_AT_MIPS_fde. */ -DW_AT_DUP (DW_AT_HP_prologue, 0x2005) /* Same as DW_AT_MIPS_loop_unroll. */ -DW_AT_DUP (DW_AT_HP_epilogue, 0x2008) /* Same as DW_AT_MIPS_stride. */ -DW_AT (DW_AT_HP_actuals_stmt_list, 0x2010) -DW_AT (DW_AT_HP_proc_per_section, 0x2011) -DW_AT (DW_AT_HP_raw_data_ptr, 0x2012) -DW_AT (DW_AT_HP_pass_by_reference, 0x2013) -DW_AT (DW_AT_HP_opt_level, 0x2014) -DW_AT (DW_AT_HP_prof_version_id, 0x2015) -DW_AT (DW_AT_HP_opt_flags, 0x2016) -DW_AT (DW_AT_HP_cold_region_low_pc, 0x2017) -DW_AT (DW_AT_HP_cold_region_high_pc, 0x2018) -DW_AT (DW_AT_HP_all_variables_modifiable, 0x2019) -DW_AT (DW_AT_HP_linkage_name, 0x201a) -DW_AT (DW_AT_HP_prof_flags, 0x201b) /* In comp unit of procs_info for -g. */ -DW_AT (DW_AT_HP_unit_name, 0x201f) -DW_AT (DW_AT_HP_unit_size, 0x2020) -DW_AT (DW_AT_HP_widened_byte_size, 0x2021) -DW_AT (DW_AT_HP_definition_points, 0x2022) -DW_AT (DW_AT_HP_default_location, 0x2023) -DW_AT (DW_AT_HP_is_result_param, 0x2029) - -/* GNU extensions. */ -DW_AT (DW_AT_sf_names, 0x2101) -DW_AT (DW_AT_src_info, 0x2102) -DW_AT (DW_AT_mac_info, 0x2103) -DW_AT (DW_AT_src_coords, 0x2104) -DW_AT (DW_AT_body_begin, 0x2105) -DW_AT (DW_AT_body_end, 0x2106) -DW_AT (DW_AT_GNU_vector, 0x2107) -/* Thread-safety annotations. - See http://gcc.gnu.org/wiki/ThreadSafetyAnnotation . 
*/ -DW_AT (DW_AT_GNU_guarded_by, 0x2108) -DW_AT (DW_AT_GNU_pt_guarded_by, 0x2109) -DW_AT (DW_AT_GNU_guarded, 0x210a) -DW_AT (DW_AT_GNU_pt_guarded, 0x210b) -DW_AT (DW_AT_GNU_locks_excluded, 0x210c) -DW_AT (DW_AT_GNU_exclusive_locks_required, 0x210d) -DW_AT (DW_AT_GNU_shared_locks_required, 0x210e) -/* One-definition rule violation detection. - See http://gcc.gnu.org/wiki/DwarfSeparateTypeInfo . */ -DW_AT (DW_AT_GNU_odr_signature, 0x210f) -/* Template template argument name. - See http://gcc.gnu.org/wiki/TemplateParmsDwarf . */ -DW_AT (DW_AT_GNU_template_name, 0x2110) -/* The GNU call site extension. - See http://www.dwarfstd.org/ShowIssue.php?issue=100909.2&type=open . */ -DW_AT (DW_AT_GNU_call_site_value, 0x2111) -DW_AT (DW_AT_GNU_call_site_data_value, 0x2112) -DW_AT (DW_AT_GNU_call_site_target, 0x2113) -DW_AT (DW_AT_GNU_call_site_target_clobbered, 0x2114) -DW_AT (DW_AT_GNU_tail_call, 0x2115) -DW_AT (DW_AT_GNU_all_tail_call_sites, 0x2116) -DW_AT (DW_AT_GNU_all_call_sites, 0x2117) -DW_AT (DW_AT_GNU_all_source_call_sites, 0x2118) -/* Section offset into .debug_macro section. */ -DW_AT (DW_AT_GNU_macros, 0x2119) -/* Attribute for C++ deleted special member functions (= delete;). */ -DW_AT (DW_AT_GNU_deleted, 0x211a) -/* Extensions for Fission. See http://gcc.gnu.org/wiki/DebugFission. */ -DW_AT (DW_AT_GNU_dwo_name, 0x2130) -DW_AT (DW_AT_GNU_dwo_id, 0x2131) -DW_AT (DW_AT_GNU_ranges_base, 0x2132) -DW_AT (DW_AT_GNU_addr_base, 0x2133) -DW_AT (DW_AT_GNU_pubnames, 0x2134) -DW_AT (DW_AT_GNU_pubtypes, 0x2135) -/* Attribute for discriminator. - See http://gcc.gnu.org/wiki/Discriminator */ -DW_AT (DW_AT_GNU_discriminator, 0x2136) -/* VMS extensions. */ -DW_AT (DW_AT_VMS_rtnbeg_pd_address, 0x2201) -/* GNAT extensions. */ -/* GNAT descriptive type. - See http://gcc.gnu.org/wiki/DW_AT_GNAT_descriptive_type . */ -DW_AT (DW_AT_use_GNAT_descriptive_type, 0x2301) -DW_AT (DW_AT_GNAT_descriptive_type, 0x2302) -/* Rational constant extension. 
- See https://gcc.gnu.org/wiki/DW_AT_GNU_numerator_denominator . */ -DW_TAG (DW_AT_GNU_numerator, 0x2303) -DW_TAG (DW_AT_GNU_denominator, 0x2304) -/* Biased integer extension. - See https://gcc.gnu.org/wiki/DW_AT_GNU_bias . */ -DW_TAG (DW_AT_GNU_bias, 0x2305) -/* UPC extension. */ -DW_AT (DW_AT_upc_threads_scaled, 0x3210) -/* PGI (STMicroelectronics) extensions. */ -DW_AT (DW_AT_PGI_lbase, 0x3a00) -DW_AT (DW_AT_PGI_soffset, 0x3a01) -DW_AT (DW_AT_PGI_lstride, 0x3a02) -/* Apple extensions. */ -DW_AT (DW_AT_APPLE_optimized, 0x3fe1) -DW_AT (DW_AT_APPLE_flags, 0x3fe2) -DW_AT (DW_AT_APPLE_isa, 0x3fe3) -DW_AT (DW_AT_APPLE_block, 0x3fe4) -DW_AT (DW_AT_APPLE_major_runtime_vers, 0x3fe5) -DW_AT (DW_AT_APPLE_runtime_class, 0x3fe6) -DW_AT (DW_AT_APPLE_omit_frame_ptr, 0x3fe7) -DW_AT (DW_AT_APPLE_property_name, 0x3fe8) -DW_AT (DW_AT_APPLE_property_getter, 0x3fe9) -DW_AT (DW_AT_APPLE_property_setter, 0x3fea) -DW_AT (DW_AT_APPLE_property_attribute, 0x3feb) -DW_AT (DW_AT_APPLE_objc_complete_type, 0x3fec) -DW_AT (DW_AT_APPLE_property, 0x3fed) -DW_END_AT - -DW_FIRST_OP (DW_OP_addr, 0x03) -DW_OP (DW_OP_deref, 0x06) -DW_OP (DW_OP_const1u, 0x08) -DW_OP (DW_OP_const1s, 0x09) -DW_OP (DW_OP_const2u, 0x0a) -DW_OP (DW_OP_const2s, 0x0b) -DW_OP (DW_OP_const4u, 0x0c) -DW_OP (DW_OP_const4s, 0x0d) -DW_OP (DW_OP_const8u, 0x0e) -DW_OP (DW_OP_const8s, 0x0f) -DW_OP (DW_OP_constu, 0x10) -DW_OP (DW_OP_consts, 0x11) -DW_OP (DW_OP_dup, 0x12) -DW_OP (DW_OP_drop, 0x13) -DW_OP (DW_OP_over, 0x14) -DW_OP (DW_OP_pick, 0x15) -DW_OP (DW_OP_swap, 0x16) -DW_OP (DW_OP_rot, 0x17) -DW_OP (DW_OP_xderef, 0x18) -DW_OP (DW_OP_abs, 0x19) -DW_OP (DW_OP_and, 0x1a) -DW_OP (DW_OP_div, 0x1b) -DW_OP (DW_OP_minus, 0x1c) -DW_OP (DW_OP_mod, 0x1d) -DW_OP (DW_OP_mul, 0x1e) -DW_OP (DW_OP_neg, 0x1f) -DW_OP (DW_OP_not, 0x20) -DW_OP (DW_OP_or, 0x21) -DW_OP (DW_OP_plus, 0x22) -DW_OP (DW_OP_plus_uconst, 0x23) -DW_OP (DW_OP_shl, 0x24) -DW_OP (DW_OP_shr, 0x25) -DW_OP (DW_OP_shra, 0x26) -DW_OP (DW_OP_xor, 0x27) -DW_OP (DW_OP_bra, 0x28) -DW_OP 
(DW_OP_eq, 0x29) -DW_OP (DW_OP_ge, 0x2a) -DW_OP (DW_OP_gt, 0x2b) -DW_OP (DW_OP_le, 0x2c) -DW_OP (DW_OP_lt, 0x2d) -DW_OP (DW_OP_ne, 0x2e) -DW_OP (DW_OP_skip, 0x2f) -DW_OP (DW_OP_lit0, 0x30) -DW_OP (DW_OP_lit1, 0x31) -DW_OP (DW_OP_lit2, 0x32) -DW_OP (DW_OP_lit3, 0x33) -DW_OP (DW_OP_lit4, 0x34) -DW_OP (DW_OP_lit5, 0x35) -DW_OP (DW_OP_lit6, 0x36) -DW_OP (DW_OP_lit7, 0x37) -DW_OP (DW_OP_lit8, 0x38) -DW_OP (DW_OP_lit9, 0x39) -DW_OP (DW_OP_lit10, 0x3a) -DW_OP (DW_OP_lit11, 0x3b) -DW_OP (DW_OP_lit12, 0x3c) -DW_OP (DW_OP_lit13, 0x3d) -DW_OP (DW_OP_lit14, 0x3e) -DW_OP (DW_OP_lit15, 0x3f) -DW_OP (DW_OP_lit16, 0x40) -DW_OP (DW_OP_lit17, 0x41) -DW_OP (DW_OP_lit18, 0x42) -DW_OP (DW_OP_lit19, 0x43) -DW_OP (DW_OP_lit20, 0x44) -DW_OP (DW_OP_lit21, 0x45) -DW_OP (DW_OP_lit22, 0x46) -DW_OP (DW_OP_lit23, 0x47) -DW_OP (DW_OP_lit24, 0x48) -DW_OP (DW_OP_lit25, 0x49) -DW_OP (DW_OP_lit26, 0x4a) -DW_OP (DW_OP_lit27, 0x4b) -DW_OP (DW_OP_lit28, 0x4c) -DW_OP (DW_OP_lit29, 0x4d) -DW_OP (DW_OP_lit30, 0x4e) -DW_OP (DW_OP_lit31, 0x4f) -DW_OP (DW_OP_reg0, 0x50) -DW_OP (DW_OP_reg1, 0x51) -DW_OP (DW_OP_reg2, 0x52) -DW_OP (DW_OP_reg3, 0x53) -DW_OP (DW_OP_reg4, 0x54) -DW_OP (DW_OP_reg5, 0x55) -DW_OP (DW_OP_reg6, 0x56) -DW_OP (DW_OP_reg7, 0x57) -DW_OP (DW_OP_reg8, 0x58) -DW_OP (DW_OP_reg9, 0x59) -DW_OP (DW_OP_reg10, 0x5a) -DW_OP (DW_OP_reg11, 0x5b) -DW_OP (DW_OP_reg12, 0x5c) -DW_OP (DW_OP_reg13, 0x5d) -DW_OP (DW_OP_reg14, 0x5e) -DW_OP (DW_OP_reg15, 0x5f) -DW_OP (DW_OP_reg16, 0x60) -DW_OP (DW_OP_reg17, 0x61) -DW_OP (DW_OP_reg18, 0x62) -DW_OP (DW_OP_reg19, 0x63) -DW_OP (DW_OP_reg20, 0x64) -DW_OP (DW_OP_reg21, 0x65) -DW_OP (DW_OP_reg22, 0x66) -DW_OP (DW_OP_reg23, 0x67) -DW_OP (DW_OP_reg24, 0x68) -DW_OP (DW_OP_reg25, 0x69) -DW_OP (DW_OP_reg26, 0x6a) -DW_OP (DW_OP_reg27, 0x6b) -DW_OP (DW_OP_reg28, 0x6c) -DW_OP (DW_OP_reg29, 0x6d) -DW_OP (DW_OP_reg30, 0x6e) -DW_OP (DW_OP_reg31, 0x6f) -DW_OP (DW_OP_breg0, 0x70) -DW_OP (DW_OP_breg1, 0x71) -DW_OP (DW_OP_breg2, 0x72) -DW_OP (DW_OP_breg3, 0x73) -DW_OP (DW_OP_breg4, 
0x74) -DW_OP (DW_OP_breg5, 0x75) -DW_OP (DW_OP_breg6, 0x76) -DW_OP (DW_OP_breg7, 0x77) -DW_OP (DW_OP_breg8, 0x78) -DW_OP (DW_OP_breg9, 0x79) -DW_OP (DW_OP_breg10, 0x7a) -DW_OP (DW_OP_breg11, 0x7b) -DW_OP (DW_OP_breg12, 0x7c) -DW_OP (DW_OP_breg13, 0x7d) -DW_OP (DW_OP_breg14, 0x7e) -DW_OP (DW_OP_breg15, 0x7f) -DW_OP (DW_OP_breg16, 0x80) -DW_OP (DW_OP_breg17, 0x81) -DW_OP (DW_OP_breg18, 0x82) -DW_OP (DW_OP_breg19, 0x83) -DW_OP (DW_OP_breg20, 0x84) -DW_OP (DW_OP_breg21, 0x85) -DW_OP (DW_OP_breg22, 0x86) -DW_OP (DW_OP_breg23, 0x87) -DW_OP (DW_OP_breg24, 0x88) -DW_OP (DW_OP_breg25, 0x89) -DW_OP (DW_OP_breg26, 0x8a) -DW_OP (DW_OP_breg27, 0x8b) -DW_OP (DW_OP_breg28, 0x8c) -DW_OP (DW_OP_breg29, 0x8d) -DW_OP (DW_OP_breg30, 0x8e) -DW_OP (DW_OP_breg31, 0x8f) -DW_OP (DW_OP_regx, 0x90) -DW_OP (DW_OP_fbreg, 0x91) -DW_OP (DW_OP_bregx, 0x92) -DW_OP (DW_OP_piece, 0x93) -DW_OP (DW_OP_deref_size, 0x94) -DW_OP (DW_OP_xderef_size, 0x95) -DW_OP (DW_OP_nop, 0x96) -/* DWARF 3 extensions. */ -DW_OP (DW_OP_push_object_address, 0x97) -DW_OP (DW_OP_call2, 0x98) -DW_OP (DW_OP_call4, 0x99) -DW_OP (DW_OP_call_ref, 0x9a) -DW_OP (DW_OP_form_tls_address, 0x9b) -DW_OP (DW_OP_call_frame_cfa, 0x9c) -DW_OP (DW_OP_bit_piece, 0x9d) - -/* DWARF 4 extensions. */ -DW_OP (DW_OP_implicit_value, 0x9e) -DW_OP (DW_OP_stack_value, 0x9f) - -DW_OP_DUP (DW_OP_lo_user, 0xe0) /* Implementation-defined range start. */ -DW_OP_DUP (DW_OP_hi_user, 0xff) /* Implementation-defined range end. */ - -/* GNU extensions. */ -DW_OP (DW_OP_GNU_push_tls_address, 0xe0) -/* The following is for marking variables that are uninitialized. */ -DW_OP (DW_OP_GNU_uninit, 0xf0) -DW_OP (DW_OP_GNU_encoded_addr, 0xf1) -/* The GNU implicit pointer extension. - See http://www.dwarfstd.org/ShowIssue.php?issue=100831.1&type=open . */ -DW_OP (DW_OP_GNU_implicit_pointer, 0xf2) -/* The GNU entry value extension. - See http://www.dwarfstd.org/ShowIssue.php?issue=100909.1&type=open . 
*/ -DW_OP (DW_OP_GNU_entry_value, 0xf3) -/* The GNU typed stack extension. - See http://www.dwarfstd.org/doc/040408.1.html . */ -DW_OP (DW_OP_GNU_const_type, 0xf4) -DW_OP (DW_OP_GNU_regval_type, 0xf5) -DW_OP (DW_OP_GNU_deref_type, 0xf6) -DW_OP (DW_OP_GNU_convert, 0xf7) -DW_OP (DW_OP_GNU_reinterpret, 0xf9) -/* The GNU parameter ref extension. */ -DW_OP (DW_OP_GNU_parameter_ref, 0xfa) -/* Extensions for Fission. See http://gcc.gnu.org/wiki/DebugFission. */ -DW_OP (DW_OP_GNU_addr_index, 0xfb) -DW_OP (DW_OP_GNU_const_index, 0xfc) -/* HP extensions. */ -DW_OP_DUP (DW_OP_HP_unknown, 0xe0) /* Ouch, the same as GNU_push_tls_address. */ -DW_OP (DW_OP_HP_is_value, 0xe1) -DW_OP (DW_OP_HP_fltconst4, 0xe2) -DW_OP (DW_OP_HP_fltconst8, 0xe3) -DW_OP (DW_OP_HP_mod_range, 0xe4) -DW_OP (DW_OP_HP_unmod_range, 0xe5) -DW_OP (DW_OP_HP_tls, 0xe6) -/* PGI (STMicroelectronics) extensions. */ -DW_OP (DW_OP_PGI_omp_thread_num, 0xf8) -DW_END_OP - -DW_FIRST_ATE (DW_ATE_void, 0x0) -DW_ATE (DW_ATE_address, 0x1) -DW_ATE (DW_ATE_boolean, 0x2) -DW_ATE (DW_ATE_complex_float, 0x3) -DW_ATE (DW_ATE_float, 0x4) -DW_ATE (DW_ATE_signed, 0x5) -DW_ATE (DW_ATE_signed_char, 0x6) -DW_ATE (DW_ATE_unsigned, 0x7) -DW_ATE (DW_ATE_unsigned_char, 0x8) -/* DWARF 3. */ -DW_ATE (DW_ATE_imaginary_float, 0x9) -DW_ATE (DW_ATE_packed_decimal, 0xa) -DW_ATE (DW_ATE_numeric_string, 0xb) -DW_ATE (DW_ATE_edited, 0xc) -DW_ATE (DW_ATE_signed_fixed, 0xd) -DW_ATE (DW_ATE_unsigned_fixed, 0xe) -DW_ATE (DW_ATE_decimal_float, 0xf) -/* DWARF 4. */ -DW_ATE (DW_ATE_UTF, 0x10) - -DW_ATE_DUP (DW_ATE_lo_user, 0x80) -DW_ATE_DUP (DW_ATE_hi_user, 0xff) - -/* HP extensions. */ -DW_ATE (DW_ATE_HP_float80, 0x80) /* Floating-point (80 bit). */ -DW_ATE (DW_ATE_HP_complex_float80, 0x81) /* Complex floating-point (80 bit). */ -DW_ATE (DW_ATE_HP_float128, 0x82) /* Floating-point (128 bit). */ -DW_ATE (DW_ATE_HP_complex_float128, 0x83) /* Complex fp (128 bit). */ -DW_ATE (DW_ATE_HP_floathpintel, 0x84) /* Floating-point (82 bit IA64). 
*/ -DW_ATE (DW_ATE_HP_imaginary_float80, 0x85) -DW_ATE (DW_ATE_HP_imaginary_float128, 0x86) -DW_ATE (DW_ATE_HP_VAX_float, 0x88) /* F or G floating. */ -DW_ATE (DW_ATE_HP_VAX_float_d, 0x89) /* D floating. */ -DW_ATE (DW_ATE_HP_packed_decimal, 0x8a) /* Cobol. */ -DW_ATE (DW_ATE_HP_zoned_decimal, 0x8b) /* Cobol. */ -DW_ATE (DW_ATE_HP_edited, 0x8c) /* Cobol. */ -DW_ATE (DW_ATE_HP_signed_fixed, 0x8d) /* Cobol. */ -DW_ATE (DW_ATE_HP_unsigned_fixed, 0x8e) /* Cobol. */ -DW_ATE (DW_ATE_HP_VAX_complex_float, 0x8f) /* F or G floating complex. */ -DW_ATE (DW_ATE_HP_VAX_complex_float_d, 0x90) /* D floating complex. */ - -DW_END_ATE - -DW_FIRST_CFA (DW_CFA_advance_loc, 0x40) -DW_CFA (DW_CFA_offset, 0x80) -DW_CFA (DW_CFA_restore, 0xc0) -DW_CFA (DW_CFA_nop, 0x00) -DW_CFA (DW_CFA_set_loc, 0x01) -DW_CFA (DW_CFA_advance_loc1, 0x02) -DW_CFA (DW_CFA_advance_loc2, 0x03) -DW_CFA (DW_CFA_advance_loc4, 0x04) -DW_CFA (DW_CFA_offset_extended, 0x05) -DW_CFA (DW_CFA_restore_extended, 0x06) -DW_CFA (DW_CFA_undefined, 0x07) -DW_CFA (DW_CFA_same_value, 0x08) -DW_CFA (DW_CFA_register, 0x09) -DW_CFA (DW_CFA_remember_state, 0x0a) -DW_CFA (DW_CFA_restore_state, 0x0b) -DW_CFA (DW_CFA_def_cfa, 0x0c) -DW_CFA (DW_CFA_def_cfa_register, 0x0d) -DW_CFA (DW_CFA_def_cfa_offset, 0x0e) -/* DWARF 3. */ -DW_CFA (DW_CFA_def_cfa_expression, 0x0f) -DW_CFA (DW_CFA_expression, 0x10) -DW_CFA (DW_CFA_offset_extended_sf, 0x11) -DW_CFA (DW_CFA_def_cfa_sf, 0x12) -DW_CFA (DW_CFA_def_cfa_offset_sf, 0x13) -DW_CFA (DW_CFA_val_offset, 0x14) -DW_CFA (DW_CFA_val_offset_sf, 0x15) -DW_CFA (DW_CFA_val_expression, 0x16) - -DW_CFA (DW_CFA_lo_user, 0x1c) -DW_CFA (DW_CFA_hi_user, 0x3f) - -/* SGI/MIPS specific. */ -DW_CFA (DW_CFA_MIPS_advance_loc8, 0x1d) -/* GNU extensions. 
*/ -DW_CFA (DW_CFA_GNU_window_save, 0x2d) -DW_CFA (DW_CFA_GNU_args_size, 0x2e) -DW_CFA (DW_CFA_GNU_negative_offset_extended, 0x2f) - -DW_END_CFA diff --git a/src/libbacktrace/dwarf2.h b/src/libbacktrace/dwarf2.h deleted file mode 100644 index 4ada87162fa8..000000000000 --- a/src/libbacktrace/dwarf2.h +++ /dev/null @@ -1,430 +0,0 @@ -/* Declarations and definitions of codes relating to the DWARF2 and - DWARF3 symbolic debugging information formats. - Copyright (C) 1992-2015 Free Software Foundation, Inc. - - Written by Gary Funck (gary@intrepid.com) The Ada Joint Program - Office (AJPO), Florida State University and Silicon Graphics Inc. - provided support for this effort -- June 21, 1995. - - Derived from the DWARF 1 implementation written by Ron Guilmette - (rfg@netcom.com), November 1990. - - This file is part of GCC. - - GCC is free software; you can redistribute it and/or modify it under - the terms of the GNU General Public License as published by the Free - Software Foundation; either version 3, or (at your option) any later - version. - - GCC is distributed in the hope that it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public - License for more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file is derived from the DWARF specification (a public document) - Revision 2.0.0 (July 27, 1993) developed by the UNIX International - Programming Languages Special Interest Group (UI/PLSIG) and distributed - by UNIX International. 
Copies of this specification are available from - UNIX International, 20 Waterview Boulevard, Parsippany, NJ, 07054. - - This file also now contains definitions from the DWARF 3 specification - published Dec 20, 2005, available from: http://dwarf.freestandards.org. */ - -#ifndef _DWARF2_H -#define _DWARF2_H - -#define DW_TAG(name, value) , name = value -#define DW_TAG_DUP(name, value) , name = value -#define DW_FORM(name, value) , name = value -#define DW_AT(name, value) , name = value -#define DW_AT_DUP(name, value) , name = value -#define DW_OP(name, value) , name = value -#define DW_OP_DUP(name, value) , name = value -#define DW_ATE(name, value) , name = value -#define DW_ATE_DUP(name, value) , name = value -#define DW_CFA(name, value) , name = value - -#define DW_FIRST_TAG(name, value) enum dwarf_tag { \ - name = value -#define DW_END_TAG }; -#define DW_FIRST_FORM(name, value) enum dwarf_form { \ - name = value -#define DW_END_FORM }; -#define DW_FIRST_AT(name, value) enum dwarf_attribute { \ - name = value -#define DW_END_AT }; -#define DW_FIRST_OP(name, value) enum dwarf_location_atom { \ - name = value -#define DW_END_OP }; -#define DW_FIRST_ATE(name, value) enum dwarf_type { \ - name = value -#define DW_END_ATE }; -#define DW_FIRST_CFA(name, value) enum dwarf_call_frame_info { \ - name = value -#define DW_END_CFA }; - -#include "dwarf2.def" - -#undef DW_FIRST_TAG -#undef DW_END_TAG -#undef DW_FIRST_FORM -#undef DW_END_FORM -#undef DW_FIRST_AT -#undef DW_END_AT -#undef DW_FIRST_OP -#undef DW_END_OP -#undef DW_FIRST_ATE -#undef DW_END_ATE -#undef DW_FIRST_CFA -#undef DW_END_CFA - -#undef DW_TAG -#undef DW_TAG_DUP -#undef DW_FORM -#undef DW_AT -#undef DW_AT_DUP -#undef DW_OP -#undef DW_OP_DUP -#undef DW_ATE -#undef DW_ATE_DUP -#undef DW_CFA - -/* Flag that tells whether entry has a child or not. */ -#define DW_children_no 0 -#define DW_children_yes 1 - -#define DW_AT_stride_size DW_AT_bit_stride /* Note: The use of DW_AT_stride_size is deprecated. 
*/ -#define DW_AT_stride DW_AT_byte_stride /* Note: The use of DW_AT_stride is deprecated. */ - -/* Decimal sign encodings. */ -enum dwarf_decimal_sign_encoding - { - /* DWARF 3. */ - DW_DS_unsigned = 0x01, - DW_DS_leading_overpunch = 0x02, - DW_DS_trailing_overpunch = 0x03, - DW_DS_leading_separate = 0x04, - DW_DS_trailing_separate = 0x05 - }; - -/* Endianity encodings. */ -enum dwarf_endianity_encoding - { - /* DWARF 3. */ - DW_END_default = 0x00, - DW_END_big = 0x01, - DW_END_little = 0x02, - - DW_END_lo_user = 0x40, - DW_END_hi_user = 0xff - }; - -/* Array ordering names and codes. */ -enum dwarf_array_dim_ordering - { - DW_ORD_row_major = 0, - DW_ORD_col_major = 1 - }; - -/* Access attribute. */ -enum dwarf_access_attribute - { - DW_ACCESS_public = 1, - DW_ACCESS_protected = 2, - DW_ACCESS_private = 3 - }; - -/* Visibility. */ -enum dwarf_visibility_attribute - { - DW_VIS_local = 1, - DW_VIS_exported = 2, - DW_VIS_qualified = 3 - }; - -/* Virtuality. */ -enum dwarf_virtuality_attribute - { - DW_VIRTUALITY_none = 0, - DW_VIRTUALITY_virtual = 1, - DW_VIRTUALITY_pure_virtual = 2 - }; - -/* Case sensitivity. */ -enum dwarf_id_case - { - DW_ID_case_sensitive = 0, - DW_ID_up_case = 1, - DW_ID_down_case = 2, - DW_ID_case_insensitive = 3 - }; - -/* Calling convention. */ -enum dwarf_calling_convention - { - DW_CC_normal = 0x1, - DW_CC_program = 0x2, - DW_CC_nocall = 0x3, - - DW_CC_lo_user = 0x40, - DW_CC_hi_user = 0xff, - - DW_CC_GNU_renesas_sh = 0x40, - DW_CC_GNU_borland_fastcall_i386 = 0x41, - - /* This DW_CC_ value is not currently generated by any toolchain. It is - used internally to GDB to indicate OpenCL C functions that have been - compiled with the IBM XL C for OpenCL compiler and use a non-platform - calling convention for passing OpenCL C vector types. This value may - be changed freely as long as it does not conflict with any other DW_CC_ - value defined here. */ - DW_CC_GDB_IBM_OpenCL = 0xff - }; - -/* Inline attribute. 
*/ -enum dwarf_inline_attribute - { - DW_INL_not_inlined = 0, - DW_INL_inlined = 1, - DW_INL_declared_not_inlined = 2, - DW_INL_declared_inlined = 3 - }; - -/* Discriminant lists. */ -enum dwarf_discrim_list - { - DW_DSC_label = 0, - DW_DSC_range = 1 - }; - -/* Line number opcodes. */ -enum dwarf_line_number_ops - { - DW_LNS_extended_op = 0, - DW_LNS_copy = 1, - DW_LNS_advance_pc = 2, - DW_LNS_advance_line = 3, - DW_LNS_set_file = 4, - DW_LNS_set_column = 5, - DW_LNS_negate_stmt = 6, - DW_LNS_set_basic_block = 7, - DW_LNS_const_add_pc = 8, - DW_LNS_fixed_advance_pc = 9, - /* DWARF 3. */ - DW_LNS_set_prologue_end = 10, - DW_LNS_set_epilogue_begin = 11, - DW_LNS_set_isa = 12 - }; - -/* Line number extended opcodes. */ -enum dwarf_line_number_x_ops - { - DW_LNE_end_sequence = 1, - DW_LNE_set_address = 2, - DW_LNE_define_file = 3, - DW_LNE_set_discriminator = 4, - /* HP extensions. */ - DW_LNE_HP_negate_is_UV_update = 0x11, - DW_LNE_HP_push_context = 0x12, - DW_LNE_HP_pop_context = 0x13, - DW_LNE_HP_set_file_line_column = 0x14, - DW_LNE_HP_set_routine_name = 0x15, - DW_LNE_HP_set_sequence = 0x16, - DW_LNE_HP_negate_post_semantics = 0x17, - DW_LNE_HP_negate_function_exit = 0x18, - DW_LNE_HP_negate_front_end_logical = 0x19, - DW_LNE_HP_define_proc = 0x20, - DW_LNE_HP_source_file_correlation = 0x80, - - DW_LNE_lo_user = 0x80, - DW_LNE_hi_user = 0xff - }; - -/* Sub-opcodes for DW_LNE_HP_source_file_correlation. */ -enum dwarf_line_number_hp_sfc_ops - { - DW_LNE_HP_SFC_formfeed = 1, - DW_LNE_HP_SFC_set_listing_line = 2, - DW_LNE_HP_SFC_associate = 3 - }; - -/* Type codes for location list entries. - Extension for Fission. See http://gcc.gnu.org/wiki/DebugFission. 
*/ - -enum dwarf_location_list_entry_type - { - DW_LLE_GNU_end_of_list_entry = 0, - DW_LLE_GNU_base_address_selection_entry = 1, - DW_LLE_GNU_start_end_entry = 2, - DW_LLE_GNU_start_length_entry = 3 - }; - -#define DW_CIE_ID 0xffffffff -#define DW64_CIE_ID 0xffffffffffffffffULL -#define DW_CIE_VERSION 1 - -#define DW_CFA_extended 0 - -#define DW_CHILDREN_no 0x00 -#define DW_CHILDREN_yes 0x01 - -#define DW_ADDR_none 0 - -/* Source language names and codes. */ -enum dwarf_source_language - { - DW_LANG_C89 = 0x0001, - DW_LANG_C = 0x0002, - DW_LANG_Ada83 = 0x0003, - DW_LANG_C_plus_plus = 0x0004, - DW_LANG_Cobol74 = 0x0005, - DW_LANG_Cobol85 = 0x0006, - DW_LANG_Fortran77 = 0x0007, - DW_LANG_Fortran90 = 0x0008, - DW_LANG_Pascal83 = 0x0009, - DW_LANG_Modula2 = 0x000a, - /* DWARF 3. */ - DW_LANG_Java = 0x000b, - DW_LANG_C99 = 0x000c, - DW_LANG_Ada95 = 0x000d, - DW_LANG_Fortran95 = 0x000e, - DW_LANG_PLI = 0x000f, - DW_LANG_ObjC = 0x0010, - DW_LANG_ObjC_plus_plus = 0x0011, - DW_LANG_UPC = 0x0012, - DW_LANG_D = 0x0013, - /* DWARF 4. */ - DW_LANG_Python = 0x0014, - /* DWARF 5. */ - DW_LANG_Go = 0x0016, - - DW_LANG_C_plus_plus_11 = 0x001a, /* dwarf5.20141029.pdf DRAFT */ - DW_LANG_C11 = 0x001d, - DW_LANG_C_plus_plus_14 = 0x0021, - DW_LANG_Fortran03 = 0x0022, - DW_LANG_Fortran08 = 0x0023, - - DW_LANG_lo_user = 0x8000, /* Implementation-defined range start. */ - DW_LANG_hi_user = 0xffff, /* Implementation-defined range start. */ - - /* MIPS. */ - DW_LANG_Mips_Assembler = 0x8001, - /* UPC. */ - DW_LANG_Upc = 0x8765, - /* HP extensions. */ - DW_LANG_HP_Bliss = 0x8003, - DW_LANG_HP_Basic91 = 0x8004, - DW_LANG_HP_Pascal91 = 0x8005, - DW_LANG_HP_IMacro = 0x8006, - DW_LANG_HP_Assembler = 0x8007 - }; - -/* Names and codes for macro information. */ -enum dwarf_macinfo_record_type - { - DW_MACINFO_define = 1, - DW_MACINFO_undef = 2, - DW_MACINFO_start_file = 3, - DW_MACINFO_end_file = 4, - DW_MACINFO_vendor_ext = 255 - }; - -/* Names and codes for new style macro information. 
*/ -enum dwarf_macro_record_type - { - DW_MACRO_GNU_define = 1, - DW_MACRO_GNU_undef = 2, - DW_MACRO_GNU_start_file = 3, - DW_MACRO_GNU_end_file = 4, - DW_MACRO_GNU_define_indirect = 5, - DW_MACRO_GNU_undef_indirect = 6, - DW_MACRO_GNU_transparent_include = 7, - /* Extensions for DWZ multifile. - See http://www.dwarfstd.org/ShowIssue.php?issue=120604.1&type=open . */ - DW_MACRO_GNU_define_indirect_alt = 8, - DW_MACRO_GNU_undef_indirect_alt = 9, - DW_MACRO_GNU_transparent_include_alt = 10, - DW_MACRO_GNU_lo_user = 0xe0, - DW_MACRO_GNU_hi_user = 0xff - }; - -/* @@@ For use with GNU frame unwind information. */ - -#define DW_EH_PE_absptr 0x00 -#define DW_EH_PE_omit 0xff - -#define DW_EH_PE_uleb128 0x01 -#define DW_EH_PE_udata2 0x02 -#define DW_EH_PE_udata4 0x03 -#define DW_EH_PE_udata8 0x04 -#define DW_EH_PE_sleb128 0x09 -#define DW_EH_PE_sdata2 0x0A -#define DW_EH_PE_sdata4 0x0B -#define DW_EH_PE_sdata8 0x0C -#define DW_EH_PE_signed 0x08 - -#define DW_EH_PE_pcrel 0x10 -#define DW_EH_PE_textrel 0x20 -#define DW_EH_PE_datarel 0x30 -#define DW_EH_PE_funcrel 0x40 -#define DW_EH_PE_aligned 0x50 - -#define DW_EH_PE_indirect 0x80 - -/* Codes for the debug sections in a dwarf package (.dwp) file. - Extensions for Fission. See http://gcc.gnu.org/wiki/DebugFissionDWP. */ -enum dwarf_sect - { - DW_SECT_INFO = 1, - DW_SECT_TYPES = 2, - DW_SECT_ABBREV = 3, - DW_SECT_LINE = 4, - DW_SECT_LOC = 5, - DW_SECT_STR_OFFSETS = 6, - DW_SECT_MACINFO = 7, - DW_SECT_MACRO = 8, - DW_SECT_MAX = 8 - }; - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* Return the name of a DW_TAG_ constant, or NULL if the value is not - recognized. */ -extern const char *get_DW_TAG_name (unsigned int tag); - -/* Return the name of a DW_AT_ constant, or NULL if the value is not - recognized. */ -extern const char *get_DW_AT_name (unsigned int attr); - -/* Return the name of a DW_FORM_ constant, or NULL if the value is not - recognized. 
*/ -extern const char *get_DW_FORM_name (unsigned int form); - -/* Return the name of a DW_OP_ constant, or NULL if the value is not - recognized. */ -extern const char *get_DW_OP_name (unsigned int op); - -/* Return the name of a DW_ATE_ constant, or NULL if the value is not - recognized. */ -extern const char *get_DW_ATE_name (unsigned int enc); - -/* Return the name of a DW_CFA_ constant, or NULL if the value is not - recognized. */ -extern const char *get_DW_CFA_name (unsigned int opc); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* _DWARF2_H */ diff --git a/src/libbacktrace/elf.c b/src/libbacktrace/elf.c deleted file mode 100644 index 81ba3440ab7d..000000000000 --- a/src/libbacktrace/elf.c +++ /dev/null @@ -1,979 +0,0 @@ -/* elf.c -- Get debug data from an ELF file for backtraces. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include - -#ifdef HAVE_DL_ITERATE_PHDR -#include -#endif - -#include "backtrace.h" -#include "internal.h" - -#ifndef HAVE_DL_ITERATE_PHDR - -/* Dummy version of dl_iterate_phdr for systems that don't have it. */ - -#define dl_phdr_info x_dl_phdr_info -#define dl_iterate_phdr x_dl_iterate_phdr - -struct dl_phdr_info -{ - uintptr_t dlpi_addr; - const char *dlpi_name; -}; - -static int -dl_iterate_phdr (int (*callback) (struct dl_phdr_info *, - size_t, void *) ATTRIBUTE_UNUSED, - void *data ATTRIBUTE_UNUSED) -{ - return 0; -} - -#endif /* ! defined (HAVE_DL_ITERATE_PHDR) */ - -/* The configure script must tell us whether we are 32-bit or 64-bit - ELF. We could make this code test and support either possibility, - but there is no point. This code only works for the currently - running executable, which means that we know the ELF mode at - configure mode. */ - -#if BACKTRACE_ELF_SIZE != 32 && BACKTRACE_ELF_SIZE != 64 -#error "Unknown BACKTRACE_ELF_SIZE" -#endif - -/* might #include which might define our constants - with slightly different values. Undefine them to be safe. 
*/ - -#undef EI_NIDENT -#undef EI_MAG0 -#undef EI_MAG1 -#undef EI_MAG2 -#undef EI_MAG3 -#undef EI_CLASS -#undef EI_DATA -#undef EI_VERSION -#undef ELF_MAG0 -#undef ELF_MAG1 -#undef ELF_MAG2 -#undef ELF_MAG3 -#undef ELFCLASS32 -#undef ELFCLASS64 -#undef ELFDATA2LSB -#undef ELFDATA2MSB -#undef EV_CURRENT -#undef ET_DYN -#undef SHN_LORESERVE -#undef SHN_XINDEX -#undef SHN_UNDEF -#undef SHT_SYMTAB -#undef SHT_STRTAB -#undef SHT_DYNSYM -#undef STT_OBJECT -#undef STT_FUNC - -/* Basic types. */ - -typedef uint16_t b_elf_half; /* Elf_Half. */ -typedef uint32_t b_elf_word; /* Elf_Word. */ -typedef int32_t b_elf_sword; /* Elf_Sword. */ - -#if BACKTRACE_ELF_SIZE == 32 - -typedef uint32_t b_elf_addr; /* Elf_Addr. */ -typedef uint32_t b_elf_off; /* Elf_Off. */ - -typedef uint32_t b_elf_wxword; /* 32-bit Elf_Word, 64-bit ELF_Xword. */ - -#else - -typedef uint64_t b_elf_addr; /* Elf_Addr. */ -typedef uint64_t b_elf_off; /* Elf_Off. */ -typedef uint64_t b_elf_xword; /* Elf_Xword. */ -typedef int64_t b_elf_sxword; /* Elf_Sxword. */ - -typedef uint64_t b_elf_wxword; /* 32-bit Elf_Word, 64-bit ELF_Xword. */ - -#endif - -/* Data structures and associated constants. 
*/ - -#define EI_NIDENT 16 - -typedef struct { - unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */ - b_elf_half e_type; /* Identifies object file type */ - b_elf_half e_machine; /* Specifies required architecture */ - b_elf_word e_version; /* Identifies object file version */ - b_elf_addr e_entry; /* Entry point virtual address */ - b_elf_off e_phoff; /* Program header table file offset */ - b_elf_off e_shoff; /* Section header table file offset */ - b_elf_word e_flags; /* Processor-specific flags */ - b_elf_half e_ehsize; /* ELF header size in bytes */ - b_elf_half e_phentsize; /* Program header table entry size */ - b_elf_half e_phnum; /* Program header table entry count */ - b_elf_half e_shentsize; /* Section header table entry size */ - b_elf_half e_shnum; /* Section header table entry count */ - b_elf_half e_shstrndx; /* Section header string table index */ -} b_elf_ehdr; /* Elf_Ehdr. */ - -#define EI_MAG0 0 -#define EI_MAG1 1 -#define EI_MAG2 2 -#define EI_MAG3 3 -#define EI_CLASS 4 -#define EI_DATA 5 -#define EI_VERSION 6 - -#define ELFMAG0 0x7f -#define ELFMAG1 'E' -#define ELFMAG2 'L' -#define ELFMAG3 'F' - -#define ELFCLASS32 1 -#define ELFCLASS64 2 - -#define ELFDATA2LSB 1 -#define ELFDATA2MSB 2 - -#define EV_CURRENT 1 - -#define ET_DYN 3 - -typedef struct { - b_elf_word sh_name; /* Section name, index in string tbl */ - b_elf_word sh_type; /* Type of section */ - b_elf_wxword sh_flags; /* Miscellaneous section attributes */ - b_elf_addr sh_addr; /* Section virtual addr at execution */ - b_elf_off sh_offset; /* Section file offset */ - b_elf_wxword sh_size; /* Size of section in bytes */ - b_elf_word sh_link; /* Index of another section */ - b_elf_word sh_info; /* Additional section information */ - b_elf_wxword sh_addralign; /* Section alignment */ - b_elf_wxword sh_entsize; /* Entry size if section holds table */ -} b_elf_shdr; /* Elf_Shdr. 
*/ - -#define SHN_UNDEF 0x0000 /* Undefined section */ -#define SHN_LORESERVE 0xFF00 /* Begin range of reserved indices */ -#define SHN_XINDEX 0xFFFF /* Section index is held elsewhere */ - -#define SHT_SYMTAB 2 -#define SHT_STRTAB 3 -#define SHT_DYNSYM 11 - -#if BACKTRACE_ELF_SIZE == 32 - -typedef struct -{ - b_elf_word st_name; /* Symbol name, index in string tbl */ - b_elf_addr st_value; /* Symbol value */ - b_elf_word st_size; /* Symbol size */ - unsigned char st_info; /* Symbol binding and type */ - unsigned char st_other; /* Visibility and other data */ - b_elf_half st_shndx; /* Symbol section index */ -} b_elf_sym; /* Elf_Sym. */ - -#else /* BACKTRACE_ELF_SIZE != 32 */ - -typedef struct -{ - b_elf_word st_name; /* Symbol name, index in string tbl */ - unsigned char st_info; /* Symbol binding and type */ - unsigned char st_other; /* Visibility and other data */ - b_elf_half st_shndx; /* Symbol section index */ - b_elf_addr st_value; /* Symbol value */ - b_elf_xword st_size; /* Symbol size */ -} b_elf_sym; /* Elf_Sym. */ - -#endif /* BACKTRACE_ELF_SIZE != 32 */ - -#define STT_OBJECT 1 -#define STT_FUNC 2 - -/* An index of ELF sections we care about. */ - -enum debug_section -{ - DEBUG_INFO, - DEBUG_LINE, - DEBUG_ABBREV, - DEBUG_RANGES, - DEBUG_STR, - DEBUG_MAX -}; - -/* Names of sections, indexed by enum elf_section. */ - -static const char * const debug_section_names[DEBUG_MAX] = -{ - ".debug_info", - ".debug_line", - ".debug_abbrev", - ".debug_ranges", - ".debug_str" -}; - -/* Information we gather for the sections we care about. */ - -struct debug_section_info -{ - /* Section file offset. */ - off_t offset; - /* Section size. */ - size_t size; - /* Section contents, after read from file. */ - const unsigned char *data; -}; - -/* Information we keep for an ELF symbol. */ - -struct elf_symbol -{ - /* The name of the symbol. */ - const char *name; - /* The address of the symbol. */ - uintptr_t address; - /* The size of the symbol. 
*/ - size_t size; -}; - -/* Information to pass to elf_syminfo. */ - -struct elf_syminfo_data -{ - /* Symbols for the next module. */ - struct elf_syminfo_data *next; - /* The ELF symbols, sorted by address. */ - struct elf_symbol *symbols; - /* The number of symbols. */ - size_t count; -}; - -/* A dummy callback function used when we can't find any debug info. */ - -static int -elf_nodebug (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t pc ATTRIBUTE_UNUSED, - backtrace_full_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, "no debug info in ELF executable", -1); - return 0; -} - -/* A dummy callback function used when we can't find a symbol - table. */ - -static void -elf_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t addr ATTRIBUTE_UNUSED, - backtrace_syminfo_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, "no symbol table in ELF executable", -1); -} - -/* Compare struct elf_symbol for qsort. */ - -static int -elf_symbol_compare (const void *v1, const void *v2) -{ - const struct elf_symbol *e1 = (const struct elf_symbol *) v1; - const struct elf_symbol *e2 = (const struct elf_symbol *) v2; - - if (e1->address < e2->address) - return -1; - else if (e1->address > e2->address) - return 1; - else - return 0; -} - -/* Compare an ADDR against an elf_symbol for bsearch. We allocate one - extra entry in the array so that this can look safely at the next - entry. */ - -static int -elf_symbol_search (const void *vkey, const void *ventry) -{ - const uintptr_t *key = (const uintptr_t *) vkey; - const struct elf_symbol *entry = (const struct elf_symbol *) ventry; - uintptr_t addr; - - addr = *key; - if (addr < entry->address) - return -1; - else if (addr >= entry->address + entry->size) - return 1; - else - return 0; -} - -/* Initialize the symbol table info for elf_syminfo. 
*/ - -static int -elf_initialize_syminfo (struct backtrace_state *state, - uintptr_t base_address, - const unsigned char *symtab_data, size_t symtab_size, - const unsigned char *strtab, size_t strtab_size, - backtrace_error_callback error_callback, - void *data, struct elf_syminfo_data *sdata) -{ - size_t sym_count; - const b_elf_sym *sym; - size_t elf_symbol_count; - size_t elf_symbol_size; - struct elf_symbol *elf_symbols; - size_t i; - unsigned int j; - - sym_count = symtab_size / sizeof (b_elf_sym); - - /* We only care about function symbols. Count them. */ - sym = (const b_elf_sym *) symtab_data; - elf_symbol_count = 0; - for (i = 0; i < sym_count; ++i, ++sym) - { - int info; - - info = sym->st_info & 0xf; - if ((info == STT_FUNC || info == STT_OBJECT) - && sym->st_shndx != SHN_UNDEF) - ++elf_symbol_count; - } - - elf_symbol_size = elf_symbol_count * sizeof (struct elf_symbol); - elf_symbols = ((struct elf_symbol *) - backtrace_alloc (state, elf_symbol_size, error_callback, - data)); - if (elf_symbols == NULL) - return 0; - - sym = (const b_elf_sym *) symtab_data; - j = 0; - for (i = 0; i < sym_count; ++i, ++sym) - { - int info; - - info = sym->st_info & 0xf; - if (info != STT_FUNC && info != STT_OBJECT) - continue; - if (sym->st_shndx == SHN_UNDEF) - continue; - if (sym->st_name >= strtab_size) - { - error_callback (data, "symbol string index out of range", 0); - backtrace_free (state, elf_symbols, elf_symbol_size, error_callback, - data); - return 0; - } - elf_symbols[j].name = (const char *) strtab + sym->st_name; - elf_symbols[j].address = sym->st_value + base_address; - elf_symbols[j].size = sym->st_size; - ++j; - } - - backtrace_qsort (elf_symbols, elf_symbol_count, sizeof (struct elf_symbol), - elf_symbol_compare); - - sdata->next = NULL; - sdata->symbols = elf_symbols; - sdata->count = elf_symbol_count; - - return 1; -} - -/* Add EDATA to the list in STATE. 
*/ - -static void -elf_add_syminfo_data (struct backtrace_state *state, - struct elf_syminfo_data *edata) -{ - if (!state->threaded) - { - struct elf_syminfo_data **pp; - - for (pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data; - *pp != NULL; - pp = &(*pp)->next) - ; - *pp = edata; - } - else - { - while (1) - { - struct elf_syminfo_data **pp; - - pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data; - - while (1) - { - struct elf_syminfo_data *p; - - p = backtrace_atomic_load_pointer (pp); - - if (p == NULL) - break; - - pp = &p->next; - } - - if (__sync_bool_compare_and_swap (pp, NULL, edata)) - break; - } - } -} - -/* Return the symbol name and value for an ADDR. */ - -static void -elf_syminfo (struct backtrace_state *state, uintptr_t addr, - backtrace_syminfo_callback callback, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data) -{ - struct elf_syminfo_data *edata; - struct elf_symbol *sym = NULL; - - if (!state->threaded) - { - for (edata = (struct elf_syminfo_data *) state->syminfo_data; - edata != NULL; - edata = edata->next) - { - sym = ((struct elf_symbol *) - bsearch (&addr, edata->symbols, edata->count, - sizeof (struct elf_symbol), elf_symbol_search)); - if (sym != NULL) - break; - } - } - else - { - struct elf_syminfo_data **pp; - - pp = (struct elf_syminfo_data **) (void *) &state->syminfo_data; - while (1) - { - edata = backtrace_atomic_load_pointer (pp); - if (edata == NULL) - break; - - sym = ((struct elf_symbol *) - bsearch (&addr, edata->symbols, edata->count, - sizeof (struct elf_symbol), elf_symbol_search)); - if (sym != NULL) - break; - - pp = &edata->next; - } - } - - if (sym == NULL) - callback (data, addr, NULL, 0, 0); - else - callback (data, addr, sym->name, sym->address, sym->size); -} - -/* Add the backtrace data for one ELF file. 
Returns 1 on success, - 0 on failure (in both cases descriptor is closed) or -1 if exe - is non-zero and the ELF file is ET_DYN, which tells the caller that - elf_add will need to be called on the descriptor again after - base_address is determined. */ - -static int -elf_add (struct backtrace_state *state, int descriptor, uintptr_t base_address, - backtrace_error_callback error_callback, void *data, - fileline *fileline_fn, int *found_sym, int *found_dwarf, int exe) -{ - struct backtrace_view ehdr_view; - b_elf_ehdr ehdr; - off_t shoff; - unsigned int shnum; - unsigned int shstrndx; - struct backtrace_view shdrs_view; - int shdrs_view_valid; - const b_elf_shdr *shdrs; - const b_elf_shdr *shstrhdr; - size_t shstr_size; - off_t shstr_off; - struct backtrace_view names_view; - int names_view_valid; - const char *names; - unsigned int symtab_shndx; - unsigned int dynsym_shndx; - unsigned int i; - struct debug_section_info sections[DEBUG_MAX]; - struct backtrace_view symtab_view; - int symtab_view_valid; - struct backtrace_view strtab_view; - int strtab_view_valid; - off_t min_offset; - off_t max_offset; - struct backtrace_view debug_view; - int debug_view_valid; - - *found_sym = 0; - *found_dwarf = 0; - - shdrs_view_valid = 0; - names_view_valid = 0; - symtab_view_valid = 0; - strtab_view_valid = 0; - debug_view_valid = 0; - - if (!backtrace_get_view (state, descriptor, 0, sizeof ehdr, error_callback, - data, &ehdr_view)) - goto fail; - - memcpy (&ehdr, ehdr_view.data, sizeof ehdr); - - backtrace_release_view (state, &ehdr_view, error_callback, data); - - if (ehdr.e_ident[EI_MAG0] != ELFMAG0 - || ehdr.e_ident[EI_MAG1] != ELFMAG1 - || ehdr.e_ident[EI_MAG2] != ELFMAG2 - || ehdr.e_ident[EI_MAG3] != ELFMAG3) - { - error_callback (data, "executable file is not ELF", 0); - goto fail; - } - if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) - { - error_callback (data, "executable file is unrecognized ELF version", 0); - goto fail; - } - -#if BACKTRACE_ELF_SIZE == 32 -#define 
BACKTRACE_ELFCLASS ELFCLASS32 -#else -#define BACKTRACE_ELFCLASS ELFCLASS64 -#endif - - if (ehdr.e_ident[EI_CLASS] != BACKTRACE_ELFCLASS) - { - error_callback (data, "executable file is unexpected ELF class", 0); - goto fail; - } - - if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB - && ehdr.e_ident[EI_DATA] != ELFDATA2MSB) - { - error_callback (data, "executable file has unknown endianness", 0); - goto fail; - } - - /* If the executable is ET_DYN, it is either a PIE, or we are running - directly a shared library with .interp. We need to wait for - dl_iterate_phdr in that case to determine the actual base_address. */ - if (exe && ehdr.e_type == ET_DYN) - return -1; - - shoff = ehdr.e_shoff; - shnum = ehdr.e_shnum; - shstrndx = ehdr.e_shstrndx; - - if ((shnum == 0 || shstrndx == SHN_XINDEX) - && shoff != 0) - { - struct backtrace_view shdr_view; - const b_elf_shdr *shdr; - - if (!backtrace_get_view (state, descriptor, shoff, sizeof shdr, - error_callback, data, &shdr_view)) - goto fail; - - shdr = (const b_elf_shdr *) shdr_view.data; - - if (shnum == 0) - shnum = shdr->sh_size; - - if (shstrndx == SHN_XINDEX) - { - shstrndx = shdr->sh_link; - - /* Versions of the GNU binutils between 2.12 and 2.18 did - not handle objects with more than SHN_LORESERVE sections - correctly. All large section indexes were offset by - 0x100. There is more information at - http://sourceware.org/bugzilla/show_bug.cgi?id-5900 . - Fortunately these object files are easy to detect, as the - GNU binutils always put the section header string table - near the end of the list of sections. Thus if the - section header string table index is larger than the - number of sections, then we know we have to subtract - 0x100 to get the real section index. 
*/ - if (shstrndx >= shnum && shstrndx >= SHN_LORESERVE + 0x100) - shstrndx -= 0x100; - } - - backtrace_release_view (state, &shdr_view, error_callback, data); - } - - /* To translate PC to file/line when using DWARF, we need to find - the .debug_info and .debug_line sections. */ - - /* Read the section headers, skipping the first one. */ - - if (!backtrace_get_view (state, descriptor, shoff + sizeof (b_elf_shdr), - (shnum - 1) * sizeof (b_elf_shdr), - error_callback, data, &shdrs_view)) - goto fail; - shdrs_view_valid = 1; - shdrs = (const b_elf_shdr *) shdrs_view.data; - - /* Read the section names. */ - - shstrhdr = &shdrs[shstrndx - 1]; - shstr_size = shstrhdr->sh_size; - shstr_off = shstrhdr->sh_offset; - - if (!backtrace_get_view (state, descriptor, shstr_off, shstr_size, - error_callback, data, &names_view)) - goto fail; - names_view_valid = 1; - names = (const char *) names_view.data; - - symtab_shndx = 0; - dynsym_shndx = 0; - - memset (sections, 0, sizeof sections); - - /* Look for the symbol table. 
*/ - for (i = 1; i < shnum; ++i) - { - const b_elf_shdr *shdr; - unsigned int sh_name; - const char *name; - int j; - - shdr = &shdrs[i - 1]; - - if (shdr->sh_type == SHT_SYMTAB) - symtab_shndx = i; - else if (shdr->sh_type == SHT_DYNSYM) - dynsym_shndx = i; - - sh_name = shdr->sh_name; - if (sh_name >= shstr_size) - { - error_callback (data, "ELF section name out of range", 0); - goto fail; - } - - name = names + sh_name; - - for (j = 0; j < (int) DEBUG_MAX; ++j) - { - if (strcmp (name, debug_section_names[j]) == 0) - { - sections[j].offset = shdr->sh_offset; - sections[j].size = shdr->sh_size; - break; - } - } - } - - if (symtab_shndx == 0) - symtab_shndx = dynsym_shndx; - if (symtab_shndx != 0) - { - const b_elf_shdr *symtab_shdr; - unsigned int strtab_shndx; - const b_elf_shdr *strtab_shdr; - struct elf_syminfo_data *sdata; - - symtab_shdr = &shdrs[symtab_shndx - 1]; - strtab_shndx = symtab_shdr->sh_link; - if (strtab_shndx >= shnum) - { - error_callback (data, - "ELF symbol table strtab link out of range", 0); - goto fail; - } - strtab_shdr = &shdrs[strtab_shndx - 1]; - - if (!backtrace_get_view (state, descriptor, symtab_shdr->sh_offset, - symtab_shdr->sh_size, error_callback, data, - &symtab_view)) - goto fail; - symtab_view_valid = 1; - - if (!backtrace_get_view (state, descriptor, strtab_shdr->sh_offset, - strtab_shdr->sh_size, error_callback, data, - &strtab_view)) - goto fail; - strtab_view_valid = 1; - - sdata = ((struct elf_syminfo_data *) - backtrace_alloc (state, sizeof *sdata, error_callback, data)); - if (sdata == NULL) - goto fail; - - if (!elf_initialize_syminfo (state, base_address, - symtab_view.data, symtab_shdr->sh_size, - strtab_view.data, strtab_shdr->sh_size, - error_callback, data, sdata)) - { - backtrace_free (state, sdata, sizeof *sdata, error_callback, data); - goto fail; - } - - /* We no longer need the symbol table, but we hold on to the - string table permanently. 
*/ - backtrace_release_view (state, &symtab_view, error_callback, data); - - *found_sym = 1; - - elf_add_syminfo_data (state, sdata); - } - - /* FIXME: Need to handle compressed debug sections. */ - - backtrace_release_view (state, &shdrs_view, error_callback, data); - shdrs_view_valid = 0; - backtrace_release_view (state, &names_view, error_callback, data); - names_view_valid = 0; - - /* Read all the debug sections in a single view, since they are - probably adjacent in the file. We never release this view. */ - - min_offset = 0; - max_offset = 0; - for (i = 0; i < (int) DEBUG_MAX; ++i) - { - off_t end; - - if (sections[i].size == 0) - continue; - if (min_offset == 0 || sections[i].offset < min_offset) - min_offset = sections[i].offset; - end = sections[i].offset + sections[i].size; - if (end > max_offset) - max_offset = end; - } - if (min_offset == 0 || max_offset == 0) - { - if (!backtrace_close (descriptor, error_callback, data)) - goto fail; - return 1; - } - - if (!backtrace_get_view (state, descriptor, min_offset, - max_offset - min_offset, - error_callback, data, &debug_view)) - goto fail; - debug_view_valid = 1; - - /* We've read all we need from the executable. 
*/ - if (!backtrace_close (descriptor, error_callback, data)) - goto fail; - descriptor = -1; - - for (i = 0; i < (int) DEBUG_MAX; ++i) - { - if (sections[i].size == 0) - sections[i].data = NULL; - else - sections[i].data = ((const unsigned char *) debug_view.data - + (sections[i].offset - min_offset)); - } - - if (!backtrace_dwarf_add (state, base_address, - sections[DEBUG_INFO].data, - sections[DEBUG_INFO].size, - sections[DEBUG_LINE].data, - sections[DEBUG_LINE].size, - sections[DEBUG_ABBREV].data, - sections[DEBUG_ABBREV].size, - sections[DEBUG_RANGES].data, - sections[DEBUG_RANGES].size, - sections[DEBUG_STR].data, - sections[DEBUG_STR].size, - ehdr.e_ident[EI_DATA] == ELFDATA2MSB, - error_callback, data, fileline_fn)) - goto fail; - - *found_dwarf = 1; - - return 1; - - fail: - if (shdrs_view_valid) - backtrace_release_view (state, &shdrs_view, error_callback, data); - if (names_view_valid) - backtrace_release_view (state, &names_view, error_callback, data); - if (symtab_view_valid) - backtrace_release_view (state, &symtab_view, error_callback, data); - if (strtab_view_valid) - backtrace_release_view (state, &strtab_view, error_callback, data); - if (debug_view_valid) - backtrace_release_view (state, &debug_view, error_callback, data); - if (descriptor != -1) - backtrace_close (descriptor, error_callback, data); - return 0; -} - -/* Data passed to phdr_callback. */ - -struct phdr_data -{ - struct backtrace_state *state; - backtrace_error_callback error_callback; - void *data; - fileline *fileline_fn; - int *found_sym; - int *found_dwarf; - int exe_descriptor; -}; - -/* Callback passed to dl_iterate_phdr. Load debug info from shared - libraries. 
*/ - -static int -#ifdef __i386__ -__attribute__ ((__force_align_arg_pointer__)) -#endif -phdr_callback (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED, - void *pdata) -{ - struct phdr_data *pd = (struct phdr_data *) pdata; - int descriptor; - int does_not_exist; - fileline elf_fileline_fn; - int found_dwarf; - - /* There is not much we can do if we don't have the module name, - unless executable is ET_DYN, where we expect the very first - phdr_callback to be for the PIE. */ - if (info->dlpi_name == NULL || info->dlpi_name[0] == '\0') - { - if (pd->exe_descriptor == -1) - return 0; - descriptor = pd->exe_descriptor; - pd->exe_descriptor = -1; - } - else - { - if (pd->exe_descriptor != -1) - { - backtrace_close (pd->exe_descriptor, pd->error_callback, pd->data); - pd->exe_descriptor = -1; - } - - descriptor = backtrace_open (info->dlpi_name, pd->error_callback, - pd->data, &does_not_exist); - if (descriptor < 0) - return 0; - } - - if (elf_add (pd->state, descriptor, info->dlpi_addr, pd->error_callback, - pd->data, &elf_fileline_fn, pd->found_sym, &found_dwarf, 0)) - { - if (found_dwarf) - { - *pd->found_dwarf = 1; - *pd->fileline_fn = elf_fileline_fn; - } - } - - return 0; -} - -/* Initialize the backtrace data we need from an ELF executable. At - the ELF level, all we need to do is find the debug info - sections. */ - -int -backtrace_initialize (struct backtrace_state *state, int descriptor, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn) -{ - int ret; - int found_sym; - int found_dwarf; - fileline elf_fileline_fn = elf_nodebug; - struct phdr_data pd; - - ret = elf_add (state, descriptor, 0, error_callback, data, &elf_fileline_fn, - &found_sym, &found_dwarf, 1); - if (!ret) - return 0; - - pd.state = state; - pd.error_callback = error_callback; - pd.data = data; - pd.fileline_fn = &elf_fileline_fn; - pd.found_sym = &found_sym; - pd.found_dwarf = &found_dwarf; - pd.exe_descriptor = ret < 0 ? 
descriptor : -1; - - dl_iterate_phdr (phdr_callback, (void *) &pd); - - if (!state->threaded) - { - if (found_sym) - state->syminfo_fn = elf_syminfo; - else if (state->syminfo_fn == NULL) - state->syminfo_fn = elf_nosyms; - } - else - { - if (found_sym) - backtrace_atomic_store_pointer (&state->syminfo_fn, elf_syminfo); - else - (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, - elf_nosyms); - } - - if (!state->threaded) - { - if (state->fileline_fn == NULL || state->fileline_fn == elf_nodebug) - *fileline_fn = elf_fileline_fn; - } - else - { - fileline current_fn; - - current_fn = backtrace_atomic_load_pointer (&state->fileline_fn); - if (current_fn == NULL || current_fn == elf_nodebug) - *fileline_fn = elf_fileline_fn; - } - - return 1; -} diff --git a/src/libbacktrace/fileline.c b/src/libbacktrace/fileline.c deleted file mode 100644 index 27ebbedc21cc..000000000000 --- a/src/libbacktrace/fileline.c +++ /dev/null @@ -1,194 +0,0 @@ -/* fileline.c -- Get file and line number information in a backtrace. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -#ifndef HAVE_GETEXECNAME -#define getexecname() NULL -#endif - -/* Initialize the fileline information from the executable. Returns 1 - on success, 0 on failure. */ - -static int -fileline_initialize (struct backtrace_state *state, - backtrace_error_callback error_callback, void *data) -{ - int failed; - fileline fileline_fn; - int pass; - int called_error_callback; - int descriptor; - - if (!state->threaded) - failed = state->fileline_initialization_failed; - else - failed = backtrace_atomic_load_int (&state->fileline_initialization_failed); - - if (failed) - { - error_callback (data, "failed to read executable information", -1); - return 0; - } - - if (!state->threaded) - fileline_fn = state->fileline_fn; - else - fileline_fn = backtrace_atomic_load_pointer (&state->fileline_fn); - if (fileline_fn != NULL) - return 1; - - /* We have not initialized the information. Do it now. 
*/ - - descriptor = -1; - called_error_callback = 0; - for (pass = 0; pass < 4; ++pass) - { - const char *filename; - int does_not_exist; - - switch (pass) - { - case 0: - filename = state->filename; - break; - case 1: - filename = getexecname (); - break; - case 2: - filename = "/proc/self/exe"; - break; - case 3: - filename = "/proc/curproc/file"; - break; - default: - abort (); - } - - if (filename == NULL) - continue; - - descriptor = backtrace_open (filename, error_callback, data, - &does_not_exist); - if (descriptor < 0 && !does_not_exist) - { - called_error_callback = 1; - break; - } - if (descriptor >= 0) - break; - } - - if (descriptor < 0) - { - if (!called_error_callback) - { - if (state->filename != NULL) - error_callback (data, state->filename, ENOENT); - else - error_callback (data, - "libbacktrace could not find executable to open", - 0); - } - failed = 1; - } - - if (!failed) - { - if (!backtrace_initialize (state, descriptor, error_callback, data, - &fileline_fn)) - failed = 1; - } - - if (failed) - { - if (!state->threaded) - state->fileline_initialization_failed = 1; - else - backtrace_atomic_store_int (&state->fileline_initialization_failed, 1); - return 0; - } - - if (!state->threaded) - state->fileline_fn = fileline_fn; - else - { - backtrace_atomic_store_pointer (&state->fileline_fn, fileline_fn); - - /* Note that if two threads initialize at once, one of the data - sets may be leaked. */ - } - - return 1; -} - -/* Given a PC, find the file name, line number, and function name. */ - -int -backtrace_pcinfo (struct backtrace_state *state, uintptr_t pc, - backtrace_full_callback callback, - backtrace_error_callback error_callback, void *data) -{ - if (!fileline_initialize (state, error_callback, data)) - return 0; - - if (state->fileline_initialization_failed) - return 0; - - return state->fileline_fn (state, pc, callback, error_callback, data); -} - -/* Given a PC, find the symbol for it, and its value. 
*/ - -int -backtrace_syminfo (struct backtrace_state *state, uintptr_t pc, - backtrace_syminfo_callback callback, - backtrace_error_callback error_callback, void *data) -{ - if (!fileline_initialize (state, error_callback, data)) - return 0; - - if (state->fileline_initialization_failed) - return 0; - - state->syminfo_fn (state, pc, callback, error_callback, data); - return 1; -} diff --git a/src/libbacktrace/filenames.h b/src/libbacktrace/filenames.h deleted file mode 100644 index 1161daaa4f35..000000000000 --- a/src/libbacktrace/filenames.h +++ /dev/null @@ -1,99 +0,0 @@ -/* Macros for taking apart, interpreting and processing file names. - - These are here because some non-Posix (a.k.a. DOSish) systems have - drive letter brain-damage at the beginning of an absolute file name, - use forward- and back-slash in path names interchangeably, and - some of them have case-insensitive file names. - - Copyright (C) 2000-2015 Free Software Foundation, Inc. - -This file is part of BFD, the Binary File Descriptor library. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. 
*/ - -#ifndef FILENAMES_H -#define FILENAMES_H - -#include "hashtab.h" /* for hashval_t */ - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(__MSDOS__) || defined(_WIN32) || defined(__OS2__) || defined (__CYGWIN__) -# ifndef HAVE_DOS_BASED_FILE_SYSTEM -# define HAVE_DOS_BASED_FILE_SYSTEM 1 -# endif -# ifndef HAVE_CASE_INSENSITIVE_FILE_SYSTEM -# define HAVE_CASE_INSENSITIVE_FILE_SYSTEM 1 -# endif -# define HAS_DRIVE_SPEC(f) HAS_DOS_DRIVE_SPEC (f) -# define IS_DIR_SEPARATOR(c) IS_DOS_DIR_SEPARATOR (c) -# define IS_ABSOLUTE_PATH(f) IS_DOS_ABSOLUTE_PATH (f) -#else /* not DOSish */ -# if defined(__APPLE__) -# ifndef HAVE_CASE_INSENSITIVE_FILE_SYSTEM -# define HAVE_CASE_INSENSITIVE_FILE_SYSTEM 1 -# endif -# endif /* __APPLE__ */ -# define HAS_DRIVE_SPEC(f) (0) -# define IS_DIR_SEPARATOR(c) IS_UNIX_DIR_SEPARATOR (c) -# define IS_ABSOLUTE_PATH(f) IS_UNIX_ABSOLUTE_PATH (f) -#endif - -#define IS_DIR_SEPARATOR_1(dos_based, c) \ - (((c) == '/') \ - || (((c) == '\\') && (dos_based))) - -#define HAS_DRIVE_SPEC_1(dos_based, f) \ - ((f)[0] && ((f)[1] == ':') && (dos_based)) - -/* Remove the drive spec from F, assuming HAS_DRIVE_SPEC (f). - The result is a pointer to the remainder of F. */ -#define STRIP_DRIVE_SPEC(f) ((f) + 2) - -#define IS_DOS_DIR_SEPARATOR(c) IS_DIR_SEPARATOR_1 (1, c) -#define IS_DOS_ABSOLUTE_PATH(f) IS_ABSOLUTE_PATH_1 (1, f) -#define HAS_DOS_DRIVE_SPEC(f) HAS_DRIVE_SPEC_1 (1, f) - -#define IS_UNIX_DIR_SEPARATOR(c) IS_DIR_SEPARATOR_1 (0, c) -#define IS_UNIX_ABSOLUTE_PATH(f) IS_ABSOLUTE_PATH_1 (0, f) - -/* Note that when DOS_BASED is true, IS_ABSOLUTE_PATH accepts d:foo as - well, although it is only semi-absolute. This is because the users - of IS_ABSOLUTE_PATH want to know whether to prepend the current - working directory to a file name, which should not be done with a - name like d:foo. 
*/ -#define IS_ABSOLUTE_PATH_1(dos_based, f) \ - (IS_DIR_SEPARATOR_1 (dos_based, (f)[0]) \ - || HAS_DRIVE_SPEC_1 (dos_based, f)) - -extern int filename_cmp (const char *s1, const char *s2); -#define FILENAME_CMP(s1, s2) filename_cmp(s1, s2) - -extern int filename_ncmp (const char *s1, const char *s2, - size_t n); - -extern hashval_t filename_hash (const void *s); - -extern int filename_eq (const void *s1, const void *s2); - -extern int canonical_filename_eq (const char *a, const char *b); - -#ifdef __cplusplus -} -#endif - -#endif /* FILENAMES_H */ diff --git a/src/libbacktrace/filetype.awk b/src/libbacktrace/filetype.awk deleted file mode 100644 index a5f6c8cc1800..000000000000 --- a/src/libbacktrace/filetype.awk +++ /dev/null @@ -1,11 +0,0 @@ -# An awk script to determine the type of a file. -/\177ELF\001/ { if (NR == 1) { print "elf32"; exit } } -/\177ELF\002/ { if (NR == 1) { print "elf64"; exit } } -/\114\001/ { if (NR == 1) { print "pecoff"; exit } } -/\144\206/ { if (NR == 1) { print "pecoff"; exit } } -/\xFE\xED\xFA\xCE/ { if (NR == 1) { print "macho32"; exit } } -/\xCE\xFA\xED\xFE/ { if (NR == 1) { print "macho32"; exit } } -/\xFE\xED\xFA\xCF/ { if (NR == 1) { print "macho64"; exit } } -/\xCF\xFA\xED\xFE/ { if (NR == 1) { print "macho64"; exit } } -/\xCA\xFE\xBA\xBE/ { if (NR == 1) { print "macho-fat"; exit } } -/\xBE\xBA\xFE\xCA/ { if (NR == 1) { print "macho-fat"; exit } } diff --git a/src/libbacktrace/hashtab.h b/src/libbacktrace/hashtab.h deleted file mode 100644 index b1b5877aae7c..000000000000 --- a/src/libbacktrace/hashtab.h +++ /dev/null @@ -1,204 +0,0 @@ -/* An expandable hash tables datatype. - Copyright (C) 1999-2015 Free Software Foundation, Inc. - Contributed by Vladimir Makarov (vmakarov@cygnus.com). - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ - -/* This package implements basic hash table functionality. It is possible - to search for an entry, create an entry and destroy an entry. - - Elements in the table are generic pointers. - - The size of the table is not fixed; if the occupancy of the table - grows too high the hash table will be expanded. - - The abstract data implementation is based on generalized Algorithm D - from Knuth's book "The art of computer programming". Hash table is - expanded by creation of new hash table and transferring elements from - the old table to the new table. */ - -#ifndef __HASHTAB_H__ -#define __HASHTAB_H__ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#include "ansidecl.h" - -/* The type for a hash code. */ -typedef unsigned int hashval_t; - -/* Callback function pointer types. */ - -/* Calculate hash of a table entry. */ -typedef hashval_t (*htab_hash) (const void *); - -/* Compare a table entry with a possible entry. The entry already in - the table always comes first, so the second element can be of a - different type (but in this case htab_find and htab_find_slot - cannot be used; instead the variants that accept a hash value - must be used). */ -typedef int (*htab_eq) (const void *, const void *); - -/* Cleanup function called whenever a live element is removed from - the hash table. */ -typedef void (*htab_del) (void *); - -/* Function called by htab_traverse for each live element. 
The first - arg is the slot of the element (which can be passed to htab_clear_slot - if desired), the second arg is the auxiliary pointer handed to - htab_traverse. Return 1 to continue scan, 0 to stop. */ -typedef int (*htab_trav) (void **, void *); - -/* Memory-allocation function, with the same functionality as calloc(). - Iff it returns NULL, the hash table implementation will pass an error - code back to the user, so if your code doesn't handle errors, - best if you use xcalloc instead. */ -typedef void *(*htab_alloc) (size_t, size_t); - -/* We also need a free() routine. */ -typedef void (*htab_free) (void *); - -/* Memory allocation and deallocation; variants which take an extra - argument. */ -typedef void *(*htab_alloc_with_arg) (void *, size_t, size_t); -typedef void (*htab_free_with_arg) (void *, void *); - -/* This macro defines reserved value for empty table entry. */ - -#define HTAB_EMPTY_ENTRY ((PTR) 0) - -/* This macro defines reserved value for table entry which contained - a deleted element. */ - -#define HTAB_DELETED_ENTRY ((PTR) 1) - -/* Hash tables are of the following type. The structure - (implementation) of this type is not needed for using the hash - tables. All work with hash table should be executed only through - functions mentioned below. The size of this structure is subject to - change. */ - -struct htab { - /* Pointer to hash function. */ - htab_hash hash_f; - - /* Pointer to comparison function. */ - htab_eq eq_f; - - /* Pointer to cleanup function. */ - htab_del del_f; - - /* Table itself. */ - void **entries; - - /* Current size (in entries) of the hash table. */ - size_t size; - - /* Current number of elements including also deleted elements. */ - size_t n_elements; - - /* Current number of deleted elements in the table. */ - size_t n_deleted; - - /* The following member is used for debugging. Its value is number - of all calls of `htab_find_slot' for the hash table. 
*/ - unsigned int searches; - - /* The following member is used for debugging. Its value is number - of collisions fixed for time of work with the hash table. */ - unsigned int collisions; - - /* Pointers to allocate/free functions. */ - htab_alloc alloc_f; - htab_free free_f; - - /* Alternate allocate/free functions, which take an extra argument. */ - void *alloc_arg; - htab_alloc_with_arg alloc_with_arg_f; - htab_free_with_arg free_with_arg_f; - - /* Current size (in entries) of the hash table, as an index into the - table of primes. */ - unsigned int size_prime_index; -}; - -typedef struct htab *htab_t; - -/* An enum saying whether we insert into the hash table or not. */ -enum insert_option {NO_INSERT, INSERT}; - -/* The prototypes of the package functions. */ - -extern htab_t htab_create_alloc (size_t, htab_hash, - htab_eq, htab_del, - htab_alloc, htab_free); - -extern htab_t htab_create_alloc_ex (size_t, htab_hash, - htab_eq, htab_del, - void *, htab_alloc_with_arg, - htab_free_with_arg); - -extern htab_t htab_create_typed_alloc (size_t, htab_hash, htab_eq, htab_del, - htab_alloc, htab_alloc, htab_free); - -/* Backward-compatibility functions. 
*/ -extern htab_t htab_create (size_t, htab_hash, htab_eq, htab_del); -extern htab_t htab_try_create (size_t, htab_hash, htab_eq, htab_del); - -extern void htab_set_functions_ex (htab_t, htab_hash, - htab_eq, htab_del, - void *, htab_alloc_with_arg, - htab_free_with_arg); - -extern void htab_delete (htab_t); -extern void htab_empty (htab_t); - -extern void * htab_find (htab_t, const void *); -extern void ** htab_find_slot (htab_t, const void *, enum insert_option); -extern void * htab_find_with_hash (htab_t, const void *, hashval_t); -extern void ** htab_find_slot_with_hash (htab_t, const void *, - hashval_t, enum insert_option); -extern void htab_clear_slot (htab_t, void **); -extern void htab_remove_elt (htab_t, void *); -extern void htab_remove_elt_with_hash (htab_t, void *, hashval_t); - -extern void htab_traverse (htab_t, htab_trav, void *); -extern void htab_traverse_noresize (htab_t, htab_trav, void *); - -extern size_t htab_size (htab_t); -extern size_t htab_elements (htab_t); -extern double htab_collisions (htab_t); - -/* A hash function for pointers. */ -extern htab_hash htab_hash_pointer; - -/* An equality function for pointers. */ -extern htab_eq htab_eq_pointer; - -/* A hash function for null-terminated strings. */ -extern hashval_t htab_hash_string (const void *); - -/* An iterative hash function for arbitrary data. */ -extern hashval_t iterative_hash (const void *, size_t, hashval_t); -/* Shorthand for hashing something with an intrinsic size. 
*/ -#define iterative_hash_object(OB,INIT) iterative_hash (&OB, sizeof (OB), INIT) - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __HASHTAB_H */ diff --git a/src/libbacktrace/install-sh b/src/libbacktrace/install-sh deleted file mode 100644 index 0b0fdcbba69a..000000000000 --- a/src/libbacktrace/install-sh +++ /dev/null @@ -1,501 +0,0 @@ -#!/bin/sh -# install - install a program, script, or datafile - -scriptversion=2013-12-25.23; # UTC - -# This originates from X11R5 (mit/util/scripts/install.sh), which was -# later released in X11R6 (xc/config/util/install.sh) with the -# following copyright and license. -# -# Copyright (C) 1994 X Consortium -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN -# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- -# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Except as contained in this notice, the name of the X Consortium shall not -# be used in advertising or otherwise to promote the sale, use or other deal- -# ings in this Software without prior written authorization from the X Consor- -# tium. 
-# -# -# FSF changes to this file are in the public domain. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# 'make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. - -tab=' ' -nl=' -' -IFS=" $tab$nl" - -# Set DOITPROG to "echo" to test this script. - -doit=${DOITPROG-} -doit_exec=${doit:-exec} - -# Put in absolute file names if you don't have them in your path; -# or use environment vars. - -chgrpprog=${CHGRPPROG-chgrp} -chmodprog=${CHMODPROG-chmod} -chownprog=${CHOWNPROG-chown} -cmpprog=${CMPPROG-cmp} -cpprog=${CPPROG-cp} -mkdirprog=${MKDIRPROG-mkdir} -mvprog=${MVPROG-mv} -rmprog=${RMPROG-rm} -stripprog=${STRIPPROG-strip} - -posix_mkdir= - -# Desired mode of installed file. -mode=0755 - -chgrpcmd= -chmodcmd=$chmodprog -chowncmd= -mvcmd=$mvprog -rmcmd="$rmprog -f" -stripcmd= - -src= -dst= -dir_arg= -dst_arg= - -copy_on_change=false -is_target_a_directory=possibly - -usage="\ -Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE - or: $0 [OPTION]... SRCFILES... DIRECTORY - or: $0 [OPTION]... -t DIRECTORY SRCFILES... - or: $0 [OPTION]... -d DIRECTORIES... - -In the 1st form, copy SRCFILE to DSTFILE. -In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. -In the 4th, create DIRECTORIES. - -Options: - --help display this help and exit. - --version display version info and exit. - - -c (ignored) - -C install only if different (preserve the last data modification time) - -d create directories instead of installing files. - -g GROUP $chgrpprog installed files to GROUP. - -m MODE $chmodprog installed files to MODE. - -o USER $chownprog installed files to USER. - -s $stripprog installed files. - -t DIRECTORY install into DIRECTORY. - -T report an error if DSTFILE is a directory. 
- -Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG - RMPROG STRIPPROG -" - -while test $# -ne 0; do - case $1 in - -c) ;; - - -C) copy_on_change=true;; - - -d) dir_arg=true;; - - -g) chgrpcmd="$chgrpprog $2" - shift;; - - --help) echo "$usage"; exit $?;; - - -m) mode=$2 - case $mode in - *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) - echo "$0: invalid mode: $mode" >&2 - exit 1;; - esac - shift;; - - -o) chowncmd="$chownprog $2" - shift;; - - -s) stripcmd=$stripprog;; - - -t) - is_target_a_directory=always - dst_arg=$2 - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - shift;; - - -T) is_target_a_directory=never;; - - --version) echo "$0 $scriptversion"; exit $?;; - - --) shift - break;; - - -*) echo "$0: invalid option: $1" >&2 - exit 1;; - - *) break;; - esac - shift -done - -# We allow the use of options -d and -T together, by making -d -# take the precedence; this is for compatibility with GNU install. - -if test -n "$dir_arg"; then - if test -n "$dst_arg"; then - echo "$0: target directory not allowed when installing a directory." >&2 - exit 1 - fi -fi - -if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then - # When -d is used, all remaining arguments are directories to create. - # When -t is used, the destination is already specified. - # Otherwise, the last argument is the destination. Remove it from $@. - for arg - do - if test -n "$dst_arg"; then - # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dst_arg" - shift # fnord - fi - shift # arg - dst_arg=$arg - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - done -fi - -if test $# -eq 0; then - if test -z "$dir_arg"; then - echo "$0: no input file specified." >&2 - exit 1 - fi - # It's OK to call 'install-sh -d' without argument. 
- # This can happen when creating conditional directories. - exit 0 -fi - -if test -z "$dir_arg"; then - if test $# -gt 1 || test "$is_target_a_directory" = always; then - if test ! -d "$dst_arg"; then - echo "$0: $dst_arg: Is not a directory." >&2 - exit 1 - fi - fi -fi - -if test -z "$dir_arg"; then - do_exit='(exit $ret); exit $ret' - trap "ret=129; $do_exit" 1 - trap "ret=130; $do_exit" 2 - trap "ret=141; $do_exit" 13 - trap "ret=143; $do_exit" 15 - - # Set umask so as not to create temps with too-generous modes. - # However, 'strip' requires both read and write access to temps. - case $mode in - # Optimize common cases. - *644) cp_umask=133;; - *755) cp_umask=22;; - - *[0-7]) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw='% 200' - fi - cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; - *) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw=,u+rw - fi - cp_umask=$mode$u_plus_rw;; - esac -fi - -for src -do - # Protect names problematic for 'test' and other utilities. - case $src in - -* | [=\(\)!]) src=./$src;; - esac - - if test -n "$dir_arg"; then - dst=$src - dstdir=$dst - test -d "$dstdir" - dstdir_status=$? - else - - # Waiting for this to be detected by the "$cpprog $src $dsttmp" command - # might cause directories to be created, which would be especially bad - # if $src (and thus $dsttmp) contains '*'. - if test ! -f "$src" && test ! -d "$src"; then - echo "$0: $src does not exist." >&2 - exit 1 - fi - - if test -z "$dst_arg"; then - echo "$0: no destination specified." >&2 - exit 1 - fi - dst=$dst_arg - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. - if test -d "$dst"; then - if test "$is_target_a_directory" = never; then - echo "$0: $dst_arg: Is a directory" >&2 - exit 1 - fi - dstdir=$dst - dst=$dstdir/`basename "$src"` - dstdir_status=0 - else - dstdir=`dirname "$dst"` - test -d "$dstdir" - dstdir_status=$? 
- fi - fi - - obsolete_mkdir_used=false - - if test $dstdir_status != 0; then - case $posix_mkdir in - '') - # Create intermediate dirs using mode 755 as modified by the umask. - # This is like FreeBSD 'install' as of 1997-10-28. - umask=`umask` - case $stripcmd.$umask in - # Optimize common cases. - *[2367][2367]) mkdir_umask=$umask;; - .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; - - *[0-7]) - mkdir_umask=`expr $umask + 22 \ - - $umask % 100 % 40 + $umask % 20 \ - - $umask % 10 % 4 + $umask % 2 - `;; - *) mkdir_umask=$umask,go-w;; - esac - - # With -d, create the new directory with the user-specified mode. - # Otherwise, rely on $mkdir_umask. - if test -n "$dir_arg"; then - mkdir_mode=-m$mode - else - mkdir_mode= - fi - - posix_mkdir=false - case $umask in - *[123567][0-7][0-7]) - # POSIX mkdir -p sets u+wx bits regardless of umask, which - # is incompatible with FreeBSD 'install' when (umask & 300) != 0. - ;; - *) - tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ - trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 - - if (umask $mkdir_umask && - exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 - then - if test -z "$dir_arg" || { - # Check for POSIX incompatibilities with -m. - # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or - # other-writable bit of parent directory when it shouldn't. - # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. - ls_ld_tmpdir=`ls -ld "$tmpdir"` - case $ls_ld_tmpdir in - d????-?r-*) different_mode=700;; - d????-?--*) different_mode=755;; - *) false;; - esac && - $mkdirprog -m$different_mode -p -- "$tmpdir" && { - ls_ld_tmpdir_1=`ls -ld "$tmpdir"` - test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" - } - } - then posix_mkdir=: - fi - rmdir "$tmpdir/d" "$tmpdir" - else - # Remove any dirs left behind by ancient mkdir implementations. 
- rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null - fi - trap '' 0;; - esac;; - esac - - if - $posix_mkdir && ( - umask $mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" - ) - then : - else - - # The umask is ridiculous, or mkdir does not conform to POSIX, - # or it failed possibly due to a race condition. Create the - # directory the slow way, step by step, checking for races as we go. - - case $dstdir in - /*) prefix='/';; - [-=\(\)!]*) prefix='./';; - *) prefix='';; - esac - - oIFS=$IFS - IFS=/ - set -f - set fnord $dstdir - shift - set +f - IFS=$oIFS - - prefixes= - - for d - do - test X"$d" = X && continue - - prefix=$prefix$d - if test -d "$prefix"; then - prefixes= - else - if $posix_mkdir; then - (umask=$mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break - # Don't fail if two instances are running concurrently. - test -d "$prefix" || exit 1 - else - case $prefix in - *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; - *) qprefix=$prefix;; - esac - prefixes="$prefixes '$qprefix'" - fi - fi - prefix=$prefix/ - done - - if test -n "$prefixes"; then - # Don't fail if two instances are running concurrently. - (umask $mkdir_umask && - eval "\$doit_exec \$mkdirprog $prefixes") || - test -d "$dstdir" || exit 1 - obsolete_mkdir_used=true - fi - fi - fi - - if test -n "$dir_arg"; then - { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && - { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || - test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 - else - - # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ - - # Trap to clean up those temp files at exit. - trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 - - # Copy the file name to the temp name. - (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && - - # and set any options; do chmod last to preserve setuid bits. 
- # - # If any of these fail, we abort the whole thing. If we want to - # ignore errors from any of these, just make sure not to ignore - # errors from the above "$doit $cpprog $src $dsttmp" command. - # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && - { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && - { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && - - # If -C, don't bother to copy if it wouldn't change the file. - if $copy_on_change && - old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && - new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && - set -f && - set X $old && old=:$2:$4:$5:$6 && - set X $new && new=:$2:$4:$5:$6 && - set +f && - test "$old" = "$new" && - $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 - then - rm -f "$dsttmp" - else - # Rename the file to the real destination. - $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || - - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. - { - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - test ! -f "$dst" || - $doit $rmcmd -f "$dst" 2>/dev/null || - { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && - { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } - } || - { echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - } && - - # Now rename the file to the real destination. 
- $doit $mvcmd "$dsttmp" "$dst" - } - fi || exit 1 - - trap '' 0 - fi -done - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/src/libbacktrace/internal.h b/src/libbacktrace/internal.h deleted file mode 100644 index 73728da3f566..000000000000 --- a/src/libbacktrace/internal.h +++ /dev/null @@ -1,294 +0,0 @@ -/* internal.h -- Internal header file for stack backtrace library. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#ifndef BACKTRACE_INTERNAL_H -#define BACKTRACE_INTERNAL_H - -/* We assume that and "backtrace.h" have already been - included. */ - -#ifndef GCC_VERSION -# define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__) -#endif - -#if (GCC_VERSION < 2007) -# define __attribute__(x) -#endif - -#ifndef ATTRIBUTE_UNUSED -# define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) -#endif - -#ifndef ATTRIBUTE_MALLOC -# if (GCC_VERSION >= 2096) -# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__)) -# else -# define ATTRIBUTE_MALLOC -# endif -#endif - -#ifndef HAVE_SYNC_FUNCTIONS - -/* Define out the sync functions. These should never be called if - they are not available. */ - -#define __sync_bool_compare_and_swap(A, B, C) (abort(), 1) -#define __sync_lock_test_and_set(A, B) (abort(), 0) -#define __sync_lock_release(A) abort() - -#endif /* !defined (HAVE_SYNC_FUNCTIONS) */ - -#ifdef HAVE_ATOMIC_FUNCTIONS - -/* We have the atomic builtin functions. 
*/ - -#define backtrace_atomic_load_pointer(p) \ - __atomic_load_n ((p), __ATOMIC_ACQUIRE) -#define backtrace_atomic_load_int(p) \ - __atomic_load_n ((p), __ATOMIC_ACQUIRE) -#define backtrace_atomic_store_pointer(p, v) \ - __atomic_store_n ((p), (v), __ATOMIC_RELEASE) -#define backtrace_atomic_store_size_t(p, v) \ - __atomic_store_n ((p), (v), __ATOMIC_RELEASE) -#define backtrace_atomic_store_int(p, v) \ - __atomic_store_n ((p), (v), __ATOMIC_RELEASE) - -#else /* !defined (HAVE_ATOMIC_FUNCTIONS) */ -#ifdef HAVE_SYNC_FUNCTIONS - -/* We have the sync functions but not the atomic functions. Define - the atomic ones in terms of the sync ones. */ - -extern void *backtrace_atomic_load_pointer (void *); -extern int backtrace_atomic_load_int (int *); -extern void backtrace_atomic_store_pointer (void *, void *); -extern void backtrace_atomic_store_size_t (size_t *, size_t); -extern void backtrace_atomic_store_int (int *, int); - -#else /* !defined (HAVE_SYNC_FUNCTIONS) */ - -/* We have neither the sync nor the atomic functions. These will - never be called. */ - -#define backtrace_atomic_load_pointer(p) (abort(), (void *) NULL) -#define backtrace_atomic_load_int(p) (abort(), 0) -#define backtrace_atomic_store_pointer(p, v) abort() -#define backtrace_atomic_store_size_t(p, v) abort() -#define backtrace_atomic_store_int(p, v) abort() - -#endif /* !defined (HAVE_SYNC_FUNCTIONS) */ -#endif /* !defined (HAVE_ATOMIC_FUNCTIONS) */ - -/* The type of the function that collects file/line information. This - is like backtrace_pcinfo. */ - -typedef int (*fileline) (struct backtrace_state *state, uintptr_t pc, - backtrace_full_callback callback, - backtrace_error_callback error_callback, void *data); - -/* The type of the function that collects symbol information. This is - like backtrace_syminfo. 
*/ - -typedef void (*syminfo) (struct backtrace_state *state, uintptr_t pc, - backtrace_syminfo_callback callback, - backtrace_error_callback error_callback, void *data); - -/* What the backtrace state pointer points to. */ - -struct backtrace_state -{ - /* The name of the executable. */ - const char *filename; - /* Non-zero if threaded. */ - int threaded; - /* The master lock for fileline_fn, fileline_data, syminfo_fn, - syminfo_data, fileline_initialization_failed and everything the - data pointers point to. */ - void *lock; - /* The function that returns file/line information. */ - fileline fileline_fn; - /* The data to pass to FILELINE_FN. */ - void *fileline_data; - /* The function that returns symbol information. */ - syminfo syminfo_fn; - /* The data to pass to SYMINFO_FN. */ - void *syminfo_data; - /* Whether initializing the file/line information failed. */ - int fileline_initialization_failed; - /* The lock for the freelist. */ - int lock_alloc; - /* The freelist when using mmap. */ - struct backtrace_freelist_struct *freelist; -}; - -/* Open a file for reading. Returns -1 on error. If DOES_NOT_EXIST - is not NULL, *DOES_NOT_EXIST will be set to 0 normally and set to 1 - if the file does not exist. If the file does not exist and - DOES_NOT_EXIST is not NULL, the function will return -1 and will - not call ERROR_CALLBACK. On other errors, or if DOES_NOT_EXIST is - NULL, the function will call ERROR_CALLBACK before returning. */ -extern int backtrace_open (const char *filename, - backtrace_error_callback error_callback, - void *data, - int *does_not_exist); - -/* A view of the contents of a file. This supports mmap when - available. A view will remain in memory even after backtrace_close - is called on the file descriptor from which the view was - obtained. */ - -struct backtrace_view -{ - /* The data that the caller requested. */ - const void *data; - /* The base of the view. */ - void *base; - /* The total length of the view. 
*/ - size_t len; -}; - -/* Create a view of SIZE bytes from DESCRIPTOR at OFFSET. Store the - result in *VIEW. Returns 1 on success, 0 on error. */ -extern int backtrace_get_view (struct backtrace_state *state, int descriptor, - off_t offset, size_t size, - backtrace_error_callback error_callback, - void *data, struct backtrace_view *view); - -/* Release a view created by backtrace_get_view. */ -extern void backtrace_release_view (struct backtrace_state *state, - struct backtrace_view *view, - backtrace_error_callback error_callback, - void *data); - -/* Close a file opened by backtrace_open. Returns 1 on success, 0 on - error. */ - -extern int backtrace_close (int descriptor, - backtrace_error_callback error_callback, - void *data); - -/* Sort without using memory. */ - -extern void backtrace_qsort (void *base, size_t count, size_t size, - int (*compar) (const void *, const void *)); - -/* Allocate memory. This is like malloc. If ERROR_CALLBACK is NULL, - this does not report an error, it just returns NULL. */ - -extern void *backtrace_alloc (struct backtrace_state *state, size_t size, - backtrace_error_callback error_callback, - void *data) ATTRIBUTE_MALLOC; - -/* Free memory allocated by backtrace_alloc. If ERROR_CALLBACK is - NULL, this does not report an error. */ - -extern void backtrace_free (struct backtrace_state *state, void *mem, - size_t size, - backtrace_error_callback error_callback, - void *data); - -/* A growable vector of some struct. This is used for more efficient - allocation when we don't know the final size of some group of data - that we want to represent as an array. */ - -struct backtrace_vector -{ - /* The base of the vector. */ - void *base; - /* The number of bytes in the vector. */ - size_t size; - /* The number of bytes available at the current allocation. */ - size_t alc; -}; - -/* Grow VEC by SIZE bytes. Return a pointer to the newly allocated - bytes. Note that this may move the entire vector to a new memory - location. 
Returns NULL on failure. */ - -extern void *backtrace_vector_grow (struct backtrace_state *state, size_t size, - backtrace_error_callback error_callback, - void *data, - struct backtrace_vector *vec); - -/* Finish the current allocation on VEC. Prepare to start a new - allocation. The finished allocation will never be freed. Returns - a pointer to the base of the finished entries, or NULL on - failure. */ - -extern void* backtrace_vector_finish (struct backtrace_state *state, - struct backtrace_vector *vec, - backtrace_error_callback error_callback, - void *data); - -/* Release any extra space allocated for VEC. This may change - VEC->base. Returns 1 on success, 0 on failure. */ - -extern int backtrace_vector_release (struct backtrace_state *state, - struct backtrace_vector *vec, - backtrace_error_callback error_callback, - void *data); - -/* Read initial debug data from a descriptor, and set the - fileline_data, syminfo_fn, and syminfo_data fields of STATE. - Return the fileln_fn field in *FILELN_FN--this is done this way so - that the synchronization code is only implemented once. This is - called after the descriptor has first been opened. It will close - the descriptor if it is no longer needed. Returns 1 on success, 0 - on error. There will be multiple implementations of this function, - for different file formats. Each system will compile the - appropriate one. */ - -extern int backtrace_initialize (struct backtrace_state *state, - int descriptor, - backtrace_error_callback error_callback, - void *data, - fileline *fileline_fn); - -/* Add file/line information for a DWARF module. 
*/ - -extern int backtrace_dwarf_add (struct backtrace_state *state, - uintptr_t base_address, - const unsigned char* dwarf_info, - size_t dwarf_info_size, - const unsigned char *dwarf_line, - size_t dwarf_line_size, - const unsigned char *dwarf_abbrev, - size_t dwarf_abbrev_size, - const unsigned char *dwarf_ranges, - size_t dwarf_range_size, - const unsigned char *dwarf_str, - size_t dwarf_str_size, - int is_bigendian, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn); - -#endif diff --git a/src/libbacktrace/ltmain.sh b/src/libbacktrace/ltmain.sh deleted file mode 100644 index eff9e62be8a0..000000000000 --- a/src/libbacktrace/ltmain.sh +++ /dev/null @@ -1,8636 +0,0 @@ -# Generated from ltmain.m4sh. - -# libtool (GNU libtool 1.3134 2009-11-29) 2.2.7a -# Written by Gordon Matzigkeit , 1996 - -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, -# 2007, 2008, 2009 Free Software Foundation, Inc. -# This is free software; see the source for copying conditions. There is NO -# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - -# GNU Libtool is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# As a special exception to the GNU General Public License, -# if you distribute this file as part of a program or library that -# is built using GNU Libtool, you may include this file under the -# same distribution terms that you use for the rest of that program. -# -# GNU Libtool is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with GNU Libtool; see the file COPYING. If not, a copy -# can be downloaded from http://www.gnu.org/licenses/gpl.html, -# or obtained by writing to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - -# Usage: $progname [OPTION]... [MODE-ARG]... -# -# Provide generalized library-building support services. -# -# --config show all configuration variables -# --debug enable verbose shell tracing -# -n, --dry-run display commands without modifying any files -# --features display basic configuration information and exit -# --mode=MODE use operation mode MODE -# --no-finish let install mode avoid finish commands -# --preserve-dup-deps don't remove duplicate dependency libraries -# --quiet, --silent don't print informational messages -# --no-quiet, --no-silent -# print informational messages (default) -# --tag=TAG use configuration variables from tag TAG -# -v, --verbose print more informational messages than default -# --no-verbose don't print the extra informational messages -# --version print version information -# -h, --help, --help-all print short, long, or detailed help message -# -# MODE must be one of the following: -# -# clean remove files from the build directory -# compile compile a source file into a libtool object -# execute automatically set library path, then run a program -# finish complete the installation of libtool libraries -# install install libraries or executables -# link create a library or an executable -# uninstall remove libraries from an installed directory -# -# MODE-ARGS vary depending on the MODE. When passed as first option, -# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. -# Try `$progname --help --mode=MODE' for a more detailed description of MODE. 
-# -# When reporting a bug, please describe a test case to reproduce it and -# include the following information: -# -# host-triplet: $host -# shell: $SHELL -# compiler: $LTCC -# compiler flags: $LTCFLAGS -# linker: $LD (gnu? $with_gnu_ld) -# $progname: (GNU libtool 1.3134 2009-11-29) 2.2.7a -# automake: $automake_version -# autoconf: $autoconf_version -# -# Report bugs to . - -PROGRAM=libtool -PACKAGE=libtool -VERSION=2.2.7a -TIMESTAMP=" 1.3134 2009-11-29" -package_revision=1.3134 - -# Be Bourne compatible -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then - emulate sh - NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac -fi -BIN_SH=xpg4; export BIN_SH # for Tru64 -DUALCASE=1; export DUALCASE # for MKS sh - -# A function that is used when there is no print builtin or printf. -func_fallback_echo () -{ - eval 'cat <<_LTECHO_EOF -$1 -_LTECHO_EOF' -} - -# NLS nuisances: We save the old values to restore during execute mode. -# Only set LANG and LC_ALL to C if already set. -# These must not be set unconditionally because not all systems understand -# e.g. LANG=C (notably SCO). -lt_user_locale= -lt_safe_locale= -for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES -do - eval "if test \"\${$lt_var+set}\" = set; then - save_$lt_var=\$$lt_var - $lt_var=C - export $lt_var - lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" - lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" - fi" -done - -$lt_unset CDPATH - - - - - - - -# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh -# is ksh but when the shell is invoked as "sh" and the current value of -# the _XPG environment variable is not equal to 1 (one), the special -# positional parameter $0, within a function call, is the name of the -# function. 
-progpath="$0" - - - -: ${CP="cp -f"} -: ${ECHO=$as_echo} -: ${EGREP="/bin/grep -E"} -: ${FGREP="/bin/grep -F"} -: ${GREP="/bin/grep"} -: ${LN_S="ln -s"} -: ${MAKE="make"} -: ${MKDIR="mkdir"} -: ${MV="mv -f"} -: ${RM="rm -f"} -: ${SED="/mount/endor/wildenhu/local-x86_64/bin/sed"} -: ${SHELL="${CONFIG_SHELL-/bin/sh}"} -: ${Xsed="$SED -e 1s/^X//"} - -# Global variables: -EXIT_SUCCESS=0 -EXIT_FAILURE=1 -EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. -EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. - -exit_status=$EXIT_SUCCESS - -# Make sure IFS has a sensible default -lt_nl=' -' -IFS=" $lt_nl" - -dirname="s,/[^/]*$,," -basename="s,^.*/,," - -# func_dirname_and_basename file append nondir_replacement -# perform func_basename and func_dirname in a single function -# call: -# dirname: Compute the dirname of FILE. If nonempty, -# add APPEND to the result, otherwise set result -# to NONDIR_REPLACEMENT. -# value returned in "$func_dirname_result" -# basename: Compute filename of FILE. -# value returned in "$func_basename_result" -# Implementation must be kept synchronized with func_dirname -# and func_basename. For efficiency, we do not delegate to -# those functions but instead duplicate the functionality here. -func_dirname_and_basename () -{ - # Extract subdirectory from the argument. - func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` - if test "X$func_dirname_result" = "X${1}"; then - func_dirname_result="${3}" - else - func_dirname_result="$func_dirname_result${2}" - fi - func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` -} - -# Generated shell functions inserted here. - -# These SED scripts presuppose an absolute path with a trailing slash. -pathcar='s,^/\([^/]*\).*$,\1,' -pathcdr='s,^/[^/]*,,' -removedotparts=':dotsl - s@/\./@/@g - t dotsl - s,/\.$,/,' -collapseslashes='s@/\{1,\}@/@g' -finalslash='s,/*$,/,' - -# func_normal_abspath PATH -# Remove doubled-up and trailing slashes, "." 
path components, -# and cancel out any ".." path components in PATH after making -# it an absolute path. -# value returned in "$func_normal_abspath_result" -func_normal_abspath () -{ - # Start from root dir and reassemble the path. - func_normal_abspath_result= - func_normal_abspath_tpath=$1 - func_normal_abspath_altnamespace= - case $func_normal_abspath_tpath in - "") - # Empty path, that just means $cwd. - func_stripname '' '/' "`pwd`" - func_normal_abspath_result=$func_stripname_result - return - ;; - # The next three entries are used to spot a run of precisely - # two leading slashes without using negated character classes; - # we take advantage of case's first-match behaviour. - ///*) - # Unusual form of absolute path, do nothing. - ;; - //*) - # Not necessarily an ordinary path; POSIX reserves leading '//' - # and for example Cygwin uses it to access remote file shares - # over CIFS/SMB, so we conserve a leading double slash if found. - func_normal_abspath_altnamespace=/ - ;; - /*) - # Absolute path, do nothing. - ;; - *) - # Relative path, prepend $cwd. - func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath - ;; - esac - # Cancel out all the simple stuff to save iterations. We also want - # the path to end with a slash for ease of parsing, so make sure - # there is one (and only one) here. - func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ - -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` - while :; do - # Processed it all yet? - if test "$func_normal_abspath_tpath" = / ; then - # If we ascended to the root using ".." the result may be empty now. 
- if test -z "$func_normal_abspath_result" ; then - func_normal_abspath_result=/ - fi - break - fi - func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ - -e "$pathcar"` - func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ - -e "$pathcdr"` - # Figure out what to do with it - case $func_normal_abspath_tcomponent in - "") - # Trailing empty path component, ignore it. - ;; - ..) - # Parent dir; strip last assembled component from result. - func_dirname "$func_normal_abspath_result" - func_normal_abspath_result=$func_dirname_result - ;; - *) - # Actual path component, append it. - func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent - ;; - esac - done - # Restore leading double-slash if one was found on entry. - func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result -} - -# func_relative_path SRCDIR DSTDIR -# generates a relative path from SRCDIR to DSTDIR, with a trailing -# slash if non-empty, suitable for immediately appending a filename -# without needing to append a separator. -# value returned in "$func_relative_path_result" -func_relative_path () -{ - func_relative_path_result= - func_normal_abspath "$1" - func_relative_path_tlibdir=$func_normal_abspath_result - func_normal_abspath "$2" - func_relative_path_tbindir=$func_normal_abspath_result - - # Ascend the tree starting from libdir - while :; do - # check if we have found a prefix of bindir - case $func_relative_path_tbindir in - $func_relative_path_tlibdir) - # found an exact match - func_relative_path_tcancelled= - break - ;; - $func_relative_path_tlibdir*) - # found a matching prefix - func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" - func_relative_path_tcancelled=$func_stripname_result - if test -z "$func_relative_path_result"; then - func_relative_path_result=. 
- fi - break - ;; - *) - func_dirname $func_relative_path_tlibdir - func_relative_path_tlibdir=${func_dirname_result} - if test "x$func_relative_path_tlibdir" = x ; then - # Have to descend all the way to the root! - func_relative_path_result=../$func_relative_path_result - func_relative_path_tcancelled=$func_relative_path_tbindir - break - fi - func_relative_path_result=../$func_relative_path_result - ;; - esac - done - - # Now calculate path; take care to avoid doubling-up slashes. - func_stripname '' '/' "$func_relative_path_result" - func_relative_path_result=$func_stripname_result - func_stripname '/' '/' "$func_relative_path_tcancelled" - if test "x$func_stripname_result" != x ; then - func_relative_path_result=${func_relative_path_result}/${func_stripname_result} - fi - - # Normalisation. If bindir is libdir, return empty string, - # else relative path ending with a slash; either way, target - # file name can be directly appended. - if test ! -z "$func_relative_path_result"; then - func_stripname './' '' "$func_relative_path_result/" - func_relative_path_result=$func_stripname_result - fi -} - -# The name of this program: -func_dirname_and_basename "$progpath" -progname=$func_basename_result - -# Make sure we have an absolute path for reexecution: -case $progpath in - [\\/]*|[A-Za-z]:\\*) ;; - *[\\/]*) - progdir=$func_dirname_result - progdir=`cd "$progdir" && pwd` - progpath="$progdir/$progname" - ;; - *) - save_IFS="$IFS" - IFS=: - for progdir in $PATH; do - IFS="$save_IFS" - test -x "$progdir/$progname" && break - done - IFS="$save_IFS" - test -n "$progdir" || progdir=`pwd` - progpath="$progdir/$progname" - ;; -esac - -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. -Xsed="${SED}"' -e 1s/^X//' -sed_quote_subst='s/\([`"$\\]\)/\\\1/g' - -# Same as above, but do not quote variable references. 
-double_quote_subst='s/\(["`\\]\)/\\\1/g' - -# Re-`\' parameter expansions in output of double_quote_subst that were -# `\'-ed in input to the same. If an odd number of `\' preceded a '$' -# in input to double_quote_subst, that '$' was protected from expansion. -# Since each input `\' is now two `\'s, look for any number of runs of -# four `\'s followed by two `\'s and then a '$'. `\' that '$'. -bs='\\' -bs2='\\\\' -bs4='\\\\\\\\' -dollar='\$' -sed_double_backslash="\ - s/$bs4/&\\ -/g - s/^$bs2$dollar/$bs&/ - s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g - s/\n//g" - -# Standard options: -opt_dry_run=false -opt_help=false -opt_quiet=false -opt_verbose=false -opt_warning=: - -# func_echo arg... -# Echo program name prefixed message, along with the current mode -# name if it has been set yet. -func_echo () -{ - $ECHO "$progname${mode+: }$mode: $*" -} - -# func_verbose arg... -# Echo program name prefixed message in verbose mode only. -func_verbose () -{ - $opt_verbose && func_echo ${1+"$@"} - - # A bug in bash halts the script if the last line of a function - # fails when set -e is in force, so we need another command to - # work around that: - : -} - -# func_echo_all arg... -# Invoke $ECHO with all args, space-separated. -func_echo_all () -{ - $ECHO "$*" -} - -# func_error arg... -# Echo program name prefixed message to standard error. -func_error () -{ - $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 -} - -# func_warning arg... -# Echo program name prefixed warning message to standard error. -func_warning () -{ - $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 - - # bash bug again: - : -} - -# func_fatal_error arg... -# Echo program name prefixed message to standard error, and exit. -func_fatal_error () -{ - func_error ${1+"$@"} - exit $EXIT_FAILURE -} - -# func_fatal_help arg... -# Echo program name prefixed message to standard error, followed by -# a help hint, and exit. 
-func_fatal_help () -{ - func_error ${1+"$@"} - func_fatal_error "$help" -} -help="Try \`$progname --help' for more information." ## default - - -# func_grep expression filename -# Check whether EXPRESSION matches any line of FILENAME, without output. -func_grep () -{ - $GREP "$1" "$2" >/dev/null 2>&1 -} - - -# func_mkdir_p directory-path -# Make sure the entire path to DIRECTORY-PATH is available. -func_mkdir_p () -{ - my_directory_path="$1" - my_dir_list= - - if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then - - # Protect directory names starting with `-' - case $my_directory_path in - -*) my_directory_path="./$my_directory_path" ;; - esac - - # While some portion of DIR does not yet exist... - while test ! -d "$my_directory_path"; do - # ...make a list in topmost first order. Use a colon delimited - # list incase some portion of path contains whitespace. - my_dir_list="$my_directory_path:$my_dir_list" - - # If the last portion added has no slash in it, the list is done - case $my_directory_path in */*) ;; *) break ;; esac - - # ...otherwise throw away the child directory and loop - my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"` - done - my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'` - - save_mkdir_p_IFS="$IFS"; IFS=':' - for my_dir in $my_dir_list; do - IFS="$save_mkdir_p_IFS" - # mkdir can fail with a `File exist' error if two processes - # try to create one of the directories concurrently. Don't - # stop in that case! - $MKDIR "$my_dir" 2>/dev/null || : - done - IFS="$save_mkdir_p_IFS" - - # Bail out if we (or some other process) failed to create a directory. - test -d "$my_directory_path" || \ - func_fatal_error "Failed to create \`$1'" - fi -} - - -# func_mktempdir [string] -# Make a temporary directory that won't clash with other running -# libtool processes, and avoids race conditions if possible. If -# given, STRING is the basename for that directory. 
-func_mktempdir () -{ - my_template="${TMPDIR-/tmp}/${1-$progname}" - - if test "$opt_dry_run" = ":"; then - # Return a directory name, but don't create it in dry-run mode - my_tmpdir="${my_template}-$$" - else - - # If mktemp works, use that first and foremost - my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` - - if test ! -d "$my_tmpdir"; then - # Failing that, at least try and use $RANDOM to avoid a race - my_tmpdir="${my_template}-${RANDOM-0}$$" - - save_mktempdir_umask=`umask` - umask 0077 - $MKDIR "$my_tmpdir" - umask $save_mktempdir_umask - fi - - # If we're not in dry-run mode, bomb out on failure - test -d "$my_tmpdir" || \ - func_fatal_error "cannot create temporary directory \`$my_tmpdir'" - fi - - $ECHO "$my_tmpdir" -} - - -# func_quote_for_eval arg -# Aesthetically quote ARG to be evaled later. -# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT -# is double-quoted, suitable for a subsequent eval, whereas -# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters -# which are still active within double quotes backslashified. -func_quote_for_eval () -{ - case $1 in - *[\\\`\"\$]*) - func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;; - *) - func_quote_for_eval_unquoted_result="$1" ;; - esac - - case $func_quote_for_eval_unquoted_result in - # Double-quote args containing shell metacharacters to delay - # word splitting, command substitution and and variable - # expansion for a subsequent eval. - # Many Bourne shells cannot handle close brackets correctly - # in scan sets, so we specify it separately. - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" - ;; - *) - func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" - esac -} - - -# func_quote_for_expand arg -# Aesthetically quote ARG to be evaled later; same as above, -# but do not quote variable references. 
-func_quote_for_expand () -{ - case $1 in - *[\\\`\"]*) - my_arg=`$ECHO "$1" | $SED \ - -e "$double_quote_subst" -e "$sed_double_backslash"` ;; - *) - my_arg="$1" ;; - esac - - case $my_arg in - # Double-quote args containing shell metacharacters to delay - # word splitting and command substitution for a subsequent eval. - # Many Bourne shells cannot handle close brackets correctly - # in scan sets, so we specify it separately. - *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") - my_arg="\"$my_arg\"" - ;; - esac - - func_quote_for_expand_result="$my_arg" -} - - -# func_show_eval cmd [fail_exp] -# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is -# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP -# is given, then evaluate it. -func_show_eval () -{ - my_cmd="$1" - my_fail_exp="${2-:}" - - ${opt_silent-false} || { - func_quote_for_expand "$my_cmd" - eval "func_echo $func_quote_for_expand_result" - } - - if ${opt_dry_run-false}; then :; else - eval "$my_cmd" - my_status=$? - if test "$my_status" -eq 0; then :; else - eval "(exit $my_status); $my_fail_exp" - fi - fi -} - - -# func_show_eval_locale cmd [fail_exp] -# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is -# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP -# is given, then evaluate it. Use the saved locale for evaluation. -func_show_eval_locale () -{ - my_cmd="$1" - my_fail_exp="${2-:}" - - ${opt_silent-false} || { - func_quote_for_expand "$my_cmd" - eval "func_echo $func_quote_for_expand_result" - } - - if ${opt_dry_run-false}; then :; else - eval "$lt_user_locale - $my_cmd" - my_status=$? - eval "$lt_safe_locale" - if test "$my_status" -eq 0; then :; else - eval "(exit $my_status); $my_fail_exp" - fi - fi -} - - - - - -# func_version -# Echo version message to standard output and exit. 
-func_version () -{ - $SED -n '/(C)/!b go - :more - /\./!{ - N - s/\n# // - b more - } - :go - /^# '$PROGRAM' (GNU /,/# warranty; / { - s/^# // - s/^# *$// - s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ - p - }' < "$progpath" - exit $? -} - -# func_usage -# Echo short help message to standard output and exit. -func_usage () -{ - $SED -n '/^# Usage:/,/^# *-h/ { - s/^# // - s/^# *$// - s/\$progname/'$progname'/ - p - }' < "$progpath" - echo - $ECHO "run \`$progname --help | more' for full usage" - exit $? -} - -# func_help [NOEXIT] -# Echo long help message to standard output and exit, -# unless 'noexit' is passed as argument. -func_help () -{ - $SED -n '/^# Usage:/,/# Report bugs to/ { - s/^# // - s/^# *$// - s*\$progname*'$progname'* - s*\$host*'"$host"'* - s*\$SHELL*'"$SHELL"'* - s*\$LTCC*'"$LTCC"'* - s*\$LTCFLAGS*'"$LTCFLAGS"'* - s*\$LD*'"$LD"'* - s/\$with_gnu_ld/'"$with_gnu_ld"'/ - s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ - s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ - p - }' < "$progpath" - ret=$? - if test -z "$1"; then - exit $ret - fi -} - -# func_missing_arg argname -# Echo program name prefixed message to standard error and set global -# exit_cmd. -func_missing_arg () -{ - func_error "missing argument for $1" - exit_cmd=exit -} - -exit_cmd=: - - - - - - -magic="%%%MAGIC variable%%%" -magic_exe="%%%MAGIC EXE variable%%%" - -# Global variables. -# $mode is unset -nonopt= -execute_dlfiles= -preserve_args= -lo2o="s/\\.lo\$/.${objext}/" -o2lo="s/\\.${objext}\$/.lo/" -extracted_archives= -extracted_serial=0 - -opt_dry_run=false -opt_finish=: -opt_duplicate_deps=false -opt_silent=false -opt_debug=: - -# If this variable is set in any of the actions, the command in it -# will be execed at the end. This prevents here-documents from being -# left over by shells. -exec_cmd= - -# func_fatal_configuration arg... 
-# Echo program name prefixed message to standard error, followed by -# a configuration failure hint, and exit. -func_fatal_configuration () -{ - func_error ${1+"$@"} - func_error "See the $PACKAGE documentation for more information." - func_fatal_error "Fatal configuration error." -} - - -# func_config -# Display the configuration for all the tags in this script. -func_config () -{ - re_begincf='^# ### BEGIN LIBTOOL' - re_endcf='^# ### END LIBTOOL' - - # Default configuration. - $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" - - # Now print the configurations for the tags. - for tagname in $taglist; do - $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" - done - - exit $? -} - -# func_features -# Display the features supported by this script. -func_features () -{ - echo "host: $host" - if test "$build_libtool_libs" = yes; then - echo "enable shared libraries" - else - echo "disable shared libraries" - fi - if test "$build_old_libs" = yes; then - echo "enable static libraries" - else - echo "disable static libraries" - fi - - exit $? -} - -# func_enable_tag tagname -# Verify that TAGNAME is valid, and either flag an error and exit, or -# enable the TAGNAME tag. We also add TAGNAME to the global $taglist -# variable here. -func_enable_tag () -{ - # Global variable: - tagname="$1" - - re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" - re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" - sed_extractcf="/$re_begincf/,/$re_endcf/p" - - # Validate tagname. - case $tagname in - *[!-_A-Za-z0-9,/]*) - func_fatal_error "invalid tag name: $tagname" - ;; - esac - - # Don't test for the "default" C tag, as we know it's - # there but not specially marked. - case $tagname in - CC) ;; - *) - if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then - taglist="$taglist $tagname" - - # Evaluate the configuration. 
Be careful to quote the path - # and the sed script, to avoid splitting on whitespace, but - # also don't use non-portable quotes within backquotes within - # quotes we have to do it in 2 steps: - extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` - eval "$extractedcf" - else - func_error "ignoring unknown tag $tagname" - fi - ;; - esac -} - -# Parse options once, thoroughly. This comes as soon as possible in -# the script to make things like `libtool --version' happen quickly. -{ - - # Shorthand for --mode=foo, only valid as the first argument - case $1 in - clean|clea|cle|cl) - shift; set dummy --mode clean ${1+"$@"}; shift - ;; - compile|compil|compi|comp|com|co|c) - shift; set dummy --mode compile ${1+"$@"}; shift - ;; - execute|execut|execu|exec|exe|ex|e) - shift; set dummy --mode execute ${1+"$@"}; shift - ;; - finish|finis|fini|fin|fi|f) - shift; set dummy --mode finish ${1+"$@"}; shift - ;; - install|instal|insta|inst|ins|in|i) - shift; set dummy --mode install ${1+"$@"}; shift - ;; - link|lin|li|l) - shift; set dummy --mode link ${1+"$@"}; shift - ;; - uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) - shift; set dummy --mode uninstall ${1+"$@"}; shift - ;; - esac - - # Parse non-mode specific arguments: - while test "$#" -gt 0; do - opt="$1" - shift - - case $opt in - --config) func_config ;; - - --debug) preserve_args="$preserve_args $opt" - func_echo "enabling shell trace mode" - opt_debug='set -x' - $opt_debug - ;; - - -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break - execute_dlfiles="$execute_dlfiles $1" - shift - ;; - - --dry-run | -n) opt_dry_run=: ;; - --features) func_features ;; - --finish) mode="finish" ;; - --no-finish) opt_finish=false ;; - - --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break - case $1 in - # Valid mode arguments: - clean) ;; - compile) ;; - execute) ;; - finish) ;; - install) ;; - link) ;; - relink) ;; - uninstall) ;; - - # Catch anything else as an error - *) func_error "invalid argument for 
$opt" - exit_cmd=exit - break - ;; - esac - - mode="$1" - shift - ;; - - --preserve-dup-deps) - opt_duplicate_deps=: ;; - - --quiet|--silent) preserve_args="$preserve_args $opt" - opt_silent=: - opt_verbose=false - ;; - - --no-quiet|--no-silent) - preserve_args="$preserve_args $opt" - opt_silent=false - ;; - - --verbose| -v) preserve_args="$preserve_args $opt" - opt_silent=false - opt_verbose=: - ;; - - --no-verbose) preserve_args="$preserve_args $opt" - opt_verbose=false - ;; - - --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break - preserve_args="$preserve_args $opt $1" - func_enable_tag "$1" # tagname is set here - shift - ;; - - # Separate optargs to long options: - -dlopen=*|--mode=*|--tag=*) - func_opt_split "$opt" - set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} - shift - ;; - - -\?|-h) func_usage ;; - --help) opt_help=: ;; - --help-all) opt_help=': help-all' ;; - --version) func_version ;; - - -*) func_fatal_help "unrecognized option \`$opt'" ;; - - *) nonopt="$opt" - break - ;; - esac - done - - - case $host in - *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* ) - # don't eliminate duplications in $postdeps and $predeps - opt_duplicate_compiler_generated_deps=: - ;; - *) - opt_duplicate_compiler_generated_deps=$opt_duplicate_deps - ;; - esac - - # Having warned about all mis-specified options, bail out if - # anything was wrong. - $exit_cmd $EXIT_FAILURE -} - -# func_check_version_match -# Ensure that we are using m4 macros, and libtool script from the same -# release of libtool. -func_check_version_match () -{ - if test "$package_revision" != "$macro_revision"; then - if test "$VERSION" != "$macro_version"; then - if test -z "$macro_version"; then - cat >&2 <<_LT_EOF -$progname: Version mismatch error. This is $PACKAGE $VERSION, but the -$progname: definition of this LT_INIT comes from an older release. -$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION -$progname: and run autoconf again. 
-_LT_EOF - else - cat >&2 <<_LT_EOF -$progname: Version mismatch error. This is $PACKAGE $VERSION, but the -$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. -$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION -$progname: and run autoconf again. -_LT_EOF - fi - else - cat >&2 <<_LT_EOF -$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, -$progname: but the definition of this LT_INIT comes from revision $macro_revision. -$progname: You should recreate aclocal.m4 with macros from revision $package_revision -$progname: of $PACKAGE $VERSION and run autoconf again. -_LT_EOF - fi - - exit $EXIT_MISMATCH - fi -} - - -## ----------- ## -## Main. ## -## ----------- ## - -$opt_help || { - # Sanity checks first: - func_check_version_match - - if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then - func_fatal_configuration "not configured to build any kind of library" - fi - - test -z "$mode" && func_fatal_error "error: you must specify a MODE." - - - # Darwin sucks - eval "std_shrext=\"$shrext_cmds\"" - - - # Only execute mode is allowed to have -dlopen flags. - if test -n "$execute_dlfiles" && test "$mode" != execute; then - func_error "unrecognized option \`-dlopen'" - $ECHO "$help" 1>&2 - exit $EXIT_FAILURE - fi - - # Change the help message to a mode-specific one. - generic_help="$help" - help="Try \`$progname --help --mode=$mode' for more information." -} - - -# func_lalib_p file -# True iff FILE is a libtool `.la' library or `.lo' object file. -# This function is only a basic sanity check; it will hardly flush out -# determined imposters. -func_lalib_p () -{ - test -f "$1" && - $SED -e 4q "$1" 2>/dev/null \ - | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 -} - -# func_lalib_unsafe_p file -# True iff FILE is a libtool `.la' library or `.lo' object file. -# This function implements the same check as func_lalib_p without -# resorting to external programs. 
To this end, it redirects stdin and -# closes it afterwards, without saving the original file descriptor. -# As a safety measure, use it only where a negative result would be -# fatal anyway. Works if `file' does not exist. -func_lalib_unsafe_p () -{ - lalib_p=no - if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then - for lalib_p_l in 1 2 3 4 - do - read lalib_p_line - case "$lalib_p_line" in - \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; - esac - done - exec 0<&5 5<&- - fi - test "$lalib_p" = yes -} - -# func_ltwrapper_script_p file -# True iff FILE is a libtool wrapper script -# This function is only a basic sanity check; it will hardly flush out -# determined imposters. -func_ltwrapper_script_p () -{ - func_lalib_p "$1" -} - -# func_ltwrapper_executable_p file -# True iff FILE is a libtool wrapper executable -# This function is only a basic sanity check; it will hardly flush out -# determined imposters. -func_ltwrapper_executable_p () -{ - func_ltwrapper_exec_suffix= - case $1 in - *.exe) ;; - *) func_ltwrapper_exec_suffix=.exe ;; - esac - $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 -} - -# func_ltwrapper_scriptname file -# Assumes file is an ltwrapper_executable -# uses $file to determine the appropriate filename for a -# temporary ltwrapper_script. -func_ltwrapper_scriptname () -{ - func_ltwrapper_scriptname_result="" - if func_ltwrapper_executable_p "$1"; then - func_dirname_and_basename "$1" "" "." - func_stripname '' '.exe' "$func_basename_result" - func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" - fi -} - -# func_ltwrapper_p file -# True iff FILE is a libtool wrapper script or wrapper executable -# This function is only a basic sanity check; it will hardly flush out -# determined imposters. -func_ltwrapper_p () -{ - func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" -} - - -# func_execute_cmds commands fail_cmd -# Execute tilde-delimited COMMANDS. 
-# If FAIL_CMD is given, eval that upon failure. -# FAIL_CMD may read-access the current command in variable CMD! -func_execute_cmds () -{ - $opt_debug - save_ifs=$IFS; IFS='~' - for cmd in $1; do - IFS=$save_ifs - eval "cmd=\"$cmd\"" - func_show_eval "$cmd" "${2-:}" - done - IFS=$save_ifs -} - - -# func_source file -# Source FILE, adding directory component if necessary. -# Note that it is not necessary on cygwin/mingw to append a dot to -# FILE even if both FILE and FILE.exe exist: automatic-append-.exe -# behavior happens only for exec(3), not for open(2)! Also, sourcing -# `FILE.' does not work on cygwin managed mounts. -func_source () -{ - $opt_debug - case $1 in - */* | *\\*) . "$1" ;; - *) . "./$1" ;; - esac -} - - -# func_infer_tag arg -# Infer tagged configuration to use if any are available and -# if one wasn't chosen via the "--tag" command line option. -# Only attempt this if the compiler in the base compile -# command doesn't match the default compiler. -# arg is usually of the form 'gcc ...' -func_infer_tag () -{ - $opt_debug - if test -n "$available_tags" && test -z "$tagname"; then - CC_quoted= - for arg in $CC; do - func_quote_for_eval "$arg" - CC_quoted="$CC_quoted $func_quote_for_eval_result" - done - CC_expanded=`func_echo_all $CC` - CC_quoted_expanded=`func_echo_all $CC_quoted` - case $@ in - # Blanks in the command may have been stripped by the calling shell, - # but not from the CC environment variable when configure was run. - " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ - " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; - # Blanks at the start of $base_compile will cause this to fail - # if we don't check for them as well. - *) - for z in $available_tags; do - if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then - # Evaluate the configuration. 
- eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" - CC_quoted= - for arg in $CC; do - # Double-quote args containing other shell metacharacters. - func_quote_for_eval "$arg" - CC_quoted="$CC_quoted $func_quote_for_eval_result" - done - CC_expanded=`func_echo_all $CC` - CC_quoted_expanded=`func_echo_all $CC_quoted` - case "$@ " in - " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ - " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) - # The compiler in the base compile command matches - # the one in the tagged configuration. - # Assume this is the tagged configuration we want. - tagname=$z - break - ;; - esac - fi - done - # If $tagname still isn't set, then no tagged configuration - # was found and let the user know that the "--tag" command - # line option must be used. - if test -z "$tagname"; then - func_echo "unable to infer tagged configuration" - func_fatal_error "specify a tag with \`--tag'" -# else -# func_verbose "using $tagname tagged configuration" - fi - ;; - esac - fi -} - - - -# func_write_libtool_object output_name pic_name nonpic_name -# Create a libtool object file (analogous to a ".la" file), -# but don't create it if we're doing a dry run. -func_write_libtool_object () -{ - write_libobj=${1} - if test "$build_libtool_libs" = yes; then - write_lobj=\'${2}\' - else - write_lobj=none - fi - - if test "$build_old_libs" = yes; then - write_oldobj=\'${3}\' - else - write_oldobj=none - fi - - $opt_dry_run || { - cat >${write_libobj}T <?"'"'"' &()|`$[]' \ - && func_warning "libobj name \`$libobj' may not contain shell special characters." - func_dirname_and_basename "$obj" "/" "" - objname="$func_basename_result" - xdir="$func_dirname_result" - lobj=${xdir}$objdir/$objname - - test -z "$base_compile" && \ - func_fatal_help "you must specify a compilation command" - - # Delete any leftover library objects. 
- if test "$build_old_libs" = yes; then - removelist="$obj $lobj $libobj ${libobj}T" - else - removelist="$lobj $libobj ${libobj}T" - fi - - # On Cygwin there's no "real" PIC flag so we must build both object types - case $host_os in - cygwin* | mingw* | pw32* | os2* | cegcc*) - pic_mode=default - ;; - esac - if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then - # non-PIC code in shared libraries is not supported - pic_mode=default - fi - - # Calculate the filename of the output object if compiler does - # not support -o with -c - if test "$compiler_c_o" = no; then - output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext} - lockfile="$output_obj.lock" - else - output_obj= - need_locks=no - lockfile= - fi - - # Lock this critical section if it is needed - # We use this script file to make the link, it avoids creating a new file - if test "$need_locks" = yes; then - until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do - func_echo "Waiting for $lockfile to be removed" - sleep 2 - done - elif test "$need_locks" = warn; then - if test -f "$lockfile"; then - $ECHO "\ -*** ERROR, $lockfile exists and contains: -`cat $lockfile 2>/dev/null` - -This indicates that another process is trying to use the same -temporary object file, and libtool could not work around it because -your compiler does not support \`-c' and \`-o' together. If you -repeat this compilation, it may succeed, by chance, but you had better -avoid parallel builds (make -j) in this platform, or get a better -compiler." 
- - $opt_dry_run || $RM $removelist - exit $EXIT_FAILURE - fi - removelist="$removelist $output_obj" - $ECHO "$srcfile" > "$lockfile" - fi - - $opt_dry_run || $RM $removelist - removelist="$removelist $lockfile" - trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 - - if test -n "$fix_srcfile_path"; then - eval "srcfile=\"$fix_srcfile_path\"" - fi - func_quote_for_eval "$srcfile" - qsrcfile=$func_quote_for_eval_result - - # Only build a PIC object if we are building libtool libraries. - if test "$build_libtool_libs" = yes; then - # Without this assignment, base_compile gets emptied. - fbsd_hideous_sh_bug=$base_compile - - if test "$pic_mode" != no; then - command="$base_compile $qsrcfile $pic_flag" - else - # Don't build PIC code - command="$base_compile $qsrcfile" - fi - - func_mkdir_p "$xdir$objdir" - - if test -z "$output_obj"; then - # Place PIC objects in $objdir - command="$command -o $lobj" - fi - - func_show_eval_locale "$command" \ - 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' - - if test "$need_locks" = warn && - test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then - $ECHO "\ -*** ERROR, $lockfile contains: -`cat $lockfile 2>/dev/null` - -but it should contain: -$srcfile - -This indicates that another process is trying to use the same -temporary object file, and libtool could not work around it because -your compiler does not support \`-c' and \`-o' together. If you -repeat this compilation, it may succeed, by chance, but you had better -avoid parallel builds (make -j) in this platform, or get a better -compiler." - - $opt_dry_run || $RM $removelist - exit $EXIT_FAILURE - fi - - # Just move the object if needed, then go on to compile the next one - if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then - func_show_eval '$MV "$output_obj" "$lobj"' \ - 'error=$?; $opt_dry_run || $RM $removelist; exit $error' - fi - - # Allow error messages only from the first compilation. 
- if test "$suppress_opt" = yes; then - suppress_output=' >/dev/null 2>&1' - fi - fi - - # Only build a position-dependent object if we build old libraries. - if test "$build_old_libs" = yes; then - if test "$pic_mode" != yes; then - # Don't build PIC code - command="$base_compile $qsrcfile$pie_flag" - else - command="$base_compile $qsrcfile $pic_flag" - fi - if test "$compiler_c_o" = yes; then - command="$command -o $obj" - fi - - # Suppress compiler output if we already did a PIC compilation. - command="$command$suppress_output" - func_show_eval_locale "$command" \ - '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' - - if test "$need_locks" = warn && - test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then - $ECHO "\ -*** ERROR, $lockfile contains: -`cat $lockfile 2>/dev/null` - -but it should contain: -$srcfile - -This indicates that another process is trying to use the same -temporary object file, and libtool could not work around it because -your compiler does not support \`-c' and \`-o' together. If you -repeat this compilation, it may succeed, by chance, but you had better -avoid parallel builds (make -j) in this platform, or get a better -compiler." - - $opt_dry_run || $RM $removelist - exit $EXIT_FAILURE - fi - - # Just move the object if needed - if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then - func_show_eval '$MV "$output_obj" "$obj"' \ - 'error=$?; $opt_dry_run || $RM $removelist; exit $error' - fi - fi - - $opt_dry_run || { - func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" - - # Unlock the critical section if it was locked - if test "$need_locks" != no; then - removelist=$lockfile - $RM "$lockfile" - fi - } - - exit $EXIT_SUCCESS -} - -$opt_help || { - test "$mode" = compile && func_mode_compile ${1+"$@"} -} - -func_mode_help () -{ - # We need to display help for each of the modes. - case $mode in - "") - # Generic help is extracted from the usage comments - # at the start of this file. 
- func_help - ;; - - clean) - $ECHO \ -"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... - -Remove files from the build directory. - -RM is the name of the program to use to delete files associated with each FILE -(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed -to RM. - -If FILE is a libtool library, object or program, all the files associated -with it are deleted. Otherwise, only FILE itself is deleted using RM." - ;; - - compile) - $ECHO \ -"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE - -Compile a source file into a libtool library object. - -This mode accepts the following additional options: - - -o OUTPUT-FILE set the output file name to OUTPUT-FILE - -no-suppress do not suppress compiler output for multiple passes - -prefer-pic try to building PIC objects only - -prefer-non-pic try to building non-PIC objects only - -shared do not build a \`.o' file suitable for static linking - -static only build a \`.o' file suitable for static linking - -Wc,FLAG pass FLAG directly to the compiler - -COMPILE-COMMAND is a command to be used in creating a \`standard' object file -from the given SOURCEFILE. - -The output file name is determined by removing the directory component from -SOURCEFILE, then substituting the C source code suffix \`.c' with the -library object suffix, \`.lo'." - ;; - - execute) - $ECHO \ -"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... - -Automatically set library path, then run a program. - -This mode accepts the following additional options: - - -dlopen FILE add the directory containing FILE to the library path - -This mode sets the library path environment variable according to \`-dlopen' -flags. - -If any of the ARGS are libtool executable wrappers, then they are translated -into their corresponding uninstalled binary, and any of their required library -directories are added to the library path. - -Then, COMMAND is executed, with ARGS as arguments." 
- ;; - - finish) - $ECHO \ -"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... - -Complete the installation of libtool libraries. - -Each LIBDIR is a directory that contains libtool libraries. - -The commands that this mode executes may require superuser privileges. Use -the \`--dry-run' option if you just want to see what would be executed." - ;; - - install) - $ECHO \ -"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... - -Install executables or libraries. - -INSTALL-COMMAND is the installation command. The first component should be -either the \`install' or \`cp' program. - -The following components of INSTALL-COMMAND are treated specially: - - -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation - -The rest of the components are interpreted as arguments to that command (only -BSD-compatible install options are recognized)." - ;; - - link) - $ECHO \ -"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... - -Link object files or libraries together to form another library, or to -create an executable program. - -LINK-COMMAND is a command using the C compiler that you would use to create -a program from several object files. 
- -The following components of LINK-COMMAND are treated specially: - - -all-static do not do any dynamic linking at all - -avoid-version do not add a version suffix if possible - -bindir BINDIR specify path to binaries directory (for systems where - libraries must be found in the PATH setting at runtime) - -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime - -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols - -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) - -export-symbols SYMFILE - try to export only the symbols listed in SYMFILE - -export-symbols-regex REGEX - try to export only the symbols matching REGEX - -LLIBDIR search LIBDIR for required installed libraries - -lNAME OUTPUT-FILE requires the installed library libNAME - -module build a library that can dlopened - -no-fast-install disable the fast-install mode - -no-install link a not-installable executable - -no-undefined declare that a library does not refer to external symbols - -o OUTPUT-FILE create OUTPUT-FILE from the specified objects - -objectlist FILE Use a list of object files found in FILE to specify objects - -precious-files-regex REGEX - don't remove output files matching REGEX - -release RELEASE specify package release information - -rpath LIBDIR the created library will eventually be installed in LIBDIR - -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries - -shared only do dynamic linking of libtool libraries - -shrext SUFFIX override the standard shared library file extension - -static do not do any dynamic linking of uninstalled libtool libraries - -static-libtool-libs - do not do any dynamic linking of libtool libraries - -version-info CURRENT[:REVISION[:AGE]] - specify library version info [each variable defaults to 0] - -weak LIBNAME declare that the target provides the LIBNAME interface - -Wc,FLAG - -Xcompiler FLAG pass linker-specific FLAG directly to the compiler - -Wl,FLAG - -Xlinker FLAG pass 
linker-specific FLAG directly to the linker - -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) - -All other options (arguments beginning with \`-') are ignored. - -Every other argument is treated as a filename. Files ending in \`.la' are -treated as uninstalled libtool libraries, other files are standard or library -object files. - -If the OUTPUT-FILE ends in \`.la', then a libtool library is created, -only library objects (\`.lo' files) may be specified, and \`-rpath' is -required, except when creating a convenience library. - -If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created -using \`ar' and \`ranlib', or on Windows using \`lib'. - -If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file -is created, otherwise an executable program is created." - ;; - - uninstall) - $ECHO \ -"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... - -Remove libraries from an installation directory. - -RM is the name of the program to use to delete files associated with each FILE -(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed -to RM. - -If FILE is a libtool library, all the files associated with it are deleted. -Otherwise, only FILE itself is deleted using RM." - ;; - - *) - func_fatal_help "invalid operation mode \`$mode'" - ;; - esac - - echo - $ECHO "Try \`$progname --help' for more information about other modes." 
-} - -# Now that we've collected a possible --mode arg, show help if necessary -if $opt_help; then - if test "$opt_help" = :; then - func_mode_help - else - { - func_help noexit - for mode in compile link execute install finish uninstall clean; do - func_mode_help - done - } | sed -n '1p; 2,$s/^Usage:/ or: /p' - { - func_help noexit - for mode in compile link execute install finish uninstall clean; do - echo - func_mode_help - done - } | - sed '1d - /^When reporting/,/^Report/{ - H - d - } - $x - /information about other modes/d - /more detailed .*MODE/d - s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' - fi - exit $? -fi - - -# func_mode_execute arg... -func_mode_execute () -{ - $opt_debug - # The first argument is the command name. - cmd="$nonopt" - test -z "$cmd" && \ - func_fatal_help "you must specify a COMMAND" - - # Handle -dlopen flags immediately. - for file in $execute_dlfiles; do - test -f "$file" \ - || func_fatal_help "\`$file' is not a file" - - dir= - case $file in - *.la) - # Check to see that this really is a libtool archive. - func_lalib_unsafe_p "$file" \ - || func_fatal_help "\`$lib' is not a valid libtool archive" - - # Read the libtool library. - dlname= - library_names= - func_source "$file" - - # Skip this library if it cannot be dlopened. - if test -z "$dlname"; then - # Warn if it was a shared library. - test -n "$library_names" && \ - func_warning "\`$file' was not linked with \`-export-dynamic'" - continue - fi - - func_dirname "$file" "" "." - dir="$func_dirname_result" - - if test -f "$dir/$objdir/$dlname"; then - dir="$dir/$objdir" - else - if test ! -f "$dir/$dlname"; then - func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" - fi - fi - ;; - - *.lo) - # Just add the directory containing the .lo file. - func_dirname "$file" "" "." - dir="$func_dirname_result" - ;; - - *) - func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" - continue - ;; - esac - - # Get the absolute pathname. 
- absdir=`cd "$dir" && pwd` - test -n "$absdir" && dir="$absdir" - - # Now add the directory to shlibpath_var. - if eval test -z \"\$$shlibpath_var\"; then - eval $shlibpath_var=\$dir - else - eval $shlibpath_var=\$dir:\$$shlibpath_var - fi - done - - # This variable tells wrapper scripts just to set shlibpath_var - # rather than running their programs. - libtool_execute_magic="$magic" - - # Check if any of the arguments is a wrapper script. - args= - for file - do - case $file in - -* | *.la | *.lo ) ;; - *) - # Do a test to see if this is really a libtool program. - if func_ltwrapper_script_p "$file"; then - func_source "$file" - # Transform arg to wrapped name. - file="$progdir/$program" - elif func_ltwrapper_executable_p "$file"; then - func_ltwrapper_scriptname "$file" - func_source "$func_ltwrapper_scriptname_result" - # Transform arg to wrapped name. - file="$progdir/$program" - fi - ;; - esac - # Quote arguments (to preserve shell metacharacters). - func_quote_for_eval "$file" - args="$args $func_quote_for_eval_result" - done - - if test "X$opt_dry_run" = Xfalse; then - if test -n "$shlibpath_var"; then - # Export the shlibpath_var. - eval "export $shlibpath_var" - fi - - # Restore saved environment variables - for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES - do - eval "if test \"\${save_$lt_var+set}\" = set; then - $lt_var=\$save_$lt_var; export $lt_var - else - $lt_unset $lt_var - fi" - done - - # Now prepare to actually exec the command. - exec_cmd="\$cmd$args" - else - # Display what would be done. - if test -n "$shlibpath_var"; then - eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" - echo "export $shlibpath_var" - fi - $ECHO "$cmd$args" - exit $EXIT_SUCCESS - fi -} - -test "$mode" = execute && func_mode_execute ${1+"$@"} - - -# func_mode_finish arg... 
-func_mode_finish () -{ - $opt_debug - libdirs="$nonopt" - admincmds= - - if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then - for dir - do - libdirs="$libdirs $dir" - done - - for libdir in $libdirs; do - if test -n "$finish_cmds"; then - # Do each command in the finish commands. - func_execute_cmds "$finish_cmds" 'admincmds="$admincmds -'"$cmd"'"' - fi - if test -n "$finish_eval"; then - # Do the single finish_eval. - eval cmds=\"$finish_eval\" - $opt_dry_run || eval "$cmds" || admincmds="$admincmds - $cmds" - fi - done - fi - - # Exit here if they wanted silent mode. - $opt_silent && exit $EXIT_SUCCESS - - echo "----------------------------------------------------------------------" - echo "Libraries have been installed in:" - for libdir in $libdirs; do - $ECHO " $libdir" - done - echo - echo "If you ever happen to want to link against installed libraries" - echo "in a given directory, LIBDIR, you must either use libtool, and" - echo "specify the full pathname of the library, or use the \`-LLIBDIR'" - echo "flag during linking and do at least one of the following:" - if test -n "$shlibpath_var"; then - echo " - add LIBDIR to the \`$shlibpath_var' environment variable" - echo " during execution" - fi - if test -n "$runpath_var"; then - echo " - add LIBDIR to the \`$runpath_var' environment variable" - echo " during linking" - fi - if test -n "$hardcode_libdir_flag_spec"; then - libdir=LIBDIR - eval "flag=\"$hardcode_libdir_flag_spec\"" - - $ECHO " - use the \`$flag' linker flag" - fi - if test -n "$admincmds"; then - $ECHO " - have your system administrator run these commands:$admincmds" - fi - if test -f /etc/ld.so.conf; then - echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" - fi - echo - - echo "See any operating system documentation about shared libraries for" - case $host in - solaris2.[6789]|solaris2.1[0-9]) - echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" - echo "pages." 
- ;; - *) - echo "more information, such as the ld(1) and ld.so(8) manual pages." - ;; - esac - echo "----------------------------------------------------------------------" - exit $EXIT_SUCCESS -} - -test "$mode" = finish && func_mode_finish ${1+"$@"} - - -# func_mode_install arg... -func_mode_install () -{ - $opt_debug - # There may be an optional sh(1) argument at the beginning of - # install_prog (especially on Windows NT). - if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || - # Allow the use of GNU shtool's install command. - case $nonopt in *shtool*) :;; *) false;; esac; then - # Aesthetically quote it. - func_quote_for_eval "$nonopt" - install_prog="$func_quote_for_eval_result " - arg=$1 - shift - else - install_prog= - arg=$nonopt - fi - - # The real first argument should be the name of the installation program. - # Aesthetically quote it. - func_quote_for_eval "$arg" - install_prog="$install_prog$func_quote_for_eval_result" - install_shared_prog=$install_prog - case " $install_prog " in - *[\\\ /]cp\ *) install_cp=: ;; - *) install_cp=false ;; - esac - - # We need to accept at least all the BSD install flags. - dest= - files= - opts= - prev= - install_type= - isdir=no - stripme= - no_mode=: - for arg - do - arg2= - if test -n "$dest"; then - files="$files $dest" - dest=$arg - continue - fi - - case $arg in - -d) isdir=yes ;; - -f) - if $install_cp; then :; else - prev=$arg - fi - ;; - -g | -m | -o) - prev=$arg - ;; - -s) - stripme=" -s" - continue - ;; - -*) - ;; - *) - # If the previous option needed an argument, then skip it. - if test -n "$prev"; then - if test "x$prev" = x-m && test -n "$install_override_mode"; then - arg2=$install_override_mode - no_mode=false - fi - prev= - else - dest=$arg - continue - fi - ;; - esac - - # Aesthetically quote the argument. 
- func_quote_for_eval "$arg" - install_prog="$install_prog $func_quote_for_eval_result" - if test -n "$arg2"; then - func_quote_for_eval "$arg2" - fi - install_shared_prog="$install_shared_prog $func_quote_for_eval_result" - done - - test -z "$install_prog" && \ - func_fatal_help "you must specify an install program" - - test -n "$prev" && \ - func_fatal_help "the \`$prev' option requires an argument" - - if test -n "$install_override_mode" && $no_mode; then - if $install_cp; then :; else - func_quote_for_eval "$install_override_mode" - install_shared_prog="$install_shared_prog -m $func_quote_for_eval_result" - fi - fi - - if test -z "$files"; then - if test -z "$dest"; then - func_fatal_help "no file or destination specified" - else - func_fatal_help "you must specify a destination" - fi - fi - - # Strip any trailing slash from the destination. - func_stripname '' '/' "$dest" - dest=$func_stripname_result - - # Check to see that the destination is a directory. - test -d "$dest" && isdir=yes - if test "$isdir" = yes; then - destdir="$dest" - destname= - else - func_dirname_and_basename "$dest" "" "." - destdir="$func_dirname_result" - destname="$func_basename_result" - - # Not a directory, so check to see that there is only one file specified. - set dummy $files; shift - test "$#" -gt 1 && \ - func_fatal_help "\`$dest' is not a directory" - fi - case $destdir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - for file in $files; do - case $file in - *.lo) ;; - *) - func_fatal_help "\`$destdir' must be an absolute directory name" - ;; - esac - done - ;; - esac - - # This variable tells wrapper scripts just to set variables rather - # than running their programs. - libtool_install_magic="$magic" - - staticlibs= - future_libdirs= - current_libdirs= - for file in $files; do - - # Do each installation. - case $file in - *.$libext) - # Do the static libraries later. - staticlibs="$staticlibs $file" - ;; - - *.la) - # Check to see that this really is a libtool archive. 
- func_lalib_unsafe_p "$file" \ - || func_fatal_help "\`$file' is not a valid libtool archive" - - library_names= - old_library= - relink_command= - func_source "$file" - - # Add the libdir to current_libdirs if it is the destination. - if test "X$destdir" = "X$libdir"; then - case "$current_libdirs " in - *" $libdir "*) ;; - *) current_libdirs="$current_libdirs $libdir" ;; - esac - else - # Note the libdir as a future libdir. - case "$future_libdirs " in - *" $libdir "*) ;; - *) future_libdirs="$future_libdirs $libdir" ;; - esac - fi - - func_dirname "$file" "/" "" - dir="$func_dirname_result" - dir="$dir$objdir" - - if test -n "$relink_command"; then - # Determine the prefix the user has applied to our future dir. - inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` - - # Don't allow the user to place us outside of our expected - # location b/c this prevents finding dependent libraries that - # are installed to the same prefix. - # At present, this check doesn't affect windows .dll's that - # are installed into $libdir/../bin (currently, that works fine) - # but it's something to keep an eye on. - test "$inst_prefix_dir" = "$destdir" && \ - func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" - - if test -n "$inst_prefix_dir"; then - # Stick the inst_prefix_dir data into the link command. - relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` - else - relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` - fi - - func_warning "relinking \`$file'" - func_show_eval "$relink_command" \ - 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' - fi - - # See the names of the shared library. - set dummy $library_names; shift - if test -n "$1"; then - realname="$1" - shift - - srcname="$realname" - test -n "$relink_command" && srcname="$realname"T - - # Install the shared library and build the symlinks. 
- func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ - 'exit $?' - tstripme="$stripme" - case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - case $realname in - *.dll.a) - tstripme="" - ;; - esac - ;; - esac - if test -n "$tstripme" && test -n "$striplib"; then - func_show_eval "$striplib $destdir/$realname" 'exit $?' - fi - - if test "$#" -gt 0; then - # Delete the old symlinks, and create new ones. - # Try `ln -sf' first, because the `ln' binary might depend on - # the symlink we replace! Solaris /bin/ln does not understand -f, - # so we also need to try rm && ln -s. - for linkname - do - test "$linkname" != "$realname" \ - && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" - done - fi - - # Do each command in the postinstall commands. - lib="$destdir/$realname" - func_execute_cmds "$postinstall_cmds" 'exit $?' - fi - - # Install the pseudo-library for information purposes. - func_basename "$file" - name="$func_basename_result" - instname="$dir/$name"i - func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' - - # Maybe install the static library, too. - test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" - ;; - - *.lo) - # Install (i.e. copy) a libtool object. - - # Figure out destination file name, if it wasn't already specified. - if test -n "$destname"; then - destfile="$destdir/$destname" - else - func_basename "$file" - destfile="$func_basename_result" - destfile="$destdir/$destfile" - fi - - # Deduce the name of the destination old-style object file. - case $destfile in - *.lo) - func_lo2o "$destfile" - staticdest=$func_lo2o_result - ;; - *.$objext) - staticdest="$destfile" - destfile= - ;; - *) - func_fatal_help "cannot copy a libtool object to \`$destfile'" - ;; - esac - - # Install the libtool object if requested. - test -n "$destfile" && \ - func_show_eval "$install_prog $file $destfile" 'exit $?' 
- - # Install the old object if enabled. - if test "$build_old_libs" = yes; then - # Deduce the name of the old-style object file. - func_lo2o "$file" - staticobj=$func_lo2o_result - func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' - fi - exit $EXIT_SUCCESS - ;; - - *) - # Figure out destination file name, if it wasn't already specified. - if test -n "$destname"; then - destfile="$destdir/$destname" - else - func_basename "$file" - destfile="$func_basename_result" - destfile="$destdir/$destfile" - fi - - # If the file is missing, and there is a .exe on the end, strip it - # because it is most likely a libtool script we actually want to - # install - stripped_ext="" - case $file in - *.exe) - if test ! -f "$file"; then - func_stripname '' '.exe' "$file" - file=$func_stripname_result - stripped_ext=".exe" - fi - ;; - esac - - # Do a test to see if this is really a libtool program. - case $host in - *cygwin* | *mingw*) - if func_ltwrapper_executable_p "$file"; then - func_ltwrapper_scriptname "$file" - wrapper=$func_ltwrapper_scriptname_result - else - func_stripname '' '.exe' "$file" - wrapper=$func_stripname_result - fi - ;; - *) - wrapper=$file - ;; - esac - if func_ltwrapper_script_p "$wrapper"; then - notinst_deplibs= - relink_command= - - func_source "$wrapper" - - # Check the variables that should have been set. - test -z "$generated_by_libtool_version" && \ - func_fatal_error "invalid libtool wrapper script \`$wrapper'" - - finalize=yes - for lib in $notinst_deplibs; do - # Check to see that each library is installed. - libdir= - if test -f "$lib"; then - func_source "$lib" - fi - libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test - if test -n "$libdir" && test ! 
-f "$libfile"; then - func_warning "\`$lib' has not been installed in \`$libdir'" - finalize=no - fi - done - - relink_command= - func_source "$wrapper" - - outputname= - if test "$fast_install" = no && test -n "$relink_command"; then - $opt_dry_run || { - if test "$finalize" = yes; then - tmpdir=`func_mktempdir` - func_basename "$file$stripped_ext" - file="$func_basename_result" - outputname="$tmpdir/$file" - # Replace the output file specification. - relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` - - $opt_silent || { - func_quote_for_expand "$relink_command" - eval "func_echo $func_quote_for_expand_result" - } - if eval "$relink_command"; then : - else - func_error "error: relink \`$file' with the above command before installing it" - $opt_dry_run || ${RM}r "$tmpdir" - continue - fi - file="$outputname" - else - func_warning "cannot relink \`$file'" - fi - } - else - # Install the binary that we compiled earlier. - file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` - fi - fi - - # remove .exe since cygwin /usr/bin/install will append another - # one anyway - case $install_prog,$host in - */usr/bin/install*,*cygwin*) - case $file:$destfile in - *.exe:*.exe) - # this is ok - ;; - *.exe:*) - destfile=$destfile.exe - ;; - *:*.exe) - func_stripname '' '.exe' "$destfile" - destfile=$func_stripname_result - ;; - esac - ;; - esac - func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' - $opt_dry_run || if test -n "$outputname"; then - ${RM}r "$tmpdir" - fi - ;; - esac - done - - for file in $staticlibs; do - func_basename "$file" - name="$func_basename_result" - - # Set up the ranlib parameters. - oldlib="$destdir/$name" - - func_show_eval "$install_prog \$file \$oldlib" 'exit $?' - - if test -n "$stripme" && test -n "$old_striplib"; then - func_show_eval "$old_striplib $oldlib" 'exit $?' - fi - - # Do each command in the postinstall commands. - func_execute_cmds "$old_postinstall_cmds" 'exit $?' 
- done - - test -n "$future_libdirs" && \ - func_warning "remember to run \`$progname --finish$future_libdirs'" - - if test -n "$current_libdirs" && $opt_finish; then - # Maybe just do a dry run. - $opt_dry_run && current_libdirs=" -n$current_libdirs" - exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' - else - exit $EXIT_SUCCESS - fi -} - -test "$mode" = install && func_mode_install ${1+"$@"} - - -# func_generate_dlsyms outputname originator pic_p -# Extract symbols from dlprefiles and create ${outputname}S.o with -# a dlpreopen symbol table. -func_generate_dlsyms () -{ - $opt_debug - my_outputname="$1" - my_originator="$2" - my_pic_p="${3-no}" - my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` - my_dlsyms= - - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - if test -n "$NM" && test -n "$global_symbol_pipe"; then - my_dlsyms="${my_outputname}S.c" - else - func_error "not configured to extract global symbols from dlpreopened files" - fi - fi - - if test -n "$my_dlsyms"; then - case $my_dlsyms in - "") ;; - *.c) - # Discover the nlist of each of the dlfiles. - nlist="$output_objdir/${my_outputname}.nm" - - func_show_eval "$RM $nlist ${nlist}S ${nlist}T" - - # Parse the name list into a source file. - func_verbose "creating $output_objdir/$my_dlsyms" - - $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ -/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ -/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ - -#ifdef __cplusplus -extern \"C\" { -#endif - -#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) -#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" -#endif - -/* External symbol declarations for the compiler. */\ -" - - if test "$dlself" = yes; then - func_verbose "generating symbol list for \`$output'" - - $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" - - # Add our own program objects to the symbol list. 
- progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` - for progfile in $progfiles; do - func_verbose "extracting global C symbols from \`$progfile'" - $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" - done - - if test -n "$exclude_expsyms"; then - $opt_dry_run || { - $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T - $MV "$nlist"T "$nlist" - } - fi - - if test -n "$export_symbols_regex"; then - $opt_dry_run || { - $EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T - $MV "$nlist"T "$nlist" - } - fi - - # Prepare the list of exported symbols - if test -z "$export_symbols"; then - export_symbols="$output_objdir/$outputname.exp" - $opt_dry_run || { - $RM $export_symbols - ${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' < "$nlist" > "$export_symbols" - case $host in - *cygwin* | *mingw* | *cegcc* ) - echo EXPORTS > "$output_objdir/$outputname.def" - cat "$export_symbols" >> "$output_objdir/$outputname.def" - ;; - esac - } - else - $opt_dry_run || { - ${SED} -e 's/\([].[*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/' < "$export_symbols" > "$output_objdir/$outputname.exp" - $GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T - $MV "$nlist"T "$nlist" - case $host in - *cygwin* | *mingw* | *cegcc* ) - echo EXPORTS > "$output_objdir/$outputname.def" - cat "$nlist" >> "$output_objdir/$outputname.def" - ;; - esac - } - fi - fi - - for dlprefile in $dlprefiles; do - func_verbose "extracting global C symbols from \`$dlprefile'" - func_basename "$dlprefile" - name="$func_basename_result" - $opt_dry_run || { - $ECHO ": $name " >> "$nlist" - eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" - } - done - - $opt_dry_run || { - # Make sure we have at least an empty file. - test -f "$nlist" || : > "$nlist" - - if test -n "$exclude_expsyms"; then - $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T - $MV "$nlist"T "$nlist" - fi - - # Try sorting and uniquifying the output. 
- if $GREP -v "^: " < "$nlist" | - if sort -k 3 /dev/null 2>&1; then - sort -k 3 - else - sort +2 - fi | - uniq > "$nlist"S; then - : - else - $GREP -v "^: " < "$nlist" > "$nlist"S - fi - - if test -f "$nlist"S; then - eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' - else - echo '/* NONE */' >> "$output_objdir/$my_dlsyms" - fi - - echo >> "$output_objdir/$my_dlsyms" "\ - -/* The mapping between symbol names and symbols. */ -typedef struct { - const char *name; - void *address; -} lt_dlsymlist; -" - case $host in - *cygwin* | *mingw* | *cegcc* ) - echo >> "$output_objdir/$my_dlsyms" "\ -/* DATA imports from DLLs on WIN32 con't be const, because - runtime relocations are performed -- see ld's documentation - on pseudo-relocs. */" - lt_dlsym_const= ;; - *osf5*) - echo >> "$output_objdir/$my_dlsyms" "\ -/* This system does not cope well with relocations in const data */" - lt_dlsym_const= ;; - *) - lt_dlsym_const=const ;; - esac - - echo >> "$output_objdir/$my_dlsyms" "\ -extern $lt_dlsym_const lt_dlsymlist -lt_${my_prefix}_LTX_preloaded_symbols[]; -$lt_dlsym_const lt_dlsymlist -lt_${my_prefix}_LTX_preloaded_symbols[] = -{\ - { \"$my_originator\", (void *) 0 }," - - case $need_lib_prefix in - no) - eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" - ;; - *) - eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" - ;; - esac - echo >> "$output_objdir/$my_dlsyms" "\ - {0, (void *) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt_${my_prefix}_LTX_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif\ -" - } # !$opt_dry_run - - pic_flag_for_symtable= - case "$compile_command " in - *" -static "*) ;; - *) - case $host in - # compiling the symbol table file with pic_flag works around - # a FreeBSD bug that causes programs to crash when -lm is - # linked before any other PIC 
object. But we must not use - # pic_flag when linking with -static. The problem exists in - # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. - *-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) - pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; - *-*-hpux*) - pic_flag_for_symtable=" $pic_flag" ;; - *) - if test "X$my_pic_p" != Xno; then - pic_flag_for_symtable=" $pic_flag" - fi - ;; - esac - ;; - esac - symtab_cflags= - for arg in $LTCFLAGS; do - case $arg in - -pie | -fpie | -fPIE) ;; - *) symtab_cflags="$symtab_cflags $arg" ;; - esac - done - - # Now compile the dynamic symbol file. - func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' - - # Clean up the generated files. - func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' - - # Transform the symbol file into the correct name. - symfileobj="$output_objdir/${my_outputname}S.$objext" - case $host in - *cygwin* | *mingw* | *cegcc* ) - if test -f "$output_objdir/$my_outputname.def"; then - compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` - finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` - else - compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` - finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` - fi - ;; - *) - compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` - finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` - ;; - esac - ;; - *) - func_fatal_error "unknown suffix for \`$my_dlsyms'" - ;; - esac - else - # We keep going just in case the user didn't refer to - # lt_preloaded_symbols. The linker will fail if global_symbol_pipe - # really was required. - - # Nullify the symbol file. 
- compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` - finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` - fi -} - -# func_win32_libid arg -# return the library type of file 'arg' -# -# Need a lot of goo to handle *both* DLLs and import libs -# Has to be a shell function in order to 'eat' the argument -# that is supplied when $file_magic_command is called. -# Despite the name, also deal with 64 bit binaries. -func_win32_libid () -{ - $opt_debug - win32_libid_type="unknown" - win32_fileres=`file -L $1 2>/dev/null` - case $win32_fileres in - *ar\ archive\ import\ library*) # definitely import - win32_libid_type="x86 archive import" - ;; - *ar\ archive*) # could be an import, or static - if $OBJDUMP -f "$1" | $SED -e '10q' 2>/dev/null | - $EGREP 'file format (pe-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then - win32_nmres=`$NM -f posix -A "$1" | - $SED -n -e ' - 1,100{ - / I /{ - s,.*,import, - p - q - } - }'` - case $win32_nmres in - import*) win32_libid_type="x86 archive import";; - *) win32_libid_type="x86 archive static";; - esac - fi - ;; - *DLL*) - win32_libid_type="x86 DLL" - ;; - *executable*) # but shell scripts are "executable" too... 
- case $win32_fileres in - *MS\ Windows\ PE\ Intel*) - win32_libid_type="x86 DLL" - ;; - esac - ;; - esac - $ECHO "$win32_libid_type" -} - - - -# func_extract_an_archive dir oldlib -func_extract_an_archive () -{ - $opt_debug - f_ex_an_ar_dir="$1"; shift - f_ex_an_ar_oldlib="$1" - if test "$lock_old_archive_extraction" = yes; then - lockfile=$f_ex_an_ar_oldlib.lock - until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do - func_echo "Waiting for $lockfile to be removed" - sleep 2 - done - fi - func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ - 'stat=$?; rm -f "$lockfile"; exit $stat' - if test "$lock_old_archive_extraction" = yes; then - $opt_dry_run || rm -f "$lockfile" - fi - if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then - : - else - func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" - fi -} - - -# func_extract_archives gentop oldlib ... -func_extract_archives () -{ - $opt_debug - my_gentop="$1"; shift - my_oldlibs=${1+"$@"} - my_oldobjs="" - my_xlib="" - my_xabs="" - my_xdir="" - - for my_xlib in $my_oldlibs; do - # Extract the objects. - case $my_xlib in - [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; - *) my_xabs=`pwd`"/$my_xlib" ;; - esac - func_basename "$my_xlib" - my_xlib="$func_basename_result" - my_xlib_u=$my_xlib - while :; do - case " $extracted_archives " in - *" $my_xlib_u "*) - func_arith $extracted_serial + 1 - extracted_serial=$func_arith_result - my_xlib_u=lt$extracted_serial-$my_xlib ;; - *) break ;; - esac - done - extracted_archives="$extracted_archives $my_xlib_u" - my_xdir="$my_gentop/$my_xlib_u" - - func_mkdir_p "$my_xdir" - - case $host in - *-darwin*) - func_verbose "Extracting $my_xabs" - # Do not bother doing anything if just a dry run - $opt_dry_run || { - darwin_orig_dir=`pwd` - cd $my_xdir || exit $? 
- darwin_archive=$my_xabs - darwin_curdir=`pwd` - darwin_base_archive=`basename "$darwin_archive"` - darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` - if test -n "$darwin_arches"; then - darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` - darwin_arch= - func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" - for darwin_arch in $darwin_arches ; do - func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" - $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" - cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" - func_extract_an_archive "`pwd`" "${darwin_base_archive}" - cd "$darwin_curdir" - $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" - done # $darwin_arches - ## Okay now we've a bunch of thin objects, gotta fatten them up :) - darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` - darwin_file= - darwin_files= - for darwin_file in $darwin_filelist; do - darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` - $LIPO -create -output "$darwin_file" $darwin_files - done # $darwin_filelist - $RM -rf unfat-$$ - cd "$darwin_orig_dir" - else - cd $darwin_orig_dir - func_extract_an_archive "$my_xdir" "$my_xabs" - fi # $darwin_arches - } # !$opt_dry_run - ;; - *) - func_extract_an_archive "$my_xdir" "$my_xabs" - ;; - esac - my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` - done - - func_extract_archives_result="$my_oldobjs" -} - - -# func_emit_wrapper [arg=no] -# -# Emit a libtool wrapper script on stdout. -# Don't directly open a file because we may want to -# incorporate the script contents within a cygwin/mingw -# wrapper executable. Must ONLY be called from within -# func_mode_link because it depends on a number of variables -# set therein. 
-# -# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR -# variable will take. If 'yes', then the emitted script -# will assume that the directory in which it is stored is -# the $objdir directory. This is a cygwin/mingw-specific -# behavior. -func_emit_wrapper () -{ - func_emit_wrapper_arg1=${1-no} - - $ECHO "\ -#! $SHELL - -# $output - temporary wrapper script for $objdir/$outputname -# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION -# -# The $output program cannot be directly executed until all the libtool -# libraries that it depends on are installed. -# -# This wrapper script should never be moved out of the build directory. -# If it is, it will not operate correctly. - -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. -sed_quote_subst='$sed_quote_subst' - -# Be Bourne compatible -if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then - emulate sh - NULLCMD=: - # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac -fi -BIN_SH=xpg4; export BIN_SH # for Tru64 -DUALCASE=1; export DUALCASE # for MKS sh - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -relink_command=\"$relink_command\" - -# This environment variable determines our operation mode. -if test \"\$libtool_install_magic\" = \"$magic\"; then - # install mode needs the following variables: - generated_by_libtool_version='$macro_version' - notinst_deplibs='$notinst_deplibs' -else - # When we are sourced in execute mode, \$file and \$ECHO are already set. 
- if test \"\$libtool_execute_magic\" != \"$magic\"; then - file=\"\$0\"" - - qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` - $ECHO "\ - -# A function that is used when there is no print builtin or printf. -func_fallback_echo () -{ - eval 'cat <<_LTECHO_EOF -\$1 -_LTECHO_EOF' -} - ECHO=\"$qECHO\" - fi\ - - # Find the directory that this script lives in. - thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` - test \"x\$thisdir\" = \"x\$file\" && thisdir=. - - # Follow symbolic links until we get to the real thisdir. - file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` - while test -n \"\$file\"; do - destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` - - # If there was a directory component, then change thisdir. - if test \"x\$destdir\" != \"x\$file\"; then - case \"\$destdir\" in - [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; - *) thisdir=\"\$thisdir/\$destdir\" ;; - esac - fi - - file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` - file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` - done - - # Usually 'no', except on cygwin/mingw when embedded into - # the cwrapper. - WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 - if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then - # special case for '.' - if test \"\$thisdir\" = \".\"; then - thisdir=\`pwd\` - fi - # remove .libs from thisdir - case \"\$thisdir\" in - *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; - $objdir ) thisdir=. ;; - esac - fi - - # Try to get the absolute directory name. - absdir=\`cd \"\$thisdir\" && pwd\` - test -n \"\$absdir\" && thisdir=\"\$absdir\" -" - - if test "$fast_install" = yes; then - $ECHO "\ - program=lt-'$outputname'$exeext - progdir=\"\$thisdir/$objdir\" - - if test ! -f \"\$progdir/\$program\" || - { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ - test \"X\$file\" != \"X\$progdir/\$program\"; }; then - - file=\"\$\$-\$program\" - - if test ! 
-d \"\$progdir\"; then - $MKDIR \"\$progdir\" - else - $RM \"\$progdir/\$file\" - fi" - - $ECHO "\ - - # relink executable if necessary - if test -n \"\$relink_command\"; then - if relink_command_output=\`eval \"\$relink_command\" 2>&1\`; then : - else - $ECHO \"\$relink_command_output\" >&2 - $RM \"\$progdir/\$file\" - exit 1 - fi - fi - - $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || - { $RM \"\$progdir/\$program\"; - $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } - $RM \"\$progdir/\$file\" - fi" - else - $ECHO "\ - program='$outputname' - progdir=\"\$thisdir/$objdir\" -" - fi - - $ECHO "\ - - if test -f \"\$progdir/\$program\"; then" - - # Export our shlibpath_var if we have one. - if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then - $ECHO "\ - # Add our own library path to $shlibpath_var - $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" - - # Some systems cannot cope with colon-terminated $shlibpath_var - # The second colon is a workaround for a bug in BeOS R4 sed - $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` - - export $shlibpath_var -" - fi - - # fixup the dll searchpath if we need to. - if test -n "$dllsearchpath"; then - $ECHO "\ - # Add the dll search path components to the executable PATH - PATH=$dllsearchpath:\$PATH -" - fi - - $ECHO "\ - if test \"\$libtool_execute_magic\" != \"$magic\"; then - # Run the actual program with our arguments. -" - case $host in - # Backslashes separate directories on plain windows - *-*-mingw | *-*-os2* | *-cegcc*) - $ECHO "\ - exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} -" - ;; - - *) - $ECHO "\ - exec \"\$progdir/\$program\" \${1+\"\$@\"} -" - ;; - esac - $ECHO "\ - \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 - exit 1 - fi - else - # The program doesn't exist. 
- \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 - \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 - \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 - exit 1 - fi -fi\ -" -} - - -# func_to_host_path arg -# -# Convert paths to host format when used with build tools. -# Intended for use with "native" mingw (where libtool itself -# is running under the msys shell), or in the following cross- -# build environments: -# $build $host -# mingw (msys) mingw [e.g. native] -# cygwin mingw -# *nix + wine mingw -# where wine is equipped with the `winepath' executable. -# In the native mingw case, the (msys) shell automatically -# converts paths for any non-msys applications it launches, -# but that facility isn't available from inside the cwrapper. -# Similar accommodations are necessary for $host mingw and -# $build cygwin. Calling this function does no harm for other -# $host/$build combinations not listed above. -# -# ARG is the path (on $build) that should be converted to -# the proper representation for $host. The result is stored -# in $func_to_host_path_result. -func_to_host_path () -{ - func_to_host_path_result="$1" - if test -n "$1"; then - case $host in - *mingw* ) - lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' - case $build in - *mingw* ) # actually, msys - # awkward: cmd appends spaces to result - func_to_host_path_result=`( cmd //c echo "$1" ) 2>/dev/null | - $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` - ;; - *cygwin* ) - func_to_host_path_result=`cygpath -w "$1" | - $SED -e "$lt_sed_naive_backslashify"` - ;; - * ) - # Unfortunately, winepath does not exit with a non-zero - # error code, so we are forced to check the contents of - # stdout. On the other hand, if the command is not - # found, the shell will set an exit code of 127 and print - # *an error message* to stdout. 
So we must check for both - # error code of zero AND non-empty stdout, which explains - # the odd construction: - func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` - if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then - func_to_host_path_result=`$ECHO "$func_to_host_path_tmp1" | - $SED -e "$lt_sed_naive_backslashify"` - else - # Allow warning below. - func_to_host_path_result= - fi - ;; - esac - if test -z "$func_to_host_path_result" ; then - func_error "Could not determine host path corresponding to" - func_error " \`$1'" - func_error "Continuing, but uninstalled executables may not work." - # Fallback: - func_to_host_path_result="$1" - fi - ;; - esac - fi -} -# end: func_to_host_path - -# func_to_host_pathlist arg -# -# Convert pathlists to host format when used with build tools. -# See func_to_host_path(), above. This function supports the -# following $build/$host combinations (but does no harm for -# combinations not listed here): -# $build $host -# mingw (msys) mingw [e.g. native] -# cygwin mingw -# *nix + wine mingw -# -# Path separators are also converted from $build format to -# $host format. If ARG begins or ends with a path separator -# character, it is preserved (but converted to $host format) -# on output. -# -# ARG is a pathlist (on $build) that should be converted to -# the proper representation on $host. The result is stored -# in $func_to_host_pathlist_result. -func_to_host_pathlist () -{ - func_to_host_pathlist_result="$1" - if test -n "$1"; then - case $host in - *mingw* ) - lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' - # Remove leading and trailing path separator characters from - # ARG. msys behavior is inconsistent here, cygpath turns them - # into '.;' and ';.', and winepath ignores them completely. - func_stripname : : "$1" - func_to_host_pathlist_tmp1=$func_stripname_result - case $build in - *mingw* ) # Actually, msys. - # Awkward: cmd appends spaces to result. 
- func_to_host_pathlist_result=` - ( cmd //c echo "$func_to_host_pathlist_tmp1" ) 2>/dev/null | - $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` - ;; - *cygwin* ) - func_to_host_pathlist_result=`cygpath -w -p "$func_to_host_pathlist_tmp1" | - $SED -e "$lt_sed_naive_backslashify"` - ;; - * ) - # unfortunately, winepath doesn't convert pathlists - func_to_host_pathlist_result="" - func_to_host_pathlist_oldIFS=$IFS - IFS=: - for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do - IFS=$func_to_host_pathlist_oldIFS - if test -n "$func_to_host_pathlist_f" ; then - func_to_host_path "$func_to_host_pathlist_f" - if test -n "$func_to_host_path_result" ; then - if test -z "$func_to_host_pathlist_result" ; then - func_to_host_pathlist_result="$func_to_host_path_result" - else - func_append func_to_host_pathlist_result ";$func_to_host_path_result" - fi - fi - fi - done - IFS=$func_to_host_pathlist_oldIFS - ;; - esac - if test -z "$func_to_host_pathlist_result"; then - func_error "Could not determine the host path(s) corresponding to" - func_error " \`$1'" - func_error "Continuing, but uninstalled executables may not work." - # Fallback. This may break if $1 contains DOS-style drive - # specifications. The fix is not to complicate the expression - # below, but for the user to provide a working wine installation - # with winepath so that path translation in the cross-to-mingw - # case works properly. 
- lt_replace_pathsep_nix_to_dos="s|:|;|g" - func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ - $SED -e "$lt_replace_pathsep_nix_to_dos"` - fi - # Now, add the leading and trailing path separators back - case "$1" in - :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" - ;; - esac - case "$1" in - *: ) func_append func_to_host_pathlist_result ";" - ;; - esac - ;; - esac - fi -} -# end: func_to_host_pathlist - -# func_emit_cwrapperexe_src -# emit the source code for a wrapper executable on stdout -# Must ONLY be called from within func_mode_link because -# it depends on a number of variable set therein. -func_emit_cwrapperexe_src () -{ - cat < -#include -#ifdef _MSC_VER -# include -# include -# include -#else -# include -# include -# ifdef __CYGWIN__ -# include -# endif -#endif -#include -#include -#include -#include -#include -#include -#include -#include - -/* declarations of non-ANSI functions */ -#if defined(__MINGW32__) -# ifdef __STRICT_ANSI__ -int _putenv (const char *); -# endif -#elif defined(__CYGWIN__) -# ifdef __STRICT_ANSI__ -char *realpath (const char *, char *); -int putenv (char *); -int setenv (const char *, const char *, int); -# endif -/* #elif defined (other platforms) ... */ -#endif - -/* portability defines, excluding path handling macros */ -#if defined(_MSC_VER) -# define setmode _setmode -# define stat _stat -# define chmod _chmod -# define getcwd _getcwd -# define putenv _putenv -# define S_IXUSR _S_IEXEC -# ifndef _INTPTR_T_DEFINED -# define _INTPTR_T_DEFINED -# define intptr_t int -# endif -#elif defined(__MINGW32__) -# define setmode _setmode -# define stat _stat -# define chmod _chmod -# define getcwd _getcwd -# define putenv _putenv -#elif defined(__CYGWIN__) -# define HAVE_SETENV -# define FOPEN_WB "wb" -/* #elif defined (other platforms) ... 
*/ -#endif - -#if defined(PATH_MAX) -# define LT_PATHMAX PATH_MAX -#elif defined(MAXPATHLEN) -# define LT_PATHMAX MAXPATHLEN -#else -# define LT_PATHMAX 1024 -#endif - -#ifndef S_IXOTH -# define S_IXOTH 0 -#endif -#ifndef S_IXGRP -# define S_IXGRP 0 -#endif - -/* path handling portability macros */ -#ifndef DIR_SEPARATOR -# define DIR_SEPARATOR '/' -# define PATH_SEPARATOR ':' -#endif - -#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ - defined (__OS2__) -# define HAVE_DOS_BASED_FILE_SYSTEM -# define FOPEN_WB "wb" -# ifndef DIR_SEPARATOR_2 -# define DIR_SEPARATOR_2 '\\' -# endif -# ifndef PATH_SEPARATOR_2 -# define PATH_SEPARATOR_2 ';' -# endif -#endif - -#ifndef DIR_SEPARATOR_2 -# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) -#else /* DIR_SEPARATOR_2 */ -# define IS_DIR_SEPARATOR(ch) \ - (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) -#endif /* DIR_SEPARATOR_2 */ - -#ifndef PATH_SEPARATOR_2 -# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) -#else /* PATH_SEPARATOR_2 */ -# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) -#endif /* PATH_SEPARATOR_2 */ - -#ifndef FOPEN_WB -# define FOPEN_WB "w" -#endif -#ifndef _O_BINARY -# define _O_BINARY 0 -#endif - -#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) -#define XFREE(stale) do { \ - if (stale) { free ((void *) stale); stale = 0; } \ -} while (0) - -#undef LTWRAPPER_DEBUGPRINTF -#if defined LT_DEBUGWRAPPER -# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args -static void -ltwrapper_debugprintf (const char *fmt, ...) 
-{ - va_list args; - va_start (args, fmt); - (void) vfprintf (stderr, fmt, args); - va_end (args); -} -#else -# define LTWRAPPER_DEBUGPRINTF(args) -#endif - -const char *program_name = NULL; - -void *xmalloc (size_t num); -char *xstrdup (const char *string); -const char *base_name (const char *name); -char *find_executable (const char *wrapper); -char *chase_symlinks (const char *pathspec); -int make_executable (const char *path); -int check_executable (const char *path); -char *strendzap (char *str, const char *pat); -void lt_fatal (const char *message, ...); -void lt_setenv (const char *name, const char *value); -char *lt_extend_str (const char *orig_value, const char *add, int to_end); -void lt_update_exe_path (const char *name, const char *value); -void lt_update_lib_path (const char *name, const char *value); -char **prepare_spawn (char **argv); -void lt_dump_script (FILE *f); -EOF - - cat <"))); - for (i = 0; i < newargc; i++) - { - LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : ""))); - } - -EOF - - case $host_os in - mingw*) - cat <<"EOF" - /* execv doesn't actually work on mingw as expected on unix */ - newargz = prepare_spawn (newargz); - rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz); - if (rval == -1) - { - /* failed to start process */ - LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); - return 127; - } - return rval; -EOF - ;; - *) - cat <<"EOF" - execv (lt_argv_zero, newargz); - return rval; /* =127, but avoids unused variable warning */ -EOF - ;; - esac - - cat <<"EOF" -} - -void * -xmalloc (size_t num) -{ - void *p = (void *) malloc (num); - if (!p) - lt_fatal ("Memory exhausted"); - - return p; -} - -char * -xstrdup (const char *string) -{ - return string ? 
strcpy ((char *) xmalloc (strlen (string) + 1), - string) : NULL; -} - -const char * -base_name (const char *name) -{ - const char *base; - -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - /* Skip over the disk name in MSDOS pathnames. */ - if (isalpha ((unsigned char) name[0]) && name[1] == ':') - name += 2; -#endif - - for (base = name; *name; name++) - if (IS_DIR_SEPARATOR (*name)) - base = name + 1; - return base; -} - -int -check_executable (const char *path) -{ - struct stat st; - - LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", - path ? (*path ? path : "EMPTY!") : "NULL!")); - if ((!path) || (!*path)) - return 0; - - if ((stat (path, &st) >= 0) - && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) - return 1; - else - return 0; -} - -int -make_executable (const char *path) -{ - int rval = 0; - struct stat st; - - LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", - path ? (*path ? path : "EMPTY!") : "NULL!")); - if ((!path) || (!*path)) - return 0; - - if (stat (path, &st) >= 0) - { - rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); - } - return rval; -} - -/* Searches for the full path of the wrapper. Returns - newly allocated full path name if found, NULL otherwise - Does not chase symlinks, even on platforms that support them. -*/ -char * -find_executable (const char *wrapper) -{ - int has_slash = 0; - const char *p; - const char *p_next; - /* static buffer for getcwd */ - char tmp[LT_PATHMAX + 1]; - int tmp_len; - char *concat_name; - - LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", - wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!")); - - if ((wrapper == NULL) || (*wrapper == '\0')) - return NULL; - - /* Absolute path? 
*/ -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') - { - concat_name = xstrdup (wrapper); - if (check_executable (concat_name)) - return concat_name; - XFREE (concat_name); - } - else - { -#endif - if (IS_DIR_SEPARATOR (wrapper[0])) - { - concat_name = xstrdup (wrapper); - if (check_executable (concat_name)) - return concat_name; - XFREE (concat_name); - } -#if defined (HAVE_DOS_BASED_FILE_SYSTEM) - } -#endif - - for (p = wrapper; *p; p++) - if (*p == '/') - { - has_slash = 1; - break; - } - if (!has_slash) - { - /* no slashes; search PATH */ - const char *path = getenv ("PATH"); - if (path != NULL) - { - for (p = path; *p; p = p_next) - { - const char *q; - size_t p_len; - for (q = p; *q; q++) - if (IS_PATH_SEPARATOR (*q)) - break; - p_len = q - p; - p_next = (*q == '\0' ? q : q + 1); - if (p_len == 0) - { - /* empty path: current directory */ - if (getcwd (tmp, LT_PATHMAX) == NULL) - lt_fatal ("getcwd failed"); - tmp_len = strlen (tmp); - concat_name = - XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); - memcpy (concat_name, tmp, tmp_len); - concat_name[tmp_len] = '/'; - strcpy (concat_name + tmp_len + 1, wrapper); - } - else - { - concat_name = - XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); - memcpy (concat_name, p, p_len); - concat_name[p_len] = '/'; - strcpy (concat_name + p_len + 1, wrapper); - } - if (check_executable (concat_name)) - return concat_name; - XFREE (concat_name); - } - } - /* not found in PATH; assume curdir */ - } - /* Relative path | not found in path: prepend cwd */ - if (getcwd (tmp, LT_PATHMAX) == NULL) - lt_fatal ("getcwd failed"); - tmp_len = strlen (tmp); - concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); - memcpy (concat_name, tmp, tmp_len); - concat_name[tmp_len] = '/'; - strcpy (concat_name + tmp_len + 1, wrapper); - - if (check_executable (concat_name)) - return concat_name; - XFREE (concat_name); - return NULL; -} - -char * -chase_symlinks (const 
char *pathspec) -{ -#ifndef S_ISLNK - return xstrdup (pathspec); -#else - char buf[LT_PATHMAX]; - struct stat s; - char *tmp_pathspec = xstrdup (pathspec); - char *p; - int has_symlinks = 0; - while (strlen (tmp_pathspec) && !has_symlinks) - { - LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", - tmp_pathspec)); - if (lstat (tmp_pathspec, &s) == 0) - { - if (S_ISLNK (s.st_mode) != 0) - { - has_symlinks = 1; - break; - } - - /* search backwards for last DIR_SEPARATOR */ - p = tmp_pathspec + strlen (tmp_pathspec) - 1; - while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) - p--; - if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) - { - /* no more DIR_SEPARATORS left */ - break; - } - *p = '\0'; - } - else - { - char *errstr = strerror (errno); - lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); - } - } - XFREE (tmp_pathspec); - - if (!has_symlinks) - { - return xstrdup (pathspec); - } - - tmp_pathspec = realpath (pathspec, buf); - if (tmp_pathspec == 0) - { - lt_fatal ("Could not follow symlinks for %s", pathspec); - } - return xstrdup (tmp_pathspec); -#endif -} - -char * -strendzap (char *str, const char *pat) -{ - size_t len, patlen; - - assert (str != NULL); - assert (pat != NULL); - - len = strlen (str); - patlen = strlen (pat); - - if (patlen <= len) - { - str += len - patlen; - if (strcmp (str, pat) == 0) - *str = '\0'; - } - return str; -} - -static void -lt_error_core (int exit_status, const char *mode, - const char *message, va_list ap) -{ - fprintf (stderr, "%s: %s: ", program_name, mode); - vfprintf (stderr, message, ap); - fprintf (stderr, ".\n"); - - if (exit_status >= 0) - exit (exit_status); -} - -void -lt_fatal (const char *message, ...) -{ - va_list ap; - va_start (ap, message); - lt_error_core (EXIT_FAILURE, "FATAL", message, ap); - va_end (ap); -} - -void -lt_setenv (const char *name, const char *value) -{ - LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", - (name ? name : ""), - (value ? 
value : ""))); - { -#ifdef HAVE_SETENV - /* always make a copy, for consistency with !HAVE_SETENV */ - char *str = xstrdup (value); - setenv (name, str, 1); -#else - int len = strlen (name) + 1 + strlen (value) + 1; - char *str = XMALLOC (char, len); - sprintf (str, "%s=%s", name, value); - if (putenv (str) != EXIT_SUCCESS) - { - XFREE (str); - } -#endif - } -} - -char * -lt_extend_str (const char *orig_value, const char *add, int to_end) -{ - char *new_value; - if (orig_value && *orig_value) - { - int orig_value_len = strlen (orig_value); - int add_len = strlen (add); - new_value = XMALLOC (char, add_len + orig_value_len + 1); - if (to_end) - { - strcpy (new_value, orig_value); - strcpy (new_value + orig_value_len, add); - } - else - { - strcpy (new_value, add); - strcpy (new_value + add_len, orig_value); - } - } - else - { - new_value = xstrdup (add); - } - return new_value; -} - -void -lt_update_exe_path (const char *name, const char *value) -{ - LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", - (name ? name : ""), - (value ? value : ""))); - - if (name && *name && value && *value) - { - char *new_value = lt_extend_str (getenv (name), value, 0); - /* some systems can't cope with a ':'-terminated path #' */ - int len = strlen (new_value); - while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) - { - new_value[len-1] = '\0'; - } - lt_setenv (name, new_value); - XFREE (new_value); - } -} - -void -lt_update_lib_path (const char *name, const char *value) -{ - LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", - (name ? name : ""), - (value ? value : ""))); - - if (name && *name && value && *value) - { - char *new_value = lt_extend_str (getenv (name), value, 0); - lt_setenv (name, new_value); - XFREE (new_value); - } -} - -EOF - case $host_os in - mingw*) - cat <<"EOF" - -/* Prepares an argument vector before calling spawn(). 
- Note that spawn() does not by itself call the command interpreter - (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : - ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); - GetVersionEx(&v); - v.dwPlatformId == VER_PLATFORM_WIN32_NT; - }) ? "cmd.exe" : "command.com"). - Instead it simply concatenates the arguments, separated by ' ', and calls - CreateProcess(). We must quote the arguments since Win32 CreateProcess() - interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a - special way: - - Space and tab are interpreted as delimiters. They are not treated as - delimiters if they are surrounded by double quotes: "...". - - Unescaped double quotes are removed from the input. Their only effect is - that within double quotes, space and tab are treated like normal - characters. - - Backslashes not followed by double quotes are not special. - - But 2*n+1 backslashes followed by a double quote become - n backslashes followed by a double quote (n >= 0): - \" -> " - \\\" -> \" - \\\\\" -> \\" - */ -#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" -#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" -char ** -prepare_spawn (char **argv) -{ - size_t argc; - char **new_argv; - size_t i; - - /* Count number of arguments. */ - for (argc = 0; argv[argc] != NULL; argc++) - ; - - /* Allocate new argument vector. */ - new_argv = XMALLOC (char *, argc + 1); - - /* Put quoted arguments into the new argument vector. 
*/ - for (i = 0; i < argc; i++) - { - const char *string = argv[i]; - - if (string[0] == '\0') - new_argv[i] = xstrdup ("\"\""); - else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) - { - int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); - size_t length; - unsigned int backslashes; - const char *s; - char *quoted_string; - char *p; - - length = 0; - backslashes = 0; - if (quote_around) - length++; - for (s = string; *s != '\0'; s++) - { - char c = *s; - if (c == '"') - length += backslashes + 1; - length++; - if (c == '\\') - backslashes++; - else - backslashes = 0; - } - if (quote_around) - length += backslashes + 1; - - quoted_string = XMALLOC (char, length + 1); - - p = quoted_string; - backslashes = 0; - if (quote_around) - *p++ = '"'; - for (s = string; *s != '\0'; s++) - { - char c = *s; - if (c == '"') - { - unsigned int j; - for (j = backslashes + 1; j > 0; j--) - *p++ = '\\'; - } - *p++ = c; - if (c == '\\') - backslashes++; - else - backslashes = 0; - } - if (quote_around) - { - unsigned int j; - for (j = backslashes; j > 0; j--) - *p++ = '\\'; - *p++ = '"'; - } - *p = '\0'; - - new_argv[i] = quoted_string; - } - else - new_argv[i] = (char *) string; - } - new_argv[argc] = NULL; - - return new_argv; -} -EOF - ;; - esac - - cat <<"EOF" -void lt_dump_script (FILE* f) -{ -EOF - func_emit_wrapper yes | - $SED -e 's/\([\\"]\)/\\\1/g' \ - -e 's/^/ fputs ("/' -e 's/$/\\n", f);/' - - cat <<"EOF" -} -EOF -} -# end: func_emit_cwrapperexe_src - -# func_win32_import_lib_p ARG -# True if ARG is an import lib, as indicated by $file_magic_cmd -func_win32_import_lib_p () -{ - $opt_debug - case `eval "$file_magic_cmd \"\$1\" 2>/dev/null" | $SED -e 10q` in - *import*) : ;; - *) false ;; - esac -} - -# func_mode_link arg... 
-func_mode_link () -{ - $opt_debug - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) - # It is impossible to link a dll without this setting, and - # we shouldn't force the makefile maintainer to figure out - # which system we are compiling for in order to pass an extra - # flag for every libtool invocation. - # allow_undefined=no - - # FIXME: Unfortunately, there are problems with the above when trying - # to make a dll which has undefined symbols, in which case not - # even a static library is built. For now, we need to specify - # -no-undefined on the libtool link line when we can be certain - # that all symbols are satisfied, otherwise we get a static library. - allow_undefined=yes - ;; - *) - allow_undefined=yes - ;; - esac - libtool_args=$nonopt - base_compile="$nonopt $@" - compile_command=$nonopt - finalize_command=$nonopt - - compile_rpath= - finalize_rpath= - compile_shlibpath= - finalize_shlibpath= - convenience= - old_convenience= - deplibs= - old_deplibs= - compiler_flags= - linker_flags= - dllsearchpath= - lib_search_path=`pwd` - inst_prefix_dir= - new_inherited_linker_flags= - - avoid_version=no - bindir= - dlfiles= - dlprefiles= - dlself=no - export_dynamic=no - export_symbols= - export_symbols_regex= - generated= - libobjs= - ltlibs= - module=no - no_install=no - objs= - non_pic_objects= - precious_files_regex= - prefer_static_libs=no - preload=no - prev= - prevarg= - release= - rpath= - xrpath= - perm_rpath= - temp_rpath= - thread_safe=no - vinfo= - vinfo_number=no - weak_libs= - single_module="${wl}-single_module" - func_infer_tag $base_compile - - # We need to know -static, to get the right output filenames. 
- for arg - do - case $arg in - -shared) - test "$build_libtool_libs" != yes && \ - func_fatal_configuration "can not build a shared library" - build_old_libs=no - break - ;; - -all-static | -static | -static-libtool-libs) - case $arg in - -all-static) - if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then - func_warning "complete static linking is impossible in this configuration" - fi - if test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - prefer_static_libs=yes - ;; - -static) - if test -z "$pic_flag" && test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - prefer_static_libs=built - ;; - -static-libtool-libs) - if test -z "$pic_flag" && test -n "$link_static_flag"; then - dlopen_self=$dlopen_self_static - fi - prefer_static_libs=yes - ;; - esac - build_libtool_libs=no - build_old_libs=yes - break - ;; - esac - done - - # See if our shared archives depend on static archives. - test -n "$old_archive_from_new_cmds" && build_old_libs=yes - - # Go through the arguments, transforming them on the way. - while test "$#" -gt 0; do - arg="$1" - shift - func_quote_for_eval "$arg" - qarg=$func_quote_for_eval_unquoted_result - func_append libtool_args " $func_quote_for_eval_result" - - # If the previous option needs an argument, assign it. - if test -n "$prev"; then - case $prev in - output) - func_append compile_command " @OUTPUT@" - func_append finalize_command " @OUTPUT@" - ;; - esac - - case $prev in - bindir) - bindir="$arg" - prev= - continue - ;; - dlfiles|dlprefiles) - if test "$preload" = no; then - # Add the symbol object into the linking commands. - func_append compile_command " @SYMFILE@" - func_append finalize_command " @SYMFILE@" - preload=yes - fi - case $arg in - *.la | *.lo) ;; # We handle these cases below. 
- force) - if test "$dlself" = no; then - dlself=needless - export_dynamic=yes - fi - prev= - continue - ;; - self) - if test "$prev" = dlprefiles; then - dlself=yes - elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then - dlself=yes - else - dlself=needless - export_dynamic=yes - fi - prev= - continue - ;; - *) - if test "$prev" = dlfiles; then - dlfiles="$dlfiles $arg" - else - dlprefiles="$dlprefiles $arg" - fi - prev= - continue - ;; - esac - ;; - expsyms) - export_symbols="$arg" - test -f "$arg" \ - || func_fatal_error "symbol file \`$arg' does not exist" - prev= - continue - ;; - expsyms_regex) - export_symbols_regex="$arg" - prev= - continue - ;; - framework) - case $host in - *-*-darwin*) - case "$deplibs " in - *" $qarg.ltframework "*) ;; - *) deplibs="$deplibs $qarg.ltframework" # this is fixed later - ;; - esac - ;; - esac - prev= - continue - ;; - inst_prefix) - inst_prefix_dir="$arg" - prev= - continue - ;; - objectlist) - if test -f "$arg"; then - save_arg=$arg - moreargs= - for fil in `cat "$save_arg"` - do -# moreargs="$moreargs $fil" - arg=$fil - # A libtool-controlled object. - - # Check to see that this really is a libtool object. - if func_lalib_unsafe_p "$arg"; then - pic_object= - non_pic_object= - - # Read the .lo file - func_source "$arg" - - if test -z "$pic_object" || - test -z "$non_pic_object" || - test "$pic_object" = none && - test "$non_pic_object" = none; then - func_fatal_error "cannot find name of object for \`$arg'" - fi - - # Extract subdirectory from the argument. - func_dirname "$arg" "/" "" - xdir="$func_dirname_result" - - if test "$pic_object" != none; then - # Prepend the subdirectory the object is found in. - pic_object="$xdir$pic_object" - - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then - dlfiles="$dlfiles $pic_object" - prev= - continue - else - # If libtool objects are unsupported, then we need to preload. 
- prev=dlprefiles - fi - fi - - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. - dlprefiles="$dlprefiles $pic_object" - prev= - fi - - # A PIC object. - func_append libobjs " $pic_object" - arg="$pic_object" - fi - - # Non-PIC object. - if test "$non_pic_object" != none; then - # Prepend the subdirectory the object is found in. - non_pic_object="$xdir$non_pic_object" - - # A standard non-PIC object - func_append non_pic_objects " $non_pic_object" - if test -z "$pic_object" || test "$pic_object" = none ; then - arg="$non_pic_object" - fi - else - # If the PIC object exists, use it instead. - # $xdir was prepended to $pic_object above. - non_pic_object="$pic_object" - func_append non_pic_objects " $non_pic_object" - fi - else - # Only an error if not doing a dry-run. - if $opt_dry_run; then - # Extract subdirectory from the argument. - func_dirname "$arg" "/" "" - xdir="$func_dirname_result" - - func_lo2o "$arg" - pic_object=$xdir$objdir/$func_lo2o_result - non_pic_object=$xdir$func_lo2o_result - func_append libobjs " $pic_object" - func_append non_pic_objects " $non_pic_object" - else - func_fatal_error "\`$arg' is not a valid libtool object" - fi - fi - done - else - func_fatal_error "link input file \`$arg' does not exist" - fi - arg=$save_arg - prev= - continue - ;; - precious_regex) - precious_files_regex="$arg" - prev= - continue - ;; - release) - release="-$arg" - prev= - continue - ;; - rpath | xrpath) - # We need an absolute path. 
- case $arg in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - func_fatal_error "only absolute run-paths are allowed" - ;; - esac - if test "$prev" = rpath; then - case "$rpath " in - *" $arg "*) ;; - *) rpath="$rpath $arg" ;; - esac - else - case "$xrpath " in - *" $arg "*) ;; - *) xrpath="$xrpath $arg" ;; - esac - fi - prev= - continue - ;; - shrext) - shrext_cmds="$arg" - prev= - continue - ;; - weak) - weak_libs="$weak_libs $arg" - prev= - continue - ;; - xcclinker) - linker_flags="$linker_flags $qarg" - compiler_flags="$compiler_flags $qarg" - prev= - func_append compile_command " $qarg" - func_append finalize_command " $qarg" - continue - ;; - xcompiler) - compiler_flags="$compiler_flags $qarg" - prev= - func_append compile_command " $qarg" - func_append finalize_command " $qarg" - continue - ;; - xlinker) - linker_flags="$linker_flags $qarg" - compiler_flags="$compiler_flags $wl$qarg" - prev= - func_append compile_command " $wl$qarg" - func_append finalize_command " $wl$qarg" - continue - ;; - *) - eval "$prev=\"\$arg\"" - prev= - continue - ;; - esac - fi # test -n "$prev" - - prevarg="$arg" - - case $arg in - -all-static) - if test -n "$link_static_flag"; then - # See comment for -static flag below, for more details. - func_append compile_command " $link_static_flag" - func_append finalize_command " $link_static_flag" - fi - continue - ;; - - -allow-undefined) - # FIXME: remove this flag sometime in the future. 
- func_fatal_error "\`-allow-undefined' must not be used because it is the default" - ;; - - -avoid-version) - avoid_version=yes - continue - ;; - - -bindir) - prev=bindir - continue - ;; - - -dlopen) - prev=dlfiles - continue - ;; - - -dlpreopen) - prev=dlprefiles - continue - ;; - - -export-dynamic) - export_dynamic=yes - continue - ;; - - -export-symbols | -export-symbols-regex) - if test -n "$export_symbols" || test -n "$export_symbols_regex"; then - func_fatal_error "more than one -exported-symbols argument is not allowed" - fi - if test "X$arg" = "X-export-symbols"; then - prev=expsyms - else - prev=expsyms_regex - fi - continue - ;; - - -framework) - prev=framework - continue - ;; - - -inst-prefix-dir) - prev=inst_prefix - continue - ;; - - # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* - # so, if we see these flags be careful not to treat them like -L - -L[A-Z][A-Z]*:*) - case $with_gcc/$host in - no/*-*-irix* | /*-*-irix*) - func_append compile_command " $arg" - func_append finalize_command " $arg" - ;; - esac - continue - ;; - - -L*) - func_stripname '-L' '' "$arg" - dir=$func_stripname_result - if test -z "$dir"; then - if test "$#" -gt 0; then - func_fatal_error "require no space between \`-L' and \`$1'" - else - func_fatal_error "need path for \`-L' option" - fi - fi - # We need an absolute path. 
- case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - absdir=`cd "$dir" && pwd` - test -z "$absdir" && \ - func_fatal_error "cannot determine absolute directory name of \`$dir'" - dir="$absdir" - ;; - esac - case "$deplibs " in - *" -L$dir "*) ;; - *) - deplibs="$deplibs -L$dir" - lib_search_path="$lib_search_path $dir" - ;; - esac - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) - testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` - case :$dllsearchpath: in - *":$dir:"*) ;; - ::) dllsearchpath=$dir;; - *) dllsearchpath="$dllsearchpath:$dir";; - esac - case :$dllsearchpath: in - *":$testbindir:"*) ;; - ::) dllsearchpath=$testbindir;; - *) dllsearchpath="$dllsearchpath:$testbindir";; - esac - ;; - esac - continue - ;; - - -l*) - if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) - # These systems don't actually have a C or math library (as such) - continue - ;; - *-*-os2*) - # These systems don't actually have a C library (as such) - test "X$arg" = "X-lc" && continue - ;; - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) - # Do not include libc due to us having libc/libc_r. - test "X$arg" = "X-lc" && continue - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C and math libraries are in the System framework - deplibs="$deplibs System.ltframework" - continue - ;; - *-*-sco3.2v5* | *-*-sco5v6*) - # Causes problems with __ctype - test "X$arg" = "X-lc" && continue - ;; - *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) - # Compiler inserts libc in the correct place for threads to work - test "X$arg" = "X-lc" && continue - ;; - *-*-linux*) - test "X$arg" = "X-lc" && continue - ;; - esac - elif test "X$arg" = "X-lc_r"; then - case $host in - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc_r directly, use -pthread flag. 
- continue - ;; - esac - fi - deplibs="$deplibs $arg" - continue - ;; - - -module) - module=yes - continue - ;; - - # Tru64 UNIX uses -model [arg] to determine the layout of C++ - # classes, name mangling, and exception handling. - # Darwin uses the -arch flag to determine output architecture. - -model|-arch|-isysroot) - compiler_flags="$compiler_flags $arg" - func_append compile_command " $arg" - func_append finalize_command " $arg" - prev=xcompiler - continue - ;; - - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) - compiler_flags="$compiler_flags $arg" - func_append compile_command " $arg" - func_append finalize_command " $arg" - case "$new_inherited_linker_flags " in - *" $arg "*) ;; - * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; - esac - continue - ;; - - -multi_module) - single_module="${wl}-multi_module" - continue - ;; - - -no-fast-install) - fast_install=no - continue - ;; - - -no-install) - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) - # The PATH hackery in wrapper scripts is required on Windows - # and Darwin in order for the loader to find any dlls it needs. - func_warning "\`-no-install' is ignored for $host" - func_warning "assuming \`-no-fast-install' instead" - fast_install=no - ;; - *) no_install=yes ;; - esac - continue - ;; - - -no-undefined) - allow_undefined=no - continue - ;; - - -objectlist) - prev=objectlist - continue - ;; - - -o) prev=output ;; - - -precious-files-regex) - prev=precious_regex - continue - ;; - - -release) - prev=release - continue - ;; - - -rpath) - prev=rpath - continue - ;; - - -R) - prev=xrpath - continue - ;; - - -R*) - func_stripname '-R' '' "$arg" - dir=$func_stripname_result - # We need an absolute path. 
- case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; - *) - func_fatal_error "only absolute run-paths are allowed" - ;; - esac - case "$xrpath " in - *" $dir "*) ;; - *) xrpath="$xrpath $dir" ;; - esac - continue - ;; - - -shared) - # The effects of -shared are defined in a previous loop. - continue - ;; - - -shrext) - prev=shrext - continue - ;; - - -static | -static-libtool-libs) - # The effects of -static are defined in a previous loop. - # We used to do the same as -all-static on platforms that - # didn't have a PIC flag, but the assumption that the effects - # would be equivalent was wrong. It would break on at least - # Digital Unix and AIX. - continue - ;; - - -thread-safe) - thread_safe=yes - continue - ;; - - -version-info) - prev=vinfo - continue - ;; - - -version-number) - prev=vinfo - vinfo_number=yes - continue - ;; - - -weak) - prev=weak - continue - ;; - - -Wc,*) - func_stripname '-Wc,' '' "$arg" - args=$func_stripname_result - arg= - save_ifs="$IFS"; IFS=',' - for flag in $args; do - IFS="$save_ifs" - func_quote_for_eval "$flag" - arg="$arg $func_quote_for_eval_result" - compiler_flags="$compiler_flags $func_quote_for_eval_result" - done - IFS="$save_ifs" - func_stripname ' ' '' "$arg" - arg=$func_stripname_result - ;; - - -Wl,*) - func_stripname '-Wl,' '' "$arg" - args=$func_stripname_result - arg= - save_ifs="$IFS"; IFS=',' - for flag in $args; do - IFS="$save_ifs" - func_quote_for_eval "$flag" - arg="$arg $wl$func_quote_for_eval_result" - compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" - linker_flags="$linker_flags $func_quote_for_eval_result" - done - IFS="$save_ifs" - func_stripname ' ' '' "$arg" - arg=$func_stripname_result - ;; - - -Xcompiler) - prev=xcompiler - continue - ;; - - -Xlinker) - prev=xlinker - continue - ;; - - -XCClinker) - prev=xcclinker - continue - ;; - - # -msg_* for osf cc - -msg_*) - func_quote_for_eval "$arg" - arg="$func_quote_for_eval_result" - ;; - - # -64, -mips[0-9] enable 64-bit mode on the SGI compiler - # 
-r[0-9][0-9]* specifies the processor on the SGI compiler - # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler - # +DA*, +DD* enable 64-bit mode on the HP compiler - # -q* pass through compiler args for the IBM compiler - # -m*, -t[45]*, -txscale* pass through architecture-specific - # compiler args for GCC - # -F/path gives path to uninstalled frameworks, gcc on darwin - # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC - # @file GCC response files - # -tp=* Portland pgcc target processor selection - -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ - -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*) - func_quote_for_eval "$arg" - arg="$func_quote_for_eval_result" - func_append compile_command " $arg" - func_append finalize_command " $arg" - compiler_flags="$compiler_flags $arg" - continue - ;; - - # Some other compiler flag. - -* | +*) - func_quote_for_eval "$arg" - arg="$func_quote_for_eval_result" - ;; - - *.$objext) - # A standard object. - objs="$objs $arg" - ;; - - *.lo) - # A libtool-controlled object. - - # Check to see that this really is a libtool object. - if func_lalib_unsafe_p "$arg"; then - pic_object= - non_pic_object= - - # Read the .lo file - func_source "$arg" - - if test -z "$pic_object" || - test -z "$non_pic_object" || - test "$pic_object" = none && - test "$non_pic_object" = none; then - func_fatal_error "cannot find name of object for \`$arg'" - fi - - # Extract subdirectory from the argument. - func_dirname "$arg" "/" "" - xdir="$func_dirname_result" - - if test "$pic_object" != none; then - # Prepend the subdirectory the object is found in. - pic_object="$xdir$pic_object" - - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then - dlfiles="$dlfiles $pic_object" - prev= - continue - else - # If libtool objects are unsupported, then we need to preload. - prev=dlprefiles - fi - fi - - # CHECK ME: I think I busted this. 
-Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. - dlprefiles="$dlprefiles $pic_object" - prev= - fi - - # A PIC object. - func_append libobjs " $pic_object" - arg="$pic_object" - fi - - # Non-PIC object. - if test "$non_pic_object" != none; then - # Prepend the subdirectory the object is found in. - non_pic_object="$xdir$non_pic_object" - - # A standard non-PIC object - func_append non_pic_objects " $non_pic_object" - if test -z "$pic_object" || test "$pic_object" = none ; then - arg="$non_pic_object" - fi - else - # If the PIC object exists, use it instead. - # $xdir was prepended to $pic_object above. - non_pic_object="$pic_object" - func_append non_pic_objects " $non_pic_object" - fi - else - # Only an error if not doing a dry-run. - if $opt_dry_run; then - # Extract subdirectory from the argument. - func_dirname "$arg" "/" "" - xdir="$func_dirname_result" - - func_lo2o "$arg" - pic_object=$xdir$objdir/$func_lo2o_result - non_pic_object=$xdir$func_lo2o_result - func_append libobjs " $pic_object" - func_append non_pic_objects " $non_pic_object" - else - func_fatal_error "\`$arg' is not a valid libtool object" - fi - fi - ;; - - *.$libext) - # An archive. - deplibs="$deplibs $arg" - old_deplibs="$old_deplibs $arg" - continue - ;; - - *.la) - # A libtool-controlled library. - - if test "$prev" = dlfiles; then - # This library was specified with -dlopen. - dlfiles="$dlfiles $arg" - prev= - elif test "$prev" = dlprefiles; then - # The library was specified with -dlpreopen. - dlprefiles="$dlprefiles $arg" - prev= - else - deplibs="$deplibs $arg" - fi - continue - ;; - - # Some other compiler argument. - *) - # Unknown arguments in both finalize_command and compile_command need - # to be aesthetically quoted because they are evaled later. - func_quote_for_eval "$arg" - arg="$func_quote_for_eval_result" - ;; - esac # arg - - # Now actually substitute the argument into the commands. 
- if test -n "$arg"; then - func_append compile_command " $arg" - func_append finalize_command " $arg" - fi - done # argument parsing loop - - test -n "$prev" && \ - func_fatal_help "the \`$prevarg' option requires an argument" - - if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then - eval "arg=\"$export_dynamic_flag_spec\"" - func_append compile_command " $arg" - func_append finalize_command " $arg" - fi - - oldlibs= - # calculate the name of the file, without its directory - func_basename "$output" - outputname="$func_basename_result" - libobjs_save="$libobjs" - - if test -n "$shlibpath_var"; then - # get the directories listed in $shlibpath_var - eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` - else - shlib_search_path= - fi - eval "sys_lib_search_path=\"$sys_lib_search_path_spec\"" - eval "sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"" - - func_dirname "$output" "/" "" - output_objdir="$func_dirname_result$objdir" - # Create the object directory. - func_mkdir_p "$output_objdir" - - # Determine the type of output - case $output in - "") - func_fatal_help "you must specify an output file" - ;; - *.$libext) linkmode=oldlib ;; - *.lo | *.$objext) linkmode=obj ;; - *.la) linkmode=lib ;; - *) linkmode=prog ;; # Anything else should be a program. - esac - - specialdeplibs= - - libs= - # Find all interdependent deplibs by searching for libraries - # that are linked more than once (e.g. -la -lb -la) - for deplib in $deplibs; do - if $opt_duplicate_deps ; then - case "$libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - libs="$libs $deplib" - done - - if test "$linkmode" = lib; then - libs="$predeps $libs $compiler_lib_search_path $postdeps" - - # Compute libraries that are listed more than once in $predeps - # $postdeps and mark them as special (i.e., whose duplicates are - # not to be eliminated). 
- pre_post_deps= - if $opt_duplicate_compiler_generated_deps; then - for pre_post_dep in $predeps $postdeps; do - case "$pre_post_deps " in - *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; - esac - pre_post_deps="$pre_post_deps $pre_post_dep" - done - fi - pre_post_deps= - fi - - deplibs= - newdependency_libs= - newlib_search_path= - need_relink=no # whether we're linking any uninstalled libtool libraries - notinst_deplibs= # not-installed libtool libraries - notinst_path= # paths that contain not-installed libtool libraries - - case $linkmode in - lib) - passes="conv dlpreopen link" - for file in $dlfiles $dlprefiles; do - case $file in - *.la) ;; - *) - func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" - ;; - esac - done - ;; - prog) - compile_deplibs= - finalize_deplibs= - alldeplibs=no - newdlfiles= - newdlprefiles= - passes="conv scan dlopen dlpreopen link" - ;; - *) passes="conv" - ;; - esac - - for pass in $passes; do - # The preopen pass in lib mode reverses $deplibs; put it back here - # so that -L comes before libs that need it for instance... 
- if test "$linkmode,$pass" = "lib,link"; then - ## FIXME: Find the place where the list is rebuilt in the wrong - ## order, and fix it there properly - tmp_deplibs= - for deplib in $deplibs; do - tmp_deplibs="$deplib $tmp_deplibs" - done - deplibs="$tmp_deplibs" - fi - - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan"; then - libs="$deplibs" - deplibs= - fi - if test "$linkmode" = prog; then - case $pass in - dlopen) libs="$dlfiles" ;; - dlpreopen) libs="$dlprefiles" ;; - link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; - esac - fi - if test "$linkmode,$pass" = "lib,dlpreopen"; then - # Collect and forward deplibs of preopened libtool libs - for lib in $dlprefiles; do - # Ignore non-libtool-libs - dependency_libs= - case $lib in - *.la) func_source "$lib" ;; - esac - - # Collect preopened libtool deplibs, except any this library - # has declared as weak libs - for deplib in $dependency_libs; do - func_basename "$deplib" - deplib_base=$func_basename_result - case " $weak_libs " in - *" $deplib_base "*) ;; - *) deplibs="$deplibs $deplib" ;; - esac - done - done - libs="$dlprefiles" - fi - if test "$pass" = dlopen; then - # Collect dlpreopened libraries - save_deplibs="$deplibs" - deplibs= - fi - - for deplib in $libs; do - lib= - found=no - case $deplib in - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - compiler_flags="$compiler_flags $deplib" - if test "$linkmode" = lib ; then - case "$new_inherited_linker_flags " in - *" $deplib "*) ;; - * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; - esac - fi - fi - continue - ;; - -l*) - if test "$linkmode" != lib && test "$linkmode" != prog; then - func_warning "\`-l' is ignored for archives/objects" - continue - fi - func_stripname '-l' '' "$deplib" - name=$func_stripname_result - if test 
"$linkmode" = lib; then - searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" - else - searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" - fi - for searchdir in $searchdirs; do - for search_ext in .la $std_shrext .so .a; do - # Search the libtool library - lib="$searchdir/lib${name}${search_ext}" - if test -f "$lib"; then - if test "$search_ext" = ".la"; then - found=yes - else - found=no - fi - break 2 - fi - done - done - if test "$found" != yes; then - # deplib doesn't seem to be a libtool library - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" - fi - continue - else # deplib is a libtool library - # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, - # We need to do some special things here, and not later. - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $deplib "*) - if func_lalib_p "$lib"; then - library_names= - old_library= - func_source "$lib" - for l in $old_library $library_names; do - ll="$l" - done - if test "X$ll" = "X$old_library" ; then # only static version available - found=no - func_dirname "$lib" "" "." 
- ladir="$func_dirname_result" - lib=$ladir/$old_library - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" - fi - continue - fi - fi - ;; - *) ;; - esac - fi - fi - ;; # -l - *.ltframework) - if test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - deplibs="$deplib $deplibs" - if test "$linkmode" = lib ; then - case "$new_inherited_linker_flags " in - *" $deplib "*) ;; - * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; - esac - fi - fi - continue - ;; - -L*) - case $linkmode in - lib) - deplibs="$deplib $deplibs" - test "$pass" = conv && continue - newdependency_libs="$deplib $newdependency_libs" - func_stripname '-L' '' "$deplib" - newlib_search_path="$newlib_search_path $func_stripname_result" - ;; - prog) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - continue - fi - if test "$pass" = scan; then - deplibs="$deplib $deplibs" - else - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - fi - func_stripname '-L' '' "$deplib" - newlib_search_path="$newlib_search_path $func_stripname_result" - ;; - *) - func_warning "\`-L' is ignored for archives/objects" - ;; - esac # linkmode - continue - ;; # -L - -R*) - if test "$pass" = link; then - func_stripname '-R' '' "$deplib" - dir=$func_stripname_result - # Make sure the xrpath contains only unique directories. 
- case "$xrpath " in - *" $dir "*) ;; - *) xrpath="$xrpath $dir" ;; - esac - fi - deplibs="$deplib $deplibs" - continue - ;; - *.la) lib="$deplib" ;; - *.$libext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - continue - fi - case $linkmode in - lib) - # Linking convenience modules into shared libraries is allowed, - # but linking other static libraries is non-portable. - case " $dlpreconveniencelibs " in - *" $deplib "*) ;; - *) - valid_a_lib=no - case $deplibs_check_method in - match_pattern*) - set dummy $deplibs_check_method; shift - match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` - if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ - | $EGREP "$match_pattern_regex" > /dev/null; then - valid_a_lib=yes - fi - ;; - pass_all) - valid_a_lib=yes - ;; - esac - if test "$valid_a_lib" != yes; then - echo - $ECHO "*** Warning: Trying to link with static lib archive $deplib." - echo "*** I have the capability to make that library automatically link in when" - echo "*** you link to this library. But I can only do this if you have a" - echo "*** shared version of the library, which you do not appear to have" - echo "*** because the file extensions .$libext of this argument makes me believe" - echo "*** that it is just a static archive that I should not use here." - else - echo - $ECHO "*** Warning: Linking the shared library $output against the" - $ECHO "*** static library $deplib is not portable!" 
- deplibs="$deplib $deplibs" - fi - ;; - esac - continue - ;; - prog) - if test "$pass" != link; then - deplibs="$deplib $deplibs" - else - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - fi - continue - ;; - esac # linkmode - ;; # *.$libext - *.lo | *.$objext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" - elif test "$linkmode" = prog; then - if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then - # If there is no dlopen support or we're linking statically, - # we need to preload. - newdlprefiles="$newdlprefiles $deplib" - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else - newdlfiles="$newdlfiles $deplib" - fi - fi - continue - ;; - %DEPLIBS%) - alldeplibs=yes - continue - ;; - esac # case $deplib - - if test "$found" = yes || test -f "$lib"; then : - else - func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" - fi - - # Check to see that this really is a libtool archive. - func_lalib_unsafe_p "$lib" \ - || func_fatal_error "\`$lib' is not a valid libtool archive" - - func_dirname "$lib" "" "." 
- ladir="$func_dirname_result" - - dlname= - dlopen= - dlpreopen= - libdir= - library_names= - old_library= - inherited_linker_flags= - # If the library was installed with an old release of libtool, - # it will not redefine variables installed, or shouldnotlink - installed=yes - shouldnotlink=no - avoidtemprpath= - - - # Read the .la file - func_source "$lib" - - # Convert "-framework foo" to "foo.ltframework" - if test -n "$inherited_linker_flags"; then - tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` - for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do - case " $new_inherited_linker_flags " in - *" $tmp_inherited_linker_flag "*) ;; - *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; - esac - done - fi - dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan" || - { test "$linkmode" != prog && test "$linkmode" != lib; }; then - test -n "$dlopen" && dlfiles="$dlfiles $dlopen" - test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" - fi - - if test "$pass" = conv; then - # Only check for convenience libraries - deplibs="$lib $deplibs" - if test -z "$libdir"; then - if test -z "$old_library"; then - func_fatal_error "cannot find name of link library for \`$lib'" - fi - # It is a libtool convenience library, so add in its objects. 
- convenience="$convenience $ladir/$objdir/$old_library" - old_convenience="$old_convenience $ladir/$objdir/$old_library" - elif test "$linkmode" != prog && test "$linkmode" != lib; then - func_fatal_error "\`$lib' is not a convenience library" - fi - tmp_libs= - for deplib in $dependency_libs; do - deplibs="$deplib $deplibs" - if $opt_duplicate_deps ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done - continue - fi # $pass = conv - - - # Get the name of the library we link against. - linklib= - for l in $old_library $library_names; do - linklib="$l" - done - if test -z "$linklib"; then - func_fatal_error "cannot find name of link library for \`$lib'" - fi - - # This library was specified with -dlopen. - if test "$pass" = dlopen; then - if test -z "$libdir"; then - func_fatal_error "cannot -dlopen a convenience library: \`$lib'" - fi - if test -z "$dlname" || - test "$dlopen_support" != yes || - test "$build_libtool_libs" = no; then - # If there is no dlname, no dlopen support or we're linking - # statically, we need to preload. We also need to preload any - # dependent libraries so libltdl's deplib preloader doesn't - # bomb out in the load deplibs phase. - dlprefiles="$dlprefiles $lib $dependency_libs" - else - newdlfiles="$newdlfiles $lib" - fi - continue - fi # $pass = dlopen - - # We need an absolute path. - case $ladir in - [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; - *) - abs_ladir=`cd "$ladir" && pwd` - if test -z "$abs_ladir"; then - func_warning "cannot determine absolute directory name of \`$ladir'" - func_warning "passing it literally to the linker, although it might fail" - abs_ladir="$ladir" - fi - ;; - esac - func_basename "$lib" - laname="$func_basename_result" - - # Find the relevant object directory and library name. - if test "X$installed" = Xyes; then - if test ! 
-f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then - func_warning "library \`$lib' was moved." - dir="$ladir" - absdir="$abs_ladir" - libdir="$abs_ladir" - else - dir="$libdir" - absdir="$libdir" - fi - test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes - else - if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then - dir="$ladir" - absdir="$abs_ladir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" - else - dir="$ladir/$objdir" - absdir="$abs_ladir/$objdir" - # Remove this search path later - notinst_path="$notinst_path $abs_ladir" - fi - fi # $installed = yes - func_stripname 'lib' '.la' "$laname" - name=$func_stripname_result - - # This library was specified with -dlpreopen. - if test "$pass" = dlpreopen; then - if test -z "$libdir" && test "$linkmode" = prog; then - func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" - fi - # Prefer using a static library (so that no silly _DYNAMIC symbols - # are required to link). - if test -n "$old_library"; then - newdlprefiles="$newdlprefiles $dir/$old_library" - # Keep a list of preopened convenience libraries to check - # that they are being used correctly in the link pass. - test -z "$libdir" && \ - dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" - # Otherwise, use the dlname, so that lt_dlopen finds it. 
- elif test -n "$dlname"; then - newdlprefiles="$newdlprefiles $dir/$dlname" - else - newdlprefiles="$newdlprefiles $dir/$linklib" - fi - fi # $pass = dlpreopen - - if test -z "$libdir"; then - # Link the convenience library - if test "$linkmode" = lib; then - deplibs="$dir/$old_library $deplibs" - elif test "$linkmode,$pass" = "prog,link"; then - compile_deplibs="$dir/$old_library $compile_deplibs" - finalize_deplibs="$dir/$old_library $finalize_deplibs" - else - deplibs="$lib $deplibs" # used for prog,scan pass - fi - continue - fi - - - if test "$linkmode" = prog && test "$pass" != link; then - newlib_search_path="$newlib_search_path $ladir" - deplibs="$lib $deplibs" - - linkalldeplibs=no - if test "$link_all_deplibs" != no || test -z "$library_names" || - test "$build_libtool_libs" = no; then - linkalldeplibs=yes - fi - - tmp_libs= - for deplib in $dependency_libs; do - case $deplib in - -L*) func_stripname '-L' '' "$deplib" - newlib_search_path="$newlib_search_path $func_stripname_result" - ;; - esac - # Need to link against all dependency_libs? - if test "$linkalldeplibs" = yes; then - deplibs="$deplib $deplibs" - else - # Need to hardcode shared library paths - # or/and link against static libraries - newdependency_libs="$deplib $newdependency_libs" - fi - if $opt_duplicate_deps ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done # for deplib - continue - fi # $linkmode = prog... - - if test "$linkmode,$pass" = "prog,link"; then - if test -n "$library_names" && - { { test "$prefer_static_libs" = no || - test "$prefer_static_libs,$installed" = "built,yes"; } || - test -z "$old_library"; }; then - # We need to hardcode the library path - if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then - # Make sure the rpath contains only unique directories. 
- case "$temp_rpath:" in - *"$absdir:"*) ;; - *) temp_rpath="$temp_rpath$absdir:" ;; - esac - fi - - # Hardcode the library path. - # Skip directories that are in the system default run-time - # search path. - case " $sys_lib_dlsearch_path " in - *" $absdir "*) ;; - *) - case "$compile_rpath " in - *" $absdir "*) ;; - *) compile_rpath="$compile_rpath $absdir" - esac - ;; - esac - case " $sys_lib_dlsearch_path " in - *" $libdir "*) ;; - *) - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" - esac - ;; - esac - fi # $linkmode,$pass = prog,link... - - if test "$alldeplibs" = yes && - { test "$deplibs_check_method" = pass_all || - { test "$build_libtool_libs" = yes && - test -n "$library_names"; }; }; then - # We only need to search for static libraries - continue - fi - fi - - link_static=no # Whether the deplib will be linked statically - use_static_libs=$prefer_static_libs - if test "$use_static_libs" = built && test "$installed" = yes; then - use_static_libs=no - fi - if test -n "$library_names" && - { test "$use_static_libs" = no || test -z "$old_library"; }; then - case $host in - *cygwin* | *mingw* | *cegcc*) - # No point in relinking DLLs because paths are not encoded - notinst_deplibs="$notinst_deplibs $lib" - need_relink=no - ;; - *) - if test "$installed" = no; then - notinst_deplibs="$notinst_deplibs $lib" - need_relink=yes - fi - ;; - esac - # This is a shared library - - # Warn about portability, can't link against -module's on some - # systems (darwin). Don't bleat about dlopened modules though! 
- dlopenmodule="" - for dlpremoduletest in $dlprefiles; do - if test "X$dlpremoduletest" = "X$lib"; then - dlopenmodule="$dlpremoduletest" - break - fi - done - if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then - echo - if test "$linkmode" = prog; then - $ECHO "*** Warning: Linking the executable $output against the loadable module" - else - $ECHO "*** Warning: Linking the shared library $output against the loadable module" - fi - $ECHO "*** $linklib is not portable!" - fi - if test "$linkmode" = lib && - test "$hardcode_into_libs" = yes; then - # Hardcode the library path. - # Skip directories that are in the system default run-time - # search path. - case " $sys_lib_dlsearch_path " in - *" $absdir "*) ;; - *) - case "$compile_rpath " in - *" $absdir "*) ;; - *) compile_rpath="$compile_rpath $absdir" - esac - ;; - esac - case " $sys_lib_dlsearch_path " in - *" $libdir "*) ;; - *) - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" - esac - ;; - esac - fi - - if test -n "$old_archive_from_expsyms_cmds"; then - # figure out the soname - set dummy $library_names - shift - realname="$1" - shift - eval "libname=\"$libname_spec\"" - # use dlname if we got it. it's perfectly good, no? 
- if test -n "$dlname"; then - soname="$dlname" - elif test -n "$soname_spec"; then - # bleh windows - case $host in - *cygwin* | mingw* | *cegcc*) - func_arith $current - $age - major=$func_arith_result - versuffix="-$major" - ;; - esac - eval "soname=\"$soname_spec\"" - else - soname="$realname" - fi - - # Make a new name for the extract_expsyms_cmds to use - soroot="$soname" - func_basename "$soroot" - soname="$func_basename_result" - func_stripname 'lib' '.dll' "$soname" - newlib=libimp-$func_stripname_result.a - - # If the library has no export list, then create one now - if test -f "$output_objdir/$soname-def"; then : - else - func_verbose "extracting exported symbol list from \`$soname'" - func_execute_cmds "$extract_expsyms_cmds" 'exit $?' - fi - - # Create $newlib - if test -f "$output_objdir/$newlib"; then :; else - func_verbose "generating import library for \`$soname'" - func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' - fi - # make sure the library variables are pointing to the new library - dir=$output_objdir - linklib=$newlib - fi # test -n "$old_archive_from_expsyms_cmds" - - if test "$linkmode" = prog || test "$mode" != relink; then - add_shlibpath= - add_dir= - add= - lib_linked=yes - case $hardcode_action in - immediate | unsupported) - if test "$hardcode_direct" = no; then - add="$dir/$linklib" - case $host in - *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; - *-*-sysv4*uw2*) add_dir="-L$dir" ;; - *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ - *-*-unixware7*) add_dir="-L$dir" ;; - *-*-darwin* ) - # if the lib is a (non-dlopened) module then we can not - # link against it, someone is ignoring the earlier warnings - if /usr/bin/file -L $add 2> /dev/null | - $GREP ": [^:]* bundle" >/dev/null ; then - if test "X$dlopenmodule" != "X$lib"; then - $ECHO "*** Warning: lib $linklib is a module, not a shared library" - if test -z "$old_library" ; then - echo - echo "*** And there doesn't seem to be a static archive available" - echo "*** 
The link will probably fail, sorry" - else - add="$dir/$old_library" - fi - elif test -n "$old_library"; then - add="$dir/$old_library" - fi - fi - esac - elif test "$hardcode_minus_L" = no; then - case $host in - *-*-sunos*) add_shlibpath="$dir" ;; - esac - add_dir="-L$dir" - add="-l$name" - elif test "$hardcode_shlibpath_var" = no; then - add_shlibpath="$dir" - add="-l$name" - else - lib_linked=no - fi - ;; - relink) - if test "$hardcode_direct" = yes && - test "$hardcode_direct_absolute" = no; then - add="$dir/$linklib" - elif test "$hardcode_minus_L" = yes; then - add_dir="-L$absdir" - # Try looking first in the location we're being installed to. - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) - add_dir="$add_dir -L$inst_prefix_dir$libdir" - ;; - esac - fi - add="-l$name" - elif test "$hardcode_shlibpath_var" = yes; then - add_shlibpath="$dir" - add="-l$name" - else - lib_linked=no - fi - ;; - *) lib_linked=no ;; - esac - - if test "$lib_linked" != yes; then - func_fatal_configuration "unsupported hardcode properties" - fi - - if test -n "$add_shlibpath"; then - case :$compile_shlibpath: in - *":$add_shlibpath:"*) ;; - *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; - esac - fi - if test "$linkmode" = prog; then - test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" - test -n "$add" && compile_deplibs="$add $compile_deplibs" - else - test -n "$add_dir" && deplibs="$add_dir $deplibs" - test -n "$add" && deplibs="$add $deplibs" - if test "$hardcode_direct" != yes && - test "$hardcode_minus_L" != yes && - test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; - *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; - esac - fi - fi - fi - - if test "$linkmode" = prog || test "$mode" = relink; then - add_shlibpath= - add_dir= - add= - # Finalize command for both is simple: just hardcode it. 
- if test "$hardcode_direct" = yes && - test "$hardcode_direct_absolute" = no; then - add="$libdir/$linklib" - elif test "$hardcode_minus_L" = yes; then - add_dir="-L$libdir" - add="-l$name" - elif test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; - *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; - esac - add="-l$name" - elif test "$hardcode_automatic" = yes; then - if test -n "$inst_prefix_dir" && - test -f "$inst_prefix_dir$libdir/$linklib" ; then - add="$inst_prefix_dir$libdir/$linklib" - else - add="$libdir/$linklib" - fi - else - # We cannot seem to hardcode it, guess we'll fake it. - add_dir="-L$libdir" - # Try looking first in the location we're being installed to. - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) - add_dir="$add_dir -L$inst_prefix_dir$libdir" - ;; - esac - fi - add="-l$name" - fi - - if test "$linkmode" = prog; then - test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" - test -n "$add" && finalize_deplibs="$add $finalize_deplibs" - else - test -n "$add_dir" && deplibs="$add_dir $deplibs" - test -n "$add" && deplibs="$add $deplibs" - fi - fi - elif test "$linkmode" = prog; then - # Here we assume that one of hardcode_direct or hardcode_minus_L - # is not unsupported. This is valid on all known static and - # shared platforms. - if test "$hardcode_direct" != unsupported; then - test -n "$old_library" && linklib="$old_library" - compile_deplibs="$dir/$linklib $compile_deplibs" - finalize_deplibs="$dir/$linklib $finalize_deplibs" - else - compile_deplibs="-l$name -L$dir $compile_deplibs" - finalize_deplibs="-l$name -L$dir $finalize_deplibs" - fi - elif test "$build_libtool_libs" = yes; then - # Not a shared library - if test "$deplibs_check_method" != pass_all; then - # We're trying link a shared library against a static one - # but the system doesn't support it. 
- - # Just print a warning and add the library to dependency_libs so - # that the program can be linked against the static library. - echo - $ECHO "*** Warning: This system can not link to static lib archive $lib." - echo "*** I have the capability to make that library automatically link in when" - echo "*** you link to this library. But I can only do this if you have a" - echo "*** shared version of the library, which you do not appear to have." - if test "$module" = yes; then - echo "*** But as you try to build a module library, libtool will still create " - echo "*** a static module, that should work as long as the dlopening application" - echo "*** is linked with the -dlopen flag to resolve symbols at runtime." - if test -z "$global_symbol_pipe"; then - echo - echo "*** However, this would only work if libtool was able to extract symbol" - echo "*** lists from a program, using \`nm' or equivalent, but libtool could" - echo "*** not find such a program. So, this module is probably useless." - echo "*** \`nm' from GNU binutils and a full rebuild may help." - fi - if test "$build_old_libs" = no; then - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - fi - else - deplibs="$dir/$old_library $deplibs" - link_static=yes - fi - fi # link shared/static library? 
- - if test "$linkmode" = lib; then - if test -n "$dependency_libs" && - { test "$hardcode_into_libs" != yes || - test "$build_old_libs" = yes || - test "$link_static" = yes; }; then - # Extract -R from dependency_libs - temp_deplibs= - for libdir in $dependency_libs; do - case $libdir in - -R*) func_stripname '-R' '' "$libdir" - temp_xrpath=$func_stripname_result - case " $xrpath " in - *" $temp_xrpath "*) ;; - *) xrpath="$xrpath $temp_xrpath";; - esac;; - *) temp_deplibs="$temp_deplibs $libdir";; - esac - done - dependency_libs="$temp_deplibs" - fi - - newlib_search_path="$newlib_search_path $absdir" - # Link against this library - test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" - # ... and its dependency_libs - tmp_libs= - for deplib in $dependency_libs; do - newdependency_libs="$deplib $newdependency_libs" - if $opt_duplicate_deps ; then - case "$tmp_libs " in - *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; - esac - fi - tmp_libs="$tmp_libs $deplib" - done - - if test "$link_all_deplibs" != no; then - # Add the search paths of all dependency libraries - for deplib in $dependency_libs; do - path= - case $deplib in - -L*) path="$deplib" ;; - *.la) - func_dirname "$deplib" "" "." - dir="$func_dirname_result" - # We need an absolute path. 
- case $dir in - [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; - *) - absdir=`cd "$dir" && pwd` - if test -z "$absdir"; then - func_warning "cannot determine absolute directory name of \`$dir'" - absdir="$dir" - fi - ;; - esac - if $GREP "^installed=no" $deplib > /dev/null; then - case $host in - *-*-darwin*) - depdepl= - deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` - if test -n "$deplibrary_names" ; then - for tmp in $deplibrary_names ; do - depdepl=$tmp - done - if test -f "$absdir/$objdir/$depdepl" ; then - depdepl="$absdir/$objdir/$depdepl" - darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` - if test -z "$darwin_install_name"; then - darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` - fi - compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" - linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" - path= - fi - fi - ;; - *) - path="-L$absdir/$objdir" - ;; - esac - else - libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - test -z "$libdir" && \ - func_fatal_error "\`$deplib' is not a valid libtool archive" - test "$absdir" != "$libdir" && \ - func_warning "\`$deplib' seems to be moved" - - path="-L$absdir" - fi - ;; - esac - case " $deplibs " in - *" $path "*) ;; - *) deplibs="$path $deplibs" ;; - esac - done - fi # link_all_deplibs != no - fi # linkmode = lib - done # for deplib in $libs - if test "$pass" = link; then - if test "$linkmode" = "prog"; then - compile_deplibs="$new_inherited_linker_flags $compile_deplibs" - finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" - else - compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - fi - fi - dependency_libs="$newdependency_libs" - if test "$pass" = dlpreopen; then - # Link the dlpreopened libraries before other libraries - for deplib in $save_deplibs; do - deplibs="$deplib 
$deplibs" - done - fi - if test "$pass" != dlopen; then - if test "$pass" != conv; then - # Make sure lib_search_path contains only unique directories. - lib_search_path= - for dir in $newlib_search_path; do - case "$lib_search_path " in - *" $dir "*) ;; - *) lib_search_path="$lib_search_path $dir" ;; - esac - done - newlib_search_path= - fi - - if test "$linkmode,$pass" != "prog,link"; then - vars="deplibs" - else - vars="compile_deplibs finalize_deplibs" - fi - for var in $vars dependency_libs; do - # Add libraries to $var in reverse order - eval tmp_libs=\$$var - new_libs= - for deplib in $tmp_libs; do - # FIXME: Pedantically, this is the right thing to do, so - # that some nasty dependency loop isn't accidentally - # broken: - #new_libs="$deplib $new_libs" - # Pragmatically, this seems to cause very few problems in - # practice: - case $deplib in - -L*) new_libs="$deplib $new_libs" ;; - -R*) ;; - *) - # And here is the reason: when a library appears more - # than once as an explicit dependence of a library, or - # is implicitly linked in more than once by the - # compiler, it is considered special, and multiple - # occurrences thereof are not removed. Compare this - # with having the same library being listed as a - # dependency of multiple other libraries: in this case, - # we know (pedantically, we assume) the library does not - # need to be listed more than once, so we keep only the - # last copy. This is not always right, but it is rare - # enough that we require users that really mean to play - # such unportable linking tricks to link the library - # using -Wl,-lname, so that libtool does not consider it - # for duplicate removal. 
- case " $specialdeplibs " in - *" $deplib "*) new_libs="$deplib $new_libs" ;; - *) - case " $new_libs " in - *" $deplib "*) ;; - *) new_libs="$deplib $new_libs" ;; - esac - ;; - esac - ;; - esac - done - tmp_libs= - for deplib in $new_libs; do - case $deplib in - -L*) - case " $tmp_libs " in - *" $deplib "*) ;; - *) tmp_libs="$tmp_libs $deplib" ;; - esac - ;; - *) tmp_libs="$tmp_libs $deplib" ;; - esac - done - eval $var=\$tmp_libs - done # for var - fi - # Last step: remove runtime libs from dependency_libs - # (they stay in deplibs) - tmp_libs= - for i in $dependency_libs ; do - case " $predeps $postdeps $compiler_lib_search_path " in - *" $i "*) - i="" - ;; - esac - if test -n "$i" ; then - tmp_libs="$tmp_libs $i" - fi - done - dependency_libs=$tmp_libs - done # for pass - if test "$linkmode" = prog; then - dlfiles="$newdlfiles" - fi - if test "$linkmode" = prog || test "$linkmode" = lib; then - dlprefiles="$newdlprefiles" - fi - - case $linkmode in - oldlib) - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - func_warning "\`-dlopen' is ignored for archives" - fi - - case " $deplibs" in - *\ -l* | *\ -L*) - func_warning "\`-l' and \`-L' are ignored for archives" ;; - esac - - test -n "$rpath" && \ - func_warning "\`-rpath' is ignored for archives" - - test -n "$xrpath" && \ - func_warning "\`-R' is ignored for archives" - - test -n "$vinfo" && \ - func_warning "\`-version-info/-version-number' is ignored for archives" - - test -n "$release" && \ - func_warning "\`-release' is ignored for archives" - - test -n "$export_symbols$export_symbols_regex" && \ - func_warning "\`-export-symbols' is ignored for archives" - - # Now set the variables for building old libraries. - build_libtool_libs=no - oldlibs="$output" - objs="$objs$old_deplibs" - ;; - - lib) - # Make sure we only generate libraries of the form `libNAME.la'. 
- case $outputname in - lib*) - func_stripname 'lib' '.la' "$outputname" - name=$func_stripname_result - eval "shared_ext=\"$shrext_cmds\"" - eval "libname=\"$libname_spec\"" - ;; - *) - test "$module" = no && \ - func_fatal_help "libtool library \`$output' must begin with \`lib'" - - if test "$need_lib_prefix" != no; then - # Add the "lib" prefix for modules if required - func_stripname '' '.la' "$outputname" - name=$func_stripname_result - eval "shared_ext=\"$shrext_cmds\"" - eval "libname=\"$libname_spec\"" - else - func_stripname '' '.la' "$outputname" - libname=$func_stripname_result - fi - ;; - esac - - if test -n "$objs"; then - if test "$deplibs_check_method" != pass_all; then - func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" - else - echo - $ECHO "*** Warning: Linking the shared library $output against the non-libtool" - $ECHO "*** objects $objs is not portable!" - libobjs="$libobjs $objs" - fi - fi - - test "$dlself" != no && \ - func_warning "\`-dlopen self' is ignored for libtool libraries" - - set dummy $rpath - shift - test "$#" -gt 1 && \ - func_warning "ignoring multiple \`-rpath's for a libtool library" - - install_libdir="$1" - - oldlibs= - if test -z "$rpath"; then - if test "$build_libtool_libs" = yes; then - # Building a libtool convenience library. - # Some compilers have problems with a `.al' extension so - # convenience libraries should have the same extension an - # archive normally would. - oldlibs="$output_objdir/$libname.$libext $oldlibs" - build_libtool_libs=convenience - build_old_libs=yes - fi - - test -n "$vinfo" && \ - func_warning "\`-version-info/-version-number' is ignored for convenience libraries" - - test -n "$release" && \ - func_warning "\`-release' is ignored for convenience libraries" - else - - # Parse the version information argument. 
- save_ifs="$IFS"; IFS=':' - set dummy $vinfo 0 0 0 - shift - IFS="$save_ifs" - - test -n "$7" && \ - func_fatal_help "too many parameters to \`-version-info'" - - # convert absolute version numbers to libtool ages - # this retains compatibility with .la files and attempts - # to make the code below a bit more comprehensible - - case $vinfo_number in - yes) - number_major="$1" - number_minor="$2" - number_revision="$3" - # - # There are really only two kinds -- those that - # use the current revision as the major version - # and those that subtract age and use age as - # a minor version. But, then there is irix - # which has an extra 1 added just for fun - # - case $version_type in - darwin|linux|osf|windows|none) - func_arith $number_major + $number_minor - current=$func_arith_result - age="$number_minor" - revision="$number_revision" - ;; - freebsd-aout|freebsd-elf|qnx|sunos) - current="$number_major" - revision="$number_minor" - age="0" - ;; - irix|nonstopux) - func_arith $number_major + $number_minor - current=$func_arith_result - age="$number_minor" - revision="$number_minor" - lt_irix_increment=no - ;; - esac - ;; - no) - current="$1" - revision="$2" - age="$3" - ;; - esac - - # Check that each of the things are valid numbers. 
- case $current in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - func_error "CURRENT \`$current' must be a nonnegative integer" - func_fatal_error "\`$vinfo' is not valid version information" - ;; - esac - - case $revision in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - func_error "REVISION \`$revision' must be a nonnegative integer" - func_fatal_error "\`$vinfo' is not valid version information" - ;; - esac - - case $age in - 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; - *) - func_error "AGE \`$age' must be a nonnegative integer" - func_fatal_error "\`$vinfo' is not valid version information" - ;; - esac - - if test "$age" -gt "$current"; then - func_error "AGE \`$age' is greater than the current interface number \`$current'" - func_fatal_error "\`$vinfo' is not valid version information" - fi - - # Calculate the version variables. - major= - versuffix= - verstring= - case $version_type in - none) ;; - - darwin) - # Like Linux, but with the current version available in - # verstring for coding it into the library header - func_arith $current - $age - major=.$func_arith_result - versuffix="$major.$age.$revision" - # Darwin ld doesn't like 0 for these options... 
- func_arith $current + 1 - minor_current=$func_arith_result - xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" - verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" - ;; - - freebsd-aout) - major=".$current" - versuffix=".$current.$revision"; - ;; - - freebsd-elf) - major=".$current" - versuffix=".$current" - ;; - - irix | nonstopux) - if test "X$lt_irix_increment" = "Xno"; then - func_arith $current - $age - else - func_arith $current - $age + 1 - fi - major=$func_arith_result - - case $version_type in - nonstopux) verstring_prefix=nonstopux ;; - *) verstring_prefix=sgi ;; - esac - verstring="$verstring_prefix$major.$revision" - - # Add in all the interfaces that we are compatible with. - loop=$revision - while test "$loop" -ne 0; do - func_arith $revision - $loop - iface=$func_arith_result - func_arith $loop - 1 - loop=$func_arith_result - verstring="$verstring_prefix$major.$iface:$verstring" - done - - # Before this point, $major must not contain `.'. - major=.$major - versuffix="$major.$revision" - ;; - - linux) - func_arith $current - $age - major=.$func_arith_result - versuffix="$major.$age.$revision" - ;; - - osf) - func_arith $current - $age - major=.$func_arith_result - versuffix=".$current.$age.$revision" - verstring="$current.$age.$revision" - - # Add in all the interfaces that we are compatible with. - loop=$age - while test "$loop" -ne 0; do - func_arith $current - $loop - iface=$func_arith_result - func_arith $loop - 1 - loop=$func_arith_result - verstring="$verstring:${iface}.0" - done - - # Make executables depend on our current version. - verstring="$verstring:${current}.0" - ;; - - qnx) - major=".$current" - versuffix=".$current" - ;; - - sunos) - major=".$current" - versuffix=".$current.$revision" - ;; - - windows) - # Use '-' rather than '.', since we only want one - # extension on DOS 8.3 filesystems. 
- func_arith $current - $age - major=$func_arith_result - versuffix="-$major" - ;; - - *) - func_fatal_configuration "unknown library version type \`$version_type'" - ;; - esac - - # Clear the version info if we defaulted, and they specified a release. - if test -z "$vinfo" && test -n "$release"; then - major= - case $version_type in - darwin) - # we can't check for "0.0" in archive_cmds due to quoting - # problems, so we reset it completely - verstring= - ;; - *) - verstring="0.0" - ;; - esac - if test "$need_version" = no; then - versuffix= - else - versuffix=".0.0" - fi - fi - - # Remove version info from name if versioning should be avoided - if test "$avoid_version" = yes && test "$need_version" = no; then - major= - versuffix= - verstring="" - fi - - # Check to see if the archive will have undefined symbols. - if test "$allow_undefined" = yes; then - if test "$allow_undefined_flag" = unsupported; then - func_warning "undefined symbols not allowed in $host shared libraries" - build_libtool_libs=no - build_old_libs=yes - fi - else - # Don't allow undefined symbols. - allow_undefined_flag="$no_undefined_flag" - fi - - fi - - func_generate_dlsyms "$libname" "$libname" "yes" - libobjs="$libobjs $symfileobj" - test "X$libobjs" = "X " && libobjs= - - if test "$mode" != relink; then - # Remove our outputs, but don't remove object files since they - # may have been created when compiling PIC objects. - removelist= - tempremovelist=`$ECHO "$output_objdir/*"` - for p in $tempremovelist; do - case $p in - *.$objext | *.gcno) - ;; - $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) - if test "X$precious_files_regex" != "X"; then - if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 - then - continue - fi - fi - removelist="$removelist $p" - ;; - *) ;; - esac - done - test -n "$removelist" && \ - func_show_eval "${RM}r \$removelist" - fi - - # Now set the variables for building old libraries. 
- if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then - oldlibs="$oldlibs $output_objdir/$libname.$libext" - - # Transform .lo files to .o files. - oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` - fi - - # Eliminate all temporary directories. - #for path in $notinst_path; do - # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` - # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` - # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` - #done - - if test -n "$xrpath"; then - # If the user specified any rpath flags, then add them. - temp_xrpath= - for libdir in $xrpath; do - temp_xrpath="$temp_xrpath -R$libdir" - case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" ;; - esac - done - if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then - dependency_libs="$temp_xrpath $dependency_libs" - fi - fi - - # Make sure dlfiles contains only unique files that won't be dlpreopened - old_dlfiles="$dlfiles" - dlfiles= - for lib in $old_dlfiles; do - case " $dlprefiles $dlfiles " in - *" $lib "*) ;; - *) dlfiles="$dlfiles $lib" ;; - esac - done - - # Make sure dlprefiles contains only unique files - old_dlprefiles="$dlprefiles" - dlprefiles= - for lib in $old_dlprefiles; do - case "$dlprefiles " in - *" $lib "*) ;; - *) dlprefiles="$dlprefiles $lib" ;; - esac - done - - if test "$build_libtool_libs" = yes; then - if test -n "$rpath"; then - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) - # these systems don't actually have a c library (as such)! - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C library is in the System framework - deplibs="$deplibs System.ltframework" - ;; - *-*-netbsd*) - # Don't link with libc until the a.out ld.so is fixed. - ;; - *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) - # Do not include libc due to us having libc/libc_r. 
- ;; - *-*-sco3.2v5* | *-*-sco5v6*) - # Causes problems with __ctype - ;; - *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) - # Compiler inserts libc in the correct place for threads to work - ;; - *) - # Add libc to deplibs on all other systems if necessary. - if test "$build_libtool_need_lc" = "yes"; then - deplibs="$deplibs -lc" - fi - ;; - esac - fi - - # Transform deplibs into only deplibs that can be linked in shared. - name_save=$name - libname_save=$libname - release_save=$release - versuffix_save=$versuffix - major_save=$major - # I'm not sure if I'm treating the release correctly. I think - # release should show up in the -l (ie -lgmp5) so we don't want to - # add it in twice. Is that correct? - release="" - versuffix="" - major="" - newdeplibs= - droppeddeps=no - case $deplibs_check_method in - pass_all) - # Don't check for shared/static. Everything works. - # This might be a little naive. We might want to check - # whether the library exists or not. But this is on - # osf3 & osf4 and I'm not really sure... Just - # implementing what was already the behavior. - newdeplibs=$deplibs - ;; - test_compile) - # This code stresses the "libraries are programs" paradigm to its - # limits. Maybe even breaks it. We compile a program, linking it - # against the deplibs as a proxy for the library. Then we can check - # whether they linked in statically or dynamically with ldd. - $opt_dry_run || $RM conftest.c - cat > conftest.c </dev/null` - for potent_lib in $potential_libs; do - # Follow soft links. - if ls -lLd "$potent_lib" 2>/dev/null | - $GREP " -> " >/dev/null; then - continue - fi - # The statement above tries to avoid entering an - # endless loop below, in case of cyclic links. - # We might still enter an endless loop, since a link - # loop can be closed while we follow links, - # but so what? 
- potlib="$potent_lib" - while test -h "$potlib" 2>/dev/null; do - potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` - case $potliblink in - [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; - *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; - esac - done - if eval "$file_magic_cmd \"\$potlib\"" 2>/dev/null | - $SED -e 10q | - $EGREP "$file_magic_regex" > /dev/null; then - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - break 2 - fi - done - done - fi - if test -n "$a_deplib" ; then - droppeddeps=yes - echo - $ECHO "*** Warning: linker path does not have real file for library $a_deplib." - echo "*** I have the capability to make that library automatically link in when" - echo "*** you link to this library. But I can only do this if you have a" - echo "*** shared version of the library, which you do not appear to have" - echo "*** because I did check the linker path looking for a file starting" - if test -z "$potlib" ; then - $ECHO "*** with $libname but no candidates were found. (...for file magic test)" - else - $ECHO "*** with $libname and none of the candidates passed a file format test" - $ECHO "*** using a file magic. Last file checked: $potlib" - fi - fi - ;; - *) - # Add a -L argument. - newdeplibs="$newdeplibs $a_deplib" - ;; - esac - done # Gone through all deplibs. 
- ;; - match_pattern*) - set dummy $deplibs_check_method; shift - match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` - for a_deplib in $deplibs; do - case $a_deplib in - -l*) - func_stripname -l '' "$a_deplib" - name=$func_stripname_result - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $a_deplib "*) - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - ;; - esac - fi - if test -n "$a_deplib" ; then - eval "libname=\"$libname_spec\"" - for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do - potential_libs=`ls $i/$libname[.-]* 2>/dev/null` - for potent_lib in $potential_libs; do - potlib="$potent_lib" # see symlink-check above in file_magic test - if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ - $EGREP "$match_pattern_regex" > /dev/null; then - newdeplibs="$newdeplibs $a_deplib" - a_deplib="" - break 2 - fi - done - done - fi - if test -n "$a_deplib" ; then - droppeddeps=yes - echo - $ECHO "*** Warning: linker path does not have real file for library $a_deplib." - echo "*** I have the capability to make that library automatically link in when" - echo "*** you link to this library. But I can only do this if you have a" - echo "*** shared version of the library, which you do not appear to have" - echo "*** because I did check the linker path looking for a file starting" - if test -z "$potlib" ; then - $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" - else - $ECHO "*** with $libname and none of the candidates passed a file format test" - $ECHO "*** using a regex pattern. Last file checked: $potlib" - fi - fi - ;; - *) - # Add a -L argument. - newdeplibs="$newdeplibs $a_deplib" - ;; - esac - done # Gone through all deplibs. 
- ;; - none | unknown | *) - newdeplibs="" - tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - for i in $predeps $postdeps ; do - # can't use Xsed below, because $i might contain '/' - tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` - done - fi - case $tmp_deplibs in - *[!\ \ ]*) - echo - if test "X$deplibs_check_method" = "Xnone"; then - echo "*** Warning: inter-library dependencies are not supported in this platform." - else - echo "*** Warning: inter-library dependencies are not known to be supported." - fi - echo "*** All declared inter-library dependencies are being dropped." - droppeddeps=yes - ;; - esac - ;; - esac - versuffix=$versuffix_save - major=$major_save - release=$release_save - libname=$libname_save - name=$name_save - - case $host in - *-*-rhapsody* | *-*-darwin1.[012]) - # On Rhapsody replace the C library with the System framework - newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` - ;; - esac - - if test "$droppeddeps" = yes; then - if test "$module" = yes; then - echo - echo "*** Warning: libtool could not satisfy all declared inter-library" - $ECHO "*** dependencies of module $libname. Therefore, libtool will create" - echo "*** a static module, that should work as long as the dlopening" - echo "*** application is linked with the -dlopen flag." - if test -z "$global_symbol_pipe"; then - echo - echo "*** However, this would only work if libtool was able to extract symbol" - echo "*** lists from a program, using \`nm' or equivalent, but libtool could" - echo "*** not find such a program. So, this module is probably useless." - echo "*** \`nm' from GNU binutils and a full rebuild may help." 
- fi - if test "$build_old_libs" = no; then - oldlibs="$output_objdir/$libname.$libext" - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - else - echo "*** The inter-library dependencies that have been dropped here will be" - echo "*** automatically added whenever a program is linked with this library" - echo "*** or is declared to -dlopen it." - - if test "$allow_undefined" = no; then - echo - echo "*** Since this library must not contain undefined symbols," - echo "*** because either the platform does not support them or" - echo "*** it was explicitly requested with -no-undefined," - echo "*** libtool will only create a static version of it." - if test "$build_old_libs" = no; then - oldlibs="$output_objdir/$libname.$libext" - build_libtool_libs=module - build_old_libs=yes - else - build_libtool_libs=no - fi - fi - fi - fi - # Done checking deplibs! - deplibs=$newdeplibs - fi - # Time to change all our "foo.ltframework" stuff back to "-framework foo" - case $host in - *-*-darwin*) - newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - ;; - esac - - # move library search paths that coincide with paths to not yet - # installed libraries to the beginning of the library search list - new_libs= - for path in $notinst_path; do - case " $new_libs " in - *" -L$path/$objdir "*) ;; - *) - case " $deplibs " in - *" -L$path/$objdir "*) - new_libs="$new_libs -L$path/$objdir" ;; - esac - ;; - esac - done - for deplib in $deplibs; do - case $deplib in - -L*) - case " $new_libs " in - *" $deplib "*) ;; - *) new_libs="$new_libs $deplib" ;; - esac - ;; - *) new_libs="$new_libs $deplib" ;; - esac - done - deplibs="$new_libs" - - # All the library-specific variables (install_libdir is set above). 
- library_names= - old_library= - dlname= - - # Test again, we may have decided not to build it any more - if test "$build_libtool_libs" = yes; then - if test "$hardcode_into_libs" = yes; then - # Hardcode the library paths - hardcode_libdirs= - dep_rpath= - rpath="$finalize_rpath" - test "$mode" != relink && rpath="$compile_rpath$rpath" - for libdir in $rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval "flag=\"$hardcode_libdir_flag_spec\"" - dep_rpath="$dep_rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; - *) perm_rpath="$perm_rpath $libdir" ;; - esac - fi - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - if test -n "$hardcode_libdir_flag_spec_ld"; then - eval "dep_rpath=\"$hardcode_libdir_flag_spec_ld\"" - else - eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" - fi - fi - if test -n "$runpath_var" && test -n "$perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $perm_rpath; do - rpath="$rpath$dir:" - done - eval $runpath_var=\$rpath\$$runpath_var - export $runpath_var - fi - test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" - fi - - shlibpath="$finalize_shlibpath" - test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" - if test -n "$shlibpath"; then - eval $shlibpath_var=\$shlibpath\$$shlibpath_var - export $shlibpath_var - fi - - # Get the real and link names of the library. 
- eval "shared_ext=\"$shrext_cmds\"" - eval "library_names=\"$library_names_spec\"" - set dummy $library_names - shift - realname="$1" - shift - - if test -n "$soname_spec"; then - eval "soname=\"$soname_spec\"" - else - soname="$realname" - fi - if test -z "$dlname"; then - dlname=$soname - fi - - lib="$output_objdir/$realname" - linknames= - for link - do - linknames="$linknames $link" - done - - # Use standard objects if they are pic - test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` - test "X$libobjs" = "X " && libobjs= - - delfiles= - if test -n "$export_symbols" && test -n "$include_expsyms"; then - $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" - export_symbols="$output_objdir/$libname.uexp" - delfiles="$delfiles $export_symbols" - fi - - orig_export_symbols= - case $host_os in - cygwin* | mingw* | cegcc*) - if test -n "$export_symbols" && test -z "$export_symbols_regex"; then - # exporting using user supplied symfile - if test "x`$SED 1q $export_symbols`" != xEXPORTS; then - # and it's NOT already a .def file. Must figure out - # which of the given symbols are data symbols and tag - # them as such. So, trigger use of export_symbols_cmds. - # export_symbols gets reassigned inside the "prepare - # the list of exported symbols" if statement, so the - # include_expsyms logic still works. 
- orig_export_symbols="$export_symbols" - export_symbols= - always_export_symbols=yes - fi - fi - ;; - esac - - # Prepare the list of exported symbols - if test -z "$export_symbols"; then - if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then - func_verbose "generating symbol list for \`$libname.la'" - export_symbols="$output_objdir/$libname.exp" - $opt_dry_run || $RM $export_symbols - cmds=$export_symbols_cmds - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval "cmd=\"$cmd\"" - func_len " $cmd" - len=$func_len_result - if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then - func_show_eval "$cmd" 'exit $?' - skipped_export=false - else - # The command line is too long to execute in one step. - func_verbose "using reloadable object file for export list..." - skipped_export=: - # Break out early, otherwise skipped_export may be - # set to false by a later but shorter cmd. - break - fi - done - IFS="$save_ifs" - if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then - func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' - func_show_eval '$MV "${export_symbols}T" "$export_symbols"' - fi - fi - fi - - if test -n "$export_symbols" && test -n "$include_expsyms"; then - tmp_export_symbols="$export_symbols" - test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" - $opt_dry_run || $ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols" - fi - - if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then - # The given exports_symbols file has to be filtered, so filter it. - func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" - # FIXME: $output_objdir/$libname.filter potentially contains lots of - # 's' commands which not all seds can handle. GNU sed should be fine - # though. Also, the filter scales superlinearly with the number of - # global variables. 
join(1) would be nice here, but unfortunately - # isn't a blessed tool. - $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter - delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" - export_symbols=$output_objdir/$libname.def - $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols - fi - - tmp_deplibs= - for test_deplib in $deplibs; do - case " $convenience " in - *" $test_deplib "*) ;; - *) - tmp_deplibs="$tmp_deplibs $test_deplib" - ;; - esac - done - deplibs="$tmp_deplibs" - - if test -n "$convenience"; then - if test -n "$whole_archive_flag_spec" && - test "$compiler_needs_object" = yes && - test -z "$libobjs"; then - # extract the archives, so we have objects to list. - # TODO: could optimize this to just extract one archive. - whole_archive_flag_spec= - fi - if test -n "$whole_archive_flag_spec"; then - save_libobjs=$libobjs - eval "libobjs=\"\$libobjs $whole_archive_flag_spec\"" - test "X$libobjs" = "X " && libobjs= - else - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - func_extract_archives $gentop $convenience - libobjs="$libobjs $func_extract_archives_result" - test "X$libobjs" = "X " && libobjs= - fi - fi - - if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then - eval "flag=\"$thread_safe_flag_spec\"" - linker_flags="$linker_flags $flag" - fi - - # Make a backup of the uninstalled library when relinking - if test "$mode" = relink; then - $opt_dry_run || (cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U) || exit $? - fi - - # Do each of the archive commands. 
- if test "$module" = yes && test -n "$module_cmds" ; then - if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then - eval "test_cmds=\"$module_expsym_cmds\"" - cmds=$module_expsym_cmds - else - eval "test_cmds=\"$module_cmds\"" - cmds=$module_cmds - fi - else - if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - eval "test_cmds=\"$archive_expsym_cmds\"" - cmds=$archive_expsym_cmds - else - eval "test_cmds=\"$archive_cmds\"" - cmds=$archive_cmds - fi - fi - - if test "X$skipped_export" != "X:" && - func_len " $test_cmds" && - len=$func_len_result && - test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then - : - else - # The command line is too long to link in one step, link piecewise - # or, if using GNU ld and skipped_export is not :, use a linker - # script. - - # Save the value of $output and $libobjs because we want to - # use them later. If we have whole_archive_flag_spec, we - # want to use save_libobjs as it was before - # whole_archive_flag_spec was expanded, because we can't - # assume the linker understands whole_archive_flag_spec. - # This may have to be revisited, in case too many - # convenience libraries get linked in and end up exceeding - # the spec. - if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then - save_libobjs=$libobjs - fi - save_output=$output - func_basename "$output" - output_la=$func_basename_result - - # Clear the reloadable object creation command queue and - # initialize k to one. 
- test_cmds= - concat_cmds= - objlist= - last_robj= - k=1 - - if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then - output=${output_objdir}/${output_la}.lnkscript - func_verbose "creating GNU ld script: $output" - echo 'INPUT (' > $output - for obj in $save_libobjs - do - $ECHO "$obj" >> $output - done - echo ')' >> $output - delfiles="$delfiles $output" - elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then - output=${output_objdir}/${output_la}.lnk - func_verbose "creating linker input file list: $output" - : > $output - set x $save_libobjs - shift - firstobj= - if test "$compiler_needs_object" = yes; then - firstobj="$1 " - shift - fi - for obj - do - $ECHO "$obj" >> $output - done - delfiles="$delfiles $output" - output=$firstobj\"$file_list_spec$output\" - else - if test -n "$save_libobjs"; then - func_verbose "creating reloadable object files..." - output=$output_objdir/$output_la-${k}.$objext - eval "test_cmds=\"$reload_cmds\"" - func_len " $test_cmds" - len0=$func_len_result - len=$len0 - - # Loop over the list of objects to be linked. - for obj in $save_libobjs - do - func_len " $obj" - func_arith $len + $func_len_result - len=$func_arith_result - if test "X$objlist" = X || - test "$len" -lt "$max_cmd_len"; then - func_append objlist " $obj" - else - # The command $test_cmds is almost too long, add a - # command to the queue. - if test "$k" -eq 1 ; then - # The first file doesn't have a previous command to add. - reload_objs=$objlist - eval "concat_cmds=\"$reload_cmds\"" - else - # All subsequent reloadable object files will link in - # the last one created. 
- reload_objs="$objlist $last_robj" - eval "concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"" - fi - last_robj=$output_objdir/$output_la-${k}.$objext - func_arith $k + 1 - k=$func_arith_result - output=$output_objdir/$output_la-${k}.$objext - objlist=" $obj" - func_len " $last_robj" - func_arith $len0 + $func_len_result - len=$func_arith_result - fi - done - # Handle the remaining objects by creating one last - # reloadable object file. All subsequent reloadable object - # files will link in the last one created. - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - reload_objs="$objlist $last_robj" - eval "concat_cmds=\"\${concat_cmds}$reload_cmds\"" - if test -n "$last_robj"; then - eval "concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"" - fi - delfiles="$delfiles $output" - - else - output= - fi - - if ${skipped_export-false}; then - func_verbose "generating symbol list for \`$libname.la'" - export_symbols="$output_objdir/$libname.exp" - $opt_dry_run || $RM $export_symbols - libobjs=$output - # Append the command to create the export file. - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - eval "concat_cmds=\"\$concat_cmds$export_symbols_cmds\"" - if test -n "$last_robj"; then - eval "concat_cmds=\"\$concat_cmds~\$RM $last_robj\"" - fi - fi - - test -n "$save_libobjs" && - func_verbose "creating a temporary reloadable object file: $output" - - # Loop through the commands generated above and execute them. - save_ifs="$IFS"; IFS='~' - for cmd in $concat_cmds; do - IFS="$save_ifs" - $opt_silent || { - func_quote_for_expand "$cmd" - eval "func_echo $func_quote_for_expand_result" - } - $opt_dry_run || eval "$cmd" || { - lt_exit=$? 
- - # Restore the uninstalled library and exit - if test "$mode" = relink; then - ( cd "$output_objdir" && \ - $RM "${realname}T" && \ - $MV "${realname}U" "$realname" ) - fi - - exit $lt_exit - } - done - IFS="$save_ifs" - - if test -n "$export_symbols_regex" && ${skipped_export-false}; then - func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' - func_show_eval '$MV "${export_symbols}T" "$export_symbols"' - fi - fi - - if ${skipped_export-false}; then - if test -n "$export_symbols" && test -n "$include_expsyms"; then - tmp_export_symbols="$export_symbols" - test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" - $opt_dry_run || $ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols" - fi - - if test -n "$orig_export_symbols"; then - # The given exports_symbols file has to be filtered, so filter it. - func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" - # FIXME: $output_objdir/$libname.filter potentially contains lots of - # 's' commands which not all seds can handle. GNU sed should be fine - # though. Also, the filter scales superlinearly with the number of - # global variables. join(1) would be nice here, but unfortunately - # isn't a blessed tool. - $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter - delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" - export_symbols=$output_objdir/$libname.def - $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols - fi - fi - - libobjs=$output - # Restore the value of output. - output=$save_output - - if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then - eval "libobjs=\"\$libobjs $whole_archive_flag_spec\"" - test "X$libobjs" = "X " && libobjs= - fi - # Expand the library linking commands again to reset the - # value of $libobjs for piecewise linking. - - # Do each of the archive commands. 
- if test "$module" = yes && test -n "$module_cmds" ; then - if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then - cmds=$module_expsym_cmds - else - cmds=$module_cmds - fi - else - if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then - cmds=$archive_expsym_cmds - else - cmds=$archive_cmds - fi - fi - fi - - if test -n "$delfiles"; then - # Append the command to remove temporary files to $cmds. - eval "cmds=\"\$cmds~\$RM $delfiles\"" - fi - - # Add any objects from preloaded convenience libraries - if test -n "$dlprefiles"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - func_extract_archives $gentop $dlprefiles - libobjs="$libobjs $func_extract_archives_result" - test "X$libobjs" = "X " && libobjs= - fi - - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" - eval "cmd=\"$cmd\"" - $opt_silent || { - func_quote_for_expand "$cmd" - eval "func_echo $func_quote_for_expand_result" - } - $opt_dry_run || eval "$cmd" || { - lt_exit=$? - - # Restore the uninstalled library and exit - if test "$mode" = relink; then - ( cd "$output_objdir" && \ - $RM "${realname}T" && \ - $MV "${realname}U" "$realname" ) - fi - - exit $lt_exit - } - done - IFS="$save_ifs" - - # Restore the uninstalled library and exit - if test "$mode" = relink; then - $opt_dry_run || (cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname) || exit $? - - if test -n "$convenience"; then - if test -z "$whole_archive_flag_spec"; then - func_show_eval '${RM}r "$gentop"' - fi - fi - - exit $EXIT_SUCCESS - fi - - # Create links to the real library. - for linkname in $linknames; do - if test "$realname" != "$linkname"; then - func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' - fi - done - - # If -module or -export-dynamic was specified, set the dlname. 
- if test "$module" = yes || test "$export_dynamic" = yes; then - # On all known operating systems, these are identical. - dlname="$soname" - fi - fi - ;; - - obj) - if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then - func_warning "\`-dlopen' is ignored for objects" - fi - - case " $deplibs" in - *\ -l* | *\ -L*) - func_warning "\`-l' and \`-L' are ignored for objects" ;; - esac - - test -n "$rpath" && \ - func_warning "\`-rpath' is ignored for objects" - - test -n "$xrpath" && \ - func_warning "\`-R' is ignored for objects" - - test -n "$vinfo" && \ - func_warning "\`-version-info' is ignored for objects" - - test -n "$release" && \ - func_warning "\`-release' is ignored for objects" - - case $output in - *.lo) - test -n "$objs$old_deplibs" && \ - func_fatal_error "cannot build library object \`$output' from non-libtool objects" - - libobj=$output - func_lo2o "$libobj" - obj=$func_lo2o_result - ;; - *) - libobj= - obj="$output" - ;; - esac - - # Delete the old objects. - $opt_dry_run || $RM $obj $libobj - - # Objects from convenience libraries. This assumes - # single-version convenience libraries. Whenever we create - # different ones for PIC/non-PIC, this we'll have to duplicate - # the extraction. - reload_conv_objs= - gentop= - # reload_cmds runs $LD directly, so let us get rid of - # -Wl from whole_archive_flag_spec and hope we can get by with - # turning comma into space.. - wl= - - if test -n "$convenience"; then - if test -n "$whole_archive_flag_spec"; then - eval "tmp_whole_archive_flags=\"$whole_archive_flag_spec\"" - reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` - else - gentop="$output_objdir/${obj}x" - generated="$generated $gentop" - - func_extract_archives $gentop $convenience - reload_conv_objs="$reload_objs $func_extract_archives_result" - fi - fi - - # Create the old-style object. 
- reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test - - output="$obj" - func_execute_cmds "$reload_cmds" 'exit $?' - - # Exit if we aren't doing a library object file. - if test -z "$libobj"; then - if test -n "$gentop"; then - func_show_eval '${RM}r "$gentop"' - fi - - exit $EXIT_SUCCESS - fi - - if test "$build_libtool_libs" != yes; then - if test -n "$gentop"; then - func_show_eval '${RM}r "$gentop"' - fi - - # Create an invalid libtool object if no PIC, so that we don't - # accidentally link it into a program. - # $show "echo timestamp > $libobj" - # $opt_dry_run || echo timestamp > $libobj || exit $? - exit $EXIT_SUCCESS - fi - - if test -n "$pic_flag" || test "$pic_mode" != default; then - # Only do commands if we really have different PIC objects. - reload_objs="$libobjs $reload_conv_objs" - output="$libobj" - func_execute_cmds "$reload_cmds" 'exit $?' - fi - - if test -n "$gentop"; then - func_show_eval '${RM}r "$gentop"' - fi - - exit $EXIT_SUCCESS - ;; - - prog) - case $host in - *cygwin*) func_stripname '' '.exe' "$output" - output=$func_stripname_result.exe;; - esac - test -n "$vinfo" && \ - func_warning "\`-version-info' is ignored for programs" - - test -n "$release" && \ - func_warning "\`-release' is ignored for programs" - - test "$preload" = yes \ - && test "$dlopen_support" = unknown \ - && test "$dlopen_self" = unknown \ - && test "$dlopen_self_static" = unknown && \ - func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." 
- - case $host in - *-*-rhapsody* | *-*-darwin1.[012]) - # On Rhapsody replace the C library is the System framework - compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` - finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` - ;; - esac - - case $host in - *-*-darwin*) - # Don't allow lazy linking, it breaks C++ global constructors - # But is supposedly fixed on 10.4 or later (yay!). - if test "$tagname" = CXX ; then - case ${MACOSX_DEPLOYMENT_TARGET-10.0} in - 10.[0123]) - compile_command="$compile_command ${wl}-bind_at_load" - finalize_command="$finalize_command ${wl}-bind_at_load" - ;; - esac - fi - # Time to change all our "foo.ltframework" stuff back to "-framework foo" - compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` - ;; - esac - - - # move library search paths that coincide with paths to not yet - # installed libraries to the beginning of the library search list - new_libs= - for path in $notinst_path; do - case " $new_libs " in - *" -L$path/$objdir "*) ;; - *) - case " $compile_deplibs " in - *" -L$path/$objdir "*) - new_libs="$new_libs -L$path/$objdir" ;; - esac - ;; - esac - done - for deplib in $compile_deplibs; do - case $deplib in - -L*) - case " $new_libs " in - *" $deplib "*) ;; - *) new_libs="$new_libs $deplib" ;; - esac - ;; - *) new_libs="$new_libs $deplib" ;; - esac - done - compile_deplibs="$new_libs" - - - compile_command="$compile_command $compile_deplibs" - finalize_command="$finalize_command $finalize_deplibs" - - if test -n "$rpath$xrpath"; then - # If the user specified any rpath flags, then add them. - for libdir in $rpath $xrpath; do - # This is the magic to use -rpath. 
- case "$finalize_rpath " in - *" $libdir "*) ;; - *) finalize_rpath="$finalize_rpath $libdir" ;; - esac - done - fi - - # Now hardcode the library paths - rpath= - hardcode_libdirs= - for libdir in $compile_rpath $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. - case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval "flag=\"$hardcode_libdir_flag_spec\"" - rpath="$rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; - *) perm_rpath="$perm_rpath $libdir" ;; - esac - fi - case $host in - *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) - testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` - case :$dllsearchpath: in - *":$libdir:"*) ;; - ::) dllsearchpath=$libdir;; - *) dllsearchpath="$dllsearchpath:$libdir";; - esac - case :$dllsearchpath: in - *":$testbindir:"*) ;; - ::) dllsearchpath=$testbindir;; - *) dllsearchpath="$dllsearchpath:$testbindir";; - esac - ;; - esac - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - eval "rpath=\" $hardcode_libdir_flag_spec\"" - fi - compile_rpath="$rpath" - - rpath= - hardcode_libdirs= - for libdir in $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else - # Just accumulate the unique libdirs. 
- case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) - hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" - ;; - esac - fi - else - eval "flag=\"$hardcode_libdir_flag_spec\"" - rpath="$rpath $flag" - fi - elif test -n "$runpath_var"; then - case "$finalize_perm_rpath " in - *" $libdir "*) ;; - *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; - esac - fi - done - # Substitute the hardcoded libdirs into the rpath. - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - eval "rpath=\" $hardcode_libdir_flag_spec\"" - fi - finalize_rpath="$rpath" - - if test -n "$libobjs" && test "$build_old_libs" = yes; then - # Transform all the library objects into standard objects. - compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` - finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` - fi - - func_generate_dlsyms "$outputname" "@PROGRAM@" "no" - - # template prelinking step - if test -n "$prelink_cmds"; then - func_execute_cmds "$prelink_cmds" 'exit $?' - fi - - wrappers_required=yes - case $host in - *cegcc* | *mingw32ce*) - # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. - wrappers_required=no - ;; - *cygwin* | *mingw* ) - if test "$build_libtool_libs" != yes; then - wrappers_required=no - fi - ;; - *) - if test "$need_relink" = no || test "$build_libtool_libs" != yes; then - wrappers_required=no - fi - ;; - esac - if test "$wrappers_required" = no; then - # Replace the output file specification. - compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` - link_command="$compile_command$compile_rpath" - - # We have no uninstalled library dependencies, so finalize right now. - exit_status=0 - func_show_eval "$link_command" 'exit_status=$?' - - # Delete the generated files. 
- if test -f "$output_objdir/${outputname}S.${objext}"; then - func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' - fi - - exit $exit_status - fi - - if test -n "$compile_shlibpath$finalize_shlibpath"; then - compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" - fi - if test -n "$finalize_shlibpath"; then - finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" - fi - - compile_var= - finalize_var= - if test -n "$runpath_var"; then - if test -n "$perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $perm_rpath; do - rpath="$rpath$dir:" - done - compile_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi - if test -n "$finalize_perm_rpath"; then - # We should set the runpath_var. - rpath= - for dir in $finalize_perm_rpath; do - rpath="$rpath$dir:" - done - finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi - fi - - if test "$no_install" = yes; then - # We don't need to create a wrapper script. - link_command="$compile_var$compile_command$compile_rpath" - # Replace the output file specification. - link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` - # Delete the old output file. - $opt_dry_run || $RM $output - # Link the executable and exit - func_show_eval "$link_command" 'exit $?' 
- exit $EXIT_SUCCESS - fi - - if test "$hardcode_action" = relink; then - # Fast installation is not supported - link_command="$compile_var$compile_command$compile_rpath" - relink_command="$finalize_var$finalize_command$finalize_rpath" - - func_warning "this platform does not like uninstalled shared libraries" - func_warning "\`$output' will be relinked during installation" - else - if test "$fast_install" != no; then - link_command="$finalize_var$compile_command$finalize_rpath" - if test "$fast_install" = yes; then - relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` - else - # fast_install is set to needless - relink_command= - fi - else - link_command="$compile_var$compile_command$compile_rpath" - relink_command="$finalize_var$finalize_command$finalize_rpath" - fi - fi - - # Replace the output file specification. - link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` - - # Delete the old output files. - $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname - - func_show_eval "$link_command" 'exit $?' - - # Now create the wrapper script. - func_verbose "creating $output" - - # Quote the relink command for shipping. - if test -n "$relink_command"; then - # Preserve any variables that may affect compiler behavior - for var in $variables_saved_for_relink; do - if eval test -z \"\${$var+set}\"; then - relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" - elif eval var_value=\$$var; test -z "$var_value"; then - relink_command="$var=; export $var; $relink_command" - else - func_quote_for_eval "$var_value" - relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" - fi - done - relink_command="(cd `pwd`; $relink_command)" - relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` - fi - - # Only actually do things if not in dry run mode. 
- $opt_dry_run || { - # win32 will think the script is a binary if it has - # a .exe suffix, so we strip it off here. - case $output in - *.exe) func_stripname '' '.exe' "$output" - output=$func_stripname_result ;; - esac - # test for cygwin because mv fails w/o .exe extensions - case $host in - *cygwin*) - exeext=.exe - func_stripname '' '.exe' "$outputname" - outputname=$func_stripname_result ;; - *) exeext= ;; - esac - case $host in - *cygwin* | *mingw* ) - func_dirname_and_basename "$output" "" "." - output_name=$func_basename_result - output_path=$func_dirname_result - cwrappersource="$output_path/$objdir/lt-$output_name.c" - cwrapper="$output_path/$output_name.exe" - $RM $cwrappersource $cwrapper - trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 - - func_emit_cwrapperexe_src > $cwrappersource - - # The wrapper executable is built using the $host compiler, - # because it contains $host paths and files. If cross- - # compiling, it, like the target executable, must be - # executed on the $host or under an emulation environment. - $opt_dry_run || { - $LTCC $LTCFLAGS -o $cwrapper $cwrappersource - $STRIP $cwrapper - } - - # Now, create the wrapper script for func_source use: - func_ltwrapper_scriptname $cwrapper - $RM $func_ltwrapper_scriptname_result - trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 - $opt_dry_run || { - # note: this script will not be executed, so do not chmod. - if test "x$build" = "x$host" ; then - $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result - else - func_emit_wrapper no > $func_ltwrapper_scriptname_result - fi - } - ;; - * ) - $RM $output - trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 - - func_emit_wrapper no > $output - chmod +x $output - ;; - esac - } - exit $EXIT_SUCCESS - ;; - esac - - # See if we need to build an old-fashioned archive. 
- for oldlib in $oldlibs; do - - if test "$build_libtool_libs" = convenience; then - oldobjs="$libobjs_save $symfileobj" - addlibs="$convenience" - build_libtool_libs=no - else - if test "$build_libtool_libs" = module; then - oldobjs="$libobjs_save" - build_libtool_libs=no - else - oldobjs="$old_deplibs $non_pic_objects" - if test "$preload" = yes && test -f "$symfileobj"; then - oldobjs="$oldobjs $symfileobj" - fi - fi - addlibs="$old_convenience" - fi - - if test -n "$addlibs"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - func_extract_archives $gentop $addlibs - oldobjs="$oldobjs $func_extract_archives_result" - fi - - # Do each command in the archive commands. - if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then - cmds=$old_archive_from_new_cmds - else - - # Add any objects from preloaded convenience libraries - if test -n "$dlprefiles"; then - gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - - func_extract_archives $gentop $dlprefiles - oldobjs="$oldobjs $func_extract_archives_result" - fi - - # POSIX demands no paths to be encoded in archives. We have - # to avoid creating archives with duplicate basenames if we - # might have to extract them afterwards, e.g., when creating a - # static archive out of a convenience library, or when linking - # the entirety of a libtool archive into another (currently - # not supported by libtool). - if (for obj in $oldobjs - do - func_basename "$obj" - $ECHO "$func_basename_result" - done | sort | sort -uc >/dev/null 2>&1); then - : - else - echo "copying selected object files to avoid basename conflicts..." 
- gentop="$output_objdir/${outputname}x" - generated="$generated $gentop" - func_mkdir_p "$gentop" - save_oldobjs=$oldobjs - oldobjs= - counter=1 - for obj in $save_oldobjs - do - func_basename "$obj" - objbase="$func_basename_result" - case " $oldobjs " in - " ") oldobjs=$obj ;; - *[\ /]"$objbase "*) - while :; do - # Make sure we don't pick an alternate name that also - # overlaps. - newobj=lt$counter-$objbase - func_arith $counter + 1 - counter=$func_arith_result - case " $oldobjs " in - *[\ /]"$newobj "*) ;; - *) if test ! -f "$gentop/$newobj"; then break; fi ;; - esac - done - func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" - oldobjs="$oldobjs $gentop/$newobj" - ;; - *) oldobjs="$oldobjs $obj" ;; - esac - done - fi - eval "cmds=\"$old_archive_cmds\"" - - func_len " $cmds" - len=$func_len_result - if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then - cmds=$old_archive_cmds - else - # the command line is too long to link in one step, link in parts - func_verbose "using piecewise archive linking..." - save_RANLIB=$RANLIB - RANLIB=: - objlist= - concat_cmds= - save_oldobjs=$oldobjs - oldobjs= - # Is there a better way of finding the last object in the list? 
- for obj in $save_oldobjs - do - last_oldobj=$obj - done - eval "test_cmds=\"$old_archive_cmds\"" - func_len " $test_cmds" - len0=$func_len_result - len=$len0 - for obj in $save_oldobjs - do - func_len " $obj" - func_arith $len + $func_len_result - len=$func_arith_result - func_append objlist " $obj" - if test "$len" -lt "$max_cmd_len"; then - : - else - # the above command should be used before it gets too long - oldobjs=$objlist - if test "$obj" = "$last_oldobj" ; then - RANLIB=$save_RANLIB - fi - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - eval "concat_cmds=\"\${concat_cmds}$old_archive_cmds\"" - objlist= - len=$len0 - fi - done - RANLIB=$save_RANLIB - oldobjs=$objlist - if test "X$oldobjs" = "X" ; then - eval "cmds=\"\$concat_cmds\"" - else - eval "cmds=\"\$concat_cmds~\$old_archive_cmds\"" - fi - fi - fi - func_execute_cmds "$cmds" 'exit $?' - done - - test -n "$generated" && \ - func_show_eval "${RM}r$generated" - - # Now create the libtool archive. - case $output in - *.la) - old_library= - test "$build_old_libs" = yes && old_library="$libname.$libext" - func_verbose "creating $output" - - # Preserve any variables that may affect compiler behavior - for var in $variables_saved_for_relink; do - if eval test -z \"\${$var+set}\"; then - relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" - elif eval var_value=\$$var; test -z "$var_value"; then - relink_command="$var=; export $var; $relink_command" - else - func_quote_for_eval "$var_value" - relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" - fi - done - # Quote the link command for shipping. - relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" - relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` - if test "$hardcode_automatic" = yes ; then - relink_command= - fi - - # Only create the output if not a dry run. 
- $opt_dry_run || { - for installed in no yes; do - if test "$installed" = yes; then - if test -z "$install_libdir"; then - break - fi - output="$output_objdir/$outputname"i - # Replace all uninstalled libtool libraries with the installed ones - newdependency_libs= - for deplib in $dependency_libs; do - case $deplib in - *.la) - func_basename "$deplib" - name="$func_basename_result" - libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - test -z "$libdir" && \ - func_fatal_error "\`$deplib' is not a valid libtool archive" - newdependency_libs="$newdependency_libs $libdir/$name" - ;; - *) newdependency_libs="$newdependency_libs $deplib" ;; - esac - done - dependency_libs="$newdependency_libs" - newdlfiles= - - for lib in $dlfiles; do - case $lib in - *.la) - func_basename "$lib" - name="$func_basename_result" - libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` - test -z "$libdir" && \ - func_fatal_error "\`$lib' is not a valid libtool archive" - newdlfiles="$newdlfiles $libdir/$name" - ;; - *) newdlfiles="$newdlfiles $lib" ;; - esac - done - dlfiles="$newdlfiles" - newdlprefiles= - for lib in $dlprefiles; do - case $lib in - *.la) - # Only pass preopened files to the pseudo-archive (for - # eventual linking with the app. 
that links it) if we - # didn't already link the preopened objects directly into - # the library: - func_basename "$lib" - name="$func_basename_result" - libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` - test -z "$libdir" && \ - func_fatal_error "\`$lib' is not a valid libtool archive" - newdlprefiles="$newdlprefiles $libdir/$name" - ;; - esac - done - dlprefiles="$newdlprefiles" - else - newdlfiles= - for lib in $dlfiles; do - case $lib in - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac - newdlfiles="$newdlfiles $abs" - done - dlfiles="$newdlfiles" - newdlprefiles= - for lib in $dlprefiles; do - case $lib in - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac - newdlprefiles="$newdlprefiles $abs" - done - dlprefiles="$newdlprefiles" - fi - $RM $output - # place dlname in correct position for cygwin - # In fact, it would be nice if we could use this code for all target - # systems that can't hard-code library paths into their executables - # and that have no shared library path variable independent of PATH, - # but it turns out we can't easily determine that from inspecting - # libtool variables, so we have to hard-code the OSs to which it - # applies here; at the moment, that means platforms that use the PE - # object format with DLL files. See the long comment at the top of - # tests/bindir.at for full details. - tdlname=$dlname - case $host,$output,$installed,$module,$dlname in - *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) - # If a -bindir argument was supplied, place the dll there. - if test "x$bindir" != x ; - then - func_relative_path "$install_libdir" "$bindir" - tdlname=$func_relative_path_result$dlname - else - # Otherwise fall back on heuristic. - tdlname=../bin/$dlname - fi - ;; - esac - $ECHO > $output "\ -# $outputname - a libtool library file -# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION -# -# Please DO NOT delete this file! 
-# It is necessary for linking the library. - -# The name that we can dlopen(3). -dlname='$tdlname' - -# Names of this library. -library_names='$library_names' - -# The name of the static archive. -old_library='$old_library' - -# Linker flags that can not go in dependency_libs. -inherited_linker_flags='$new_inherited_linker_flags' - -# Libraries that this one depends upon. -dependency_libs='$dependency_libs' - -# Names of additional weak libraries provided by this library -weak_library_names='$weak_libs' - -# Version information for $libname. -current=$current -age=$age -revision=$revision - -# Is this an already installed library? -installed=$installed - -# Should we warn about portability when linking against -modules? -shouldnotlink=$module - -# Files to dlopen/dlpreopen -dlopen='$dlfiles' -dlpreopen='$dlprefiles' - -# Directory that this library needs to be installed in: -libdir='$install_libdir'" - if test "$installed" = no && test "$need_relink" = yes; then - $ECHO >> $output "\ -relink_command=\"$relink_command\"" - fi - done - } - - # Do a symbolic link so that the libtool archive can be found in - # LD_LIBRARY_PATH before the program is installed. - func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' - ;; - esac - exit $EXIT_SUCCESS -} - -{ test "$mode" = link || test "$mode" = relink; } && - func_mode_link ${1+"$@"} - - -# func_mode_uninstall arg... -func_mode_uninstall () -{ - $opt_debug - RM="$nonopt" - files= - rmforce= - exit_status=0 - - # This variable tells wrapper scripts just to set variables rather - # than running their programs. - libtool_install_magic="$magic" - - for arg - do - case $arg in - -f) RM="$RM $arg"; rmforce=yes ;; - -*) RM="$RM $arg" ;; - *) files="$files $arg" ;; - esac - done - - test -z "$RM" && \ - func_fatal_help "you must specify an RM program" - - rmdirs= - - origobjdir="$objdir" - for file in $files; do - func_dirname "$file" "" "." 
- dir="$func_dirname_result" - if test "X$dir" = X.; then - objdir="$origobjdir" - else - objdir="$dir/$origobjdir" - fi - func_basename "$file" - name="$func_basename_result" - test "$mode" = uninstall && objdir="$dir" - - # Remember objdir for removal later, being careful to avoid duplicates - if test "$mode" = clean; then - case " $rmdirs " in - *" $objdir "*) ;; - *) rmdirs="$rmdirs $objdir" ;; - esac - fi - - # Don't error if the file doesn't exist and rm -f was used. - if { test -L "$file"; } >/dev/null 2>&1 || - { test -h "$file"; } >/dev/null 2>&1 || - test -f "$file"; then - : - elif test -d "$file"; then - exit_status=1 - continue - elif test "$rmforce" = yes; then - continue - fi - - rmfiles="$file" - - case $name in - *.la) - # Possibly a libtool archive, so verify it. - if func_lalib_p "$file"; then - func_source $dir/$name - - # Delete the libtool libraries and symlinks. - for n in $library_names; do - rmfiles="$rmfiles $objdir/$n" - done - test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" - - case "$mode" in - clean) - case " $library_names " in - # " " in the beginning catches empty $dlname - *" $dlname "*) ;; - *) rmfiles="$rmfiles $objdir/$dlname" ;; - esac - test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" - ;; - uninstall) - if test -n "$library_names"; then - # Do each command in the postuninstall commands. - func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' - fi - - if test -n "$old_library"; then - # Do each command in the old_postuninstall commands. - func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' - fi - # FIXME: should reinstall the best remaining shared library. - ;; - esac - fi - ;; - - *.lo) - # Possibly a libtool object, so verify it. - if func_lalib_p "$file"; then - - # Read the .lo file - func_source $dir/$name - - # Add PIC object to the list of files to remove. 
- if test -n "$pic_object" && - test "$pic_object" != none; then - rmfiles="$rmfiles $dir/$pic_object" - fi - - # Add non-PIC object to the list of files to remove. - if test -n "$non_pic_object" && - test "$non_pic_object" != none; then - rmfiles="$rmfiles $dir/$non_pic_object" - fi - fi - ;; - - *) - if test "$mode" = clean ; then - noexename=$name - case $file in - *.exe) - func_stripname '' '.exe' "$file" - file=$func_stripname_result - func_stripname '' '.exe' "$name" - noexename=$func_stripname_result - # $file with .exe has already been added to rmfiles, - # add $file without .exe - rmfiles="$rmfiles $file" - ;; - esac - # Do a test to see if this is a libtool program. - if func_ltwrapper_p "$file"; then - if func_ltwrapper_executable_p "$file"; then - func_ltwrapper_scriptname "$file" - relink_command= - func_source $func_ltwrapper_scriptname_result - rmfiles="$rmfiles $func_ltwrapper_scriptname_result" - else - relink_command= - func_source $dir/$noexename - fi - - # note $name still contains .exe if it was in $file originally - # as does the version of $file that was added into $rmfiles - rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" - if test "$fast_install" = yes && test -n "$relink_command"; then - rmfiles="$rmfiles $objdir/lt-$name" - fi - if test "X$noexename" != "X$name" ; then - rmfiles="$rmfiles $objdir/lt-${noexename}.c" - fi - fi - fi - ;; - esac - func_show_eval "$RM $rmfiles" 'exit_status=1' - done - objdir="$origobjdir" - - # Try to remove the ${objdir}s in the directories where we deleted files - for dir in $rmdirs; do - if test -d "$dir"; then - func_show_eval "rmdir $dir >/dev/null 2>&1" - fi - done - - exit $exit_status -} - -{ test "$mode" = uninstall || test "$mode" = clean; } && - func_mode_uninstall ${1+"$@"} - -test -z "$mode" && { - help="$generic_help" - func_fatal_help "you must specify a MODE" -} - -test -z "$exec_cmd" && \ - func_fatal_help "invalid operation mode \`$mode'" - -if test -n "$exec_cmd"; then - eval 
exec "$exec_cmd" - exit $EXIT_FAILURE -fi - -exit $exit_status - - -# The TAGs below are defined such that we never get into a situation -# in which we disable both kinds of libraries. Given conflicting -# choices, we go for a static library, that is the most portable, -# since we can't tell whether shared libraries were disabled because -# the user asked for that or because the platform doesn't support -# them. This is particularly important on AIX, because we don't -# support having both static and shared libraries enabled at the same -# time on that platform, so we default to a shared-only configuration. -# If a disable-shared tag is given, we'll fallback to a static-only -# configuration. But we'll never go from static-only to shared-only. - -# ### BEGIN LIBTOOL TAG CONFIG: disable-shared -build_libtool_libs=no -build_old_libs=yes -# ### END LIBTOOL TAG CONFIG: disable-shared - -# ### BEGIN LIBTOOL TAG CONFIG: disable-static -build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` -# ### END LIBTOOL TAG CONFIG: disable-static - -# Local Variables: -# mode:shell-script -# sh-indentation:2 -# End: -# vi:sw=2 - diff --git a/src/libbacktrace/macho.c b/src/libbacktrace/macho.c deleted file mode 100644 index 9af14e724b40..000000000000 --- a/src/libbacktrace/macho.c +++ /dev/null @@ -1,1416 +0,0 @@ -/* macho.c -- Get debug data from an Mach-O file for backtraces. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by John Colanduoni. - - Pending upstream pull request: - https://github.com/ianlancetaylor/libbacktrace/pull/2 - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -/* We can't use autotools to detect the pointer width of our program because - we may be building a fat Mach-O file containing both 32-bit and 64-bit - variants. However Mach-O runs a limited set of platforms so detection - via preprocessor is not difficult. 
*/ - -#if defined(__MACH__) -#if defined(__LP64__) -#define BACKTRACE_BITS 64 -#else -#define BACKTRACE_BITS 32 -#endif -#else -#error Attempting to build Mach-O support on incorrect platform -#endif - -#if defined(__x86_64__) -#define NATIVE_CPU_TYPE CPU_TYPE_X86_64 -#elif defined(__i386__) -#define NATIVE_CPU_TYPE CPU_TYPE_X86 -#elif defined(__aarch64__) -#define NATIVE_CPU_TYPE CPU_TYPE_ARM64 -#elif defined(__arm__) -#define NATIVE_CPU_TYPE CPU_TYPE_ARM -#else -#error Could not detect native Mach-O cpu_type_t -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -struct macho_commands_view -{ - struct backtrace_view view; - uint32_t commands_count; - uint32_t commands_total_size; - int bytes_swapped; - size_t base_offset; -}; - -enum debug_section -{ - DEBUG_INFO, - DEBUG_LINE, - DEBUG_ABBREV, - DEBUG_RANGES, - DEBUG_STR, - DEBUG_MAX -}; - -static const char *const debug_section_names[DEBUG_MAX] = - { - "__debug_info", - "__debug_line", - "__debug_abbrev", - "__debug_ranges", - "__debug_str" - }; - -struct found_dwarf_section -{ - uint32_t file_offset; - uintptr_t file_size; - const unsigned char *data; -}; - -/* Mach-O symbols don't have a length. As a result we have to infer it - by sorting the symbol addresses for each image and recording the - memory range attributed to each image. 
*/ -struct macho_symbol -{ - uintptr_t addr; - size_t size; - const char *name; -}; - -struct macho_syminfo_data -{ - struct macho_syminfo_data *next; - struct macho_symbol *symbols; - size_t symbol_count; - uintptr_t min_addr; - uintptr_t max_addr; -}; - -uint16_t -macho_file_to_host_u16 (int file_bytes_swapped, uint16_t input) -{ - if (file_bytes_swapped) - return (input >> 8) | (input << 8); - else - return input; -} - -uint32_t -macho_file_to_host_u32 (int file_bytes_swapped, uint32_t input) -{ - if (file_bytes_swapped) - { - return ((input >> 24) & 0x000000FF) - | ((input >> 8) & 0x0000FF00) - | ((input << 8) & 0x00FF0000) - | ((input << 24) & 0xFF000000); - } - else - { - return input; - } -} - -uint64_t -macho_file_to_host_u64 (int file_bytes_swapped, uint64_t input) -{ - if (file_bytes_swapped) - { - return macho_file_to_host_u32 (file_bytes_swapped, - (uint32_t) (input >> 32)) - | (((uint64_t) macho_file_to_host_u32 (file_bytes_swapped, - (uint32_t) input)) << 32); - } - else - { - return input; - } -} - -#if BACKTRACE_BITS == 64 -#define macho_file_to_host_usize macho_file_to_host_u64 -typedef struct mach_header_64 mach_header_native_t; -#define LC_SEGMENT_NATIVE LC_SEGMENT_64 -typedef struct segment_command_64 segment_command_native_t; -typedef struct nlist_64 nlist_native_t; -typedef struct section_64 section_native_t; -#else /* BACKTRACE_BITS == 32 */ -#define macho_file_to_host_usize macho_file_to_host_u32 -typedef struct mach_header mach_header_native_t; -#define LC_SEGMENT_NATIVE LC_SEGMENT -typedef struct segment_command segment_command_native_t; -typedef struct nlist nlist_native_t; -typedef struct section section_native_t; -#endif - -// Gets a view into a Mach-O image, taking any slice offset into account -int -macho_get_view (struct backtrace_state *state, int descriptor, - off_t offset, size_t size, - backtrace_error_callback error_callback, - void *data, struct macho_commands_view *commands_view, - struct backtrace_view *view) -{ - return 
backtrace_get_view (state, descriptor, - commands_view->base_offset + offset, size, - error_callback, data, view); -} - -int -macho_get_commands (struct backtrace_state *state, int descriptor, - backtrace_error_callback error_callback, - void *data, struct macho_commands_view *commands_view, - int *incompatible) -{ - int ret = 0; - int is_fat = 0; - struct backtrace_view file_header_view; - int file_header_view_valid = 0; - struct backtrace_view fat_archs_view; - int fat_archs_view_valid = 0; - const mach_header_native_t *file_header; - uint64_t commands_offset; - - *incompatible = 0; - - if (!backtrace_get_view (state, descriptor, 0, sizeof (mach_header_native_t), - error_callback, data, &file_header_view)) - goto end; - file_header_view_valid = 1; - - switch (*(uint32_t *) file_header_view.data) - { - case MH_MAGIC: - if (BACKTRACE_BITS == 32) - commands_view->bytes_swapped = 0; - else - { - *incompatible = 1; - goto end; - } - break; - case MH_CIGAM: - if (BACKTRACE_BITS == 32) - commands_view->bytes_swapped = 1; - else - { - *incompatible = 1; - goto end; - } - break; - case MH_MAGIC_64: - if (BACKTRACE_BITS == 64) - commands_view->bytes_swapped = 0; - else - { - *incompatible = 1; - goto end; - } - break; - case MH_CIGAM_64: - if (BACKTRACE_BITS == 64) - commands_view->bytes_swapped = 1; - else - { - *incompatible = 1; - goto end; - } - break; - case FAT_MAGIC: - is_fat = 1; - commands_view->bytes_swapped = 0; - break; - case FAT_CIGAM: - is_fat = 1; - commands_view->bytes_swapped = 1; - break; - default: - goto end; - } - - if (is_fat) - { - uint32_t native_slice_offset; - size_t archs_total_size; - uint32_t arch_count; - const struct fat_header *fat_header; - const struct fat_arch *archs; - uint32_t i; - - fat_header = file_header_view.data; - arch_count = - macho_file_to_host_u32 (commands_view->bytes_swapped, - fat_header->nfat_arch); - - archs_total_size = arch_count * sizeof (struct fat_arch); - - if (!backtrace_get_view (state, descriptor, sizeof 
(struct fat_header), - archs_total_size, error_callback, - data, &fat_archs_view)) - goto end; - fat_archs_view_valid = 1; - - native_slice_offset = 0; - archs = fat_archs_view.data; - for (i = 0; i < arch_count; i++) - { - const struct fat_arch *raw_arch = archs + i; - int cpu_type = - (int) macho_file_to_host_u32 (commands_view->bytes_swapped, - (uint32_t) raw_arch->cputype); - - if (cpu_type == NATIVE_CPU_TYPE) - { - native_slice_offset = - macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_arch->offset); - - break; - } - } - - if (native_slice_offset == 0) - { - *incompatible = 1; - goto end; - } - - backtrace_release_view (state, &file_header_view, error_callback, data); - file_header_view_valid = 0; - if (!backtrace_get_view (state, descriptor, native_slice_offset, - sizeof (mach_header_native_t), error_callback, - data, &file_header_view)) - goto end; - file_header_view_valid = 1; - - // The endianess of the slice may be different than the fat image - switch (*(uint32_t *) file_header_view.data) - { - case MH_MAGIC: - if (BACKTRACE_BITS == 32) - commands_view->bytes_swapped = 0; - else - goto end; - break; - case MH_CIGAM: - if (BACKTRACE_BITS == 32) - commands_view->bytes_swapped = 1; - else - goto end; - break; - case MH_MAGIC_64: - if (BACKTRACE_BITS == 64) - commands_view->bytes_swapped = 0; - else - goto end; - break; - case MH_CIGAM_64: - if (BACKTRACE_BITS == 64) - commands_view->bytes_swapped = 1; - else - goto end; - break; - default: - goto end; - } - - commands_view->base_offset = native_slice_offset; - } - else - commands_view->base_offset = 0; - - file_header = file_header_view.data; - commands_view->commands_count = - macho_file_to_host_u32 (commands_view->bytes_swapped, - file_header->ncmds); - commands_view->commands_total_size = - macho_file_to_host_u32 (commands_view->bytes_swapped, - file_header->sizeofcmds); - commands_offset = - commands_view->base_offset + sizeof (mach_header_native_t); - - if (!backtrace_get_view (state, 
descriptor, commands_offset, - commands_view->commands_total_size, error_callback, - data, &commands_view->view)) - goto end; - - ret = 1; - -end: - if (file_header_view_valid) - backtrace_release_view (state, &file_header_view, error_callback, data); - if (fat_archs_view_valid) - backtrace_release_view (state, &fat_archs_view, error_callback, data); - return ret; -} - -int -macho_get_uuid (struct backtrace_state *state ATTRIBUTE_UNUSED, - int descriptor ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, - void *data, struct macho_commands_view *commands_view, - uuid_t *uuid) -{ - size_t offset = 0; - uint32_t i = 0; - - for (i = 0; i < commands_view->commands_count; i++) - { - const struct load_command *raw_command; - struct load_command command; - - if (offset + sizeof (struct load_command) - > commands_view->commands_total_size) - { - error_callback (data, - "executable file contains out of range command offset", - 0); - return 0; - } - - raw_command = - commands_view->view.data + offset; - command.cmd = macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_command->cmd); - command.cmdsize = macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_command->cmdsize); - - if (command.cmd == LC_UUID) - { - const struct uuid_command *uuid_command; - - if (offset + sizeof (struct uuid_command) - > commands_view->commands_total_size) - { - error_callback (data, - "executable file contains out of range command offset", - 0); - return 0; - } - - uuid_command = - (struct uuid_command *) raw_command; - memcpy (uuid, uuid_command->uuid, sizeof (uuid_t)); - return 1; - } - - offset += command.cmdsize; - } - - error_callback (data, "executable file is missing an identifying UUID", 0); - return 0; -} - -/* Returns the base address of a Mach-O image, as encoded in the file header. - * WARNING: This does not take ASLR into account, which is ubiquitous on recent - * Darwin platforms. 
- */ -int -macho_get_addr_range (struct backtrace_state *state ATTRIBUTE_UNUSED, - int descriptor ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, - void *data, struct macho_commands_view *commands_view, - uintptr_t *base_address, uintptr_t *max_address) -{ - size_t offset = 0; - int found_text = 0; - uint32_t i = 0; - - *max_address = 0; - - for (i = 0; i < commands_view->commands_count; i++) - { - const struct load_command *raw_command; - struct load_command command; - - if (offset + sizeof (struct load_command) - > commands_view->commands_total_size) - { - error_callback (data, - "executable file contains out of range command offset", - 0); - return 0; - } - - raw_command = commands_view->view.data + offset; - command.cmd = macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_command->cmd); - command.cmdsize = macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_command->cmdsize); - - if (command.cmd == LC_SEGMENT_NATIVE) - { - const segment_command_native_t *raw_segment; - uintptr_t segment_vmaddr; - uintptr_t segment_vmsize; - uintptr_t segment_maxaddr; - uintptr_t text_fileoff; - - if (offset + sizeof (segment_command_native_t) - > commands_view->commands_total_size) - { - error_callback (data, - "executable file contains out of range command offset", - 0); - return 0; - } - - raw_segment = (segment_command_native_t *) raw_command; - - segment_vmaddr = macho_file_to_host_usize ( - commands_view->bytes_swapped, raw_segment->vmaddr); - segment_vmsize = macho_file_to_host_usize ( - commands_view->bytes_swapped, raw_segment->vmsize); - segment_maxaddr = segment_vmaddr + segment_vmsize; - - if (strncmp (raw_segment->segname, "__TEXT", - sizeof (raw_segment->segname)) == 0) - { - text_fileoff = macho_file_to_host_usize ( - commands_view->bytes_swapped, raw_segment->fileoff); - *base_address = segment_vmaddr - text_fileoff; - - found_text = 1; - } - - if (segment_maxaddr > *max_address) - *max_address = segment_maxaddr; - } - - offset += 
command.cmdsize; - } - - if (found_text) - return 1; - else - { - error_callback (data, "executable is missing __TEXT segment", 0); - return 0; - } -} - -static int -macho_symbol_compare_addr (const void *left_raw, const void *right_raw) -{ - const struct macho_symbol *left = left_raw; - const struct macho_symbol *right = right_raw; - - if (left->addr > right->addr) - return 1; - else if (left->addr < right->addr) - return -1; - else - return 0; -} - -int -macho_symbol_type_relevant (uint8_t type) -{ - uint8_t type_field = (uint8_t) (type & N_TYPE); - - return !(type & N_EXT) && - (type_field == N_ABS || type_field == N_SECT); -} - -int -macho_add_symtab (struct backtrace_state *state, - backtrace_error_callback error_callback, - void *data, int descriptor, - struct macho_commands_view *commands_view, - uintptr_t base_address, uintptr_t max_image_address, - intptr_t vmslide, int *found_sym) -{ - struct macho_syminfo_data *syminfo_data; - - int ret = 0; - size_t offset = 0; - struct backtrace_view symtab_view; - int symtab_view_valid = 0; - struct backtrace_view strtab_view; - int strtab_view_valid = 0; - size_t syminfo_index = 0; - size_t function_count = 0; - uint32_t i = 0; - uint32_t j = 0; - uint32_t symtab_index = 0; - - *found_sym = 0; - - for (i = 0; i < commands_view->commands_count; i++) - { - const struct load_command *raw_command; - struct load_command command; - - if (offset + sizeof (struct load_command) - > commands_view->commands_total_size) - { - error_callback (data, - "executable file contains out of range command offset", - 0); - return 0; - } - - raw_command = commands_view->view.data + offset; - command.cmd = macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_command->cmd); - command.cmdsize = macho_file_to_host_u32 (commands_view->bytes_swapped, - raw_command->cmdsize); - - if (command.cmd == LC_SYMTAB) - { - const struct symtab_command *symtab_command; - uint32_t symbol_table_offset; - uint32_t symbol_count; - uint32_t 
string_table_offset; - uint32_t string_table_size; - - if (offset + sizeof (struct symtab_command) - > commands_view->commands_total_size) - { - error_callback (data, - "executable file contains out of range command offset", - 0); - return 0; - } - - symtab_command = (struct symtab_command *) raw_command; - - symbol_table_offset = macho_file_to_host_u32 ( - commands_view->bytes_swapped, symtab_command->symoff); - symbol_count = macho_file_to_host_u32 ( - commands_view->bytes_swapped, symtab_command->nsyms); - string_table_offset = macho_file_to_host_u32 ( - commands_view->bytes_swapped, symtab_command->stroff); - string_table_size = macho_file_to_host_u32 ( - commands_view->bytes_swapped, symtab_command->strsize); - - - if (!macho_get_view (state, descriptor, symbol_table_offset, - symbol_count * sizeof (nlist_native_t), - error_callback, data, commands_view, - &symtab_view)) - goto end; - symtab_view_valid = 1; - - if (!macho_get_view (state, descriptor, string_table_offset, - string_table_size, error_callback, data, - commands_view, &strtab_view)) - goto end; - strtab_view_valid = 1; - - // Count functions first - for (j = 0; j < symbol_count; j++) - { - const nlist_native_t *raw_sym = - ((const nlist_native_t *) symtab_view.data) + j; - - if (macho_symbol_type_relevant (raw_sym->n_type)) - { - function_count += 1; - } - } - - // Allocate space for the: - // (a) macho_syminfo_data for this image - // (b) macho_symbol entries - syminfo_data = - backtrace_alloc (state, - sizeof (struct macho_syminfo_data), - error_callback, data); - if (syminfo_data == NULL) - goto end; - - syminfo_data->symbols = backtrace_alloc ( - state, function_count * sizeof (struct macho_symbol), - error_callback, data); - if (syminfo_data->symbols == NULL) - goto end; - - syminfo_data->symbol_count = function_count; - syminfo_data->next = NULL; - syminfo_data->min_addr = base_address; - syminfo_data->max_addr = max_image_address; - - for (symtab_index = 0; - symtab_index < symbol_count; 
symtab_index++) - { - const nlist_native_t *raw_sym = - ((const nlist_native_t *) symtab_view.data) + - symtab_index; - - if (macho_symbol_type_relevant (raw_sym->n_type)) - { - size_t strtab_index; - const char *name; - size_t max_len_plus_one; - - syminfo_data->symbols[syminfo_index].addr = - macho_file_to_host_usize (commands_view->bytes_swapped, - raw_sym->n_value) + vmslide; - - strtab_index = macho_file_to_host_u32 ( - commands_view->bytes_swapped, - raw_sym->n_un.n_strx); - - // Check the range of the supposed "string" we've been - // given - if (strtab_index >= string_table_size) - { - error_callback ( - data, - "dSYM file contains out of range string table index", - 0); - goto end; - } - - name = strtab_view.data + strtab_index; - max_len_plus_one = string_table_size - strtab_index; - - if (strnlen (name, max_len_plus_one) >= max_len_plus_one) - { - error_callback ( - data, - "dSYM file contains unterminated string", - 0); - goto end; - } - - // Remove underscore prefixes - if (name[0] == '_') - { - name = name + 1; - } - - syminfo_data->symbols[syminfo_index].name = name; - - syminfo_index += 1; - } - } - - backtrace_qsort (syminfo_data->symbols, - syminfo_data->symbol_count, - sizeof (struct macho_symbol), - macho_symbol_compare_addr); - - // Calculate symbol sizes - for (syminfo_index = 0; - syminfo_index < syminfo_data->symbol_count; syminfo_index++) - { - if (syminfo_index + 1 < syminfo_data->symbol_count) - { - syminfo_data->symbols[syminfo_index].size = - syminfo_data->symbols[syminfo_index + 1].addr - - syminfo_data->symbols[syminfo_index].addr; - } - else - { - syminfo_data->symbols[syminfo_index].size = - max_image_address - - syminfo_data->symbols[syminfo_index].addr; - } - } - - if (!state->threaded) - { - struct macho_syminfo_data **pp; - - for (pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data; - *pp != NULL; - pp = &(*pp)->next); - *pp = syminfo_data; - } - else - { - while (1) - { - struct macho_syminfo_data **pp; - - pp = 
(struct macho_syminfo_data **) (void *) &state->syminfo_data; - - while (1) - { - struct macho_syminfo_data *p; - - p = backtrace_atomic_load_pointer (pp); - - if (p == NULL) - break; - - pp = &p->next; - } - - if (__sync_bool_compare_and_swap (pp, NULL, syminfo_data)) - break; - } - } - - strtab_view_valid = 0; // We need to keep string table around - *found_sym = 1; - ret = 1; - goto end; - } - - offset += command.cmdsize; - } - - // No symbol table here - ret = 1; - goto end; - -end: - if (symtab_view_valid) - backtrace_release_view (state, &symtab_view, error_callback, data); - if (strtab_view_valid) - backtrace_release_view (state, &strtab_view, error_callback, data); - return ret; -} - -int -macho_try_dwarf (struct backtrace_state *state, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn, uuid_t *executable_uuid, - uintptr_t base_address, uintptr_t max_image_address, - intptr_t vmslide, char *dwarf_filename, int *matched, - int *found_sym, int *found_dwarf) -{ - uuid_t dwarf_uuid; - - int ret = 0; - int dwarf_descriptor; - int dwarf_descriptor_valid = 0; - struct macho_commands_view commands_view; - int commands_view_valid = 0; - struct backtrace_view dwarf_view; - int dwarf_view_valid = 0; - size_t offset = 0; - struct found_dwarf_section dwarf_sections[DEBUG_MAX]; - uintptr_t min_dwarf_offset = 0; - uintptr_t max_dwarf_offset = 0; - uint32_t i = 0; - uint32_t j = 0; - int k = 0; - - *matched = 0; - *found_sym = 0; - *found_dwarf = 0; - - if ((dwarf_descriptor = backtrace_open (dwarf_filename, error_callback, - data, NULL)) == 0) - goto end; - dwarf_descriptor_valid = 1; - - int incompatible; - if (!macho_get_commands (state, dwarf_descriptor, error_callback, data, - &commands_view, &incompatible)) - { - // Failing to read the header here is fine, because this dSYM may be - // for a different architecture - if (incompatible) - { - ret = 1; - } - goto end; - } - commands_view_valid = 1; - - // Get dSYM UUID and compare - if 
(!macho_get_uuid (state, dwarf_descriptor, error_callback, data, - &commands_view, &dwarf_uuid)) - { - error_callback (data, "dSYM file is missing an identifying uuid", 0); - goto end; - } - if (memcmp (executable_uuid, &dwarf_uuid, sizeof (uuid_t)) != 0) - { - // DWARF doesn't belong to desired executable - ret = 1; - goto end; - } - - *matched = 1; - - // Read symbol table - if (!macho_add_symtab (state, error_callback, data, dwarf_descriptor, - &commands_view, base_address, max_image_address, - vmslide, found_sym)) - goto end; - - // Get DWARF sections - - memset (dwarf_sections, 0, sizeof (dwarf_sections)); - offset = 0; - for (i = 0; i < commands_view.commands_count; i++) - { - const struct load_command *raw_command; - struct load_command command; - - if (offset + sizeof (struct load_command) - > commands_view.commands_total_size) - { - error_callback (data, - "dSYM file contains out of range command offset", 0); - goto end; - } - - raw_command = commands_view.view.data + offset; - command.cmd = macho_file_to_host_u32 (commands_view.bytes_swapped, - raw_command->cmd); - command.cmdsize = macho_file_to_host_u32 (commands_view.bytes_swapped, - raw_command->cmdsize); - - if (command.cmd == LC_SEGMENT_NATIVE) - { - uint32_t section_count; - size_t section_offset; - const segment_command_native_t *raw_segment; - - if (offset + sizeof (segment_command_native_t) - > commands_view.commands_total_size) - { - error_callback (data, - "dSYM file contains out of range command offset", - 0); - goto end; - } - - raw_segment = (const segment_command_native_t *) raw_command; - - if (strncmp (raw_segment->segname, "__DWARF", - sizeof (raw_segment->segname)) == 0) - { - section_count = macho_file_to_host_u32 ( - commands_view.bytes_swapped, - raw_segment->nsects); - - section_offset = offset + sizeof (segment_command_native_t); - - // Search sections for relevant DWARF section names - for (j = 0; j < section_count; j++) - { - const section_native_t *raw_section; - - if 
(section_offset + sizeof (section_native_t) > - commands_view.commands_total_size) - { - error_callback (data, - "dSYM file contains out of range command offset", - 0); - goto end; - } - - raw_section = commands_view.view.data + section_offset; - - for (k = 0; k < DEBUG_MAX; k++) - { - uintptr_t dwarf_section_end; - - if (strncmp (raw_section->sectname, - debug_section_names[k], - sizeof (raw_section->sectname)) == 0) - { - *found_dwarf = 1; - - dwarf_sections[k].file_offset = - macho_file_to_host_u32 ( - commands_view.bytes_swapped, - raw_section->offset); - dwarf_sections[k].file_size = - macho_file_to_host_usize ( - commands_view.bytes_swapped, - raw_section->size); - - if (min_dwarf_offset == 0 || - dwarf_sections[k].file_offset < - min_dwarf_offset) - min_dwarf_offset = dwarf_sections[k].file_offset; - - dwarf_section_end = - dwarf_sections[k].file_offset + - dwarf_sections[k].file_size; - if (dwarf_section_end > max_dwarf_offset) - max_dwarf_offset = dwarf_section_end; - - break; - } - } - - section_offset += sizeof (section_native_t); - } - - break; - } - } - - offset += command.cmdsize; - } - - if (!*found_dwarf) - { - // No DWARF in this file - ret = 1; - goto end; - } - - if (!macho_get_view (state, dwarf_descriptor, (off_t) min_dwarf_offset, - max_dwarf_offset - min_dwarf_offset, error_callback, - data, &commands_view, &dwarf_view)) - goto end; - dwarf_view_valid = 1; - - for (i = 0; i < DEBUG_MAX; i++) - { - if (dwarf_sections[i].file_offset == 0) - dwarf_sections[i].data = NULL; - else - dwarf_sections[i].data = - dwarf_view.data + dwarf_sections[i].file_offset - min_dwarf_offset; - } - - if (!backtrace_dwarf_add (state, vmslide, - dwarf_sections[DEBUG_INFO].data, - dwarf_sections[DEBUG_INFO].file_size, - dwarf_sections[DEBUG_LINE].data, - dwarf_sections[DEBUG_LINE].file_size, - dwarf_sections[DEBUG_ABBREV].data, - dwarf_sections[DEBUG_ABBREV].file_size, - dwarf_sections[DEBUG_RANGES].data, - dwarf_sections[DEBUG_RANGES].file_size, - 
dwarf_sections[DEBUG_STR].data, - dwarf_sections[DEBUG_STR].file_size, - ((__DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN) - ^ commands_view.bytes_swapped), - error_callback, data, fileline_fn)) - goto end; - - // Don't release the DWARF view because it is still in use - dwarf_descriptor_valid = 0; - dwarf_view_valid = 0; - ret = 1; - -end: - if (dwarf_descriptor_valid) - backtrace_close (dwarf_descriptor, error_callback, data); - if (commands_view_valid) - backtrace_release_view (state, &commands_view.view, error_callback, data); - if (dwarf_view_valid) - backtrace_release_view (state, &dwarf_view, error_callback, data); - return ret; -} - -int -macho_try_dsym (struct backtrace_state *state, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn, uuid_t *executable_uuid, - uintptr_t base_address, uintptr_t max_image_address, - intptr_t vmslide, char *dsym_filename, int *matched, - int *found_sym, int *found_dwarf) -{ - int ret = 0; - char dwarf_image_dir_path[PATH_MAX]; - DIR *dwarf_image_dir; - int dwarf_image_dir_valid = 0; - struct dirent *directory_entry; - char dwarf_filename[PATH_MAX]; - int dwarf_matched; - int dwarf_had_sym; - int dwarf_had_dwarf; - - *matched = 0; - *found_sym = 0; - *found_dwarf = 0; - - strncpy (dwarf_image_dir_path, dsym_filename, PATH_MAX); - strncat (dwarf_image_dir_path, "/Contents/Resources/DWARF", PATH_MAX); - - if (!(dwarf_image_dir = opendir (dwarf_image_dir_path))) - { - error_callback (data, "could not open DWARF directory in dSYM", - 0); - goto end; - } - dwarf_image_dir_valid = 1; - - while ((directory_entry = readdir (dwarf_image_dir))) - { - if (directory_entry->d_type != DT_REG) - continue; - - strncpy (dwarf_filename, dwarf_image_dir_path, PATH_MAX); - strncat (dwarf_filename, "/", PATH_MAX); - strncat (dwarf_filename, directory_entry->d_name, PATH_MAX); - - if (!macho_try_dwarf (state, error_callback, data, fileline_fn, - executable_uuid, base_address, max_image_address, - vmslide, dwarf_filename, - 
&dwarf_matched, &dwarf_had_sym, &dwarf_had_dwarf)) - goto end; - - if (dwarf_matched) - { - *matched = 1; - *found_sym = dwarf_had_sym; - *found_dwarf = dwarf_had_dwarf; - ret = 1; - goto end; - } - } - - // No matching DWARF in this dSYM - ret = 1; - goto end; - -end: - if (dwarf_image_dir_valid) - closedir (dwarf_image_dir); - return ret; -} - -int -macho_add (struct backtrace_state *state, - backtrace_error_callback error_callback, void *data, int descriptor, - const char *filename, fileline *fileline_fn, intptr_t vmslide, - int *found_sym, int *found_dwarf) -{ - uuid_t image_uuid; - uintptr_t image_file_base_address; - uintptr_t image_file_max_address; - uintptr_t image_actual_base_address = 0; - uintptr_t image_actual_max_address = 0; - - int ret = 0; - struct macho_commands_view commands_view; - int commands_view_valid = 0; - char executable_dirname[PATH_MAX]; - size_t filename_len; - DIR *executable_dir = NULL; - int executable_dir_valid = 0; - struct dirent *directory_entry; - char dsym_full_path[PATH_MAX]; - static const char *extension; - size_t extension_len; - ssize_t i; - - *found_sym = 0; - *found_dwarf = 0; - - // Find Mach-O commands list - int incompatible; - if (!macho_get_commands (state, descriptor, error_callback, data, - &commands_view, &incompatible)) - goto end; - commands_view_valid = 1; - - // First we need to get the uuid of our file so we can hunt down the correct - // dSYM - if (!macho_get_uuid (state, descriptor, error_callback, data, &commands_view, - &image_uuid)) - goto end; - - // Now we need to find the in memory base address. 
Step one is to find out - // what the executable thinks the base address is - if (!macho_get_addr_range (state, descriptor, error_callback, data, - &commands_view, - &image_file_base_address, - &image_file_max_address)) - goto end; - - image_actual_base_address = - image_file_base_address + vmslide; - image_actual_max_address = - image_file_max_address + vmslide; - - if (image_actual_base_address == 0) - { - error_callback (data, "executable file is not loaded", 0); - goto end; - } - - // Look for dSYM in our executable's directory - strncpy (executable_dirname, filename, PATH_MAX); - filename_len = strlen (executable_dirname); - for (i = filename_len - 1; i >= 0; i--) - { - if (executable_dirname[i] == '/') - { - executable_dirname[i] = '\0'; - break; - } - else if (i == 0) - { - executable_dirname[0] = '.'; - executable_dirname[1] = '\0'; - break; - } - } - - if (!(executable_dir = opendir (executable_dirname))) - { - error_callback (data, "could not open directory containing executable", - 0); - goto end; - } - executable_dir_valid = 1; - - extension = ".dSYM"; - extension_len = strlen (extension); - while ((directory_entry = readdir (executable_dir))) - { - if (directory_entry->d_namlen < extension_len) - continue; - if (strncasecmp (directory_entry->d_name + directory_entry->d_namlen - - extension_len, extension, extension_len) == 0) - { - int matched; - int dsym_had_sym; - int dsym_had_dwarf; - - // Found a dSYM - strncpy (dsym_full_path, executable_dirname, PATH_MAX); - strncat (dsym_full_path, "/", PATH_MAX); - strncat (dsym_full_path, directory_entry->d_name, PATH_MAX); - - if (!macho_try_dsym (state, error_callback, data, - fileline_fn, &image_uuid, - image_actual_base_address, - image_actual_max_address, vmslide, - dsym_full_path, - &matched, &dsym_had_sym, &dsym_had_dwarf)) - goto end; - - if (matched) - { - *found_sym = dsym_had_sym; - *found_dwarf = dsym_had_dwarf; - ret = 1; - goto end; - } - } - } - - // No matching dSYM - ret = 1; - goto end; - 
-end: - if (commands_view_valid) - backtrace_release_view (state, &commands_view.view, error_callback, - data); - if (executable_dir_valid) - closedir (executable_dir); - return ret; -} - -static int -macho_symbol_search (const void *vkey, const void *ventry) -{ - const uintptr_t *key = (const uintptr_t *) vkey; - const struct macho_symbol *entry = (const struct macho_symbol *) ventry; - uintptr_t addr; - - addr = *key; - if (addr < entry->addr) - return -1; - else if (addr >= entry->addr + entry->size) - return 1; - else - return 0; -} - -static void -macho_syminfo (struct backtrace_state *state, - uintptr_t addr, - backtrace_syminfo_callback callback, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data) -{ - struct macho_syminfo_data *edata; - struct macho_symbol *sym = NULL; - - if (!state->threaded) - { - for (edata = (struct macho_syminfo_data *) state->syminfo_data; - edata != NULL; - edata = edata->next) - { - if (addr >= edata->min_addr && addr <= edata->max_addr) - { - sym = ((struct macho_symbol *) - bsearch (&addr, edata->symbols, edata->symbol_count, - sizeof (struct macho_symbol), macho_symbol_search)); - if (sym != NULL) - break; - } - } - } - else - { - struct macho_syminfo_data **pp; - - pp = (struct macho_syminfo_data **) (void *) &state->syminfo_data; - while (1) - { - edata = backtrace_atomic_load_pointer (pp); - if (edata == NULL) - break; - - if (addr >= edata->min_addr && addr <= edata->max_addr) - { - sym = ((struct macho_symbol *) - bsearch (&addr, edata->symbols, edata->symbol_count, - sizeof (struct macho_symbol), macho_symbol_search)); - if (sym != NULL) - break; - } - - pp = &edata->next; - } - } - - if (sym == NULL) - callback (data, addr, NULL, 0, 0); - else - callback (data, addr, sym->name, sym->addr, sym->size); -} - - -static int -macho_nodebug (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t pc ATTRIBUTE_UNUSED, - backtrace_full_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback 
error_callback, void *data) -{ - error_callback (data, "no debug info in Mach-O executable", -1); - return 0; -} - -static void -macho_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t addr ATTRIBUTE_UNUSED, - backtrace_syminfo_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, "no symbol table in Mach-O executable", -1); -} - -int -backtrace_initialize (struct backtrace_state *state, int descriptor, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn) -{ - int ret; - fileline macho_fileline_fn = macho_nodebug; - int found_sym = 0; - int found_dwarf = 0; - uint32_t i = 0; - uint32_t loaded_image_count; - - // Add all loaded images - loaded_image_count = _dyld_image_count (); - for (i = 0; i < loaded_image_count; i++) - { - int current_found_sym; - int current_found_dwarf; - int current_descriptor; - intptr_t current_vmslide; - const char *current_name; - - current_vmslide = _dyld_get_image_vmaddr_slide (i); - current_name = _dyld_get_image_name (i); - - if (current_name == NULL || (i != 0 && current_vmslide == 0)) - continue; - - if (!(current_descriptor = - backtrace_open (current_name, error_callback, data, NULL))) - { - continue; - } - - if (macho_add (state, error_callback, data, current_descriptor, - current_name, &macho_fileline_fn, current_vmslide, - &current_found_sym, &current_found_dwarf)) - { - found_sym = found_sym || current_found_sym; - found_dwarf = found_dwarf || current_found_dwarf; - } - - backtrace_close (current_descriptor, error_callback, data); - } - - if (!state->threaded) - { - if (found_sym) - state->syminfo_fn = macho_syminfo; - else if (state->syminfo_fn == NULL) - state->syminfo_fn = macho_nosyms; - } - else - { - if (found_sym) - backtrace_atomic_store_pointer (&state->syminfo_fn, macho_syminfo); - else - (void) __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, - macho_nosyms); - } - - if (!state->threaded) - { - if 
(state->fileline_fn == NULL || state->fileline_fn == macho_nodebug) - *fileline_fn = macho_fileline_fn; - } - else - { - fileline current_fn; - - current_fn = backtrace_atomic_load_pointer (&state->fileline_fn); - if (current_fn == NULL || current_fn == macho_nodebug) - *fileline_fn = macho_fileline_fn; - } - - return 1; -} - diff --git a/src/libbacktrace/mmap.c b/src/libbacktrace/mmap.c deleted file mode 100644 index 138ef70711a0..000000000000 --- a/src/libbacktrace/mmap.c +++ /dev/null @@ -1,303 +0,0 @@ -/* mmap.c -- Memory allocation with mmap. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include <errno.h> -#include <string.h> -#include <stdlib.h> -#include <sys/types.h> -#include <sys/mman.h> -#include <unistd.h> - -#include "backtrace.h" -#include "internal.h" - -/* Memory allocation on systems that provide anonymous mmap. This - permits the backtrace functions to be invoked from a signal - handler, assuming that mmap is async-signal safe. */ - -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif - -#ifndef MAP_FAILED -#define MAP_FAILED ((void *)-1) -#endif - -/* A list of free memory blocks. */ - -struct backtrace_freelist_struct -{ - /* Next on list. */ - struct backtrace_freelist_struct *next; - /* Size of this block, including this structure. */ - size_t size; -}; - -/* Free memory allocated by backtrace_alloc. */ - -static void -backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size) -{ - /* Just leak small blocks. We don't have to be perfect. */ - if (size >= sizeof (struct backtrace_freelist_struct)) - { - struct backtrace_freelist_struct *p; - - p = (struct backtrace_freelist_struct *) addr; - p->next = state->freelist; - p->size = size; - state->freelist = p; - } -} - -/* Allocate memory like malloc. If ERROR_CALLBACK is NULL, don't - report an error. 
*/ - -void * -backtrace_alloc (struct backtrace_state *state, - size_t size, backtrace_error_callback error_callback, - void *data) -{ - void *ret; - int locked; - struct backtrace_freelist_struct **pp; - size_t pagesize; - size_t asksize; - void *page; - - ret = NULL; - - /* If we can acquire the lock, then see if there is space on the - free list. If we can't acquire the lock, drop straight into - using mmap. __sync_lock_test_and_set returns the old state of - the lock, so we have acquired it if it returns 0. */ - - if (!state->threaded) - locked = 1; - else - locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0; - - if (locked) - { - for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next) - { - if ((*pp)->size >= size) - { - struct backtrace_freelist_struct *p; - - p = *pp; - *pp = p->next; - - /* Round for alignment; we assume that no type we care about - is more than 8 bytes. */ - size = (size + 7) & ~ (size_t) 7; - if (size < p->size) - backtrace_free_locked (state, (char *) p + size, - p->size - size); - - ret = (void *) p; - - break; - } - } - - if (state->threaded) - __sync_lock_release (&state->lock_alloc); - } - - if (ret == NULL) - { - /* Allocate a new page. */ - - pagesize = getpagesize (); - asksize = (size + pagesize - 1) & ~ (pagesize - 1); - page = mmap (NULL, asksize, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (page == MAP_FAILED) - { - if (error_callback) - error_callback (data, "mmap", errno); - } - else - { - size = (size + 7) & ~ (size_t) 7; - if (size < asksize) - backtrace_free (state, (char *) page + size, asksize - size, - error_callback, data); - - ret = page; - } - } - - return ret; -} - -/* Free memory allocated by backtrace_alloc. 
*/ - -void -backtrace_free (struct backtrace_state *state, void *addr, size_t size, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data ATTRIBUTE_UNUSED) -{ - int locked; - - /* If we are freeing a large aligned block, just release it back to - the system. This case arises when growing a vector for a large - binary with lots of debug info. Calling munmap here may cause us - to call mmap again if there is also a large shared library; we - just live with that. */ - if (size >= 16 * 4096) - { - size_t pagesize; - - pagesize = getpagesize (); - if (((uintptr_t) addr & (pagesize - 1)) == 0 - && (size & (pagesize - 1)) == 0) - { - /* If munmap fails for some reason, just add the block to - the freelist. */ - if (munmap (addr, size) == 0) - return; - } - } - - /* If we can acquire the lock, add the new space to the free list. - If we can't acquire the lock, just leak the memory. - __sync_lock_test_and_set returns the old state of the lock, so we - have acquired it if it returns 0. */ - - if (!state->threaded) - locked = 1; - else - locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0; - - if (locked) - { - backtrace_free_locked (state, addr, size); - - if (state->threaded) - __sync_lock_release (&state->lock_alloc); - } -} - -/* Grow VEC by SIZE bytes. 
*/ - -void * -backtrace_vector_grow (struct backtrace_state *state,size_t size, - backtrace_error_callback error_callback, - void *data, struct backtrace_vector *vec) -{ - void *ret; - - if (size > vec->alc) - { - size_t pagesize; - size_t alc; - void *base; - - pagesize = getpagesize (); - alc = vec->size + size; - if (vec->size == 0) - alc = 16 * size; - else if (alc < pagesize) - { - alc *= 2; - if (alc > pagesize) - alc = pagesize; - } - else - { - alc *= 2; - alc = (alc + pagesize - 1) & ~ (pagesize - 1); - } - base = backtrace_alloc (state, alc, error_callback, data); - if (base == NULL) - return NULL; - if (vec->base != NULL) - { - memcpy (base, vec->base, vec->size); - backtrace_free (state, vec->base, vec->size + vec->alc, - error_callback, data); - } - vec->base = base; - vec->alc = alc - vec->size; - } - - ret = (char *) vec->base + vec->size; - vec->size += size; - vec->alc -= size; - return ret; -} - -/* Finish the current allocation on VEC. */ - -void * -backtrace_vector_finish ( - struct backtrace_state *state ATTRIBUTE_UNUSED, - struct backtrace_vector *vec, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data ATTRIBUTE_UNUSED) -{ - void *ret; - - ret = vec->base; - vec->base = (char *) vec->base + vec->size; - vec->size = 0; - return ret; -} - -/* Release any extra space allocated for VEC. */ - -int -backtrace_vector_release (struct backtrace_state *state, - struct backtrace_vector *vec, - backtrace_error_callback error_callback, - void *data) -{ - size_t size; - size_t alc; - size_t aligned; - - /* Make sure that the block that we free is aligned on an 8-byte - boundary. 
*/ - size = vec->size; - alc = vec->alc; - aligned = (size + 7) & ~ (size_t) 7; - alc -= aligned - size; - - backtrace_free (state, (char *) vec->base + aligned, alc, - error_callback, data); - vec->alc = 0; - return 1; -} diff --git a/src/libbacktrace/mmapio.c b/src/libbacktrace/mmapio.c deleted file mode 100644 index dfdaf6fa52e6..000000000000 --- a/src/libbacktrace/mmapio.c +++ /dev/null @@ -1,100 +0,0 @@ -/* mmapio.c -- File views using mmap. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
*/ - -#include "config.h" - -#include <errno.h> -#include <sys/types.h> -#include <sys/mman.h> -#include <unistd.h> - -#include "backtrace.h" -#include "internal.h" - -#ifndef MAP_FAILED -#define MAP_FAILED ((void *)-1) -#endif - -/* This file implements file views and memory allocation when mmap is - available. */ - -/* Create a view of SIZE bytes from DESCRIPTOR at OFFSET. */ - -int -backtrace_get_view (struct backtrace_state *state ATTRIBUTE_UNUSED, - int descriptor, off_t offset, size_t size, - backtrace_error_callback error_callback, - void *data, struct backtrace_view *view) -{ - size_t pagesize; - unsigned int inpage; - off_t pageoff; - void *map; - - pagesize = getpagesize (); - inpage = offset % pagesize; - pageoff = offset - inpage; - - size += inpage; - size = (size + (pagesize - 1)) & ~ (pagesize - 1); - - map = mmap (NULL, size, PROT_READ, MAP_PRIVATE, descriptor, pageoff); - if (map == MAP_FAILED) - { - error_callback (data, "mmap", errno); - return 0; - } - - view->data = (char *) map + inpage; - view->base = map; - view->len = size; - - return 1; -} - -/* Release a view read by backtrace_get_view. */ - -void -backtrace_release_view (struct backtrace_state *state ATTRIBUTE_UNUSED, - struct backtrace_view *view, - backtrace_error_callback error_callback, - void *data) -{ - union { - const void *cv; - void *v; - } const_cast; - - const_cast.cv = view->base; - if (munmap (const_cast.v, view->len) < 0) - error_callback (data, "munmap", errno); -} diff --git a/src/libbacktrace/nounwind.c b/src/libbacktrace/nounwind.c deleted file mode 100644 index 448a2049f1d1..000000000000 --- a/src/libbacktrace/nounwind.c +++ /dev/null @@ -1,66 +0,0 @@ -/* backtrace.c -- Entry point for stack backtrace library. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include <sys/types.h> - -#include "backtrace.h" - -#include "internal.h" - -/* This source file is compiled if the unwind library is not - available. 
*/ - -int -backtrace_full (struct backtrace_state *state ATTRIBUTE_UNUSED, - int skip ATTRIBUTE_UNUSED, - backtrace_full_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, - "no stack trace because unwind library not available", - 0); - return 0; -} - -int -backtrace_simple (struct backtrace_state *state ATTRIBUTE_UNUSED, - int skip ATTRIBUTE_UNUSED, - backtrace_simple_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, - "no stack trace because unwind library not available", - 0); - return 0; -} diff --git a/src/libbacktrace/pecoff.c b/src/libbacktrace/pecoff.c deleted file mode 100644 index 2d6a9877219d..000000000000 --- a/src/libbacktrace/pecoff.c +++ /dev/null @@ -1,942 +0,0 @@ -/* pecoff.c -- Get debug data from a PE/COFFF file for backtraces. - Copyright (C) 2015-2016 Free Software Foundation, Inc. - Adapted from elf.c by Tristan Gingold, AdaCore. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include <stdlib.h> -#include <string.h> -#include <sys/types.h> - -#include "backtrace.h" -#include "internal.h" - -/* Coff file header. */ - -typedef struct { - uint16_t machine; - uint16_t number_of_sections; - uint32_t time_date_stamp; - uint32_t pointer_to_symbol_table; - uint32_t number_of_symbols; - uint16_t size_of_optional_header; - uint16_t characteristics; -} b_coff_file_header; - -/* Coff optional header. */ - -typedef struct { - uint16_t magic; - uint8_t major_linker_version; - uint8_t minor_linker_version; - uint32_t size_of_code; - uint32_t size_of_initialized_data; - uint32_t size_of_uninitialized_data; - uint32_t address_of_entry_point; - uint32_t base_of_code; - union { - struct { - uint32_t base_of_data; - uint32_t image_base; - } pe; - struct { - uint64_t image_base; - } pep; - } u; -} b_coff_optional_header; - -/* Values of magic in optional header. */ - -#define PE_MAGIC 0x10b /* PE32 executable. */ -#define PEP_MAGIC 0x20b /* PE32+ executable (for 64bit targets). */ - -/* Coff section header. */ - -typedef struct { - char name[8]; - uint32_t virtual_size; - uint32_t virtual_address; - uint32_t size_of_raw_data; - uint32_t pointer_to_raw_data; - uint32_t pointer_to_relocations; - uint32_t pointer_to_line_numbers; - uint16_t number_of_relocations; - uint16_t number_of_line_numbers; - uint32_t characteristics; -} b_coff_section_header; - -/* Coff symbol name. 
*/ - -typedef union { - char short_name[8]; - struct { - unsigned char zeroes[4]; - unsigned char off[4]; - } long_name; -} b_coff_name; - -/* Coff symbol (external representation which is unaligned). */ - -typedef struct { - b_coff_name name; - unsigned char value[4]; - unsigned char section_number[2]; - unsigned char type[2]; - unsigned char storage_class; - unsigned char number_of_aux_symbols; -} b_coff_external_symbol; - -/* Symbol types. */ - -#define N_TBSHFT 4 /* Shift for the derived type. */ -#define IMAGE_SYM_DTYPE_FUNCTION 2 /* Function derived type. */ - -/* Size of a coff symbol. */ - -#define SYM_SZ 18 - -/* Coff symbol, internal representation (aligned). */ - -typedef struct { - const char *name; - uint32_t value; - int16_t sec; - uint16_t type; - uint16_t sc; -} b_coff_internal_symbol; - -/* An index of sections we care about. */ - -enum debug_section -{ - DEBUG_INFO, - DEBUG_LINE, - DEBUG_ABBREV, - DEBUG_RANGES, - DEBUG_STR, - DEBUG_MAX -}; - -/* Names of sections, indexed by enum debug_section. */ - -static const char * const debug_section_names[DEBUG_MAX] = -{ - ".debug_info", - ".debug_line", - ".debug_abbrev", - ".debug_ranges", - ".debug_str" -}; - -/* Information we gather for the sections we care about. */ - -struct debug_section_info -{ - /* Section file offset. */ - off_t offset; - /* Section size. */ - size_t size; - /* Section contents, after read from file. */ - const unsigned char *data; -}; - -/* Information we keep for an coff symbol. */ - -struct coff_symbol -{ - /* The name of the symbol. */ - const char *name; - /* The address of the symbol. */ - uintptr_t address; -}; - -/* Information to pass to coff_syminfo. */ - -struct coff_syminfo_data -{ - /* Symbols for the next module. */ - struct coff_syminfo_data *next; - /* The COFF symbols, sorted by address. */ - struct coff_symbol *symbols; - /* The number of symbols. */ - size_t count; -}; - -/* A dummy callback function used when we can't find any debug info. 
*/ - -static int -coff_nodebug (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t pc ATTRIBUTE_UNUSED, - backtrace_full_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, "no debug info in PE/COFF executable", -1); - return 0; -} - -/* A dummy callback function used when we can't find a symbol - table. */ - -static void -coff_nosyms (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t addr ATTRIBUTE_UNUSED, - backtrace_syminfo_callback callback ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback, void *data) -{ - error_callback (data, "no symbol table in PE/COFF executable", -1); -} - -/* Read a potentially unaligned 4 byte word at P, using native endianness. */ - -static uint32_t -coff_read4 (const unsigned char *p) -{ - uint32_t res; - - memcpy (&res, p, 4); - return res; -} - -/* Read a potentially unaligned 2 byte word at P, using native endianness. - All 2 byte word in symbols are always aligned, but for coherency all - fields are declared as char arrays. */ - -static uint16_t -coff_read2 (const unsigned char *p) -{ - uint16_t res; - - memcpy (&res, p, sizeof (res)); - return res; -} - -/* Return the length (without the trailing 0) of a COFF short name. */ - -static size_t -coff_short_name_len (const char *name) -{ - int i; - - for (i = 0; i < 8; i++) - if (name[i] == 0) - return i; - return 8; -} - -/* Return true iff COFF short name CNAME is the same as NAME (a NUL-terminated - string). */ - -static int -coff_short_name_eq (const char *name, const char *cname) -{ - int i; - - for (i = 0; i < 8; i++) - { - if (name[i] != cname[i]) - return 0; - if (name[i] == 0) - return 1; - } - return name[8] == 0; -} - -/* Return true iff NAME is the same as string at offset OFF. 
*/ - -static int -coff_long_name_eq (const char *name, unsigned int off, - struct backtrace_view *str_view) -{ - if (off >= str_view->len) - return 0; - return strcmp (name, (const char *)str_view->data + off) == 0; -} - -/* Compare struct coff_symbol for qsort. */ - -static int -coff_symbol_compare (const void *v1, const void *v2) -{ - const struct coff_symbol *e1 = (const struct coff_symbol *) v1; - const struct coff_symbol *e2 = (const struct coff_symbol *) v2; - - if (e1->address < e2->address) - return -1; - else if (e1->address > e2->address) - return 1; - else - return 0; -} - -/* Convert SYM to internal (and aligned) format ISYM, using string table - from STRTAB and STRTAB_SIZE, and number of sections SECTS_NUM. - Return -1 in case of error (invalid section number or string index). */ - -static int -coff_expand_symbol (b_coff_internal_symbol *isym, - const b_coff_external_symbol *sym, - uint16_t sects_num, - const unsigned char *strtab, size_t strtab_size) -{ - isym->type = coff_read2 (sym->type); - isym->sec = coff_read2 (sym->section_number); - isym->sc = sym->storage_class; - - if (isym->sec > 0 && (uint16_t) isym->sec > sects_num) - return -1; - if (sym->name.short_name[0] != 0) - isym->name = sym->name.short_name; - else - { - uint32_t off = coff_read4 (sym->name.long_name.off); - - if (off >= strtab_size) - return -1; - isym->name = (const char *) strtab + off; - } - return 0; -} - -/* Return true iff SYM is a defined symbol for a function. Data symbols - aren't considered because they aren't easily identified (same type as - section names, presence of symbols defined by the linker script). */ - -static int -coff_is_function_symbol (const b_coff_internal_symbol *isym) -{ - return (isym->type >> N_TBSHFT) == IMAGE_SYM_DTYPE_FUNCTION - && isym->sec > 0; -} - -/* Initialize the symbol table info for coff_syminfo. 
*/ - -static int -coff_initialize_syminfo (struct backtrace_state *state, - uintptr_t base_address, - const b_coff_section_header *sects, size_t sects_num, - const b_coff_external_symbol *syms, size_t syms_size, - const unsigned char *strtab, size_t strtab_size, - backtrace_error_callback error_callback, - void *data, struct coff_syminfo_data *sdata) -{ - size_t syms_count; - char *coff_symstr; - size_t coff_symstr_len; - size_t coff_symbol_count; - size_t coff_symbol_size; - struct coff_symbol *coff_symbols; - struct coff_symbol *coff_sym; - char *coff_str; - size_t i; - - syms_count = syms_size / SYM_SZ; - - /* We only care about function symbols. Count them. Also count size of - strings for in-symbol names. */ - coff_symbol_count = 0; - coff_symstr_len = 0; - for (i = 0; i < syms_count; ++i) - { - const b_coff_external_symbol *asym = &syms[i]; - b_coff_internal_symbol isym; - - if (coff_expand_symbol (&isym, asym, sects_num, strtab, strtab_size) < 0) - { - error_callback (data, "invalid section or offset in coff symbol", 0); - return 0; - } - if (coff_is_function_symbol (&isym)) - { - ++coff_symbol_count; - if (asym->name.short_name[0] != 0) - coff_symstr_len += coff_short_name_len (asym->name.short_name) + 1; - } - - i += asym->number_of_aux_symbols; - } - - coff_symbol_size = (coff_symbol_count + 1) * sizeof (struct coff_symbol); - coff_symbols = ((struct coff_symbol *) - backtrace_alloc (state, coff_symbol_size, error_callback, - data)); - if (coff_symbols == NULL) - return 0; - - /* Allocate memory for symbols strings. */ - if (coff_symstr_len > 0) - { - coff_symstr = ((char *) - backtrace_alloc (state, coff_symstr_len, error_callback, - data)); - if (coff_symstr == NULL) - { - backtrace_free (state, coff_symbols, coff_symbol_size, - error_callback, data); - return 0; - } - } - else - coff_symstr = NULL; - - /* Copy symbols. 
*/ - coff_sym = coff_symbols; - coff_str = coff_symstr; - for (i = 0; i < syms_count; ++i) - { - const b_coff_external_symbol *asym = &syms[i]; - b_coff_internal_symbol isym; - - if (coff_expand_symbol (&isym, asym, sects_num, strtab, strtab_size)) - { - /* Should not fail, as it was already tested in the previous - loop. */ - abort (); - } - if (coff_is_function_symbol (&isym)) - { - const char *name; - int16_t secnum; - - if (asym->name.short_name[0] != 0) - { - size_t len = coff_short_name_len (isym.name); - name = coff_str; - memcpy (coff_str, isym.name, len); - coff_str[len] = 0; - coff_str += len + 1; - } - else - name = isym.name; - - /* Strip leading '_'. */ - if (name[0] == '_') - name++; - - /* Symbol value is section relative, so we need to read the address - of its section. */ - secnum = coff_read2 (asym->section_number); - - coff_sym->name = name; - coff_sym->address = (coff_read4 (asym->value) - + sects[secnum - 1].virtual_address - + base_address); - coff_sym++; - } - - i += asym->number_of_aux_symbols; - } - - /* End of symbols marker. */ - coff_sym->name = NULL; - coff_sym->address = -1; - - backtrace_qsort (coff_symbols, coff_symbol_count, - sizeof (struct coff_symbol), coff_symbol_compare); - - sdata->next = NULL; - sdata->symbols = coff_symbols; - sdata->count = coff_symbol_count; - - return 1; -} - -/* Add EDATA to the list in STATE. 
*/ - -static void -coff_add_syminfo_data (struct backtrace_state *state, - struct coff_syminfo_data *sdata) -{ - if (!state->threaded) - { - struct coff_syminfo_data **pp; - - for (pp = (struct coff_syminfo_data **) (void *) &state->syminfo_data; - *pp != NULL; - pp = &(*pp)->next) - ; - *pp = sdata; - } - else - { - while (1) - { - struct coff_syminfo_data **pp; - - pp = (struct coff_syminfo_data **) (void *) &state->syminfo_data; - - while (1) - { - struct coff_syminfo_data *p; - - p = backtrace_atomic_load_pointer (pp); - - if (p == NULL) - break; - - pp = &p->next; - } - - if (__sync_bool_compare_and_swap (pp, NULL, sdata)) - break; - } - } -} - -/* Compare an ADDR against an elf_symbol for bsearch. We allocate one - extra entry in the array so that this can look safely at the next - entry. */ - -static int -coff_symbol_search (const void *vkey, const void *ventry) -{ - const uintptr_t *key = (const uintptr_t *) vkey; - const struct coff_symbol *entry = (const struct coff_symbol *) ventry; - uintptr_t addr; - - addr = *key; - if (addr < entry->address) - return -1; - else if (addr >= entry[1].address) - return 1; - else - return 0; -} - -/* Return the symbol name and value for an ADDR. 
*/ - -static void -coff_syminfo (struct backtrace_state *state, uintptr_t addr, - backtrace_syminfo_callback callback, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data) -{ - struct coff_syminfo_data *sdata; - struct coff_symbol *sym = NULL; - - if (!state->threaded) - { - for (sdata = (struct coff_syminfo_data *) state->syminfo_data; - sdata != NULL; - sdata = sdata->next) - { - sym = ((struct coff_symbol *) - bsearch (&addr, sdata->symbols, sdata->count, - sizeof (struct coff_symbol), coff_symbol_search)); - if (sym != NULL) - break; - } - } - else - { - struct coff_syminfo_data **pp; - - pp = (struct coff_syminfo_data **) (void *) &state->syminfo_data; - while (1) - { - sdata = backtrace_atomic_load_pointer (pp); - if (sdata == NULL) - break; - - sym = ((struct coff_symbol *) - bsearch (&addr, sdata->symbols, sdata->count, - sizeof (struct coff_symbol), coff_symbol_search)); - if (sym != NULL) - break; - - pp = &sdata->next; - } - } - - if (sym == NULL) - callback (data, addr, NULL, 0, 0); - else - callback (data, addr, sym->name, sym->address, 0); -} - -/* Add the backtrace data for one PE/COFF file. Returns 1 on success, - 0 on failure (in both cases descriptor is closed). */ - -static int -coff_add (struct backtrace_state *state, int descriptor, - backtrace_error_callback error_callback, void *data, - fileline *fileline_fn, int *found_sym, int *found_dwarf) -{ - struct backtrace_view fhdr_view; - off_t fhdr_off; - int magic_ok; - b_coff_file_header fhdr; - off_t opt_sects_off; - size_t opt_sects_size; - unsigned int sects_num; - struct backtrace_view sects_view; - int sects_view_valid; - const b_coff_optional_header *opt_hdr; - const b_coff_section_header *sects; - struct backtrace_view str_view; - int str_view_valid; - // NOTE: upstream this is a `size_t` but this was fixed in Rust commit - // 55e2b7e1b, see #33729 for more info. If you see this in a diff - // against the upstream libbacktrace, that's what's going on. 
- uint32_t str_size; - off_t str_off; - // NOTE: upstream doesn't have `{0}`, this is a fix for Rust issue #39468. - // If syms_view is not initialized, then `free(syms_view.base)` may segfault later. - struct backtrace_view syms_view = {0}; - off_t syms_off; - size_t syms_size; - int syms_view_valid; - unsigned int syms_num; - unsigned int i; - struct debug_section_info sections[DEBUG_MAX]; - off_t min_offset; - off_t max_offset; - struct backtrace_view debug_view; - int debug_view_valid; - uintptr_t image_base; - - *found_sym = 0; - *found_dwarf = 0; - - sects_view_valid = 0; - syms_view_valid = 0; - str_view_valid = 0; - debug_view_valid = 0; - - /* Map the MS-DOS stub (if any) and extract file header offset. */ - if (!backtrace_get_view (state, descriptor, 0, 0x40, error_callback, - data, &fhdr_view)) - goto fail; - - { - const char *vptr = (const char *)fhdr_view.data; - - if (vptr[0] == 'M' && vptr[1] == 'Z') - memcpy (&fhdr_off, vptr + 0x3c, 4); - else - fhdr_off = 0; - } - - backtrace_release_view (state, &fhdr_view, error_callback, data); - - /* Map the coff file header. */ - if (!backtrace_get_view (state, descriptor, fhdr_off, - sizeof (b_coff_file_header) + 4, - error_callback, data, &fhdr_view)) - goto fail; - - if (fhdr_off != 0) - { - const char *magic = (const char *) fhdr_view.data; - magic_ok = memcmp (magic, "PE\0", 4) == 0; - fhdr_off += 4; - - memcpy (&fhdr, fhdr_view.data + 4, sizeof fhdr); - } - else - { - memcpy (&fhdr, fhdr_view.data, sizeof fhdr); - /* TODO: test fhdr.machine for coff but non-PE platforms. 
*/ - magic_ok = 0; - } - backtrace_release_view (state, &fhdr_view, error_callback, data); - - if (!magic_ok) - { - error_callback (data, "executable file is not COFF", 0); - goto fail; - } - - sects_num = fhdr.number_of_sections; - syms_num = fhdr.number_of_symbols; - - opt_sects_off = fhdr_off + sizeof (fhdr); - opt_sects_size = (fhdr.size_of_optional_header - + sects_num * sizeof (b_coff_section_header)); - - /* To translate PC to file/line when using DWARF, we need to find - the .debug_info and .debug_line sections. */ - - /* Read the optional header and the section headers. */ - - if (!backtrace_get_view (state, descriptor, opt_sects_off, opt_sects_size, - error_callback, data, §s_view)) - goto fail; - sects_view_valid = 1; - opt_hdr = (const b_coff_optional_header *) sects_view.data; - sects = (const b_coff_section_header *) - (sects_view.data + fhdr.size_of_optional_header); - - if (fhdr.size_of_optional_header > sizeof (*opt_hdr)) - { - if (opt_hdr->magic == PE_MAGIC) - image_base = opt_hdr->u.pe.image_base; - else if (opt_hdr->magic == PEP_MAGIC) - image_base = opt_hdr->u.pep.image_base; - else - { - error_callback (data, "bad magic in PE optional header", 0); - goto fail; - } - } - else - image_base = 0; - - /* Read the symbol table and the string table. */ - - if (fhdr.pointer_to_symbol_table == 0) - { - /* No symbol table, no string table. */ - str_off = 0; - str_size = 0; - syms_num = 0; - syms_size = 0; - } - else - { - /* Symbol table is followed by the string table. The string table - starts with its length (on 4 bytes). - Map the symbol table and the length of the string table. 
*/ - syms_off = fhdr.pointer_to_symbol_table; - syms_size = syms_num * SYM_SZ; - - if (!backtrace_get_view (state, descriptor, syms_off, syms_size + 4, - error_callback, data, &syms_view)) - goto fail; - syms_view_valid = 1; - - memcpy (&str_size, syms_view.data + syms_size, 4); - - str_off = syms_off + syms_size; - - if (str_size > 4) - { - /* Map string table (including the length word). */ - - if (!backtrace_get_view (state, descriptor, str_off, str_size, - error_callback, data, &str_view)) - goto fail; - str_view_valid = 1; - } - } - - memset (sections, 0, sizeof sections); - - /* Look for the symbol table. */ - for (i = 0; i < sects_num; ++i) - { - const b_coff_section_header *s = sects + i; - unsigned int str_off; - int j; - - if (s->name[0] == '/') - { - /* Extended section name. */ - str_off = atoi (s->name + 1); - } - else - str_off = 0; - - for (j = 0; j < (int) DEBUG_MAX; ++j) - { - const char *dbg_name = debug_section_names[j]; - int match; - - if (str_off != 0) - match = coff_long_name_eq (dbg_name, str_off, &str_view); - else - match = coff_short_name_eq (dbg_name, s->name); - if (match) - { - sections[j].offset = s->pointer_to_raw_data; - sections[j].size = s->virtual_size <= s->size_of_raw_data ? 
- s->virtual_size : s->size_of_raw_data; - break; - } - } - } - - if (syms_num != 0) - { - struct coff_syminfo_data *sdata; - - sdata = ((struct coff_syminfo_data *) - backtrace_alloc (state, sizeof *sdata, error_callback, data)); - if (sdata == NULL) - goto fail; - - if (!coff_initialize_syminfo (state, image_base, - sects, sects_num, - syms_view.data, syms_size, - str_view.data, str_size, - error_callback, data, sdata)) - { - backtrace_free (state, sdata, sizeof *sdata, error_callback, data); - goto fail; - } - - *found_sym = 1; - - coff_add_syminfo_data (state, sdata); - } - - backtrace_release_view (state, §s_view, error_callback, data); - sects_view_valid = 0; - backtrace_release_view (state, &syms_view, error_callback, data); - syms_view_valid = 0; - - /* Read all the debug sections in a single view, since they are - probably adjacent in the file. We never release this view. */ - - min_offset = 0; - max_offset = 0; - for (i = 0; i < (int) DEBUG_MAX; ++i) - { - off_t end; - - if (sections[i].size == 0) - continue; - if (min_offset == 0 || sections[i].offset < min_offset) - min_offset = sections[i].offset; - end = sections[i].offset + sections[i].size; - if (end > max_offset) - max_offset = end; - } - if (min_offset == 0 || max_offset == 0) - { - if (!backtrace_close (descriptor, error_callback, data)) - goto fail; - *fileline_fn = coff_nodebug; - return 1; - } - - if (!backtrace_get_view (state, descriptor, min_offset, - max_offset - min_offset, - error_callback, data, &debug_view)) - goto fail; - debug_view_valid = 1; - - /* We've read all we need from the executable. 
*/ - if (!backtrace_close (descriptor, error_callback, data)) - goto fail; - descriptor = -1; - - for (i = 0; i < (int) DEBUG_MAX; ++i) - { - if (sections[i].size == 0) - sections[i].data = NULL; - else - sections[i].data = ((const unsigned char *) debug_view.data - + (sections[i].offset - min_offset)); - } - - if (!backtrace_dwarf_add (state, /* base_address */ 0, - sections[DEBUG_INFO].data, - sections[DEBUG_INFO].size, - sections[DEBUG_LINE].data, - sections[DEBUG_LINE].size, - sections[DEBUG_ABBREV].data, - sections[DEBUG_ABBREV].size, - sections[DEBUG_RANGES].data, - sections[DEBUG_RANGES].size, - sections[DEBUG_STR].data, - sections[DEBUG_STR].size, - 0, /* FIXME */ - error_callback, data, fileline_fn)) - goto fail; - - *found_dwarf = 1; - - return 1; - - fail: - if (sects_view_valid) - backtrace_release_view (state, §s_view, error_callback, data); - if (str_view_valid) - backtrace_release_view (state, &str_view, error_callback, data); - if (syms_view_valid) - backtrace_release_view (state, &syms_view, error_callback, data); - if (debug_view_valid) - backtrace_release_view (state, &debug_view, error_callback, data); - if (descriptor != -1) - backtrace_close (descriptor, error_callback, data); - return 0; -} - -/* Initialize the backtrace data we need from an ELF executable. At - the ELF level, all we need to do is find the debug info - sections. 
*/ - -int -backtrace_initialize (struct backtrace_state *state, int descriptor, - backtrace_error_callback error_callback, - void *data, fileline *fileline_fn) -{ - int ret; - int found_sym; - int found_dwarf; - fileline coff_fileline_fn; - - ret = coff_add (state, descriptor, error_callback, data, - &coff_fileline_fn, &found_sym, &found_dwarf); - if (!ret) - return 0; - - if (!state->threaded) - { - if (found_sym) - state->syminfo_fn = coff_syminfo; - else if (state->syminfo_fn == NULL) - state->syminfo_fn = coff_nosyms; - } - else - { - if (found_sym) - backtrace_atomic_store_pointer (&state->syminfo_fn, coff_syminfo); - else - __sync_bool_compare_and_swap (&state->syminfo_fn, NULL, coff_nosyms); - } - - if (!state->threaded) - { - if (state->fileline_fn == NULL || state->fileline_fn == coff_nodebug) - *fileline_fn = coff_fileline_fn; - } - else - { - fileline current_fn; - - current_fn = backtrace_atomic_load_pointer (&state->fileline_fn); - if (current_fn == NULL || current_fn == coff_nodebug) - *fileline_fn = coff_fileline_fn; - } - - return 1; -} diff --git a/src/libbacktrace/posix.c b/src/libbacktrace/posix.c deleted file mode 100644 index 09f5e95a6e42..000000000000 --- a/src/libbacktrace/posix.c +++ /dev/null @@ -1,100 +0,0 @@ -/* posix.c -- POSIX file I/O routines for the backtrace library. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -#ifndef O_BINARY -#define O_BINARY 0 -#endif - -#ifndef O_CLOEXEC -#define O_CLOEXEC 0 -#endif - -#ifndef FD_CLOEXEC -#define FD_CLOEXEC 1 -#endif - -/* Open a file for reading. */ - -int -backtrace_open (const char *filename, backtrace_error_callback error_callback, - void *data, int *does_not_exist) -{ - int descriptor; - - if (does_not_exist != NULL) - *does_not_exist = 0; - - descriptor = open (filename, (int) (O_RDONLY | O_BINARY | O_CLOEXEC)); - if (descriptor < 0) - { - if (does_not_exist != NULL && errno == ENOENT) - *does_not_exist = 1; - else - error_callback (data, filename, errno); - return -1; - } - -#ifdef HAVE_FCNTL - /* Set FD_CLOEXEC just in case the kernel does not support - O_CLOEXEC. It doesn't matter if this fails for some reason. - FIXME: At some point it should be safe to only do this if - O_CLOEXEC == 0. */ - fcntl (descriptor, F_SETFD, FD_CLOEXEC); -#endif - - return descriptor; -} - -/* Close DESCRIPTOR. 
*/ - -int -backtrace_close (int descriptor, backtrace_error_callback error_callback, - void *data) -{ - if (close (descriptor) < 0) - { - error_callback (data, "close", errno); - return 0; - } - return 1; -} diff --git a/src/libbacktrace/print.c b/src/libbacktrace/print.c deleted file mode 100644 index 74c8fcbee5a1..000000000000 --- a/src/libbacktrace/print.c +++ /dev/null @@ -1,92 +0,0 @@ -/* print.c -- Print the current backtrace. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
*/ - -#include "config.h" - -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -/* Passed to callbacks. */ - -struct print_data -{ - struct backtrace_state *state; - FILE *f; -}; - -/* Print one level of a backtrace. */ - -static int -print_callback (void *data, uintptr_t pc, const char *filename, int lineno, - const char *function) -{ - struct print_data *pdata = (struct print_data *) data; - - fprintf (pdata->f, "0x%lx %s\n\t%s:%d\n", - (unsigned long) pc, - function == NULL ? "???" : function, - filename == NULL ? "???" : filename, - lineno); - return 0; -} - -/* Print errors to stderr. */ - -static void -error_callback (void *data, const char *msg, int errnum) -{ - struct print_data *pdata = (struct print_data *) data; - - if (pdata->state->filename != NULL) - fprintf (stderr, "%s: ", pdata->state->filename); - fprintf (stderr, "libbacktrace: %s", msg); - if (errnum > 0) - fprintf (stderr, ": %s", strerror (errnum)); - fputc ('\n', stderr); -} - -/* Print a backtrace. */ - -void -backtrace_print (struct backtrace_state *state, int skip, FILE *f) -{ - struct print_data data; - - data.state = state; - data.f = f; - backtrace_full (state, skip + 1, print_callback, error_callback, - (void *) &data); -} diff --git a/src/libbacktrace/read.c b/src/libbacktrace/read.c deleted file mode 100644 index 7f0317c3a8ce..000000000000 --- a/src/libbacktrace/read.c +++ /dev/null @@ -1,96 +0,0 @@ -/* read.c -- File views without mmap. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. 
- - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -/* This file implements file views when mmap is not available. */ - -/* Create a view of SIZE bytes from DESCRIPTOR at OFFSET. 
*/ - -int -backtrace_get_view (struct backtrace_state *state, int descriptor, - off_t offset, size_t size, - backtrace_error_callback error_callback, - void *data, struct backtrace_view *view) -{ - ssize_t got; - - if (lseek (descriptor, offset, SEEK_SET) < 0) - { - error_callback (data, "lseek", errno); - return 0; - } - - view->base = backtrace_alloc (state, size, error_callback, data); - if (view->base == NULL) - return 0; - view->data = view->base; - view->len = size; - - got = read (descriptor, view->base, size); - if (got < 0) - { - error_callback (data, "read", errno); - free (view->base); - return 0; - } - - if ((size_t) got < size) - { - error_callback (data, "file too short", 0); - free (view->base); - return 0; - } - - return 1; -} - -/* Release a view read by backtrace_get_view. */ - -void -backtrace_release_view (struct backtrace_state *state, - struct backtrace_view *view, - backtrace_error_callback error_callback, - void *data) -{ - backtrace_free (state, view->base, view->len, error_callback, data); - view->data = NULL; - view->base = NULL; -} diff --git a/src/libbacktrace/simple.c b/src/libbacktrace/simple.c deleted file mode 100644 index 018773a7e5db..000000000000 --- a/src/libbacktrace/simple.c +++ /dev/null @@ -1,108 +0,0 @@ -/* simple.c -- The backtrace_simple function. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include "unwind.h" -#include "backtrace.h" - -/* The simple_backtrace routine. */ - -/* Data passed through _Unwind_Backtrace. */ - -struct backtrace_simple_data -{ - /* Number of frames to skip. */ - int skip; - /* Library state. */ - struct backtrace_state *state; - /* Callback routine. */ - backtrace_simple_callback callback; - /* Error callback routine. */ - backtrace_error_callback error_callback; - /* Data to pass to callback routine. */ - void *data; - /* Value to return from backtrace. */ - int ret; -}; - -/* Unwind library callback routine. This is passd to - _Unwind_Backtrace. 
*/ - -static _Unwind_Reason_Code -simple_unwind (struct _Unwind_Context *context, void *vdata) -{ - struct backtrace_simple_data *bdata = (struct backtrace_simple_data *) vdata; - uintptr_t pc; - int ip_before_insn = 0; - -#ifdef HAVE_GETIPINFO - pc = _Unwind_GetIPInfo (context, &ip_before_insn); -#else - pc = _Unwind_GetIP (context); -#endif - - if (bdata->skip > 0) - { - --bdata->skip; - return _URC_NO_REASON; - } - - if (!ip_before_insn) - --pc; - - bdata->ret = bdata->callback (bdata->data, pc); - - if (bdata->ret != 0) - return _URC_END_OF_STACK; - - return _URC_NO_REASON; -} - -/* Get a simple stack backtrace. */ - -int -backtrace_simple (struct backtrace_state *state, int skip, - backtrace_simple_callback callback, - backtrace_error_callback error_callback, void *data) -{ - struct backtrace_simple_data bdata; - - bdata.skip = skip + 1; - bdata.state = state; - bdata.callback = callback; - bdata.error_callback = error_callback; - bdata.data = data; - bdata.ret = 0; - _Unwind_Backtrace (simple_unwind, &bdata); - return bdata.ret; -} diff --git a/src/libbacktrace/sort.c b/src/libbacktrace/sort.c deleted file mode 100644 index 68a7df65a47f..000000000000 --- a/src/libbacktrace/sort.c +++ /dev/null @@ -1,108 +0,0 @@ -/* sort.c -- Sort without allocating memory - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. 
- - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include - -#include "backtrace.h" -#include "internal.h" - -/* The GNU glibc version of qsort allocates memory, which we must not - do if we are invoked by a signal handler. So provide our own - sort. */ - -static void -swap (char *a, char *b, size_t size) -{ - size_t i; - - for (i = 0; i < size; i++, a++, b++) - { - char t; - - t = *a; - *a = *b; - *b = t; - } -} - -void -backtrace_qsort (void *basearg, size_t count, size_t size, - int (*compar) (const void *, const void *)) -{ - char *base = (char *) basearg; - size_t i; - size_t mid; - - tail_recurse: - if (count < 2) - return; - - /* The symbol table and DWARF tables, which is all we use this - routine for, tend to be roughly sorted. Pick the middle element - in the array as our pivot point, so that we are more likely to - cut the array in half for each recursion step. 
*/ - swap (base, base + (count / 2) * size, size); - - mid = 0; - for (i = 1; i < count; i++) - { - if ((*compar) (base, base + i * size) > 0) - { - ++mid; - if (i != mid) - swap (base + mid * size, base + i * size, size); - } - } - - if (mid > 0) - swap (base, base + mid * size, size); - - /* Recurse with the smaller array, loop with the larger one. That - ensures that our maximum stack depth is log count. */ - if (2 * mid < count) - { - backtrace_qsort (base, mid, size, compar); - base += (mid + 1) * size; - count -= mid + 1; - goto tail_recurse; - } - else - { - backtrace_qsort (base + (mid + 1) * size, count - (mid + 1), - size, compar); - count = mid; - goto tail_recurse; - } -} diff --git a/src/libbacktrace/state.c b/src/libbacktrace/state.c deleted file mode 100644 index 93420d9c61b1..000000000000 --- a/src/libbacktrace/state.c +++ /dev/null @@ -1,72 +0,0 @@ -/* state.c -- Create the backtrace state. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include - -#include "backtrace.h" -#include "backtrace-supported.h" -#include "internal.h" - -/* Create the backtrace state. This will then be passed to all the - other routines. */ - -struct backtrace_state * -backtrace_create_state (const char *filename, int threaded, - backtrace_error_callback error_callback, - void *data) -{ - struct backtrace_state init_state; - struct backtrace_state *state; - -#ifndef HAVE_SYNC_FUNCTIONS - if (threaded) - { - error_callback (data, "backtrace library does not support threads", 0); - return NULL; - } -#endif - - memset (&init_state, 0, sizeof init_state); - init_state.filename = filename; - init_state.threaded = threaded; - - state = ((struct backtrace_state *) - backtrace_alloc (&init_state, sizeof *state, error_callback, data)); - if (state == NULL) - return NULL; - *state = init_state; - - return state; -} diff --git a/src/libbacktrace/stest.c b/src/libbacktrace/stest.c deleted file mode 100644 index 55ec31d10bc8..000000000000 --- a/src/libbacktrace/stest.c +++ /dev/null @@ -1,137 +0,0 @@ -/* stest.c -- Test for libbacktrace internal sort function - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include -#include -#include -#include - -#include "backtrace.h" -#include "internal.h" - -/* Test the local qsort implementation. 
*/ - -#define MAX 10 - -struct test -{ - size_t count; - int input[MAX]; - int output[MAX]; -}; - -static struct test tests[] = - { - { - 10, - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 } - }, - { - 9, - { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - { 1, 2, 3, 4, 5, 6, 7, 8, 9 } - }, - { - 10, - { 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 }, - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, - }, - { - 9, - { 9, 8, 7, 6, 5, 4, 3, 2, 1 }, - { 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - }, - { - 10, - { 2, 4, 6, 8, 10, 1, 3, 5, 7, 9 }, - { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, - }, - { - 5, - { 4, 5, 3, 1, 2 }, - { 1, 2, 3, 4, 5 }, - }, - { - 5, - { 1, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 1 }, - }, - { - 5, - { 1, 1, 2, 1, 1 }, - { 1, 1, 1, 1, 2 }, - }, - { - 5, - { 2, 1, 1, 1, 1 }, - { 1, 1, 1, 1, 2 }, - }, - }; - -static int -compare (const void *a, const void *b) -{ - const int *ai = (const int *) a; - const int *bi = (const int *) b; - - return *ai - *bi; -} - -int -main (int argc ATTRIBUTE_UNUSED, char **argv ATTRIBUTE_UNUSED) -{ - int failures; - size_t i; - int a[MAX]; - - failures = 0; - for (i = 0; i < sizeof tests / sizeof tests[0]; i++) - { - memcpy (a, tests[i].input, tests[i].count * sizeof (int)); - backtrace_qsort (a, tests[i].count, sizeof (int), compare); - if (memcmp (a, tests[i].output, tests[i].count * sizeof (int)) != 0) - { - size_t j; - - fprintf (stderr, "test %d failed:", (int) i); - for (j = 0; j < tests[i].count; j++) - fprintf (stderr, " %d", a[j]); - fprintf (stderr, "\n"); - ++failures; - } - } - - exit (failures > 0 ? EXIT_FAILURE : EXIT_SUCCESS); -} diff --git a/src/libbacktrace/unknown.c b/src/libbacktrace/unknown.c deleted file mode 100644 index 8d06c31549f7..000000000000 --- a/src/libbacktrace/unknown.c +++ /dev/null @@ -1,64 +0,0 @@ -/* unknown.c -- used when backtrace configury does not know file format. - Copyright (C) 2012-2016 Free Software Foundation, Inc. - Written by Ian Lance Taylor, Google. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - (1) Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - (2) Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - (3) The name of the author may not be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. */ - -#include "config.h" - -#include - -#include "backtrace.h" -#include "internal.h" - -/* A trivial routine that always fails to find fileline data. */ - -static int -unknown_fileline (struct backtrace_state *state ATTRIBUTE_UNUSED, - uintptr_t pc, backtrace_full_callback callback, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data) - -{ - return callback (data, pc, NULL, 0, NULL); -} - -/* Initialize the backtrace data when we don't know how to read the - debug info. 
*/ - -int -backtrace_initialize (struct backtrace_state *state ATTRIBUTE_UNUSED, - int descriptor ATTRIBUTE_UNUSED, - backtrace_error_callback error_callback ATTRIBUTE_UNUSED, - void *data ATTRIBUTE_UNUSED, fileline *fileline_fn) -{ - state->fileline_data = NULL; - *fileline_fn = unknown_fileline; - return 1; -} diff --git a/src/libcompiler_builtins b/src/libcompiler_builtins index 0ba07e49264a..d549d85b1735 160000 --- a/src/libcompiler_builtins +++ b/src/libcompiler_builtins @@ -1 +1 @@ -Subproject commit 0ba07e49264a54cb5bbd4856fcea083bb3fbec15 +Subproject commit d549d85b1735dc5066b2973f8549557a813bb9c8 diff --git a/src/libcore/Cargo.toml b/src/libcore/Cargo.toml index 5af63aa970f2..321ed892ea9a 100644 --- a/src/libcore/Cargo.toml +++ b/src/libcore/Cargo.toml @@ -2,6 +2,8 @@ authors = ["The Rust Project Developers"] name = "core" version = "0.0.0" +autotests = false +autobenches = false [lib] name = "core" @@ -16,3 +18,6 @@ path = "../libcore/tests/lib.rs" [[bench]] name = "corebenches" path = "../libcore/benches/lib.rs" + +[dev-dependencies] +rand = "0.4" diff --git a/src/libcore/alloc.rs b/src/libcore/alloc.rs new file mode 100644 index 000000000000..35e4eea756d4 --- /dev/null +++ b/src/libcore/alloc.rs @@ -0,0 +1,1233 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Memory allocation APIs + +#![stable(feature = "alloc_module", since = "1.28.0")] + +use cmp; +use fmt; +use mem; +use usize; +use ptr::{self, NonNull}; +use num::NonZeroUsize; + +/// Represents the combination of a starting address and +/// a total capacity of the returned block. 
+#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Debug)] +pub struct Excess(pub NonNull, pub usize); + +fn size_align() -> (usize, usize) { + (mem::size_of::(), mem::align_of::()) +} + +/// Layout of a block of memory. +/// +/// An instance of `Layout` describes a particular layout of memory. +/// You build a `Layout` up as an input to give to an allocator. +/// +/// All layouts have an associated non-negative size and a +/// power-of-two alignment. +/// +/// (Note however that layouts are *not* required to have positive +/// size, even though many allocators require that all memory +/// requests have positive size. A caller to the `Alloc::alloc` +/// method must either ensure that conditions like this are met, or +/// use specific allocators with looser requirements.) +#[stable(feature = "alloc_layout", since = "1.28.0")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[lang = "alloc_layout"] +pub struct Layout { + // size of the requested block of memory, measured in bytes. + size_: usize, + + // alignment of the requested block of memory, measured in bytes. + // we ensure that this is always a power-of-two, because API's + // like `posix_memalign` require it and it is a reasonable + // constraint to impose on Layout constructors. + // + // (However, we do not analogously require `align >= sizeof(void*)`, + // even though that is *also* a requirement of `posix_memalign`.) + align_: NonZeroUsize, +} + +impl Layout { + /// Constructs a `Layout` from a given `size` and `align`, + /// or returns `LayoutErr` if either of the following conditions + /// are not met: + /// + /// * `align` must not be zero, + /// + /// * `align` must be a power of two, + /// + /// * `size`, when rounded up to the nearest multiple of `align`, + /// must not overflow (i.e. the rounded value must be less than + /// `usize::MAX`). 
+ #[stable(feature = "alloc_layout", since = "1.28.0")] + #[inline] + pub fn from_size_align(size: usize, align: usize) -> Result { + if !align.is_power_of_two() { + return Err(LayoutErr { private: () }); + } + + // (power-of-two implies align != 0.) + + // Rounded up size is: + // size_rounded_up = (size + align - 1) & !(align - 1); + // + // We know from above that align != 0. If adding (align - 1) + // does not overflow, then rounding up will be fine. + // + // Conversely, &-masking with !(align - 1) will subtract off + // only low-order-bits. Thus if overflow occurs with the sum, + // the &-mask cannot subtract enough to undo that overflow. + // + // Above implies that checking for summation overflow is both + // necessary and sufficient. + if size > usize::MAX - (align - 1) { + return Err(LayoutErr { private: () }); + } + + unsafe { + Ok(Layout::from_size_align_unchecked(size, align)) + } + } + + /// Creates a layout, bypassing all checks. + /// + /// # Safety + /// + /// This function is unsafe as it does not verify the preconditions from + /// [`Layout::from_size_align`](#method.from_size_align). + #[stable(feature = "alloc_layout", since = "1.28.0")] + #[inline] + pub unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self { + Layout { size_: size, align_: NonZeroUsize::new_unchecked(align) } + } + + /// The minimum size in bytes for a memory block of this layout. + #[stable(feature = "alloc_layout", since = "1.28.0")] + #[inline] + pub fn size(&self) -> usize { self.size_ } + + /// The minimum byte alignment for a memory block of this layout. + #[stable(feature = "alloc_layout", since = "1.28.0")] + #[inline] + pub fn align(&self) -> usize { self.align_.get() } + + /// Constructs a `Layout` suitable for holding a value of type `T`. 
+ #[stable(feature = "alloc_layout", since = "1.28.0")] + #[inline] + pub fn new() -> Self { + let (size, align) = size_align::(); + // Note that the align is guaranteed by rustc to be a power of two and + // the size+align combo is guaranteed to fit in our address space. As a + // result use the unchecked constructor here to avoid inserting code + // that panics if it isn't optimized well enough. + debug_assert!(Layout::from_size_align(size, align).is_ok()); + unsafe { + Layout::from_size_align_unchecked(size, align) + } + } + + /// Produces layout describing a record that could be used to + /// allocate backing structure for `T` (which could be a trait + /// or other unsized type like a slice). + #[stable(feature = "alloc_layout", since = "1.28.0")] + #[inline] + pub fn for_value(t: &T) -> Self { + let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); + // See rationale in `new` for why this us using an unsafe variant below + debug_assert!(Layout::from_size_align(size, align).is_ok()); + unsafe { + Layout::from_size_align_unchecked(size, align) + } + } + + /// Creates a layout describing the record that can hold a value + /// of the same layout as `self`, but that also is aligned to + /// alignment `align` (measured in bytes). + /// + /// If `self` already meets the prescribed alignment, then returns + /// `self`. + /// + /// Note that this method does not add any padding to the overall + /// size, regardless of whether the returned layout has a different + /// alignment. In other words, if `K` has size 16, `K.align_to(32)` + /// will *still* have size 16. + /// + /// # Panics + /// + /// Panics if the combination of `self.size()` and the given `align` + /// violates the conditions listed in + /// [`Layout::from_size_align`](#method.from_size_align). 
+ #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn align_to(&self, align: usize) -> Self { + Layout::from_size_align(self.size(), cmp::max(self.align(), align)).unwrap() + } + + /// Returns the amount of padding we must insert after `self` + /// to ensure that the following address will satisfy `align` + /// (measured in bytes). + /// + /// E.g. if `self.size()` is 9, then `self.padding_needed_for(4)` + /// returns 3, because that is the minimum number of bytes of + /// padding required to get a 4-aligned address (assuming that the + /// corresponding memory block starts at a 4-aligned address). + /// + /// The return value of this function has no meaning if `align` is + /// not a power-of-two. + /// + /// Note that the utility of the returned value requires `align` + /// to be less than or equal to the alignment of the starting + /// address for the whole allocated block of memory. One way to + /// satisfy this constraint is to ensure `align <= self.align()`. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn padding_needed_for(&self, align: usize) -> usize { + let len = self.size(); + + // Rounded up value is: + // len_rounded_up = (len + align - 1) & !(align - 1); + // and then we return the padding difference: `len_rounded_up - len`. + // + // We use modular arithmetic throughout: + // + // 1. align is guaranteed to be > 0, so align - 1 is always + // valid. + // + // 2. `len + align - 1` can overflow by at most `align - 1`, + // so the &-mask wth `!(align - 1)` will ensure that in the + // case of overflow, `len_rounded_up` will itself be 0. + // Thus the returned padding, when added to `len`, yields 0, + // which trivially satisfies the alignment `align`. + // + // (Of course, attempts to allocate blocks of memory whose + // size and padding overflow in the above manner should cause + // the allocator to yield an error anyway.) 
+ + let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) + & !align.wrapping_sub(1); + len_rounded_up.wrapping_sub(len) + } + + /// Creates a layout describing the record for `n` instances of + /// `self`, with a suitable amount of padding between each to + /// ensure that each instance is given its requested size and + /// alignment. On success, returns `(k, offs)` where `k` is the + /// layout of the array and `offs` is the distance between the start + /// of each element in the array. + /// + /// On arithmetic overflow, returns `LayoutErr`. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutErr> { + let padded_size = self.size().checked_add(self.padding_needed_for(self.align())) + .ok_or(LayoutErr { private: () })?; + let alloc_size = padded_size.checked_mul(n) + .ok_or(LayoutErr { private: () })?; + + unsafe { + // self.align is already known to be valid and alloc_size has been + // padded already. + Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size)) + } + } + + /// Creates a layout describing the record for `self` followed by + /// `next`, including any necessary padding to ensure that `next` + /// will be properly aligned. Note that the result layout will + /// satisfy the alignment properties of both `self` and `next`. + /// + /// Returns `Some((k, offset))`, where `k` is layout of the concatenated + /// record and `offset` is the relative location, in bytes, of the + /// start of the `next` embedded within the concatenated record + /// (assuming that the record itself starts at offset 0). + /// + /// On arithmetic overflow, returns `LayoutErr`. 
+ #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutErr> { + let new_align = cmp::max(self.align(), next.align()); + let pad = self.padding_needed_for(next.align()); + + let offset = self.size().checked_add(pad) + .ok_or(LayoutErr { private: () })?; + let new_size = offset.checked_add(next.size()) + .ok_or(LayoutErr { private: () })?; + + let layout = Layout::from_size_align(new_size, new_align)?; + Ok((layout, offset)) + } + + /// Creates a layout describing the record for `n` instances of + /// `self`, with no padding between each instance. + /// + /// Note that, unlike `repeat`, `repeat_packed` does not guarantee + /// that the repeated instances of `self` will be properly + /// aligned, even if a given instance of `self` is properly + /// aligned. In other words, if the layout returned by + /// `repeat_packed` is used to allocate an array, it is not + /// guaranteed that all elements in the array will be properly + /// aligned. + /// + /// On arithmetic overflow, returns `LayoutErr`. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn repeat_packed(&self, n: usize) -> Result { + let size = self.size().checked_mul(n).ok_or(LayoutErr { private: () })?; + Layout::from_size_align(size, self.align()) + } + + /// Creates a layout describing the record for `self` followed by + /// `next` with no additional padding between the two. Since no + /// padding is inserted, the alignment of `next` is irrelevant, + /// and is not incorporated *at all* into the resulting layout. + /// + /// Returns `(k, offset)`, where `k` is layout of the concatenated + /// record and `offset` is the relative location, in bytes, of the + /// start of the `next` embedded within the concatenated record + /// (assuming that the record itself starts at offset 0). 
+ /// + /// (The `offset` is always the same as `self.size()`; we use this + /// signature out of convenience in matching the signature of + /// `extend`.) + /// + /// On arithmetic overflow, returns `LayoutErr`. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn extend_packed(&self, next: Self) -> Result<(Self, usize), LayoutErr> { + let new_size = self.size().checked_add(next.size()) + .ok_or(LayoutErr { private: () })?; + let layout = Layout::from_size_align(new_size, self.align())?; + Ok((layout, self.size())) + } + + /// Creates a layout describing the record for a `[T; n]`. + /// + /// On arithmetic overflow, returns `LayoutErr`. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn array(n: usize) -> Result { + Layout::new::() + .repeat(n) + .map(|(k, offs)| { + debug_assert!(offs == mem::size_of::()); + k + }) + } +} + +/// The parameters given to `Layout::from_size_align` +/// or some other `Layout` constructor +/// do not satisfy its documented constraints. +#[stable(feature = "alloc_layout", since = "1.28.0")] +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct LayoutErr { + private: () +} + +// (we need this for downstream impl of trait Error) +#[stable(feature = "alloc_layout", since = "1.28.0")] +impl fmt::Display for LayoutErr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("invalid parameters to Layout::from_size_align") + } +} + +/// The `AllocErr` error indicates an allocation failure +/// that may be due to resource exhaustion or to +/// something wrong when combining the given input arguments with this +/// allocator. 
+#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct AllocErr; + +// (we need this for downstream impl of trait Error) +#[unstable(feature = "allocator_api", issue = "32838")] +impl fmt::Display for AllocErr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("memory allocation failed") + } +} + +/// The `CannotReallocInPlace` error is used when `grow_in_place` or +/// `shrink_in_place` were unable to reuse the given memory block for +/// a requested layout. +#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct CannotReallocInPlace; + +#[unstable(feature = "allocator_api", issue = "32838")] +impl CannotReallocInPlace { + pub fn description(&self) -> &str { + "cannot reallocate allocator's memory in place" + } +} + +// (we need this for downstream impl of trait Error) +#[unstable(feature = "allocator_api", issue = "32838")] +impl fmt::Display for CannotReallocInPlace { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} + +/// A memory allocator that can be registered as the standard library’s default +/// though the `#[global_allocator]` attributes. +/// +/// Some of the methods require that a memory block be *currently +/// allocated* via an allocator. This means that: +/// +/// * the starting address for that memory block was previously +/// returned by a previous call to an allocation method +/// such as `alloc`, and +/// +/// * the memory block has not been subsequently deallocated, where +/// blocks are deallocated either by being passed to a deallocation +/// method such as `dealloc` or by being +/// passed to a reallocation method that returns a non-null pointer. 
+/// +/// +/// # Example +/// +/// ```no_run +/// use std::alloc::{GlobalAlloc, Layout, alloc}; +/// use std::ptr::null_mut; +/// +/// struct MyAllocator; +/// +/// unsafe impl GlobalAlloc for MyAllocator { +/// unsafe fn alloc(&self, _layout: Layout) -> *mut u8 { null_mut() } +/// unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {} +/// } +/// +/// #[global_allocator] +/// static A: MyAllocator = MyAllocator; +/// +/// fn main() { +/// unsafe { +/// assert!(alloc(Layout::new::()).is_null()) +/// } +/// } +/// ``` +/// +/// # Unsafety +/// +/// The `GlobalAlloc` trait is an `unsafe` trait for a number of reasons, and +/// implementors must ensure that they adhere to these contracts: +/// +/// * It's undefined behavior if global allocators unwind. This restriction may +/// be lifted in the future, but currently a panic from any of these +/// functions may lead to memory unsafety. +/// +/// * `Layout` queries and calculations in general must be correct. Callers of +/// this trait are allowed to rely on the contracts defined on each method, +/// and implementors must ensure such contracts remain true. +#[stable(feature = "global_alloc", since = "1.28.0")] +pub unsafe trait GlobalAlloc { + /// Allocate memory as described by the given `layout`. + /// + /// Returns a pointer to newly-allocated memory, + /// or null to indicate allocation failure. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure that `layout` has non-zero size. + /// + /// (Extension subtraits might provide more specific bounds on + /// behavior, e.g. guarantee a sentinel address or a null pointer + /// in response to a zero-size allocation request.) + /// + /// The allocated block of memory may or may not be initialized. + /// + /// # Errors + /// + /// Returning a null pointer indicates that either memory is exhausted + /// or `layout` does not meet allocator's size or alignment constraints. 
+ /// + /// Implementations are encouraged to return null on memory + /// exhaustion rather than aborting, but this is not + /// a strict requirement. (Specifically: it is *legal* to + /// implement this trait atop an underlying native allocation + /// library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + #[stable(feature = "global_alloc", since = "1.28.0")] + unsafe fn alloc(&self, layout: Layout) -> *mut u8; + + /// Deallocate the block of memory at the given `ptr` pointer with the given `layout`. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must denote a block of memory currently allocated via + /// this allocator, + /// + /// * `layout` must be the same layout that was used + /// to allocated that block of memory, + #[stable(feature = "global_alloc", since = "1.28.0")] + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout); + + /// Behaves like `alloc`, but also ensures that the contents + /// are set to zero before being returned. + /// + /// # Safety + /// + /// This function is unsafe for the same reasons that `alloc` is. + /// However the allocated block of memory is guaranteed to be initialized. + /// + /// # Errors + /// + /// Returning a null pointer indicates that either memory is exhausted + /// or `layout` does not meet allocator's size or alignment constraints, + /// just as in `alloc`. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. 
+ /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + #[stable(feature = "global_alloc", since = "1.28.0")] + unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { + let size = layout.size(); + let ptr = self.alloc(layout); + if !ptr.is_null() { + ptr::write_bytes(ptr, 0, size); + } + ptr + } + + /// Shink or grow a block of memory to the given `new_size`. + /// The block is described by the given `ptr` pointer and `layout`. + /// + /// If this returns a non-null pointer, then ownership of the memory block + /// referenced by `ptr` has been transferred to this alloctor. + /// The memory may or may not have been deallocated, + /// and should be considered unusable (unless of course it was + /// transferred back to the caller again via the return value of + /// this method). + /// + /// If this method returns null, then ownership of the memory + /// block has not been transferred to this allocator, and the + /// contents of the memory block are unaltered. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must be the same layout that was used + /// to allocated that block of memory, + /// + /// * `new_size` must be greater than zero. + /// + /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`, + /// must not overflow (i.e. the rounded value must be less than `usize::MAX`). + /// + /// (Extension subtraits might provide more specific bounds on + /// behavior, e.g. guarantee a sentinel address or a null pointer + /// in response to a zero-size allocation request.) + /// + /// # Errors + /// + /// Returns null if the new layout does not meet the size + /// and alignment constraints of the allocator, or if reallocation + /// otherwise fails. 
+ /// + /// Implementations are encouraged to return null on memory + /// exhaustion rather than panicking or aborting, but this is not + /// a strict requirement. (Specifically: it is *legal* to + /// implement this trait atop an underlying native allocation + /// library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to a + /// reallocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + #[stable(feature = "global_alloc", since = "1.28.0")] + unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); + let new_ptr = self.alloc(new_layout); + if !new_ptr.is_null() { + ptr::copy_nonoverlapping( + ptr, + new_ptr, + cmp::min(layout.size(), new_size), + ); + self.dealloc(ptr, layout); + } + new_ptr + } +} + +/// An implementation of `Alloc` can allocate, reallocate, and +/// deallocate arbitrary blocks of data described via `Layout`. +/// +/// Some of the methods require that a memory block be *currently +/// allocated* via an allocator. This means that: +/// +/// * the starting address for that memory block was previously +/// returned by a previous call to an allocation method (`alloc`, +/// `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or +/// reallocation method (`realloc`, `realloc_excess`, or +/// `realloc_array`), and +/// +/// * the memory block has not been subsequently deallocated, where +/// blocks are deallocated either by being passed to a deallocation +/// method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being +/// passed to a reallocation method (see above) that returns `Ok`. 
+/// +/// A note regarding zero-sized types and zero-sized layouts: many +/// methods in the `Alloc` trait state that allocation requests +/// must be non-zero size, or else undefined behavior can result. +/// +/// * However, some higher-level allocation methods (`alloc_one`, +/// `alloc_array`) are well-defined on zero-sized types and can +/// optionally support them: it is left up to the implementor +/// whether to return `Err`, or to return `Ok` with some pointer. +/// +/// * If an `Alloc` implementation chooses to return `Ok` in this +/// case (i.e. the pointer denotes a zero-sized inaccessible block) +/// then that returned pointer must be considered "currently +/// allocated". On such an allocator, *all* methods that take +/// currently-allocated pointers as inputs must accept these +/// zero-sized pointers, *without* causing undefined behavior. +/// +/// * In other words, if a zero-sized pointer can flow out of an +/// allocator, then that allocator must likewise accept that pointer +/// flowing back into its deallocation and reallocation methods. +/// +/// Some of the methods require that a layout *fit* a memory block. +/// What it means for a layout to "fit" a memory block means (or +/// equivalently, for a memory block to "fit" a layout) is that the +/// following two conditions must hold: +/// +/// 1. The block's starting address must be aligned to `layout.align()`. +/// +/// 2. The block's size must fall in the range `[use_min, use_max]`, where: +/// +/// * `use_min` is `self.usable_size(layout).0`, and +/// +/// * `use_max` is the capacity that was (or would have been) +/// returned when (if) the block was allocated via a call to +/// `alloc_excess` or `realloc_excess`. +/// +/// Note that: +/// +/// * the size of the layout most recently used to allocate the block +/// is guaranteed to be in the range `[use_min, use_max]`, and +/// +/// * a lower-bound on `use_max` can be safely approximated by a call to +/// `usable_size`. 
+/// +/// * if a layout `k` fits a memory block (denoted by `ptr`) +/// currently allocated via an allocator `a`, then it is legal to +/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`. +/// +/// # Unsafety +/// +/// The `Alloc` trait is an `unsafe` trait for a number of reasons, and +/// implementors must ensure that they adhere to these contracts: +/// +/// * Pointers returned from allocation functions must point to valid memory and +/// retain their validity until at least the instance of `Alloc` is dropped +/// itself. +/// +/// * `Layout` queries and calculations in general must be correct. Callers of +/// this trait are allowed to rely on the contracts defined on each method, +/// and implementors must ensure such contracts remain true. +/// +/// Note that this list may get tweaked over time as clarifications are made in +/// the future. +#[unstable(feature = "allocator_api", issue = "32838")] +pub unsafe trait Alloc { + + // (Note: some existing allocators have unspecified but well-defined + // behavior in response to a zero size allocation request ; + // e.g. in C, `malloc` of 0 will either return a null pointer or a + // unique pointer, but will not have arbitrary undefined + // behavior. + // However in jemalloc for example, + // `mallocx(0)` is documented as undefined behavior.) + + /// Returns a pointer meeting the size and alignment guarantees of + /// `layout`. + /// + /// If this method returns an `Ok(addr)`, then the `addr` returned + /// will be non-null address pointing to a block of storage + /// suitable for holding an instance of `layout`. + /// + /// The returned block of storage may or may not have its contents + /// initialized. (Extension subtraits might restrict this + /// behavior, e.g. to ensure initialization to particular sets of + /// bit patterns.) + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure that `layout` has non-zero size. 
+ /// + /// (Extension subtraits might provide more specific bounds on + /// behavior, e.g. guarantee a sentinel address or a null pointer + /// in response to a zero-size allocation request.) + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints. + /// + /// Implementations are encouraged to return `Err` on memory + /// exhaustion rather than panicking or aborting, but this is not + /// a strict requirement. (Specifically: it is *legal* to + /// implement this trait atop an underlying native allocation + /// library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn alloc(&mut self, layout: Layout) -> Result, AllocErr>; + + /// Deallocate the memory referenced by `ptr`. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must denote a block of memory currently allocated via + /// this allocator, + /// + /// * `layout` must *fit* that block of memory, + /// + /// * In addition to fitting the block of memory `layout`, the + /// alignment of the `layout` must match the alignment used + /// to allocate that block of memory. + unsafe fn dealloc(&mut self, ptr: NonNull, layout: Layout); + + // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS == + // usable_size + + /// Returns bounds on the guaranteed usable size of a successful + /// allocation created with the specified `layout`. 
+ /// + /// In particular, if one has a memory block allocated via a given + /// allocator `a` and layout `k` where `a.usable_size(k)` returns + /// `(l, u)`, then one can pass that block to `a.dealloc()` with a + /// layout in the size range [l, u]. + /// + /// (All implementors of `usable_size` must ensure that + /// `l <= k.size() <= u`) + /// + /// Both the lower- and upper-bounds (`l` and `u` respectively) + /// are provided, because an allocator based on size classes could + /// misbehave if one attempts to deallocate a block without + /// providing a correct value for its size (i.e., one within the + /// range `[l, u]`). + /// + /// Clients who wish to make use of excess capacity are encouraged + /// to use the `alloc_excess` and `realloc_excess` instead, as + /// this method is constrained to report conservative values that + /// serve as valid bounds for *all possible* allocation method + /// calls. + /// + /// However, for clients that do not wish to track the capacity + /// returned by `alloc_excess` locally, this method is likely to + /// produce useful results. + #[inline] + fn usable_size(&self, layout: &Layout) -> (usize, usize) { + (layout.size(), layout.size()) + } + + // == METHODS FOR MEMORY REUSE == + // realloc. alloc_excess, realloc_excess + + /// Returns a pointer suitable for holding data described by + /// a new layout with `layout`’s alginment and a size given + /// by `new_size`. To + /// accomplish this, this may extend or shrink the allocation + /// referenced by `ptr` to fit the new layout. + /// + /// If this returns `Ok`, then ownership of the memory block + /// referenced by `ptr` has been transferred to this + /// allocator. The memory may or may not have been freed, and + /// should be considered unusable (unless of course it was + /// transferred back to the caller again via the return value of + /// this method). 
+ /// + /// If this method returns `Err`, then ownership of the memory + /// block has not been transferred to this allocator, and the + /// contents of the memory block are unaltered. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must *fit* the `ptr` (see above). (The `new_size` + /// argument need not fit it.) + /// + /// * `new_size` must be greater than zero. + /// + /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`, + /// must not overflow (i.e. the rounded value must be less than `usize::MAX`). + /// + /// (Extension subtraits might provide more specific bounds on + /// behavior, e.g. guarantee a sentinel address or a null pointer + /// in response to a zero-size allocation request.) + /// + /// # Errors + /// + /// Returns `Err` only if the new layout + /// does not meet the allocator's size + /// and alignment constraints of the allocator, or if reallocation + /// otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory + /// exhaustion rather than panicking or aborting, but this is not + /// a strict requirement. (Specifically: it is *legal* to + /// implement this trait atop an underlying native allocation + /// library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to a + /// reallocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. 
+ /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn realloc(&mut self, + ptr: NonNull, + layout: Layout, + new_size: usize) -> Result, AllocErr> { + let old_size = layout.size(); + + if new_size >= old_size { + if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_size) { + return Ok(ptr); + } + } else if new_size < old_size { + if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_size) { + return Ok(ptr); + } + } + + // otherwise, fall back on alloc + copy + dealloc. + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); + let result = self.alloc(new_layout); + if let Ok(new_ptr) = result { + ptr::copy_nonoverlapping(ptr.as_ptr(), + new_ptr.as_ptr(), + cmp::min(old_size, new_size)); + self.dealloc(ptr, layout); + } + result + } + + /// Behaves like `alloc`, but also ensures that the contents + /// are set to zero before being returned. + /// + /// # Safety + /// + /// This function is unsafe for the same reasons that `alloc` is. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints, just as in `alloc`. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result, AllocErr> { + let size = layout.size(); + let p = self.alloc(layout); + if let Ok(p) = p { + ptr::write_bytes(p.as_ptr(), 0, size); + } + p + } + + /// Behaves like `alloc`, but also returns the whole size of + /// the returned block. For some `layout` inputs, like arrays, this + /// may include extra storage usable for additional data. 
+ /// + /// # Safety + /// + /// This function is unsafe for the same reasons that `alloc` is. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints, just as in `alloc`. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn alloc_excess(&mut self, layout: Layout) -> Result { + let usable_size = self.usable_size(&layout); + self.alloc(layout).map(|p| Excess(p, usable_size.1)) + } + + /// Behaves like `realloc`, but also returns the whole size of + /// the returned block. For some `layout` inputs, like arrays, this + /// may include extra storage usable for additional data. + /// + /// # Safety + /// + /// This function is unsafe for the same reasons that `realloc` is. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `layout` does not meet allocator's size or alignment + /// constraints, just as in `realloc`. + /// + /// Clients wishing to abort computation in response to a + /// reallocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn realloc_excess(&mut self, + ptr: NonNull, + layout: Layout, + new_size: usize) -> Result { + let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); + let usable_size = self.usable_size(&new_layout); + self.realloc(ptr, layout, new_size) + .map(|p| Excess(p, usable_size.1)) + } + + /// Attempts to extend the allocation referenced by `ptr` to fit `new_size`. 
+ /// + /// If this returns `Ok`, then the allocator has asserted that the + /// memory block referenced by `ptr` now fits `new_size`, and thus can + /// be used to carry data of a layout of that size and same alignment as + /// `layout`. (The allocator is allowed to + /// expend effort to accomplish this, such as extending the memory block to + /// include successor blocks, or virtual memory tricks.) + /// + /// Regardless of what this method returns, ownership of the + /// memory block referenced by `ptr` has not been transferred, and + /// the contents of the memory block are unaltered. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must *fit* the `ptr` (see above); note the + /// `new_size` argument need not fit it, + /// + /// * `new_size` must not be less than `layout.size()`, + /// + /// # Errors + /// + /// Returns `Err(CannotReallocInPlace)` when the allocator is + /// unable to assert that the memory block referenced by `ptr` + /// could fit `layout`. + /// + /// Note that one cannot pass `CannotReallocInPlace` to the `handle_alloc_error` + /// function; clients are expected either to be able to recover from + /// `grow_in_place` failures without aborting, or to fall back on + /// another reallocation method before resorting to an abort. + unsafe fn grow_in_place(&mut self, + ptr: NonNull, + layout: Layout, + new_size: usize) -> Result<(), CannotReallocInPlace> { + let _ = ptr; // this default implementation doesn't care about the actual address. 
+ debug_assert!(new_size >= layout.size()); + let (_l, u) = self.usable_size(&layout); + // _l <= layout.size() [guaranteed by usable_size()] + // layout.size() <= new_layout.size() [required by this method] + if new_size <= u { + Ok(()) + } else { + Err(CannotReallocInPlace) + } + } + + /// Attempts to shrink the allocation referenced by `ptr` to fit `new_size`. + /// + /// If this returns `Ok`, then the allocator has asserted that the + /// memory block referenced by `ptr` now fits `new_size`, and + /// thus can only be used to carry data of that smaller + /// layout. (The allocator is allowed to take advantage of this, + /// carving off portions of the block for reuse elsewhere.) The + /// truncated contents of the block within the smaller layout are + /// unaltered, and ownership of block has not been transferred. + /// + /// If this returns `Err`, then the memory block is considered to + /// still represent the original (larger) `layout`. None of the + /// block has been carved off for reuse elsewhere, ownership of + /// the memory block has not been transferred, and the contents of + /// the memory block are unaltered. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * `layout` must *fit* the `ptr` (see above); note the + /// `new_size` argument need not fit it, + /// + /// * `new_size` must not be greater than `layout.size()` + /// (and must be greater than zero), + /// + /// # Errors + /// + /// Returns `Err(CannotReallocInPlace)` when the allocator is + /// unable to assert that the memory block referenced by `ptr` + /// could fit `layout`. 
+ /// + /// Note that one cannot pass `CannotReallocInPlace` to the `handle_alloc_error` + /// function; clients are expected either to be able to recover from + /// `shrink_in_place` failures without aborting, or to fall back + /// on another reallocation method before resorting to an abort. + unsafe fn shrink_in_place(&mut self, + ptr: NonNull, + layout: Layout, + new_size: usize) -> Result<(), CannotReallocInPlace> { + let _ = ptr; // this default implementation doesn't care about the actual address. + debug_assert!(new_size <= layout.size()); + let (l, _u) = self.usable_size(&layout); + // layout.size() <= _u [guaranteed by usable_size()] + // new_layout.size() <= layout.size() [required by this method] + if l <= new_size { + Ok(()) + } else { + Err(CannotReallocInPlace) + } + } + + + // == COMMON USAGE PATTERNS == + // alloc_one, dealloc_one, alloc_array, realloc_array. dealloc_array + + /// Allocates a block suitable for holding an instance of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// The returned block is suitable for passing to the + /// `alloc`/`realloc` methods of this allocator. + /// + /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` + /// must be considered "currently allocated" and must be + /// acceptable input to methods such as `realloc` or `dealloc`, + /// *even if* `T` is a zero-sized type. In other words, if your + /// `Alloc` implementation overrides this method in a manner + /// that can return a zero-sized `ptr`, then all reallocation and + /// deallocation methods need to be similarly overridden to accept + /// such values as input. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `T` does not meet allocator's size or alignment constraints. + /// + /// For zero-sized `T`, may return either of `Ok` or `Err`, but + /// will *not* yield undefined behavior. 
+ /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + fn alloc_one(&mut self) -> Result, AllocErr> + where Self: Sized + { + let k = Layout::new::(); + if k.size() > 0 { + unsafe { self.alloc(k).map(|p| p.cast()) } + } else { + Err(AllocErr) + } + } + + /// Deallocates a block suitable for holding an instance of `T`. + /// + /// The given block must have been produced by this allocator, + /// and must be suitable for storing a `T` (in terms of alignment + /// as well as minimum and maximum size); otherwise yields + /// undefined behavior. + /// + /// Captures a common usage pattern for allocators. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure both: + /// + /// * `ptr` must denote a block of memory currently allocated via this allocator + /// + /// * the layout of `T` must *fit* that block of memory. + unsafe fn dealloc_one(&mut self, ptr: NonNull) + where Self: Sized + { + let k = Layout::new::(); + if k.size() > 0 { + self.dealloc(ptr.cast(), k); + } + } + + /// Allocates a block suitable for holding `n` instances of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// The returned block is suitable for passing to the + /// `alloc`/`realloc` methods of this allocator. + /// + /// Note to implementors: If this returns `Ok(ptr)`, then `ptr` + /// must be considered "currently allocated" and must be + /// acceptable input to methods such as `realloc` or `dealloc`, + /// *even if* `T` is a zero-sized type. 
In other words, if your + /// `Alloc` implementation overrides this method in a manner + /// that can return a zero-sized `ptr`, then all reallocation and + /// deallocation methods need to be similarly overridden to accept + /// such values as input. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `[T; n]` does not meet allocator's size or alignment + /// constraints. + /// + /// For zero-sized `T` or `n == 0`, may return either of `Ok` or + /// `Err`, but will *not* yield undefined behavior. + /// + /// Always returns `Err` on arithmetic overflow. + /// + /// Clients wishing to abort computation in response to an + /// allocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + fn alloc_array(&mut self, n: usize) -> Result, AllocErr> + where Self: Sized + { + match Layout::array::(n) { + Ok(ref layout) if layout.size() > 0 => { + unsafe { + self.alloc(layout.clone()).map(|p| p.cast()) + } + } + _ => Err(AllocErr), + } + } + + /// Reallocates a block previously suitable for holding `n_old` + /// instances of `T`, returning a block suitable for holding + /// `n_new` instances of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// The returned block is suitable for passing to the + /// `alloc`/`realloc` methods of this allocator. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure all of the following: + /// + /// * `ptr` must be currently allocated via this allocator, + /// + /// * the layout of `[T; n_old]` must *fit* that block of memory. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or + /// `[T; n_new]` does not meet allocator's size or alignment + /// constraints. 
+ /// + /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or + /// `Err`, but will *not* yield undefined behavior. + /// + /// Always returns `Err` on arithmetic overflow. + /// + /// Clients wishing to abort computation in response to a + /// reallocation error are encouraged to call the [`handle_alloc_error`] function, + /// rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn realloc_array(&mut self, + ptr: NonNull, + n_old: usize, + n_new: usize) -> Result, AllocErr> + where Self: Sized + { + match (Layout::array::(n_old), Layout::array::(n_new)) { + (Ok(ref k_old), Ok(ref k_new)) if k_old.size() > 0 && k_new.size() > 0 => { + debug_assert!(k_old.align() == k_new.align()); + self.realloc(ptr.cast(), k_old.clone(), k_new.size()).map(NonNull::cast) + } + _ => { + Err(AllocErr) + } + } + } + + /// Deallocates a block suitable for holding `n` instances of `T`. + /// + /// Captures a common usage pattern for allocators. + /// + /// # Safety + /// + /// This function is unsafe because undefined behavior can result + /// if the caller does not ensure both: + /// + /// * `ptr` must denote a block of memory currently allocated via this allocator + /// + /// * the layout of `[T; n]` must *fit* that block of memory. + /// + /// # Errors + /// + /// Returning `Err` indicates that either `[T; n]` or the given + /// memory block does not meet allocator's size or alignment + /// constraints. + /// + /// Always returns `Err` on arithmetic overflow. 
+ unsafe fn dealloc_array(&mut self, ptr: NonNull, n: usize) -> Result<(), AllocErr> + where Self: Sized + { + match Layout::array::(n) { + Ok(ref k) if k.size() > 0 => { + Ok(self.dealloc(ptr.cast(), k.clone())) + } + _ => { + Err(AllocErr) + } + } + } +} diff --git a/src/libcore/any.rs b/src/libcore/any.rs index 338e5c7fd95b..6b26093439e4 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -120,7 +120,7 @@ impl Any for T { /////////////////////////////////////////////////////////////////////////////// #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Any { +impl fmt::Debug for dyn Any { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.pad("Any") } @@ -130,13 +130,20 @@ impl fmt::Debug for Any { // hence used with `unwrap`. May eventually no longer be needed if // dispatch works with upcasting. #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Any + Send { +impl fmt::Debug for dyn Any + Send { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.pad("Any") } } -impl Any { +#[stable(feature = "any_send_sync_methods", since = "1.28.0")] +impl fmt::Debug for dyn Any + Send + Sync { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("Any") + } +} + +impl dyn Any { /// Returns `true` if the boxed type is the same as `T`. /// /// # Examples @@ -196,7 +203,7 @@ impl Any { pub fn downcast_ref(&self) -> Option<&T> { if self.is::() { unsafe { - Some(&*(self as *const Any as *const T)) + Some(&*(self as *const dyn Any as *const T)) } } else { None @@ -233,7 +240,7 @@ impl Any { pub fn downcast_mut(&mut self) -> Option<&mut T> { if self.is::() { unsafe { - Some(&mut *(self as *mut Any as *mut T)) + Some(&mut *(self as *mut dyn Any as *mut T)) } } else { None @@ -241,7 +248,7 @@ impl Any { } } -impl Any+Send { +impl dyn Any+Send { /// Forwards to the method defined on the type `Any`. 
/// /// # Examples @@ -301,7 +308,7 @@ impl Any+Send { /// ``` /// use std::any::Any; /// - /// fn modify_if_u32(s: &mut (Any+ Send)) { + /// fn modify_if_u32(s: &mut (Any + Send)) { /// if let Some(num) = s.downcast_mut::() { /// *num = 42; /// } @@ -325,6 +332,89 @@ impl Any+Send { } } +impl dyn Any+Send+Sync { + /// Forwards to the method defined on the type `Any`. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn is_string(s: &(Any + Send + Sync)) { + /// if s.is::() { + /// println!("It's a string!"); + /// } else { + /// println!("Not a string..."); + /// } + /// } + /// + /// fn main() { + /// is_string(&0); + /// is_string(&"cookie monster".to_string()); + /// } + /// ``` + #[stable(feature = "any_send_sync_methods", since = "1.28.0")] + #[inline] + pub fn is(&self) -> bool { + Any::is::(self) + } + + /// Forwards to the method defined on the type `Any`. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn print_if_string(s: &(Any + Send + Sync)) { + /// if let Some(string) = s.downcast_ref::() { + /// println!("It's a string({}): '{}'", string.len(), string); + /// } else { + /// println!("Not a string..."); + /// } + /// } + /// + /// fn main() { + /// print_if_string(&0); + /// print_if_string(&"cookie monster".to_string()); + /// } + /// ``` + #[stable(feature = "any_send_sync_methods", since = "1.28.0")] + #[inline] + pub fn downcast_ref(&self) -> Option<&T> { + Any::downcast_ref::(self) + } + + /// Forwards to the method defined on the type `Any`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn modify_if_u32(s: &mut (Any + Send + Sync)) { + /// if let Some(num) = s.downcast_mut::() { + /// *num = 42; + /// } + /// } + /// + /// fn main() { + /// let mut x = 10u32; + /// let mut s = "starlord".to_string(); + /// + /// modify_if_u32(&mut x); + /// modify_if_u32(&mut s); + /// + /// assert_eq!(x, 42); + /// assert_eq!(&s, "starlord"); + /// } + /// ``` + #[stable(feature = "any_send_sync_methods", since = "1.28.0")] + #[inline] + pub fn downcast_mut(&mut self) -> Option<&mut T> { + Any::downcast_mut::(self) + } +} /////////////////////////////////////////////////////////////////////////////// // TypeID and its methods @@ -341,7 +431,7 @@ impl Any+Send { /// /// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth /// noting that the hashes and ordering will vary between Rust releases. Beware -/// of relying on them outside of your code! +/// of relying on them inside of your code! #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct TypeId { @@ -367,7 +457,8 @@ impl TypeId { /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn of() -> TypeId { + #[rustc_const_unstable(feature="const_type_id")] + pub const fn of() -> TypeId { TypeId { t: unsafe { intrinsics::type_id::() }, } diff --git a/src/libcore/ascii.rs b/src/libcore/ascii.rs new file mode 100644 index 000000000000..6ee91e0b22ff --- /dev/null +++ b/src/libcore/ascii.rs @@ -0,0 +1,147 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Operations on ASCII strings and characters. +//! +//! 
Most string operations in Rust act on UTF-8 strings. However, at times it +//! makes more sense to only consider the ASCII character set for a specific +//! operation. +//! +//! The [`escape_default`] function provides an iterator over the bytes of an +//! escaped version of the character given. +//! +//! [`escape_default`]: fn.escape_default.html + +#![stable(feature = "core_ascii", since = "1.26.0")] + +use fmt; +use ops::Range; +use iter::FusedIterator; + +/// An iterator over the escaped version of a byte. +/// +/// This `struct` is created by the [`escape_default`] function. See its +/// documentation for more. +/// +/// [`escape_default`]: fn.escape_default.html +#[stable(feature = "rust1", since = "1.0.0")] +pub struct EscapeDefault { + range: Range, + data: [u8; 4], +} + +/// Returns an iterator that produces an escaped version of a `u8`. +/// +/// The default is chosen with a bias toward producing literals that are +/// legal in a variety of languages, including C++11 and similar C-family +/// languages. The exact rules are: +/// +/// * Tab is escaped as `\t`. +/// * Carriage return is escaped as `\r`. +/// * Line feed is escaped as `\n`. +/// * Single quote is escaped as `\'`. +/// * Double quote is escaped as `\"`. +/// * Backslash is escaped as `\\`. +/// * Any character in the 'printable ASCII' range `0x20` .. `0x7e` +/// inclusive is not escaped. +/// * Any other chars are given hex escapes of the form '\xNN'. +/// * Unicode escapes are never generated by this function. 
+/// +/// # Examples +/// +/// ``` +/// use std::ascii; +/// +/// let escaped = ascii::escape_default(b'0').next().unwrap(); +/// assert_eq!(b'0', escaped); +/// +/// let mut escaped = ascii::escape_default(b'\t'); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b't', escaped.next().unwrap()); +/// +/// let mut escaped = ascii::escape_default(b'\r'); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b'r', escaped.next().unwrap()); +/// +/// let mut escaped = ascii::escape_default(b'\n'); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b'n', escaped.next().unwrap()); +/// +/// let mut escaped = ascii::escape_default(b'\''); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b'\'', escaped.next().unwrap()); +/// +/// let mut escaped = ascii::escape_default(b'"'); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b'"', escaped.next().unwrap()); +/// +/// let mut escaped = ascii::escape_default(b'\\'); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// +/// let mut escaped = ascii::escape_default(b'\x9d'); +/// +/// assert_eq!(b'\\', escaped.next().unwrap()); +/// assert_eq!(b'x', escaped.next().unwrap()); +/// assert_eq!(b'9', escaped.next().unwrap()); +/// assert_eq!(b'd', escaped.next().unwrap()); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub fn escape_default(c: u8) -> EscapeDefault { + let (data, len) = match c { + b'\t' => ([b'\\', b't', 0, 0], 2), + b'\r' => ([b'\\', b'r', 0, 0], 2), + b'\n' => ([b'\\', b'n', 0, 0], 2), + b'\\' => ([b'\\', b'\\', 0, 0], 2), + b'\'' => ([b'\\', b'\'', 0, 0], 2), + b'"' => ([b'\\', b'"', 0, 0], 2), + b'\x20' ..= b'\x7e' => ([c, 0, 0, 0], 1), + _ => ([b'\\', b'x', hexify(c >> 4), hexify(c & 0xf)], 4), + }; + + return EscapeDefault { range: 0..len, data }; + + fn hexify(b: u8) -> u8 { + match b { + 0 ..= 9 => b'0' + b, + _ => b'a' + b - 10, + } + } +} + 
+#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for EscapeDefault { + type Item = u8; + fn next(&mut self) -> Option { self.range.next().map(|i| self.data[i]) } + fn size_hint(&self) -> (usize, Option) { self.range.size_hint() } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for EscapeDefault { + fn next_back(&mut self) -> Option { + self.range.next_back().map(|i| self.data[i]) + } +} +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for EscapeDefault {} +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for EscapeDefault {} + +#[stable(feature = "std_debug", since = "1.16.0")] +impl fmt::Debug for EscapeDefault { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("EscapeDefault { .. }") + } +} diff --git a/src/libcore/benches/iter.rs b/src/libcore/benches/iter.rs index b284d855c451..6c597301ac20 100644 --- a/src/libcore/benches/iter.rs +++ b/src/libcore/benches/iter.rs @@ -281,3 +281,32 @@ bench_sums! 
{ bench_take_while_chain_ref_sum, (0i64..1000000).chain(1000000..).take_while(|&x| x < 1111111) } + +// Checks whether Skip> is as fast as Zip, Skip>, from +// https://users.rust-lang.org/t/performance-difference-between-iterator-zip-and-skip-order/15743 +#[bench] +fn bench_zip_then_skip(b: &mut Bencher) { + let v: Vec<_> = (0..100_000).collect(); + let t: Vec<_> = (0..100_000).collect(); + + b.iter(|| { + let s = v.iter().zip(t.iter()).skip(10000) + .take_while(|t| *t.0 < 10100) + .map(|(a, b)| *a + *b) + .sum::(); + assert_eq!(s, 2009900); + }); +} +#[bench] +fn bench_skip_then_zip(b: &mut Bencher) { + let v: Vec<_> = (0..100_000).collect(); + let t: Vec<_> = (0..100_000).collect(); + + b.iter(|| { + let s = v.iter().skip(10000).zip(t.iter().skip(10000)) + .take_while(|t| *t.0 < 10100) + .map(|(a, b)| *a + *b) + .sum::(); + assert_eq!(s, 2009900); + }); +} diff --git a/src/libcore/benches/lib.rs b/src/libcore/benches/lib.rs index 201064e823b1..ced77d779182 100644 --- a/src/libcore/benches/lib.rs +++ b/src/libcore/benches/lib.rs @@ -8,10 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(warnings)] - #![feature(flt2dec)] -#![feature(slice_patterns)] #![feature(test)] extern crate core; diff --git a/src/libcore/borrow.rs b/src/libcore/borrow.rs index 61558034e63e..f45a32d4b94a 100644 --- a/src/libcore/borrow.rs +++ b/src/libcore/borrow.rs @@ -14,24 +14,154 @@ /// A trait for borrowing data. /// -/// In general, there may be several ways to "borrow" a piece of data. The -/// typical ways of borrowing a type `T` are `&T` (a shared borrow) and `&mut T` -/// (a mutable borrow). But types like `Vec` provide additional kinds of -/// borrows: the borrowed slices `&[T]` and `&mut [T]`. +/// In Rust, it is common to provide different representations of a type for +/// different use cases. 
For instance, storage location and management for a +/// value can be specifically chosen as appropriate for a particular use via +/// pointer types such as [`Box`] or [`Rc`]. Beyond these generic +/// wrappers that can be used with any type, some types provide optional +/// facets providing potentially costly functionality. An example for such a +/// type is [`String`] which adds the ability to extend a string to the basic +/// [`str`]. This requires keeping additional information unnecessary for a +/// simple, immutable string. /// -/// When writing generic code, it is often desirable to abstract over all ways -/// of borrowing data from a given type. That is the role of the `Borrow` -/// trait: if `T: Borrow`, then `&U` can be borrowed from `&T`. A given -/// type can be borrowed as multiple different types. In particular, `Vec: -/// Borrow>` and `Vec: Borrow<[T]>`. +/// These types provide access to the underlying data through references +/// to the type of that data. They are said to be ‘borrowed as’ that type. +/// For instance, a [`Box`] can be borrowed as `T` while a [`String`] +/// can be borrowed as `str`. /// -/// If you are implementing `Borrow` and both `Self` and `Borrowed` implement -/// `Hash`, `Eq`, and/or `Ord`, they must produce the same result. +/// Types express that they can be borrowed as some type `T` by implementing +/// `Borrow`, providing a reference to a `T` in the trait’s +/// [`borrow`] method. A type is free to borrow as several different types. +/// If it wishes to mutably borrow as the type – allowing the underlying data +/// to be modified, it can additionally implement [`BorrowMut`]. /// -/// `Borrow` is very similar to, but different than, `AsRef`. See -/// [the book][book] for more. +/// Further, when providing implementations for additional traits, it needs +/// to be considered whether they should behave identical to those of the +/// underlying type as a consequence of acting as a representation of that +/// underlying type. 
Generic code typically uses `Borrow` when it relies +/// on the identical behavior of these additional trait implementations. +/// These traits will likely appear as additional trait bounds. /// -/// [book]: ../../book/first-edition/borrow-and-asref.html +/// If generic code merely needs to work for all types that can +/// provide a reference to related type `T`, it is often better to use +/// [`AsRef`] as more types can safely implement it. +/// +/// [`AsRef`]: ../../std/convert/trait.AsRef.html +/// [`BorrowMut`]: trait.BorrowMut.html +/// [`Box`]: ../../std/boxed/struct.Box.html +/// [`Mutex`]: ../../std/sync/struct.Mutex.html +/// [`Rc`]: ../../std/rc/struct.Rc.html +/// [`str`]: ../../std/primitive.str.html +/// [`String`]: ../../std/string/struct.String.html +/// [`borrow`]: #tymethod.borrow +/// +/// # Examples +/// +/// As a data collection, [`HashMap`] owns both keys and values. If +/// the key’s actual data is wrapped in a managing type of some kind, it +/// should, however, still be possible to search for a value using a +/// reference to the key’s data. For instance, if the key is a string, then +/// it is likely stored with the hash map as a [`String`], while it should +/// be possible to search using a [`&str`][`str`]. Thus, `insert` needs to +/// operate on a `String` while `get` needs to be able to use a `&str`. +/// +/// Slightly simplified, the relevant parts of `HashMap` look like +/// this: +/// +/// ``` +/// use std::borrow::Borrow; +/// use std::hash::Hash; +/// +/// pub struct HashMap { +/// # marker: ::std::marker::PhantomData<(K, V)>, +/// // fields omitted +/// } +/// +/// impl HashMap { +/// pub fn insert(&self, key: K, value: V) -> Option +/// where K: Hash + Eq +/// { +/// # unimplemented!() +/// // ... +/// } +/// +/// pub fn get(&self, k: &Q) -> Option<&V> +/// where +/// K: Borrow, +/// Q: Hash + Eq + ?Sized +/// { +/// # unimplemented!() +/// // ... +/// } +/// } +/// ``` +/// +/// The entire hash map is generic over a key type `K`. 
Because these keys +/// are stored with the hash map, this type has to own the key’s data. +/// When inserting a key-value pair, the map is given such a `K` and needs +/// to find the correct hash bucket and check if the key is already present +/// based on that `K`. It therefore requires `K: Hash + Eq`. +/// +/// When searching for a value in the map, however, having to provide a +/// reference to a `K` as the key to search for would require to always +/// create such an owned value. For string keys, this would mean a `String` +/// value needs to be created just for the search for cases where only a +/// `str` is available. +/// +/// Instead, the `get` method is generic over the type of the underlying key +/// data, called `Q` in the method signature above. It states that `K` +/// borrows as a `Q` by requiring that `K: Borrow`. By additionally +/// requiring `Q: Hash + Eq`, it signals the requirement that `K` and `Q` +/// have implementations of the `Hash` and `Eq` traits that produce identical +/// results. +/// +/// The implementation of `get` relies in particular on identical +/// implementations of `Hash` by determining the key’s hash bucket by calling +/// `Hash::hash` on the `Q` value even though it inserted the key based on +/// the hash value calculated from the `K` value. +/// +/// As a consequence, the hash map breaks if a `K` wrapping a `Q` value +/// produces a different hash than `Q`. 
For instance, imagine you have a +/// type that wraps a string but compares ASCII letters ignoring their case: +/// +/// ``` +/// pub struct CaseInsensitiveString(String); +/// +/// impl PartialEq for CaseInsensitiveString { +/// fn eq(&self, other: &Self) -> bool { +/// self.0.eq_ignore_ascii_case(&other.0) +/// } +/// } +/// +/// impl Eq for CaseInsensitiveString { } +/// ``` +/// +/// Because two equal values need to produce the same hash value, the +/// implementation of `Hash` needs to ignore ASCII case, too: +/// +/// ``` +/// # use std::hash::{Hash, Hasher}; +/// # pub struct CaseInsensitiveString(String); +/// impl Hash for CaseInsensitiveString { +/// fn hash(&self, state: &mut H) { +/// for c in self.0.as_bytes() { +/// c.to_ascii_lowercase().hash(state) +/// } +/// } +/// } +/// ``` +/// +/// Can `CaseInsensitiveString` implement `Borrow`? It certainly can +/// provide a reference to a string slice via its contained owned string. +/// But because its `Hash` implementation differs, it behaves differently +/// from `str` and therefore must not, in fact, implement `Borrow`. +/// If it wants to allow others access to the underlying `str`, it can do +/// that via `AsRef` which doesn’t carry any extra requirements. +/// +/// [`Hash`]: ../../std/hash/trait.Hash.html +/// [`HashMap`]: ../../std/collections/struct.HashMap.html +/// [`String`]: ../../std/string/struct.String.html +/// [`str`]: ../../std/primitive.str.html #[stable(feature = "rust1", since = "1.0.0")] pub trait Borrow { /// Immutably borrows from an owned value. @@ -59,7 +189,11 @@ pub trait Borrow { /// A trait for mutably borrowing data. /// -/// Similar to `Borrow`, but for mutable borrows. +/// As a companion to [`Borrow`] this trait allows a type to borrow as +/// an underlying type by providing a mutable reference. See [`Borrow`] +/// for more information on borrowing as another type. 
+/// +/// [`Borrow`]: trait.Borrow.html #[stable(feature = "rust1", since = "1.0.0")] pub trait BorrowMut : Borrow { /// Mutably borrows from an owned value. diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index c5375d1e00cb..009aba5f5986 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -10,6 +10,24 @@ //! Shareable mutable containers. //! +//! Rust memory safety is based on this rule: Given an object `T`, it is only possible to +//! have one of the following: +//! +//! - Having several immutable references (`&T`) to the object (also known as **aliasing**). +//! - Having one mutable reference (`&mut T`) to the object (also known as **mutability**). +//! +//! This is enforced by the Rust compiler. However, there are situations where this rule is not +//! flexible enough. Sometimes it is required to have multiple references to an object and yet +//! mutate it. +//! +//! Shareable mutable containers exist to permit mutability in a controlled manner, even in the +//! presence of aliasing. Both `Cell` and `RefCell` allows to do this in a single threaded +//! way. However, neither `Cell` nor `RefCell` are thread safe (they do not implement +//! `Sync`). If you need to do aliasing and mutation between multiple threads it is possible to +//! use [`Mutex`](../../std/sync/struct.Mutex.html), +//! [`RwLock`](../../std/sync/struct.RwLock.html) or +//! [`atomic`](../../core/sync/atomic/index.html) types. +//! //! Values of the `Cell` and `RefCell` types may be mutated through shared references (i.e. //! the common `&T` type), whereas most Rust types can only be mutated through unique (`&mut T`) //! references. We say that `Cell` and `RefCell` provide 'interior mutability', in contrast @@ -128,13 +146,12 @@ //! //! ``` //! #![feature(core_intrinsics)] -//! #![feature(shared)] //! use std::cell::Cell; -//! use std::ptr::Shared; +//! use std::ptr::NonNull; //! use std::intrinsics::abort; //! //! struct Rc { -//! ptr: Shared> +//! ptr: NonNull> //! } //! //! 
struct RcBox { @@ -218,7 +235,8 @@ use ptr; /// /// See the [module-level documentation](index.html) for more. #[stable(feature = "rust1", since = "1.0.0")] -pub struct Cell { +#[repr(transparent)] +pub struct Cell { value: UnsafeCell, } @@ -239,13 +257,40 @@ impl Cell { pub fn get(&self) -> T { unsafe{ *self.value.get() } } + + /// Updates the contained value using a function and returns the new value. + /// + /// # Examples + /// + /// ``` + /// #![feature(cell_update)] + /// + /// use std::cell::Cell; + /// + /// let c = Cell::new(5); + /// let new = c.update(|x| x + 1); + /// + /// assert_eq!(new, 6); + /// assert_eq!(c.get(), 6); + /// ``` + #[inline] + #[unstable(feature = "cell_update", issue = "50186")] + pub fn update(&self, f: F) -> T + where + F: FnOnce(T) -> T, + { + let old = self.get(); + let new = f(old); + self.set(new); + new + } } #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Send for Cell where T: Send {} +unsafe impl Send for Cell where T: Send {} #[stable(feature = "rust1", since = "1.0.0")] -impl !Sync for Cell {} +impl !Sync for Cell {} #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Cell { @@ -336,46 +381,6 @@ impl Cell { } } - /// Returns a raw pointer to the underlying data in this cell. - /// - /// # Examples - /// - /// ``` - /// use std::cell::Cell; - /// - /// let c = Cell::new(5); - /// - /// let ptr = c.as_ptr(); - /// ``` - #[inline] - #[stable(feature = "cell_as_ptr", since = "1.12.0")] - pub fn as_ptr(&self) -> *mut T { - self.value.get() - } - - /// Returns a mutable reference to the underlying data. - /// - /// This call borrows `Cell` mutably (at compile-time) which guarantees - /// that we possess the only reference. 
- /// - /// # Examples - /// - /// ``` - /// use std::cell::Cell; - /// - /// let mut c = Cell::new(5); - /// *c.get_mut() += 1; - /// - /// assert_eq!(c.get(), 6); - /// ``` - #[inline] - #[stable(feature = "cell_get_mut", since = "1.11.0")] - pub fn get_mut(&mut self) -> &mut T { - unsafe { - &mut *self.value.get() - } - } - /// Sets the contained value. /// /// # Examples @@ -450,7 +455,71 @@ impl Cell { /// ``` #[stable(feature = "move_cell", since = "1.17.0")] pub fn into_inner(self) -> T { - unsafe { self.value.into_inner() } + self.value.into_inner() + } +} + +impl Cell { + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use std::cell::Cell; + /// + /// let c = Cell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + #[stable(feature = "cell_as_ptr", since = "1.12.0")] + pub fn as_ptr(&self) -> *mut T { + self.value.get() + } + + /// Returns a mutable reference to the underlying data. + /// + /// This call borrows `Cell` mutably (at compile-time) which guarantees + /// that we possess the only reference. 
+ /// + /// # Examples + /// + /// ``` + /// use std::cell::Cell; + /// + /// let mut c = Cell::new(5); + /// *c.get_mut() += 1; + /// + /// assert_eq!(c.get(), 6); + /// ``` + #[inline] + #[stable(feature = "cell_get_mut", since = "1.11.0")] + pub fn get_mut(&mut self) -> &mut T { + unsafe { + &mut *self.value.get() + } + } + + /// Returns a `&Cell` from a `&mut T` + /// + /// # Examples + /// + /// ``` + /// #![feature(as_cell)] + /// use std::cell::Cell; + /// + /// let slice: &mut [i32] = &mut [1, 2, 3]; + /// let cell_slice: &Cell<[i32]> = Cell::from_mut(slice); + /// let slice_cell: &[Cell] = cell_slice.as_slice_of_cells(); + /// + /// assert_eq!(slice_cell.len(), 3); + /// ``` + #[inline] + #[unstable(feature = "as_cell", issue="43038")] + pub fn from_mut(t: &mut T) -> &Cell { + unsafe { + &*(t as *mut T as *const Cell) + } } } @@ -477,6 +546,29 @@ impl Cell { #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U> CoerceUnsized> for Cell {} +impl Cell<[T]> { + /// Returns a `&[Cell]` from a `&Cell<[T]>` + /// + /// # Examples + /// + /// ``` + /// #![feature(as_cell)] + /// use std::cell::Cell; + /// + /// let slice: &mut [i32] = &mut [1, 2, 3]; + /// let cell_slice: &Cell<[i32]> = Cell::from_mut(slice); + /// let slice_cell: &[Cell] = cell_slice.as_slice_of_cells(); + /// + /// assert_eq!(slice_cell.len(), 3); + /// ``` + #[unstable(feature = "as_cell", issue="43038")] + pub fn as_slice_of_cells(&self) -> &[Cell] { + unsafe { + &*(self as *const Cell<[T]> as *const [Cell]) + } + } +} + /// A mutable memory location with dynamically checked borrow rules /// /// See the [module-level documentation](index.html) for more. @@ -526,11 +618,31 @@ impl Display for BorrowMutError { } } -// Values [1, MAX-1] represent the number of `Ref` active -// (will not outgrow its range since `usize` is the size of the address space) -type BorrowFlag = usize; +// Positive values represent the number of `Ref` active. 
Negative values +// represent the number of `RefMut` active. Multiple `RefMut`s can only be +// active at a time if they refer to distinct, nonoverlapping components of a +// `RefCell` (e.g., different ranges of a slice). +// +// `Ref` and `RefMut` are both two words in size, and so there will likely never +// be enough `Ref`s or `RefMut`s in existence to overflow half of the `usize` +// range. Thus, a `BorrowFlag` will probably never overflow or underflow. +// However, this is not a guarantee, as a pathological program could repeatedly +// create and then mem::forget `Ref`s or `RefMut`s. Thus, all code must +// explicitly check for overflow and underflow in order to avoid unsafety, or at +// least behave correctly in the event that overflow or underflow happens (e.g., +// see BorrowRef::new). +type BorrowFlag = isize; const UNUSED: BorrowFlag = 0; -const WRITING: BorrowFlag = !0; + +#[inline(always)] +fn is_writing(x: BorrowFlag) -> bool { + x < UNUSED +} + +#[inline(always)] +fn is_reading(x: BorrowFlag) -> bool { + x > UNUSED +} impl RefCell { /// Creates a new `RefCell` containing `value`. @@ -569,7 +681,7 @@ impl RefCell { // compiler statically verifies that it is not currently borrowed. // Therefore the following assertion is just a `debug_assert!`. debug_assert!(self.borrow.get() == UNUSED); - unsafe { self.value.into_inner() } + self.value.into_inner() } /// Replaces the wrapped value with a new one, returning the old value, @@ -731,8 +843,9 @@ impl RefCell { /// Mutably borrows the wrapped value. /// - /// The borrow lasts until the returned `RefMut` exits scope. The value - /// cannot be borrowed while this borrow is active. + /// The borrow lasts until the returned `RefMut` or all `RefMut`s derived + /// from it exit scope. The value cannot be borrowed while this borrow is + /// active. /// /// # Panics /// @@ -774,8 +887,9 @@ impl RefCell { /// Mutably borrows the wrapped value, returning an error if the value is currently borrowed. 
/// - /// The borrow lasts until the returned `RefMut` exits scope. The value cannot be borrowed - /// while this borrow is active. + /// The borrow lasts until the returned `RefMut` or all `RefMut`s derived + /// from it exit scope. The value cannot be borrowed while this borrow is + /// active. /// /// This is the non-panicking variant of [`borrow_mut`](#method.borrow_mut). /// @@ -863,6 +977,9 @@ impl !Sync for RefCell {} #[stable(feature = "rust1", since = "1.0.0")] impl Clone for RefCell { + /// # Panics + /// + /// Panics if the value is currently mutably borrowed. #[inline] fn clone(&self) -> RefCell { RefCell::new(self.borrow().clone()) @@ -880,6 +997,9 @@ impl Default for RefCell { #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for RefCell { + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. #[inline] fn eq(&self, other: &RefCell) -> bool { *self.borrow() == *other.borrow() @@ -891,26 +1011,41 @@ impl Eq for RefCell {} #[stable(feature = "cell_ord", since = "1.10.0")] impl PartialOrd for RefCell { + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. #[inline] fn partial_cmp(&self, other: &RefCell) -> Option { self.borrow().partial_cmp(&*other.borrow()) } + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. #[inline] fn lt(&self, other: &RefCell) -> bool { *self.borrow() < *other.borrow() } + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. #[inline] fn le(&self, other: &RefCell) -> bool { *self.borrow() <= *other.borrow() } + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. #[inline] fn gt(&self, other: &RefCell) -> bool { *self.borrow() > *other.borrow() } + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. 
#[inline] fn ge(&self, other: &RefCell) -> bool { *self.borrow() >= *other.borrow() @@ -919,6 +1054,9 @@ impl PartialOrd for RefCell { #[stable(feature = "cell_ord", since = "1.10.0")] impl Ord for RefCell { + /// # Panics + /// + /// Panics if the value in either `RefCell` is currently borrowed. #[inline] fn cmp(&self, other: &RefCell) -> Ordering { self.borrow().cmp(&*other.borrow()) @@ -942,12 +1080,14 @@ struct BorrowRef<'b> { impl<'b> BorrowRef<'b> { #[inline] fn new(borrow: &'b Cell) -> Option> { - match borrow.get() { - WRITING => None, - b => { - borrow.set(b + 1); - Some(BorrowRef { borrow: borrow }) - }, + let b = borrow.get(); + if is_writing(b) || b == isize::max_value() { + // If there's currently a writing borrow, or if incrementing the + // refcount would overflow into a writing borrow. + None + } else { + borrow.set(b + 1); + Some(BorrowRef { borrow }) } } } @@ -956,7 +1096,7 @@ impl<'b> Drop for BorrowRef<'b> { #[inline] fn drop(&mut self) { let borrow = self.borrow.get(); - debug_assert!(borrow != WRITING && borrow != UNUSED); + debug_assert!(is_reading(borrow)); self.borrow.set(borrow - 1); } } @@ -965,11 +1105,12 @@ impl<'b> Clone for BorrowRef<'b> { #[inline] fn clone(&self) -> BorrowRef<'b> { // Since this Ref exists, we know the borrow flag - // is not set to WRITING. + // is a reading borrow. let borrow = self.borrow.get(); - debug_assert!(borrow != UNUSED); - // Prevent the borrow counter from overflowing. - assert!(borrow != WRITING); + debug_assert!(is_reading(borrow)); + // Prevent the borrow counter from overflowing into + // a writing borrow. + assert!(borrow != isize::max_value()); self.borrow.set(borrow + 1); BorrowRef { borrow: self.borrow } } @@ -1041,6 +1182,37 @@ impl<'b, T: ?Sized> Ref<'b, T> { borrow: orig.borrow, } } + + /// Split a `Ref` into multiple `Ref`s for different components of the + /// borrowed data. + /// + /// The `RefCell` is already immutably borrowed, so this cannot fail. 
+ /// + /// This is an associated function that needs to be used as + /// `Ref::map_split(...)`. A method would interfere with methods of the same + /// name on the contents of a `RefCell` used through `Deref`. + /// + /// # Examples + /// + /// ``` + /// #![feature(refcell_map_split)] + /// use std::cell::{Ref, RefCell}; + /// + /// let cell = RefCell::new([1, 2, 3, 4]); + /// let borrow = cell.borrow(); + /// let (begin, end) = Ref::map_split(borrow, |slice| slice.split_at(2)); + /// assert_eq!(*begin, [1, 2]); + /// assert_eq!(*end, [3, 4]); + /// ``` + #[unstable(feature = "refcell_map_split", issue = "51476")] + #[inline] + pub fn map_split(orig: Ref<'b, T>, f: F) -> (Ref<'b, U>, Ref<'b, V>) + where F: FnOnce(&T) -> (&U, &V) + { + let (a, b) = f(orig.value); + let borrow = orig.borrow.clone(); + (Ref { value: a, borrow }, Ref { value: b, borrow: orig.borrow }) + } } #[unstable(feature = "coerce_unsized", issue = "27732")] @@ -1086,9 +1258,47 @@ impl<'b, T: ?Sized> RefMut<'b, T> { let RefMut { value, borrow } = orig; RefMut { value: f(value), - borrow: borrow, + borrow, } } + + /// Split a `RefMut` into multiple `RefMut`s for different components of the + /// borrowed data. + /// + /// The underlying `RefCell` will remain mutably borrowed until both + /// returned `RefMut`s go out of scope. + /// + /// The `RefCell` is already mutably borrowed, so this cannot fail. + /// + /// This is an associated function that needs to be used as + /// `RefMut::map_split(...)`. A method would interfere with methods of the + /// same name on the contents of a `RefCell` used through `Deref`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(refcell_map_split)] + /// use std::cell::{RefCell, RefMut}; + /// + /// let cell = RefCell::new([1, 2, 3, 4]); + /// let borrow = cell.borrow_mut(); + /// let (mut begin, mut end) = RefMut::map_split(borrow, |slice| slice.split_at_mut(2)); + /// assert_eq!(*begin, [1, 2]); + /// assert_eq!(*end, [3, 4]); + /// begin.copy_from_slice(&[4, 3]); + /// end.copy_from_slice(&[2, 1]); + /// ``` + #[unstable(feature = "refcell_map_split", issue = "51476")] + #[inline] + pub fn map_split( + orig: RefMut<'b, T>, f: F + ) -> (RefMut<'b, U>, RefMut<'b, V>) + where F: FnOnce(&mut T) -> (&mut U, &mut V) + { + let (a, b) = f(orig.value); + let borrow = orig.borrow.clone(); + (RefMut { value: a, borrow }, RefMut { value: b, borrow: orig.borrow }) + } } struct BorrowRefMut<'b> { @@ -1099,22 +1309,41 @@ impl<'b> Drop for BorrowRefMut<'b> { #[inline] fn drop(&mut self) { let borrow = self.borrow.get(); - debug_assert!(borrow == WRITING); - self.borrow.set(UNUSED); + debug_assert!(is_writing(borrow)); + self.borrow.set(borrow + 1); } } impl<'b> BorrowRefMut<'b> { #[inline] fn new(borrow: &'b Cell) -> Option> { + // NOTE: Unlike BorrowRefMut::clone, new is called to create the initial + // mutable reference, and so there must currently be no existing + // references. Thus, while clone increments the mutable refcount, here + // we explicitly only allow going from UNUSED to UNUSED - 1. match borrow.get() { UNUSED => { - borrow.set(WRITING); - Some(BorrowRefMut { borrow: borrow }) + borrow.set(UNUSED - 1); + Some(BorrowRefMut { borrow }) }, _ => None, } } + + // Clone a `BorrowRefMut`. + // + // This is only valid if each `BorrowRefMut` is used to track a mutable + // reference to a distinct, nonoverlapping range of the original object. + // This isn't in a Clone impl so that code doesn't call this implicitly. 
+ #[inline] + fn clone(&self) -> BorrowRefMut<'b> { + let borrow = self.borrow.get(); + debug_assert!(is_writing(borrow)); + // Prevent the borrow counter from underflowing. + assert!(borrow != isize::min_value()); + self.borrow.set(borrow - 1); + BorrowRefMut { borrow: self.borrow } + } } /// A wrapper type for a mutably borrowed value from a `RefCell`. @@ -1161,22 +1390,44 @@ impl<'a, T: ?Sized + fmt::Display> fmt::Display for RefMut<'a, T> { /// The `UnsafeCell` type is the only legal way to obtain aliasable data that is considered /// mutable. In general, transmuting an `&T` type into an `&mut T` is considered undefined behavior. /// -/// The compiler makes optimizations based on the knowledge that `&T` is not mutably aliased or -/// mutated, and that `&mut T` is unique. When building abstractions like `Cell`, `RefCell`, -/// `Mutex`, etc, you need to turn these optimizations off. `UnsafeCell` is the only legal way -/// to do this. When `UnsafeCell` is immutably aliased, it is still safe to obtain a mutable -/// reference to its interior and/or to mutate it. However, it is up to the abstraction designer -/// to ensure that no two mutable references obtained this way are active at the same time, and -/// that there are no active mutable references or mutations when an immutable reference is obtained -/// from the cell. This is often done via runtime checks. +/// If you have a reference `&SomeStruct`, then normally in Rust all fields of `SomeStruct` are +/// immutable. The compiler makes optimizations based on the knowledge that `&T` is not mutably +/// aliased or mutated, and that `&mut T` is unique. `UnsafeCell` is the only core language +/// feature to work around this restriction. All other types that allow internal mutability, such as +/// `Cell` and `RefCell`, use `UnsafeCell` to wrap their internal data. 
/// -/// Note that while mutating or mutably aliasing the contents of an `& UnsafeCell` is -/// okay (provided you enforce the invariants some other way); it is still undefined behavior +/// The `UnsafeCell` API itself is technically very simple: it gives you a raw pointer `*mut T` to +/// its contents. It is up to _you_ as the abstraction designer to use that raw pointer correctly. +/// +/// The precise Rust aliasing rules are somewhat in flux, but the main points are not contentious: +/// +/// - If you create a safe reference with lifetime `'a` (either a `&T` or `&mut T` +/// reference) that is accessible by safe code (for example, because you returned it), +/// then you must not access the data in any way that contradicts that reference for the +/// remainder of `'a`. For example, this means that if you take the `*mut T` from an +/// `UnsafeCell` and cast it to an `&T`, then the data in `T` must remain immutable +/// (modulo any `UnsafeCell` data found within `T`, of course) until that reference's +/// lifetime expires. Similarly, if you create a `&mut T` reference that is released to +/// safe code, then you must not access the data within the `UnsafeCell` until that +/// reference expires. +/// +/// - At all times, you must avoid data races. If multiple threads have access to +/// the same `UnsafeCell`, then any writes must have a proper happens-before relation to all other +/// accesses (or use atomics). +/// +/// To assist with proper design, the following scenarios are explicitly declared legal +/// for single-threaded code: +/// +/// 1. A `&T` reference can be released to safe code and there it can co-exist with other `&T` +/// references, but not with a `&mut T` +/// +/// 2. A `&mut T` reference may be released to safe code provided neither other `&mut T` nor `&T` +/// co-exist with it. A `&mut T` must always be unique. 
+/// +/// Note that while mutating or mutably aliasing the contents of an `&UnsafeCell` is +/// okay (provided you enforce the invariants some other way), it is still undefined behavior /// to have multiple `&mut UnsafeCell` aliases. /// -/// -/// Types like `Cell` and `RefCell` use this type to wrap their internal data. -/// /// # Examples /// /// ``` @@ -1192,6 +1443,7 @@ impl<'a, T: ?Sized + fmt::Display> fmt::Display for RefMut<'a, T> { /// ``` #[lang = "unsafe_cell"] #[stable(feature = "rust1", since = "1.0.0")] +#[repr(transparent)] pub struct UnsafeCell { value: T, } @@ -1215,16 +1467,11 @@ impl UnsafeCell { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub const fn new(value: T) -> UnsafeCell { - UnsafeCell { value: value } + UnsafeCell { value } } /// Unwraps the value. /// - /// # Safety - /// - /// This function is unsafe because this thread or another thread may currently be - /// inspecting the inner value. - /// /// # Examples /// /// ``` @@ -1232,11 +1479,11 @@ impl UnsafeCell { /// /// let uc = UnsafeCell::new(5); /// - /// let five = unsafe { uc.into_inner() }; + /// let five = uc.into_inner(); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub unsafe fn into_inner(self) -> T { + pub fn into_inner(self) -> T { self.value } } @@ -1245,9 +1492,9 @@ impl UnsafeCell { /// Gets a mutable pointer to the wrapped value. /// /// This can be cast to a pointer of any kind. 
- /// Ensure that the access is unique when casting to - /// `&mut T`, and ensure that there are no mutations or mutable - /// aliases going on when casting to `&T` + /// Ensure that the access is unique (no active references, mutable or not) + /// when casting to `&mut T`, and ensure that there are no mutations + /// or mutable aliases going on when casting to `&T` /// /// # Examples /// @@ -1285,7 +1532,7 @@ impl, U> CoerceUnsized> for UnsafeCell {} #[allow(unused)] fn assert_coerce_unsized(a: UnsafeCell<&i32>, b: Cell<&i32>, c: RefCell<&i32>) { - let _: UnsafeCell<&Send> = a; - let _: Cell<&Send> = b; - let _: RefCell<&Send> = c; + let _: UnsafeCell<&dyn Send> = a; + let _: Cell<&dyn Send> = b; + let _: RefCell<&dyn Send> = c; } diff --git a/src/libcore/char.rs b/src/libcore/char.rs deleted file mode 100644 index e8b81db07067..000000000000 --- a/src/libcore/char.rs +++ /dev/null @@ -1,908 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Character manipulation. -//! -//! For more details, see ::std_unicode::char (a.k.a. 
std::char) - -#![allow(non_snake_case)] -#![stable(feature = "core_char", since = "1.2.0")] - -use char_private::is_printable; -use convert::TryFrom; -use fmt::{self, Write}; -use slice; -use str::{from_utf8_unchecked_mut, FromStr}; -use iter::FusedIterator; -use mem::transmute; - -// UTF-8 ranges and tags for encoding characters -const TAG_CONT: u8 = 0b1000_0000; -const TAG_TWO_B: u8 = 0b1100_0000; -const TAG_THREE_B: u8 = 0b1110_0000; -const TAG_FOUR_B: u8 = 0b1111_0000; -const MAX_ONE_B: u32 = 0x80; -const MAX_TWO_B: u32 = 0x800; -const MAX_THREE_B: u32 = 0x10000; - -/* - Lu Uppercase_Letter an uppercase letter - Ll Lowercase_Letter a lowercase letter - Lt Titlecase_Letter a digraphic character, with first part uppercase - Lm Modifier_Letter a modifier letter - Lo Other_Letter other letters, including syllables and ideographs - Mn Nonspacing_Mark a nonspacing combining mark (zero advance width) - Mc Spacing_Mark a spacing combining mark (positive advance width) - Me Enclosing_Mark an enclosing combining mark - Nd Decimal_Number a decimal digit - Nl Letter_Number a letterlike numeric character - No Other_Number a numeric character of other type - Pc Connector_Punctuation a connecting punctuation mark, like a tie - Pd Dash_Punctuation a dash or hyphen punctuation mark - Ps Open_Punctuation an opening punctuation mark (of a pair) - Pe Close_Punctuation a closing punctuation mark (of a pair) - Pi Initial_Punctuation an initial quotation mark - Pf Final_Punctuation a final quotation mark - Po Other_Punctuation a punctuation mark of other type - Sm Math_Symbol a symbol of primarily mathematical use - Sc Currency_Symbol a currency sign - Sk Modifier_Symbol a non-letterlike modifier symbol - So Other_Symbol a symbol of other type - Zs Space_Separator a space character (of various non-zero widths) - Zl Line_Separator U+2028 LINE SEPARATOR only - Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only - Cc Control a C0 or C1 control code - Cf Format a format control 
character - Cs Surrogate a surrogate code point - Co Private_Use a private-use character - Cn Unassigned a reserved unassigned code point or a noncharacter -*/ - -/// The highest valid code point a `char` can have. -/// -/// A [`char`] is a [Unicode Scalar Value], which means that it is a [Code -/// Point], but only ones within a certain range. `MAX` is the highest valid -/// code point that's a valid [Unicode Scalar Value]. -/// -/// [`char`]: ../../std/primitive.char.html -/// [Unicode Scalar Value]: http://www.unicode.org/glossary/#unicode_scalar_value -/// [Code Point]: http://www.unicode.org/glossary/#code_point -#[stable(feature = "rust1", since = "1.0.0")] -pub const MAX: char = '\u{10ffff}'; - -/// Converts a `u32` to a `char`. -/// -/// Note that all [`char`]s are valid [`u32`]s, and can be casted to one with -/// [`as`]: -/// -/// ``` -/// let c = '💯'; -/// let i = c as u32; -/// -/// assert_eq!(128175, i); -/// ``` -/// -/// However, the reverse is not true: not all valid [`u32`]s are valid -/// [`char`]s. `from_u32()` will return `None` if the input is not a valid value -/// for a [`char`]. -/// -/// [`char`]: ../../std/primitive.char.html -/// [`u32`]: ../../std/primitive.u32.html -/// [`as`]: ../../book/first-edition/casting-between-types.html#as -/// -/// For an unsafe version of this function which ignores these checks, see -/// [`from_u32_unchecked`]. -/// -/// [`from_u32_unchecked`]: fn.from_u32_unchecked.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::char; -/// -/// let c = char::from_u32(0x2764); -/// -/// assert_eq!(Some('❤'), c); -/// ``` -/// -/// Returning `None` when the input is not a valid [`char`]: -/// -/// ``` -/// use std::char; -/// -/// let c = char::from_u32(0x110000); -/// -/// assert_eq!(None, c); -/// ``` -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn from_u32(i: u32) -> Option { - char::try_from(i).ok() -} - -/// Converts a `u32` to a `char`, ignoring validity. 
-/// -/// Note that all [`char`]s are valid [`u32`]s, and can be casted to one with -/// [`as`]: -/// -/// ``` -/// let c = '💯'; -/// let i = c as u32; -/// -/// assert_eq!(128175, i); -/// ``` -/// -/// However, the reverse is not true: not all valid [`u32`]s are valid -/// [`char`]s. `from_u32_unchecked()` will ignore this, and blindly cast to -/// [`char`], possibly creating an invalid one. -/// -/// [`char`]: ../../std/primitive.char.html -/// [`u32`]: ../../std/primitive.u32.html -/// [`as`]: ../../book/first-edition/casting-between-types.html#as -/// -/// # Safety -/// -/// This function is unsafe, as it may construct invalid `char` values. -/// -/// For a safe version of this function, see the [`from_u32`] function. -/// -/// [`from_u32`]: fn.from_u32.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::char; -/// -/// let c = unsafe { char::from_u32_unchecked(0x2764) }; -/// -/// assert_eq!('❤', c); -/// ``` -#[inline] -#[stable(feature = "char_from_unchecked", since = "1.5.0")] -pub unsafe fn from_u32_unchecked(i: u32) -> char { - transmute(i) -} - -#[stable(feature = "char_convert", since = "1.13.0")] -impl From for u32 { - #[inline] - fn from(c: char) -> Self { - c as u32 - } -} - -/// Maps a byte in 0x00...0xFF to a `char` whose code point has the same value, in U+0000 to U+00FF. -/// -/// Unicode is designed such that this effectively decodes bytes -/// with the character encoding that IANA calls ISO-8859-1. -/// This encoding is compatible with ASCII. -/// -/// Note that this is different from ISO/IEC 8859-1 a.k.a. ISO 8859-1 (with one less hyphen), -/// which leaves some "blanks", byte values that are not assigned to any character. -/// ISO-8859-1 (the IANA one) assigns them to the C0 and C1 control codes. -/// -/// Note that this is *also* different from Windows-1252 a.k.a. code page 1252, -/// which is a superset ISO/IEC 8859-1 that assigns some (not all!) blanks -/// to punctuation and various Latin characters. 
-/// -/// To confuse things further, [on the Web](https://encoding.spec.whatwg.org/) -/// `ascii`, `iso-8859-1`, and `windows-1252` are all aliases -/// for a superset of Windows-1252 that fills the remaining blanks with corresponding -/// C0 and C1 control codes. -#[stable(feature = "char_convert", since = "1.13.0")] -impl From for char { - #[inline] - fn from(i: u8) -> Self { - i as char - } -} - - -/// An error which can be returned when parsing a char. -#[stable(feature = "char_from_str", since = "1.20.0")] -#[derive(Clone, Debug)] -pub struct ParseCharError { - kind: CharErrorKind, -} - -impl ParseCharError { - #[unstable(feature = "char_error_internals", - reason = "this method should not be available publicly", - issue = "0")] - #[doc(hidden)] - pub fn __description(&self) -> &str { - match self.kind { - CharErrorKind::EmptyString => { - "cannot parse char from empty string" - }, - CharErrorKind::TooManyChars => "too many characters in string" - } - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -enum CharErrorKind { - EmptyString, - TooManyChars, -} - -#[stable(feature = "char_from_str", since = "1.20.0")] -impl fmt::Display for ParseCharError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.__description().fmt(f) - } -} - - -#[stable(feature = "char_from_str", since = "1.20.0")] -impl FromStr for char { - type Err = ParseCharError; - - #[inline] - fn from_str(s: &str) -> Result { - let mut chars = s.chars(); - match (chars.next(), chars.next()) { - (None, _) => { - Err(ParseCharError { kind: CharErrorKind::EmptyString }) - }, - (Some(c), None) => Ok(c), - _ => { - Err(ParseCharError { kind: CharErrorKind::TooManyChars }) - } - } - } -} - - -#[unstable(feature = "try_from", issue = "33417")] -impl TryFrom for char { - type Error = CharTryFromError; - - #[inline] - fn try_from(i: u32) -> Result { - if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { - Err(CharTryFromError(())) - } else { - Ok(unsafe { from_u32_unchecked(i) }) - } - } 
-} - -/// The error type returned when a conversion from u32 to char fails. -#[unstable(feature = "try_from", issue = "33417")] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct CharTryFromError(()); - -#[unstable(feature = "try_from", issue = "33417")] -impl fmt::Display for CharTryFromError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - "converted integer out of range for `char`".fmt(f) - } -} - -/// Converts a digit in the given radix to a `char`. -/// -/// A 'radix' here is sometimes also called a 'base'. A radix of two -/// indicates a binary number, a radix of ten, decimal, and a radix of -/// sixteen, hexadecimal, to give some common values. Arbitrary -/// radices are supported. -/// -/// `from_digit()` will return `None` if the input is not a digit in -/// the given radix. -/// -/// # Panics -/// -/// Panics if given a radix larger than 36. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::char; -/// -/// let c = char::from_digit(4, 10); -/// -/// assert_eq!(Some('4'), c); -/// -/// // Decimal 11 is a single digit in base 16 -/// let c = char::from_digit(11, 16); -/// -/// assert_eq!(Some('b'), c); -/// ``` -/// -/// Returning `None` when the input is not a digit: -/// -/// ``` -/// use std::char; -/// -/// let c = char::from_digit(20, 10); -/// -/// assert_eq!(None, c); -/// ``` -/// -/// Passing a large radix, causing a panic: -/// -/// ``` -/// use std::thread; -/// use std::char; -/// -/// let result = thread::spawn(|| { -/// // this panics -/// let c = char::from_digit(1, 37); -/// }).join(); -/// -/// assert!(result.is_err()); -/// ``` -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn from_digit(num: u32, radix: u32) -> Option { - if radix > 36 { - panic!("from_digit: radix is too high (maximum 36)"); - } - if num < radix { - let num = num as u8; - if num < 10 { - Some((b'0' + num) as char) - } else { - Some((b'a' + num - 10) as char) - } - } else { - None - } -} - -// NB: the stabilization 
and documentation for this trait is in -// unicode/char.rs, not here -#[allow(missing_docs)] // docs in libunicode/u_char.rs -#[doc(hidden)] -#[unstable(feature = "core_char_ext", - reason = "the stable interface is `impl char` in later crate", - issue = "32110")] -pub trait CharExt { - #[stable(feature = "core", since = "1.6.0")] - fn is_digit(self, radix: u32) -> bool; - #[stable(feature = "core", since = "1.6.0")] - fn to_digit(self, radix: u32) -> Option; - #[stable(feature = "core", since = "1.6.0")] - fn escape_unicode(self) -> EscapeUnicode; - #[stable(feature = "core", since = "1.6.0")] - fn escape_default(self) -> EscapeDefault; - #[stable(feature = "char_escape_debug", since = "1.20.0")] - fn escape_debug(self) -> EscapeDebug; - #[stable(feature = "core", since = "1.6.0")] - fn len_utf8(self) -> usize; - #[stable(feature = "core", since = "1.6.0")] - fn len_utf16(self) -> usize; - #[stable(feature = "unicode_encode_char", since = "1.15.0")] - fn encode_utf8(self, dst: &mut [u8]) -> &mut str; - #[stable(feature = "unicode_encode_char", since = "1.15.0")] - fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16]; -} - -#[stable(feature = "core", since = "1.6.0")] -impl CharExt for char { - #[inline] - fn is_digit(self, radix: u32) -> bool { - self.to_digit(radix).is_some() - } - - #[inline] - fn to_digit(self, radix: u32) -> Option { - if radix > 36 { - panic!("to_digit: radix is too high (maximum 36)"); - } - let val = match self { - '0' ... '9' => self as u32 - '0' as u32, - 'a' ... 'z' => self as u32 - 'a' as u32 + 10, - 'A' ... 
'Z' => self as u32 - 'A' as u32 + 10, - _ => return None, - }; - if val < radix { Some(val) } - else { None } - } - - #[inline] - fn escape_unicode(self) -> EscapeUnicode { - let c = self as u32; - - // or-ing 1 ensures that for c==0 the code computes that one - // digit should be printed and (which is the same) avoids the - // (31 - 32) underflow - let msb = 31 - (c | 1).leading_zeros(); - - // the index of the most significant hex digit - let ms_hex_digit = msb / 4; - EscapeUnicode { - c: self, - state: EscapeUnicodeState::Backslash, - hex_digit_idx: ms_hex_digit as usize, - } - } - - #[inline] - fn escape_default(self) -> EscapeDefault { - let init_state = match self { - '\t' => EscapeDefaultState::Backslash('t'), - '\r' => EscapeDefaultState::Backslash('r'), - '\n' => EscapeDefaultState::Backslash('n'), - '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self), - '\x20' ... '\x7e' => EscapeDefaultState::Char(self), - _ => EscapeDefaultState::Unicode(self.escape_unicode()) - }; - EscapeDefault { state: init_state } - } - - #[inline] - fn escape_debug(self) -> EscapeDebug { - let init_state = match self { - '\t' => EscapeDefaultState::Backslash('t'), - '\r' => EscapeDefaultState::Backslash('r'), - '\n' => EscapeDefaultState::Backslash('n'), - '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self), - c if is_printable(c) => EscapeDefaultState::Char(c), - c => EscapeDefaultState::Unicode(c.escape_unicode()), - }; - EscapeDebug(EscapeDefault { state: init_state }) - } - - #[inline] - fn len_utf8(self) -> usize { - let code = self as u32; - if code < MAX_ONE_B { - 1 - } else if code < MAX_TWO_B { - 2 - } else if code < MAX_THREE_B { - 3 - } else { - 4 - } - } - - #[inline] - fn len_utf16(self) -> usize { - let ch = self as u32; - if (ch & 0xFFFF) == ch { 1 } else { 2 } - } - - #[inline] - fn encode_utf8(self, dst: &mut [u8]) -> &mut str { - let code = self as u32; - unsafe { - let len = - if code < MAX_ONE_B && !dst.is_empty() { - *dst.get_unchecked_mut(0) = code 
as u8; - 1 - } else if code < MAX_TWO_B && dst.len() >= 2 { - *dst.get_unchecked_mut(0) = (code >> 6 & 0x1F) as u8 | TAG_TWO_B; - *dst.get_unchecked_mut(1) = (code & 0x3F) as u8 | TAG_CONT; - 2 - } else if code < MAX_THREE_B && dst.len() >= 3 { - *dst.get_unchecked_mut(0) = (code >> 12 & 0x0F) as u8 | TAG_THREE_B; - *dst.get_unchecked_mut(1) = (code >> 6 & 0x3F) as u8 | TAG_CONT; - *dst.get_unchecked_mut(2) = (code & 0x3F) as u8 | TAG_CONT; - 3 - } else if dst.len() >= 4 { - *dst.get_unchecked_mut(0) = (code >> 18 & 0x07) as u8 | TAG_FOUR_B; - *dst.get_unchecked_mut(1) = (code >> 12 & 0x3F) as u8 | TAG_CONT; - *dst.get_unchecked_mut(2) = (code >> 6 & 0x3F) as u8 | TAG_CONT; - *dst.get_unchecked_mut(3) = (code & 0x3F) as u8 | TAG_CONT; - 4 - } else { - panic!("encode_utf8: need {} bytes to encode U+{:X}, but the buffer has {}", - from_u32_unchecked(code).len_utf8(), - code, - dst.len()) - }; - from_utf8_unchecked_mut(dst.get_unchecked_mut(..len)) - } - } - - #[inline] - fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] { - let mut code = self as u32; - unsafe { - if (code & 0xFFFF) == code && !dst.is_empty() { - // The BMP falls through (assuming non-surrogate, as it should) - *dst.get_unchecked_mut(0) = code as u16; - slice::from_raw_parts_mut(dst.as_mut_ptr(), 1) - } else if dst.len() >= 2 { - // Supplementary planes break into surrogates. - code -= 0x1_0000; - *dst.get_unchecked_mut(0) = 0xD800 | ((code >> 10) as u16); - *dst.get_unchecked_mut(1) = 0xDC00 | ((code as u16) & 0x3FF); - slice::from_raw_parts_mut(dst.as_mut_ptr(), 2) - } else { - panic!("encode_utf16: need {} units to encode U+{:X}, but the buffer has {}", - from_u32_unchecked(code).len_utf16(), - code, - dst.len()) - } - } - } -} - -/// Returns an iterator that yields the hexadecimal Unicode escape of a -/// character, as `char`s. -/// -/// This `struct` is created by the [`escape_unicode`] method on [`char`]. See -/// its documentation for more. 
-/// -/// [`escape_unicode`]: ../../std/primitive.char.html#method.escape_unicode -/// [`char`]: ../../std/primitive.char.html -#[derive(Clone, Debug)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct EscapeUnicode { - c: char, - state: EscapeUnicodeState, - - // The index of the next hex digit to be printed (0 if none), - // i.e. the number of remaining hex digits to be printed; - // increasing from the least significant digit: 0x543210 - hex_digit_idx: usize, -} - -// The enum values are ordered so that their representation is the -// same as the remaining length (besides the hexadecimal digits). This -// likely makes `len()` a single load from memory) and inline-worth. -#[derive(Clone, Debug)] -enum EscapeUnicodeState { - Done, - RightBrace, - Value, - LeftBrace, - Type, - Backslash, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for EscapeUnicode { - type Item = char; - - fn next(&mut self) -> Option { - match self.state { - EscapeUnicodeState::Backslash => { - self.state = EscapeUnicodeState::Type; - Some('\\') - } - EscapeUnicodeState::Type => { - self.state = EscapeUnicodeState::LeftBrace; - Some('u') - } - EscapeUnicodeState::LeftBrace => { - self.state = EscapeUnicodeState::Value; - Some('{') - } - EscapeUnicodeState::Value => { - let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf; - let c = from_digit(hex_digit, 16).unwrap(); - if self.hex_digit_idx == 0 { - self.state = EscapeUnicodeState::RightBrace; - } else { - self.hex_digit_idx -= 1; - } - Some(c) - } - EscapeUnicodeState::RightBrace => { - self.state = EscapeUnicodeState::Done; - Some('}') - } - EscapeUnicodeState::Done => None, - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let n = self.len(); - (n, Some(n)) - } - - #[inline] - fn count(self) -> usize { - self.len() - } - - fn last(self) -> Option { - match self.state { - EscapeUnicodeState::Done => None, - - EscapeUnicodeState::RightBrace | - EscapeUnicodeState::Value | - 
EscapeUnicodeState::LeftBrace | - EscapeUnicodeState::Type | - EscapeUnicodeState::Backslash => Some('}'), - } - } -} - -#[stable(feature = "exact_size_escape", since = "1.11.0")] -impl ExactSizeIterator for EscapeUnicode { - #[inline] - fn len(&self) -> usize { - // The match is a single memory access with no branching - self.hex_digit_idx + match self.state { - EscapeUnicodeState::Done => 0, - EscapeUnicodeState::RightBrace => 1, - EscapeUnicodeState::Value => 2, - EscapeUnicodeState::LeftBrace => 3, - EscapeUnicodeState::Type => 4, - EscapeUnicodeState::Backslash => 5, - } - } -} - -#[unstable(feature = "fused", issue = "35602")] -impl FusedIterator for EscapeUnicode {} - -#[stable(feature = "char_struct_display", since = "1.16.0")] -impl fmt::Display for EscapeUnicode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for c in self.clone() { - f.write_char(c)?; - } - Ok(()) - } -} - -/// An iterator that yields the literal escape code of a `char`. -/// -/// This `struct` is created by the [`escape_default`] method on [`char`]. See -/// its documentation for more. 
-/// -/// [`escape_default`]: ../../std/primitive.char.html#method.escape_default -/// [`char`]: ../../std/primitive.char.html -#[derive(Clone, Debug)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct EscapeDefault { - state: EscapeDefaultState -} - -#[derive(Clone, Debug)] -enum EscapeDefaultState { - Done, - Char(char), - Backslash(char), - Unicode(EscapeUnicode), -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for EscapeDefault { - type Item = char; - - fn next(&mut self) -> Option { - match self.state { - EscapeDefaultState::Backslash(c) => { - self.state = EscapeDefaultState::Char(c); - Some('\\') - } - EscapeDefaultState::Char(c) => { - self.state = EscapeDefaultState::Done; - Some(c) - } - EscapeDefaultState::Done => None, - EscapeDefaultState::Unicode(ref mut iter) => iter.next(), - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let n = self.len(); - (n, Some(n)) - } - - #[inline] - fn count(self) -> usize { - self.len() - } - - fn nth(&mut self, n: usize) -> Option { - match self.state { - EscapeDefaultState::Backslash(c) if n == 0 => { - self.state = EscapeDefaultState::Char(c); - Some('\\') - }, - EscapeDefaultState::Backslash(c) if n == 1 => { - self.state = EscapeDefaultState::Done; - Some(c) - }, - EscapeDefaultState::Backslash(_) => { - self.state = EscapeDefaultState::Done; - None - }, - EscapeDefaultState::Char(c) => { - self.state = EscapeDefaultState::Done; - - if n == 0 { - Some(c) - } else { - None - } - }, - EscapeDefaultState::Done => return None, - EscapeDefaultState::Unicode(ref mut i) => return i.nth(n), - } - } - - fn last(self) -> Option { - match self.state { - EscapeDefaultState::Unicode(iter) => iter.last(), - EscapeDefaultState::Done => None, - EscapeDefaultState::Backslash(c) | EscapeDefaultState::Char(c) => Some(c), - } - } -} - -#[stable(feature = "exact_size_escape", since = "1.11.0")] -impl ExactSizeIterator for EscapeDefault { - fn len(&self) -> usize { - match self.state { - 
EscapeDefaultState::Done => 0, - EscapeDefaultState::Char(_) => 1, - EscapeDefaultState::Backslash(_) => 2, - EscapeDefaultState::Unicode(ref iter) => iter.len(), - } - } -} - -#[unstable(feature = "fused", issue = "35602")] -impl FusedIterator for EscapeDefault {} - -#[stable(feature = "char_struct_display", since = "1.16.0")] -impl fmt::Display for EscapeDefault { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - for c in self.clone() { - f.write_char(c)?; - } - Ok(()) - } -} - -/// An iterator that yields the literal escape code of a `char`. -/// -/// This `struct` is created by the [`escape_debug`] method on [`char`]. See its -/// documentation for more. -/// -/// [`escape_debug`]: ../../std/primitive.char.html#method.escape_debug -/// [`char`]: ../../std/primitive.char.html -#[stable(feature = "char_escape_debug", since = "1.20.0")] -#[derive(Clone, Debug)] -pub struct EscapeDebug(EscapeDefault); - -#[stable(feature = "char_escape_debug", since = "1.20.0")] -impl Iterator for EscapeDebug { - type Item = char; - fn next(&mut self) -> Option { self.0.next() } - fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } -} - -#[stable(feature = "char_escape_debug", since = "1.20.0")] -impl ExactSizeIterator for EscapeDebug { } - -#[unstable(feature = "fused", issue = "35602")] -impl FusedIterator for EscapeDebug {} - -#[stable(feature = "char_escape_debug", since = "1.20.0")] -impl fmt::Display for EscapeDebug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.0, f) - } -} - - - -/// An iterator over an iterator of bytes of the characters the bytes represent -/// as UTF-8 -#[unstable(feature = "decode_utf8", issue = "33906")] -#[derive(Clone, Debug)] -pub struct DecodeUtf8>(::iter::Peekable); - -/// Decodes an `Iterator` of bytes as UTF-8. 
-#[unstable(feature = "decode_utf8", issue = "33906")] -#[inline] -pub fn decode_utf8>(i: I) -> DecodeUtf8 { - DecodeUtf8(i.into_iter().peekable()) -} - -/// `::next` returns this for an invalid input sequence. -#[unstable(feature = "decode_utf8", issue = "33906")] -#[derive(PartialEq, Eq, Debug)] -pub struct InvalidSequence(()); - -#[unstable(feature = "decode_utf8", issue = "33906")] -impl> Iterator for DecodeUtf8 { - type Item = Result; - #[inline] - - fn next(&mut self) -> Option> { - self.0.next().map(|first_byte| { - // Emit InvalidSequence according to - // Unicode §5.22 Best Practice for U+FFFD Substitution - // http://www.unicode.org/versions/Unicode9.0.0/ch05.pdf#G40630 - - // Roughly: consume at least one byte, - // then validate one byte at a time and stop before the first unexpected byte - // (which might be the valid start of the next byte sequence). - - let mut code_point; - macro_rules! first_byte { - ($mask: expr) => { - code_point = u32::from(first_byte & $mask) - } - } - macro_rules! 
continuation_byte { - () => { continuation_byte!(0x80...0xBF) }; - ($range: pat) => { - match self.0.peek() { - Some(&byte @ $range) => { - code_point = (code_point << 6) | u32::from(byte & 0b0011_1111); - self.0.next(); - } - _ => return Err(InvalidSequence(())) - } - } - } - - match first_byte { - 0x00...0x7F => { - first_byte!(0b1111_1111); - } - 0xC2...0xDF => { - first_byte!(0b0001_1111); - continuation_byte!(); - } - 0xE0 => { - first_byte!(0b0000_1111); - continuation_byte!(0xA0...0xBF); // 0x80...0x9F here are overlong - continuation_byte!(); - } - 0xE1...0xEC | 0xEE...0xEF => { - first_byte!(0b0000_1111); - continuation_byte!(); - continuation_byte!(); - } - 0xED => { - first_byte!(0b0000_1111); - continuation_byte!(0x80...0x9F); // 0xA0..0xBF here are surrogates - continuation_byte!(); - } - 0xF0 => { - first_byte!(0b0000_0111); - continuation_byte!(0x90...0xBF); // 0x80..0x8F here are overlong - continuation_byte!(); - continuation_byte!(); - } - 0xF1...0xF3 => { - first_byte!(0b0000_0111); - continuation_byte!(); - continuation_byte!(); - continuation_byte!(); - } - 0xF4 => { - first_byte!(0b0000_0111); - continuation_byte!(0x80...0x8F); // 0x90..0xBF here are beyond char::MAX - continuation_byte!(); - continuation_byte!(); - } - _ => return Err(InvalidSequence(())) // Illegal first byte, overlong, or beyond MAX - } - unsafe { - Ok(from_u32_unchecked(code_point)) - } - }) - } -} - -#[unstable(feature = "fused", issue = "35602")] -impl> FusedIterator for DecodeUtf8 {} diff --git a/src/libcore/char/convert.rs b/src/libcore/char/convert.rs new file mode 100644 index 000000000000..803a924eb3a4 --- /dev/null +++ b/src/libcore/char/convert.rs @@ -0,0 +1,304 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Character conversions. + +use convert::TryFrom; +use fmt; +use mem::transmute; +use str::FromStr; +use super::MAX; + +/// Converts a `u32` to a `char`. +/// +/// Note that all [`char`]s are valid [`u32`]s, and can be cast to one with +/// [`as`]: +/// +/// ``` +/// let c = '💯'; +/// let i = c as u32; +/// +/// assert_eq!(128175, i); +/// ``` +/// +/// However, the reverse is not true: not all valid [`u32`]s are valid +/// [`char`]s. `from_u32()` will return `None` if the input is not a valid value +/// for a [`char`]. +/// +/// [`char`]: ../../std/primitive.char.html +/// [`u32`]: ../../std/primitive.u32.html +/// [`as`]: ../../book/first-edition/casting-between-types.html#as +/// +/// For an unsafe version of this function which ignores these checks, see +/// [`from_u32_unchecked`]. +/// +/// [`from_u32_unchecked`]: fn.from_u32_unchecked.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::char; +/// +/// let c = char::from_u32(0x2764); +/// +/// assert_eq!(Some('❤'), c); +/// ``` +/// +/// Returning `None` when the input is not a valid [`char`]: +/// +/// ``` +/// use std::char; +/// +/// let c = char::from_u32(0x110000); +/// +/// assert_eq!(None, c); +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn from_u32(i: u32) -> Option { + char::try_from(i).ok() +} + +/// Converts a `u32` to a `char`, ignoring validity. +/// +/// Note that all [`char`]s are valid [`u32`]s, and can be cast to one with +/// [`as`]: +/// +/// ``` +/// let c = '💯'; +/// let i = c as u32; +/// +/// assert_eq!(128175, i); +/// ``` +/// +/// However, the reverse is not true: not all valid [`u32`]s are valid +/// [`char`]s. `from_u32_unchecked()` will ignore this, and blindly cast to +/// [`char`], possibly creating an invalid one. 
+/// +/// [`char`]: ../../std/primitive.char.html +/// [`u32`]: ../../std/primitive.u32.html +/// [`as`]: ../../book/first-edition/casting-between-types.html#as +/// +/// # Safety +/// +/// This function is unsafe, as it may construct invalid `char` values. +/// +/// For a safe version of this function, see the [`from_u32`] function. +/// +/// [`from_u32`]: fn.from_u32.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::char; +/// +/// let c = unsafe { char::from_u32_unchecked(0x2764) }; +/// +/// assert_eq!('❤', c); +/// ``` +#[inline] +#[stable(feature = "char_from_unchecked", since = "1.5.0")] +pub unsafe fn from_u32_unchecked(i: u32) -> char { + transmute(i) +} + +#[stable(feature = "char_convert", since = "1.13.0")] +impl From for u32 { + #[inline] + fn from(c: char) -> Self { + c as u32 + } +} + +/// Maps a byte in 0x00...0xFF to a `char` whose code point has the same value, in U+0000 to U+00FF. +/// +/// Unicode is designed such that this effectively decodes bytes +/// with the character encoding that IANA calls ISO-8859-1. +/// This encoding is compatible with ASCII. +/// +/// Note that this is different from ISO/IEC 8859-1 a.k.a. ISO 8859-1 (with one less hyphen), +/// which leaves some "blanks", byte values that are not assigned to any character. +/// ISO-8859-1 (the IANA one) assigns them to the C0 and C1 control codes. +/// +/// Note that this is *also* different from Windows-1252 a.k.a. code page 1252, +/// which is a superset ISO/IEC 8859-1 that assigns some (not all!) blanks +/// to punctuation and various Latin characters. +/// +/// To confuse things further, [on the Web](https://encoding.spec.whatwg.org/) +/// `ascii`, `iso-8859-1`, and `windows-1252` are all aliases +/// for a superset of Windows-1252 that fills the remaining blanks with corresponding +/// C0 and C1 control codes. 
+#[stable(feature = "char_convert", since = "1.13.0")] +impl From for char { + #[inline] + fn from(i: u8) -> Self { + i as char + } +} + + +/// An error which can be returned when parsing a char. +#[stable(feature = "char_from_str", since = "1.20.0")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ParseCharError { + kind: CharErrorKind, +} + +impl ParseCharError { + #[unstable(feature = "char_error_internals", + reason = "this method should not be available publicly", + issue = "0")] + #[doc(hidden)] + pub fn __description(&self) -> &str { + match self.kind { + CharErrorKind::EmptyString => { + "cannot parse char from empty string" + }, + CharErrorKind::TooManyChars => "too many characters in string" + } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +enum CharErrorKind { + EmptyString, + TooManyChars, +} + +#[stable(feature = "char_from_str", since = "1.20.0")] +impl fmt::Display for ParseCharError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.__description().fmt(f) + } +} + + +#[stable(feature = "char_from_str", since = "1.20.0")] +impl FromStr for char { + type Err = ParseCharError; + + #[inline] + fn from_str(s: &str) -> Result { + let mut chars = s.chars(); + match (chars.next(), chars.next()) { + (None, _) => { + Err(ParseCharError { kind: CharErrorKind::EmptyString }) + }, + (Some(c), None) => Ok(c), + _ => { + Err(ParseCharError { kind: CharErrorKind::TooManyChars }) + } + } + } +} + + +#[unstable(feature = "try_from", issue = "33417")] +impl TryFrom for char { + type Error = CharTryFromError; + + #[inline] + fn try_from(i: u32) -> Result { + if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { + Err(CharTryFromError(())) + } else { + Ok(unsafe { from_u32_unchecked(i) }) + } + } +} + +/// The error type returned when a conversion from u32 to char fails. 
+#[unstable(feature = "try_from", issue = "33417")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct CharTryFromError(()); + +#[unstable(feature = "try_from", issue = "33417")] +impl fmt::Display for CharTryFromError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "converted integer out of range for `char`".fmt(f) + } +} + +/// Converts a digit in the given radix to a `char`. +/// +/// A 'radix' here is sometimes also called a 'base'. A radix of two +/// indicates a binary number, a radix of ten, decimal, and a radix of +/// sixteen, hexadecimal, to give some common values. Arbitrary +/// radices are supported. +/// +/// `from_digit()` will return `None` if the input is not a digit in +/// the given radix. +/// +/// # Panics +/// +/// Panics if given a radix larger than 36. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::char; +/// +/// let c = char::from_digit(4, 10); +/// +/// assert_eq!(Some('4'), c); +/// +/// // Decimal 11 is a single digit in base 16 +/// let c = char::from_digit(11, 16); +/// +/// assert_eq!(Some('b'), c); +/// ``` +/// +/// Returning `None` when the input is not a digit: +/// +/// ``` +/// use std::char; +/// +/// let c = char::from_digit(20, 10); +/// +/// assert_eq!(None, c); +/// ``` +/// +/// Passing a large radix, causing a panic: +/// +/// ``` +/// use std::thread; +/// use std::char; +/// +/// let result = thread::spawn(|| { +/// // this panics +/// let c = char::from_digit(1, 37); +/// }).join(); +/// +/// assert!(result.is_err()); +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn from_digit(num: u32, radix: u32) -> Option { + if radix > 36 { + panic!("from_digit: radix is too high (maximum 36)"); + } + if num < radix { + let num = num as u8; + if num < 10 { + Some((b'0' + num) as char) + } else { + Some((b'a' + num - 10) as char) + } + } else { + None + } +} + diff --git a/src/libcore/char/decode.rs b/src/libcore/char/decode.rs new file mode 100644 index 
000000000000..cc52f048b891 --- /dev/null +++ b/src/libcore/char/decode.rs @@ -0,0 +1,143 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! UTF-8 and UTF-16 decoding iterators + +use fmt; +use super::from_u32_unchecked; + +/// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s. +#[stable(feature = "decode_utf16", since = "1.9.0")] +#[derive(Clone, Debug)] +pub struct DecodeUtf16 + where I: Iterator +{ + iter: I, + buf: Option, +} + +/// An error that can be returned when decoding UTF-16 code points. +#[stable(feature = "decode_utf16", since = "1.9.0")] +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct DecodeUtf16Error { + code: u16, +} + +/// Create an iterator over the UTF-16 encoded code points in `iter`, +/// returning unpaired surrogates as `Err`s. 
+/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::char::decode_utf16; +/// +/// fn main() { +/// // 𝄞music +/// let v = [0xD834, 0xDD1E, 0x006d, 0x0075, +/// 0x0073, 0xDD1E, 0x0069, 0x0063, +/// 0xD834]; +/// +/// assert_eq!(decode_utf16(v.iter().cloned()) +/// .map(|r| r.map_err(|e| e.unpaired_surrogate())) +/// .collect::>(), +/// vec![Ok('𝄞'), +/// Ok('m'), Ok('u'), Ok('s'), +/// Err(0xDD1E), +/// Ok('i'), Ok('c'), +/// Err(0xD834)]); +/// } +/// ``` +/// +/// A lossy decoder can be obtained by replacing `Err` results with the replacement character: +/// +/// ``` +/// use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; +/// +/// fn main() { +/// // 𝄞music +/// let v = [0xD834, 0xDD1E, 0x006d, 0x0075, +/// 0x0073, 0xDD1E, 0x0069, 0x0063, +/// 0xD834]; +/// +/// assert_eq!(decode_utf16(v.iter().cloned()) +/// .map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)) +/// .collect::(), +/// "𝄞mus�ic�"); +/// } +/// ``` +#[stable(feature = "decode_utf16", since = "1.9.0")] +#[inline] +pub fn decode_utf16>(iter: I) -> DecodeUtf16 { + DecodeUtf16 { + iter: iter.into_iter(), + buf: None, + } +} + +#[stable(feature = "decode_utf16", since = "1.9.0")] +impl> Iterator for DecodeUtf16 { + type Item = Result; + + fn next(&mut self) -> Option> { + let u = match self.buf.take() { + Some(buf) => buf, + None => self.iter.next()? + }; + + if u < 0xD800 || 0xDFFF < u { + // not a surrogate + Some(Ok(unsafe { from_u32_unchecked(u as u32) })) + } else if u >= 0xDC00 { + // a trailing surrogate + Some(Err(DecodeUtf16Error { code: u })) + } else { + let u2 = match self.iter.next() { + Some(u2) => u2, + // eof + None => return Some(Err(DecodeUtf16Error { code: u })), + }; + if u2 < 0xDC00 || u2 > 0xDFFF { + // not a trailing surrogate so we're not a valid + // surrogate pair, so rewind to redecode u2 next time. + self.buf = Some(u2); + return Some(Err(DecodeUtf16Error { code: u })); + } + + // all ok, so lets decode it. 
+ let c = (((u - 0xD800) as u32) << 10 | (u2 - 0xDC00) as u32) + 0x1_0000; + Some(Ok(unsafe { from_u32_unchecked(c) })) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (low, high) = self.iter.size_hint(); + // we could be entirely valid surrogates (2 elements per + // char), or entirely non-surrogates (1 element per char) + (low / 2, high) + } +} + +impl DecodeUtf16Error { + /// Returns the unpaired surrogate which caused this error. + #[stable(feature = "decode_utf16", since = "1.9.0")] + pub fn unpaired_surrogate(&self) -> u16 { + self.code + } +} + +#[stable(feature = "decode_utf16", since = "1.9.0")] +impl fmt::Display for DecodeUtf16Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "unpaired surrogate found: {:x}", self.code) + } +} diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs new file mode 100644 index 000000000000..64a17786b0a6 --- /dev/null +++ b/src/libcore/char/methods.rs @@ -0,0 +1,1395 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! impl char {} + +use slice; +use str::from_utf8_unchecked_mut; +use super::*; +use unicode::printable::is_printable; +use unicode::tables::{conversions, derived_property, general_category, property}; + +#[lang = "char"] +impl char { + /// Checks if a `char` is a digit in the given radix. + /// + /// A 'radix' here is sometimes also called a 'base'. A radix of two + /// indicates a binary number, a radix of ten, decimal, and a radix of + /// sixteen, hexadecimal, to give some common values. Arbitrary + /// radices are supported. 
+ /// + /// Compared to `is_numeric()`, this function only recognizes the characters + /// `0-9`, `a-z` and `A-Z`. + /// + /// 'Digit' is defined to be only the following characters: + /// + /// * `0-9` + /// * `a-z` + /// * `A-Z` + /// + /// For a more comprehensive understanding of 'digit', see [`is_numeric`][is_numeric]. + /// + /// [is_numeric]: #method.is_numeric + /// + /// # Panics + /// + /// Panics if given a radix larger than 36. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!('1'.is_digit(10)); + /// assert!('f'.is_digit(16)); + /// assert!(!'f'.is_digit(10)); + /// ``` + /// + /// Passing a large radix, causing a panic: + /// + /// ``` + /// use std::thread; + /// + /// let result = thread::spawn(|| { + /// // this panics + /// '1'.is_digit(37); + /// }).join(); + /// + /// assert!(result.is_err()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_digit(self, radix: u32) -> bool { + self.to_digit(radix).is_some() + } + + /// Converts a `char` to a digit in the given radix. + /// + /// A 'radix' here is sometimes also called a 'base'. A radix of two + /// indicates a binary number, a radix of ten, decimal, and a radix of + /// sixteen, hexadecimal, to give some common values. Arbitrary + /// radices are supported. + /// + /// 'Digit' is defined to be only the following characters: + /// + /// * `0-9` + /// * `a-z` + /// * `A-Z` + /// + /// # Errors + /// + /// Returns `None` if the `char` does not refer to a digit in the given radix. + /// + /// # Panics + /// + /// Panics if given a radix larger than 36. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert_eq!('1'.to_digit(10), Some(1)); + /// assert_eq!('f'.to_digit(16), Some(15)); + /// ``` + /// + /// Passing a non-digit results in failure: + /// + /// ``` + /// assert_eq!('f'.to_digit(10), None); + /// assert_eq!('z'.to_digit(16), None); + /// ``` + /// + /// Passing a large radix, causing a panic: + /// + /// ``` + /// use std::thread; + /// + /// let result = thread::spawn(|| { + /// '1'.to_digit(37); + /// }).join(); + /// + /// assert!(result.is_err()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn to_digit(self, radix: u32) -> Option { + if radix > 36 { + panic!("to_digit: radix is too high (maximum 36)"); + } + let val = match self { + '0' ..= '9' => self as u32 - '0' as u32, + 'a' ..= 'z' => self as u32 - 'a' as u32 + 10, + 'A' ..= 'Z' => self as u32 - 'A' as u32 + 10, + _ => return None, + }; + if val < radix { Some(val) } + else { None } + } + + /// Returns an iterator that yields the hexadecimal Unicode escape of a + /// character as `char`s. + /// + /// This will escape characters with the Rust syntax of the form + /// `\u{NNNNNN}` where `NNNNNN` is a hexadecimal representation. 
+ /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in '❤'.escape_unicode() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", '❤'.escape_unicode()); + /// ``` + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("\\u{{2764}}"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!('❤'.escape_unicode().to_string(), "\\u{2764}"); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn escape_unicode(self) -> EscapeUnicode { + let c = self as u32; + + // or-ing 1 ensures that for c==0 the code computes that one + // digit should be printed and (which is the same) avoids the + // (31 - 32) underflow + let msb = 31 - (c | 1).leading_zeros(); + + // the index of the most significant hex digit + let ms_hex_digit = msb / 4; + EscapeUnicode { + c: self, + state: EscapeUnicodeState::Backslash, + hex_digit_idx: ms_hex_digit as usize, + } + } + + /// An extended version of `escape_debug` that optionally permits escaping + /// Extended Grapheme codepoints. This allows us to format characters like + /// nonspacing marks better when they're at the start of a string. 
+ #[doc(hidden)] + #[unstable(feature = "str_internals", issue = "0")] + #[inline] + pub fn escape_debug_ext(self, escape_grapheme_extended: bool) -> EscapeDebug { + let init_state = match self { + '\t' => EscapeDefaultState::Backslash('t'), + '\r' => EscapeDefaultState::Backslash('r'), + '\n' => EscapeDefaultState::Backslash('n'), + '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self), + _ if escape_grapheme_extended && self.is_grapheme_extended() => { + EscapeDefaultState::Unicode(self.escape_unicode()) + } + _ if is_printable(self) => EscapeDefaultState::Char(self), + _ => EscapeDefaultState::Unicode(self.escape_unicode()), + }; + EscapeDebug(EscapeDefault { state: init_state }) + } + + /// Returns an iterator that yields the literal escape code of a character + /// as `char`s. + /// + /// This will escape the characters similar to the `Debug` implementations + /// of `str` or `char`. + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in '\n'.escape_debug() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", '\n'.escape_debug()); + /// ``` + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("\\n"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!('\n'.escape_debug().to_string(), "\\n"); + /// ``` + #[stable(feature = "char_escape_debug", since = "1.20.0")] + #[inline] + pub fn escape_debug(self) -> EscapeDebug { + self.escape_debug_ext(true) + } + + /// Returns an iterator that yields the literal escape code of a character + /// as `char`s. + /// + /// The default is chosen with a bias toward producing literals that are + /// legal in a variety of languages, including C++11 and similar C-family + /// languages. The exact rules are: + /// + /// * Tab is escaped as `\t`. + /// * Carriage return is escaped as `\r`. + /// * Line feed is escaped as `\n`. + /// * Single quote is escaped as `\'`. 
+ /// * Double quote is escaped as `\"`. + /// * Backslash is escaped as `\\`. + /// * Any character in the 'printable ASCII' range `0x20` .. `0x7e` + /// inclusive is not escaped. + /// * All other characters are given hexadecimal Unicode escapes; see + /// [`escape_unicode`][escape_unicode]. + /// + /// [escape_unicode]: #method.escape_unicode + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in '"'.escape_default() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", '"'.escape_default()); + /// ``` + /// + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("\\\""); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!('"'.escape_default().to_string(), "\\\""); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn escape_default(self) -> EscapeDefault { + let init_state = match self { + '\t' => EscapeDefaultState::Backslash('t'), + '\r' => EscapeDefaultState::Backslash('r'), + '\n' => EscapeDefaultState::Backslash('n'), + '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self), + '\x20' ..= '\x7e' => EscapeDefaultState::Char(self), + _ => EscapeDefaultState::Unicode(self.escape_unicode()) + }; + EscapeDefault { state: init_state } + } + + /// Returns the number of bytes this `char` would need if encoded in UTF-8. + /// + /// That number of bytes is always between 1 and 4, inclusive. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let len = 'A'.len_utf8(); + /// assert_eq!(len, 1); + /// + /// let len = 'ß'.len_utf8(); + /// assert_eq!(len, 2); + /// + /// let len = 'ℝ'.len_utf8(); + /// assert_eq!(len, 3); + /// + /// let len = '💣'.len_utf8(); + /// assert_eq!(len, 4); + /// ``` + /// + /// The `&str` type guarantees that its contents are UTF-8, and so we can compare the length it + /// would take if each code point was represented as a `char` vs in the `&str` itself: + /// + /// ``` + /// // as chars + /// let eastern = '東'; + /// let capitol = '京'; + /// + /// // both can be represented as three bytes + /// assert_eq!(3, eastern.len_utf8()); + /// assert_eq!(3, capitol.len_utf8()); + /// + /// // as a &str, these two are encoded in UTF-8 + /// let tokyo = "東京"; + /// + /// let len = eastern.len_utf8() + capitol.len_utf8(); + /// + /// // we can see that they take six bytes total... + /// assert_eq!(6, tokyo.len()); + /// + /// // ... just like the &str + /// assert_eq!(len, tokyo.len()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn len_utf8(self) -> usize { + let code = self as u32; + if code < MAX_ONE_B { + 1 + } else if code < MAX_TWO_B { + 2 + } else if code < MAX_THREE_B { + 3 + } else { + 4 + } + } + + /// Returns the number of 16-bit code units this `char` would need if + /// encoded in UTF-16. + /// + /// See the documentation for [`len_utf8`] for more explanation of this + /// concept. This function is a mirror, but for UTF-16 instead of UTF-8. 
+ /// + /// [`len_utf8`]: #method.len_utf8 + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let n = 'ß'.len_utf16(); + /// assert_eq!(n, 1); + /// + /// let len = '💣'.len_utf16(); + /// assert_eq!(len, 2); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn len_utf16(self) -> usize { + let ch = self as u32; + if (ch & 0xFFFF) == ch { 1 } else { 2 } + } + + /// Encodes this character as UTF-8 into the provided byte buffer, + /// and then returns the subslice of the buffer that contains the encoded character. + /// + /// # Panics + /// + /// Panics if the buffer is not large enough. + /// A buffer of length four is large enough to encode any `char`. + /// + /// # Examples + /// + /// In both of these examples, 'ß' takes two bytes to encode. + /// + /// ``` + /// let mut b = [0; 2]; + /// + /// let result = 'ß'.encode_utf8(&mut b); + /// + /// assert_eq!(result, "ß"); + /// + /// assert_eq!(result.len(), 2); + /// ``` + /// + /// A buffer that's too small: + /// + /// ``` + /// use std::thread; + /// + /// let result = thread::spawn(|| { + /// let mut b = [0; 1]; + /// + /// // this panics + /// 'ß'.encode_utf8(&mut b); + /// }).join(); + /// + /// assert!(result.is_err()); + /// ``` + #[stable(feature = "unicode_encode_char", since = "1.15.0")] + #[inline] + pub fn encode_utf8(self, dst: &mut [u8]) -> &mut str { + let code = self as u32; + unsafe { + let len = + if code < MAX_ONE_B && !dst.is_empty() { + *dst.get_unchecked_mut(0) = code as u8; + 1 + } else if code < MAX_TWO_B && dst.len() >= 2 { + *dst.get_unchecked_mut(0) = (code >> 6 & 0x1F) as u8 | TAG_TWO_B; + *dst.get_unchecked_mut(1) = (code & 0x3F) as u8 | TAG_CONT; + 2 + } else if code < MAX_THREE_B && dst.len() >= 3 { + *dst.get_unchecked_mut(0) = (code >> 12 & 0x0F) as u8 | TAG_THREE_B; + *dst.get_unchecked_mut(1) = (code >> 6 & 0x3F) as u8 | TAG_CONT; + *dst.get_unchecked_mut(2) = (code & 0x3F) as u8 | TAG_CONT; + 3 + } else if dst.len() >= 4 { + 
*dst.get_unchecked_mut(0) = (code >> 18 & 0x07) as u8 | TAG_FOUR_B; + *dst.get_unchecked_mut(1) = (code >> 12 & 0x3F) as u8 | TAG_CONT; + *dst.get_unchecked_mut(2) = (code >> 6 & 0x3F) as u8 | TAG_CONT; + *dst.get_unchecked_mut(3) = (code & 0x3F) as u8 | TAG_CONT; + 4 + } else { + panic!("encode_utf8: need {} bytes to encode U+{:X}, but the buffer has {}", + from_u32_unchecked(code).len_utf8(), + code, + dst.len()) + }; + from_utf8_unchecked_mut(dst.get_unchecked_mut(..len)) + } + } + + /// Encodes this character as UTF-16 into the provided `u16` buffer, + /// and then returns the subslice of the buffer that contains the encoded character. + /// + /// # Panics + /// + /// Panics if the buffer is not large enough. + /// A buffer of length 2 is large enough to encode any `char`. + /// + /// # Examples + /// + /// In both of these examples, '𝕊' takes two `u16`s to encode. + /// + /// ``` + /// let mut b = [0; 2]; + /// + /// let result = '𝕊'.encode_utf16(&mut b); + /// + /// assert_eq!(result.len(), 2); + /// ``` + /// + /// A buffer that's too small: + /// + /// ``` + /// use std::thread; + /// + /// let result = thread::spawn(|| { + /// let mut b = [0; 1]; + /// + /// // this panics + /// '𝕊'.encode_utf16(&mut b); + /// }).join(); + /// + /// assert!(result.is_err()); + /// ``` + #[stable(feature = "unicode_encode_char", since = "1.15.0")] + #[inline] + pub fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] { + let mut code = self as u32; + unsafe { + if (code & 0xFFFF) == code && !dst.is_empty() { + // The BMP falls through (assuming non-surrogate, as it should) + *dst.get_unchecked_mut(0) = code as u16; + slice::from_raw_parts_mut(dst.as_mut_ptr(), 1) + } else if dst.len() >= 2 { + // Supplementary planes break into surrogates. 
+ code -= 0x1_0000; + *dst.get_unchecked_mut(0) = 0xD800 | ((code >> 10) as u16); + *dst.get_unchecked_mut(1) = 0xDC00 | ((code as u16) & 0x3FF); + slice::from_raw_parts_mut(dst.as_mut_ptr(), 2) + } else { + panic!("encode_utf16: need {} units to encode U+{:X}, but the buffer has {}", + from_u32_unchecked(code).len_utf16(), + code, + dst.len()) + } + } + } + + /// Returns true if this `char` is an alphabetic code point, and false if not. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!('a'.is_alphabetic()); + /// assert!('京'.is_alphabetic()); + /// + /// let c = '💝'; + /// // love is many things, but it is not alphabetic + /// assert!(!c.is_alphabetic()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_alphabetic(self) -> bool { + match self { + 'a'..='z' | 'A'..='Z' => true, + c if c > '\x7f' => derived_property::Alphabetic(c), + _ => false, + } + } + + /// Returns true if this `char` satisfies the 'XID_Start' Unicode property, and false + /// otherwise. + /// + /// 'XID_Start' is a Unicode Derived Property specified in + /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications), + /// mostly similar to `ID_Start` but modified for closure under `NFKx`. + #[unstable(feature = "rustc_private", + reason = "mainly needed for compiler internals", + issue = "27812")] + #[inline] + pub fn is_xid_start(self) -> bool { + derived_property::XID_Start(self) + } + + /// Returns true if this `char` satisfies the 'XID_Continue' Unicode property, and false + /// otherwise. + /// + /// 'XID_Continue' is a Unicode Derived Property specified in + /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications), + /// mostly similar to 'ID_Continue' but modified for closure under NFKx. 
+ #[unstable(feature = "rustc_private", + reason = "mainly needed for compiler internals", + issue = "27812")] + #[inline] + pub fn is_xid_continue(self) -> bool { + derived_property::XID_Continue(self) + } + + /// Returns true if this `char` is lowercase, and false otherwise. + /// + /// 'Lowercase' is defined according to the terms of the Unicode Derived Core + /// Property `Lowercase`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!('a'.is_lowercase()); + /// assert!('δ'.is_lowercase()); + /// assert!(!'A'.is_lowercase()); + /// assert!(!'Δ'.is_lowercase()); + /// + /// // The various Chinese scripts do not have case, and so: + /// assert!(!'中'.is_lowercase()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_lowercase(self) -> bool { + match self { + 'a'..='z' => true, + c if c > '\x7f' => derived_property::Lowercase(c), + _ => false, + } + } + + /// Returns true if this `char` is uppercase, and false otherwise. + /// + /// 'Uppercase' is defined according to the terms of the Unicode Derived Core + /// Property `Uppercase`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!(!'a'.is_uppercase()); + /// assert!(!'δ'.is_uppercase()); + /// assert!('A'.is_uppercase()); + /// assert!('Δ'.is_uppercase()); + /// + /// // The various Chinese scripts do not have case, and so: + /// assert!(!'中'.is_uppercase()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_uppercase(self) -> bool { + match self { + 'A'..='Z' => true, + c if c > '\x7f' => derived_property::Uppercase(c), + _ => false, + } + } + + /// Returns true if this `char` is whitespace, and false otherwise. + /// + /// 'Whitespace' is defined according to the terms of the Unicode Derived Core + /// Property `White_Space`. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!(' '.is_whitespace()); + /// + /// // a non-breaking space + /// assert!('\u{A0}'.is_whitespace()); + /// + /// assert!(!'越'.is_whitespace()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_whitespace(self) -> bool { + match self { + ' ' | '\x09'..='\x0d' => true, + c if c > '\x7f' => property::White_Space(c), + _ => false, + } + } + + /// Returns true if this `char` is alphanumeric, and false otherwise. + /// + /// 'Alphanumeric'-ness is defined in terms of the Unicode General Categories + /// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!('٣'.is_alphanumeric()); + /// assert!('7'.is_alphanumeric()); + /// assert!('৬'.is_alphanumeric()); + /// assert!('¾'.is_alphanumeric()); + /// assert!('①'.is_alphanumeric()); + /// assert!('K'.is_alphanumeric()); + /// assert!('و'.is_alphanumeric()); + /// assert!('藏'.is_alphanumeric()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_alphanumeric(self) -> bool { + self.is_alphabetic() || self.is_numeric() + } + + /// Returns true if this `char` is a control code point, and false otherwise. + /// + /// 'Control code point' is defined in terms of the Unicode General + /// Category `Cc`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // U+009C, STRING TERMINATOR + /// assert!('œ'.is_control()); + /// assert!(!'q'.is_control()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_control(self) -> bool { + general_category::Cc(self) + } + + /// Returns true if this `char` is an extended grapheme character, and false otherwise. + /// + /// 'Extended grapheme character' is defined in terms of the Unicode Shaping and Rendering + /// Category `Grapheme_Extend`. 
+ #[inline] + pub(crate) fn is_grapheme_extended(self) -> bool { + derived_property::Grapheme_Extend(self) + } + + /// Returns true if this `char` is numeric, and false otherwise. + /// + /// 'Numeric'-ness is defined in terms of the Unicode General Categories + /// 'Nd', 'Nl', 'No'. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert!('٣'.is_numeric()); + /// assert!('7'.is_numeric()); + /// assert!('৬'.is_numeric()); + /// assert!('¾'.is_numeric()); + /// assert!('①'.is_numeric()); + /// assert!(!'K'.is_numeric()); + /// assert!(!'و'.is_numeric()); + /// assert!(!'藏'.is_numeric()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_numeric(self) -> bool { + match self { + '0'..='9' => true, + c if c > '\x7f' => general_category::N(c), + _ => false, + } + } + + /// Returns an iterator that yields the lowercase equivalent of a `char` + /// as one or more `char`s. + /// + /// If a character does not have a lowercase equivalent, the same character + /// will be returned back by the iterator. + /// + /// This performs complex unconditional mappings with no tailoring: it maps + /// one Unicode character to its lowercase equivalent according to the + /// [Unicode database] and the additional complex mappings + /// [`SpecialCasing.txt`]. Conditional mappings (based on context or + /// language) are not considered here. + /// + /// For a full reference, see [here][reference]. 
+ /// + /// [Unicode database]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt + /// + /// [`SpecialCasing.txt`]: ftp://ftp.unicode.org/Public/UNIDATA/SpecialCasing.txt + /// + /// [reference]: http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992 + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in 'İ'.to_lowercase() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", 'İ'.to_lowercase()); + /// ``` + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("i\u{307}"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!('C'.to_lowercase().to_string(), "c"); + /// + /// // Sometimes the result is more than one character: + /// assert_eq!('İ'.to_lowercase().to_string(), "i\u{307}"); + /// + /// // Characters that do not have both uppercase and lowercase + /// // convert into themselves. + /// assert_eq!('山'.to_lowercase().to_string(), "山"); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn to_lowercase(self) -> ToLowercase { + ToLowercase(CaseMappingIter::new(conversions::to_lower(self))) + } + + /// Returns an iterator that yields the uppercase equivalent of a `char` + /// as one or more `char`s. + /// + /// If a character does not have an uppercase equivalent, the same character + /// will be returned back by the iterator. + /// + /// This performs complex unconditional mappings with no tailoring: it maps + /// one Unicode character to its uppercase equivalent according to the + /// [Unicode database] and the additional complex mappings + /// [`SpecialCasing.txt`]. Conditional mappings (based on context or + /// language) are not considered here. + /// + /// For a full reference, see [here][reference]. 
+ /// + /// [Unicode database]: ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt + /// + /// [`SpecialCasing.txt`]: ftp://ftp.unicode.org/Public/UNIDATA/SpecialCasing.txt + /// + /// [reference]: http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992 + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in 'ß'.to_uppercase() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", 'ß'.to_uppercase()); + /// ``` + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("SS"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!('c'.to_uppercase().to_string(), "C"); + /// + /// // Sometimes the result is more than one character: + /// assert_eq!('ß'.to_uppercase().to_string(), "SS"); + /// + /// // Characters that do not have both uppercase and lowercase + /// // convert into themselves. + /// assert_eq!('山'.to_uppercase().to_string(), "山"); + /// ``` + /// + /// # Note on locale + /// + /// In Turkish, the equivalent of 'i' in Latin has five forms instead of two: + /// + /// * 'Dotless': I / ı, sometimes written ï + /// * 'Dotted': İ / i + /// + /// Note that the lowercase dotted 'i' is the same as the Latin. Therefore: + /// + /// ``` + /// let upper_i = 'i'.to_uppercase().to_string(); + /// ``` + /// + /// The value of `upper_i` here relies on the language of the text: if we're + /// in `en-US`, it should be `"I"`, but if we're in `tr_TR`, it should + /// be `"İ"`. `to_uppercase()` does not take this into account, and so: + /// + /// ``` + /// let upper_i = 'i'.to_uppercase().to_string(); + /// + /// assert_eq!(upper_i, "I"); + /// ``` + /// + /// holds across languages. + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn to_uppercase(self) -> ToUppercase { + ToUppercase(CaseMappingIter::new(conversions::to_upper(self))) + } + + /// Checks if the value is within the ASCII range. 
+ /// + /// # Examples + /// + /// ``` + /// let ascii = 'a'; + /// let non_ascii = '❤'; + /// + /// assert!(ascii.is_ascii()); + /// assert!(!non_ascii.is_ascii()); + /// ``` + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn is_ascii(&self) -> bool { + *self as u32 <= 0x7F + } + + /// Makes a copy of the value in its ASCII upper case equivalent. + /// + /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', + /// but non-ASCII letters are unchanged. + /// + /// To uppercase the value in-place, use [`make_ascii_uppercase`]. + /// + /// To uppercase ASCII characters in addition to non-ASCII characters, use + /// [`to_uppercase`]. + /// + /// # Examples + /// + /// ``` + /// let ascii = 'a'; + /// let non_ascii = '❤'; + /// + /// assert_eq!('A', ascii.to_ascii_uppercase()); + /// assert_eq!('❤', non_ascii.to_ascii_uppercase()); + /// ``` + /// + /// [`make_ascii_uppercase`]: #method.make_ascii_uppercase + /// [`to_uppercase`]: #method.to_uppercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn to_ascii_uppercase(&self) -> char { + if self.is_ascii() { + (*self as u8).to_ascii_uppercase() as char + } else { + *self + } + } + + /// Makes a copy of the value in its ASCII lower case equivalent. + /// + /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', + /// but non-ASCII letters are unchanged. + /// + /// To lowercase the value in-place, use [`make_ascii_lowercase`]. + /// + /// To lowercase ASCII characters in addition to non-ASCII characters, use + /// [`to_lowercase`]. 
+ /// + /// # Examples + /// + /// ``` + /// let ascii = 'A'; + /// let non_ascii = '❤'; + /// + /// assert_eq!('a', ascii.to_ascii_lowercase()); + /// assert_eq!('❤', non_ascii.to_ascii_lowercase()); + /// ``` + /// + /// [`make_ascii_lowercase`]: #method.make_ascii_lowercase + /// [`to_lowercase`]: #method.to_lowercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn to_ascii_lowercase(&self) -> char { + if self.is_ascii() { + (*self as u8).to_ascii_lowercase() as char + } else { + *self + } + } + + /// Checks that two values are an ASCII case-insensitive match. + /// + /// Equivalent to `to_ascii_lowercase(a) == to_ascii_lowercase(b)`. + /// + /// # Examples + /// + /// ``` + /// let upper_a = 'A'; + /// let lower_a = 'a'; + /// let lower_z = 'z'; + /// + /// assert!(upper_a.eq_ignore_ascii_case(&lower_a)); + /// assert!(upper_a.eq_ignore_ascii_case(&upper_a)); + /// assert!(!upper_a.eq_ignore_ascii_case(&lower_z)); + /// ``` + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn eq_ignore_ascii_case(&self, other: &char) -> bool { + self.to_ascii_lowercase() == other.to_ascii_lowercase() + } + + /// Converts this type to its ASCII upper case equivalent in-place. + /// + /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', + /// but non-ASCII letters are unchanged. + /// + /// To return a new uppercased value without modifying the existing one, use + /// [`to_ascii_uppercase`]. + /// + /// # Examples + /// + /// ``` + /// let mut ascii = 'a'; + /// + /// ascii.make_ascii_uppercase(); + /// + /// assert_eq!('A', ascii); + /// ``` + /// + /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn make_ascii_uppercase(&mut self) { + *self = self.to_ascii_uppercase(); + } + + /// Converts this type to its ASCII lower case equivalent in-place. 
+ /// + /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', + /// but non-ASCII letters are unchanged. + /// + /// To return a new lowercased value without modifying the existing one, use + /// [`to_ascii_lowercase`]. + /// + /// # Examples + /// + /// ``` + /// let mut ascii = 'A'; + /// + /// ascii.make_ascii_lowercase(); + /// + /// assert_eq!('a', ascii); + /// ``` + /// + /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn make_ascii_lowercase(&mut self) { + *self = self.to_ascii_lowercase(); + } + + /// Checks if the value is an ASCII alphabetic character: + /// + /// - U+0041 'A' ... U+005A 'Z', or + /// - U+0061 'a' ... U+007A 'z'. + /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(uppercase_a.is_ascii_alphabetic()); + /// assert!(uppercase_g.is_ascii_alphabetic()); + /// assert!(a.is_ascii_alphabetic()); + /// assert!(g.is_ascii_alphabetic()); + /// assert!(!zero.is_ascii_alphabetic()); + /// assert!(!percent.is_ascii_alphabetic()); + /// assert!(!space.is_ascii_alphabetic()); + /// assert!(!lf.is_ascii_alphabetic()); + /// assert!(!esc.is_ascii_alphabetic()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_alphabetic(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_alphabetic() + } + + /// Checks if the value is an ASCII uppercase character: + /// U+0041 'A' ... U+005A 'Z'. 
+ /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(uppercase_a.is_ascii_uppercase()); + /// assert!(uppercase_g.is_ascii_uppercase()); + /// assert!(!a.is_ascii_uppercase()); + /// assert!(!g.is_ascii_uppercase()); + /// assert!(!zero.is_ascii_uppercase()); + /// assert!(!percent.is_ascii_uppercase()); + /// assert!(!space.is_ascii_uppercase()); + /// assert!(!lf.is_ascii_uppercase()); + /// assert!(!esc.is_ascii_uppercase()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_uppercase(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_uppercase() + } + + /// Checks if the value is an ASCII lowercase character: + /// U+0061 'a' ... U+007A 'z'. + /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(!uppercase_a.is_ascii_lowercase()); + /// assert!(!uppercase_g.is_ascii_lowercase()); + /// assert!(a.is_ascii_lowercase()); + /// assert!(g.is_ascii_lowercase()); + /// assert!(!zero.is_ascii_lowercase()); + /// assert!(!percent.is_ascii_lowercase()); + /// assert!(!space.is_ascii_lowercase()); + /// assert!(!lf.is_ascii_lowercase()); + /// assert!(!esc.is_ascii_lowercase()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_lowercase(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_lowercase() + } + + /// Checks if the value is an ASCII alphanumeric character: + /// + /// - U+0041 'A' ... U+005A 'Z', or + /// - U+0061 'a' ... U+007A 'z', or + /// - U+0030 '0' ... U+0039 '9'. 
+ /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(uppercase_a.is_ascii_alphanumeric()); + /// assert!(uppercase_g.is_ascii_alphanumeric()); + /// assert!(a.is_ascii_alphanumeric()); + /// assert!(g.is_ascii_alphanumeric()); + /// assert!(zero.is_ascii_alphanumeric()); + /// assert!(!percent.is_ascii_alphanumeric()); + /// assert!(!space.is_ascii_alphanumeric()); + /// assert!(!lf.is_ascii_alphanumeric()); + /// assert!(!esc.is_ascii_alphanumeric()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_alphanumeric(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_alphanumeric() + } + + /// Checks if the value is an ASCII decimal digit: + /// U+0030 '0' ... U+0039 '9'. + /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(!uppercase_a.is_ascii_digit()); + /// assert!(!uppercase_g.is_ascii_digit()); + /// assert!(!a.is_ascii_digit()); + /// assert!(!g.is_ascii_digit()); + /// assert!(zero.is_ascii_digit()); + /// assert!(!percent.is_ascii_digit()); + /// assert!(!space.is_ascii_digit()); + /// assert!(!lf.is_ascii_digit()); + /// assert!(!esc.is_ascii_digit()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_digit(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_digit() + } + + /// Checks if the value is an ASCII hexadecimal digit: + /// + /// - U+0030 '0' ... U+0039 '9', or + /// - U+0041 'A' ... U+0046 'F', or + /// - U+0061 'a' ... U+0066 'f'. 
+ /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(uppercase_a.is_ascii_hexdigit()); + /// assert!(!uppercase_g.is_ascii_hexdigit()); + /// assert!(a.is_ascii_hexdigit()); + /// assert!(!g.is_ascii_hexdigit()); + /// assert!(zero.is_ascii_hexdigit()); + /// assert!(!percent.is_ascii_hexdigit()); + /// assert!(!space.is_ascii_hexdigit()); + /// assert!(!lf.is_ascii_hexdigit()); + /// assert!(!esc.is_ascii_hexdigit()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_hexdigit(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_hexdigit() + } + + /// Checks if the value is an ASCII punctuation character: + /// + /// - U+0021 ... U+002F `! " # $ % & ' ( ) * + , - . /`, or + /// - U+003A ... U+0040 `: ; < = > ? @`, or + /// - U+005B ... U+0060 ``[ \ ] ^ _ ` ``, or + /// - U+007B ... 
U+007E `{ | } ~` + /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(!uppercase_a.is_ascii_punctuation()); + /// assert!(!uppercase_g.is_ascii_punctuation()); + /// assert!(!a.is_ascii_punctuation()); + /// assert!(!g.is_ascii_punctuation()); + /// assert!(!zero.is_ascii_punctuation()); + /// assert!(percent.is_ascii_punctuation()); + /// assert!(!space.is_ascii_punctuation()); + /// assert!(!lf.is_ascii_punctuation()); + /// assert!(!esc.is_ascii_punctuation()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_punctuation(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_punctuation() + } + + /// Checks if the value is an ASCII graphic character: + /// U+0021 '!' ... U+007E '~'. + /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(uppercase_a.is_ascii_graphic()); + /// assert!(uppercase_g.is_ascii_graphic()); + /// assert!(a.is_ascii_graphic()); + /// assert!(g.is_ascii_graphic()); + /// assert!(zero.is_ascii_graphic()); + /// assert!(percent.is_ascii_graphic()); + /// assert!(!space.is_ascii_graphic()); + /// assert!(!lf.is_ascii_graphic()); + /// assert!(!esc.is_ascii_graphic()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_graphic(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_graphic() + } + + /// Checks if the value is an ASCII whitespace character: + /// U+0020 SPACE, U+0009 HORIZONTAL TAB, U+000A LINE FEED, + /// U+000C FORM FEED, or U+000D CARRIAGE RETURN. 
+ /// + /// Rust uses the WhatWG Infra Standard's [definition of ASCII + /// whitespace][infra-aw]. There are several other definitions in + /// wide use. For instance, [the POSIX locale][pct] includes + /// U+000B VERTICAL TAB as well as all the above characters, + /// but—from the very same specification—[the default rule for + /// "field splitting" in the Bourne shell][bfs] considers *only* + /// SPACE, HORIZONTAL TAB, and LINE FEED as whitespace. + /// + /// If you are writing a program that will process an existing + /// file format, check what that format's definition of whitespace is + /// before using this function. + /// + /// [infra-aw]: https://infra.spec.whatwg.org/#ascii-whitespace + /// [pct]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap07.html#tag_07_03_01 + /// [bfs]: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05 + /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(!uppercase_a.is_ascii_whitespace()); + /// assert!(!uppercase_g.is_ascii_whitespace()); + /// assert!(!a.is_ascii_whitespace()); + /// assert!(!g.is_ascii_whitespace()); + /// assert!(!zero.is_ascii_whitespace()); + /// assert!(!percent.is_ascii_whitespace()); + /// assert!(space.is_ascii_whitespace()); + /// assert!(lf.is_ascii_whitespace()); + /// assert!(!esc.is_ascii_whitespace()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_whitespace(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_whitespace() + } + + /// Checks if the value is an ASCII control character: + /// U+0000 NUL ... U+001F UNIT SEPARATOR, or U+007F DELETE. + /// Note that most ASCII whitespace characters are control + /// characters, but SPACE is not. 
+ /// + /// # Examples + /// + /// ``` + /// let uppercase_a = 'A'; + /// let uppercase_g = 'G'; + /// let a = 'a'; + /// let g = 'g'; + /// let zero = '0'; + /// let percent = '%'; + /// let space = ' '; + /// let lf = '\n'; + /// let esc: char = 0x1b_u8.into(); + /// + /// assert!(!uppercase_a.is_ascii_control()); + /// assert!(!uppercase_g.is_ascii_control()); + /// assert!(!a.is_ascii_control()); + /// assert!(!g.is_ascii_control()); + /// assert!(!zero.is_ascii_control()); + /// assert!(!percent.is_ascii_control()); + /// assert!(!space.is_ascii_control()); + /// assert!(lf.is_ascii_control()); + /// assert!(esc.is_ascii_control()); + /// ``` + #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] + #[inline] + pub fn is_ascii_control(&self) -> bool { + self.is_ascii() && (*self as u8).is_ascii_control() + } +} diff --git a/src/libcore/char/mod.rs b/src/libcore/char/mod.rs new file mode 100644 index 000000000000..7e1313747eef --- /dev/null +++ b/src/libcore/char/mod.rs @@ -0,0 +1,504 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A character type. +//! +//! The `char` type represents a single character. More specifically, since +//! 'character' isn't a well-defined concept in Unicode, `char` is a '[Unicode +//! scalar value]', which is similar to, but not the same as, a '[Unicode code +//! point]'. +//! +//! [Unicode scalar value]: http://www.unicode.org/glossary/#unicode_scalar_value +//! [Unicode code point]: http://www.unicode.org/glossary/#code_point +//! +//! This module exists for technical reasons, the primary documentation for +//! `char` is directly on [the `char` primitive type](../../std/primitive.char.html) +//! 
itself. +//! +//! This module is the home of the iterator implementations for the iterators +//! implemented on `char`, as well as some useful constants and conversion +//! functions that convert various types to `char`. + +#![allow(non_snake_case)] +#![stable(feature = "core_char", since = "1.2.0")] + +mod convert; +mod decode; +mod methods; + +// stable re-exports +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::convert::{from_u32, from_digit}; +#[stable(feature = "char_from_unchecked", since = "1.5.0")] +pub use self::convert::from_u32_unchecked; +#[stable(feature = "char_from_str", since = "1.20.0")] +pub use self::convert::ParseCharError; +#[unstable(feature = "try_from", issue = "33417")] +pub use self::convert::CharTryFromError; +#[stable(feature = "decode_utf16", since = "1.9.0")] +pub use self::decode::{decode_utf16, DecodeUtf16, DecodeUtf16Error}; + +// unstable re-exports +#[unstable(feature = "unicode_version", issue = "49726")] +pub use unicode::tables::UNICODE_VERSION; +#[unstable(feature = "unicode_version", issue = "49726")] +pub use unicode::version::UnicodeVersion; + +use fmt::{self, Write}; +use iter::FusedIterator; + +// UTF-8 ranges and tags for encoding characters +const TAG_CONT: u8 = 0b1000_0000; +const TAG_TWO_B: u8 = 0b1100_0000; +const TAG_THREE_B: u8 = 0b1110_0000; +const TAG_FOUR_B: u8 = 0b1111_0000; +const MAX_ONE_B: u32 = 0x80; +const MAX_TWO_B: u32 = 0x800; +const MAX_THREE_B: u32 = 0x10000; + +/* + Lu Uppercase_Letter an uppercase letter + Ll Lowercase_Letter a lowercase letter + Lt Titlecase_Letter a digraphic character, with first part uppercase + Lm Modifier_Letter a modifier letter + Lo Other_Letter other letters, including syllables and ideographs + Mn Nonspacing_Mark a nonspacing combining mark (zero advance width) + Mc Spacing_Mark a spacing combining mark (positive advance width) + Me Enclosing_Mark an enclosing combining mark + Nd Decimal_Number a decimal digit + Nl Letter_Number a letterlike numeric character 
+ No Other_Number a numeric character of other type + Pc Connector_Punctuation a connecting punctuation mark, like a tie + Pd Dash_Punctuation a dash or hyphen punctuation mark + Ps Open_Punctuation an opening punctuation mark (of a pair) + Pe Close_Punctuation a closing punctuation mark (of a pair) + Pi Initial_Punctuation an initial quotation mark + Pf Final_Punctuation a final quotation mark + Po Other_Punctuation a punctuation mark of other type + Sm Math_Symbol a symbol of primarily mathematical use + Sc Currency_Symbol a currency sign + Sk Modifier_Symbol a non-letterlike modifier symbol + So Other_Symbol a symbol of other type + Zs Space_Separator a space character (of various non-zero widths) + Zl Line_Separator U+2028 LINE SEPARATOR only + Zp Paragraph_Separator U+2029 PARAGRAPH SEPARATOR only + Cc Control a C0 or C1 control code + Cf Format a format control character + Cs Surrogate a surrogate code point + Co Private_Use a private-use character + Cn Unassigned a reserved unassigned code point or a noncharacter +*/ + +/// The highest valid code point a `char` can have. +/// +/// A [`char`] is a [Unicode Scalar Value], which means that it is a [Code +/// Point], but only ones within a certain range. `MAX` is the highest valid +/// code point that's a valid [Unicode Scalar Value]. +/// +/// [`char`]: ../../std/primitive.char.html +/// [Unicode Scalar Value]: http://www.unicode.org/glossary/#unicode_scalar_value +/// [Code Point]: http://www.unicode.org/glossary/#code_point +#[stable(feature = "rust1", since = "1.0.0")] +pub const MAX: char = '\u{10ffff}'; + +/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a +/// decoding error. +/// +/// It can occur, for example, when giving ill-formed UTF-8 bytes to +/// [`String::from_utf8_lossy`](../../std/string/struct.String.html#method.from_utf8_lossy). 
+#[stable(feature = "decode_utf16", since = "1.9.0")] +pub const REPLACEMENT_CHARACTER: char = '\u{FFFD}'; + +/// Returns an iterator that yields the hexadecimal Unicode escape of a +/// character, as `char`s. +/// +/// This `struct` is created by the [`escape_unicode`] method on [`char`]. See +/// its documentation for more. +/// +/// [`escape_unicode`]: ../../std/primitive.char.html#method.escape_unicode +/// [`char`]: ../../std/primitive.char.html +#[derive(Clone, Debug)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct EscapeUnicode { + c: char, + state: EscapeUnicodeState, + + // The index of the next hex digit to be printed (0 if none), + // i.e. the number of remaining hex digits to be printed; + // increasing from the least significant digit: 0x543210 + hex_digit_idx: usize, +} + +// The enum values are ordered so that their representation is the +// same as the remaining length (besides the hexadecimal digits). This +// likely makes `len()` a single load from memory) and inline-worth. 
+#[derive(Clone, Debug)] +enum EscapeUnicodeState { + Done, + RightBrace, + Value, + LeftBrace, + Type, + Backslash, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for EscapeUnicode { + type Item = char; + + fn next(&mut self) -> Option { + match self.state { + EscapeUnicodeState::Backslash => { + self.state = EscapeUnicodeState::Type; + Some('\\') + } + EscapeUnicodeState::Type => { + self.state = EscapeUnicodeState::LeftBrace; + Some('u') + } + EscapeUnicodeState::LeftBrace => { + self.state = EscapeUnicodeState::Value; + Some('{') + } + EscapeUnicodeState::Value => { + let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf; + let c = from_digit(hex_digit, 16).unwrap(); + if self.hex_digit_idx == 0 { + self.state = EscapeUnicodeState::RightBrace; + } else { + self.hex_digit_idx -= 1; + } + Some(c) + } + EscapeUnicodeState::RightBrace => { + self.state = EscapeUnicodeState::Done; + Some('}') + } + EscapeUnicodeState::Done => None, + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let n = self.len(); + (n, Some(n)) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + fn last(self) -> Option { + match self.state { + EscapeUnicodeState::Done => None, + + EscapeUnicodeState::RightBrace | + EscapeUnicodeState::Value | + EscapeUnicodeState::LeftBrace | + EscapeUnicodeState::Type | + EscapeUnicodeState::Backslash => Some('}'), + } + } +} + +#[stable(feature = "exact_size_escape", since = "1.11.0")] +impl ExactSizeIterator for EscapeUnicode { + #[inline] + fn len(&self) -> usize { + // The match is a single memory access with no branching + self.hex_digit_idx + match self.state { + EscapeUnicodeState::Done => 0, + EscapeUnicodeState::RightBrace => 1, + EscapeUnicodeState::Value => 2, + EscapeUnicodeState::LeftBrace => 3, + EscapeUnicodeState::Type => 4, + EscapeUnicodeState::Backslash => 5, + } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for EscapeUnicode {} + 
+#[stable(feature = "char_struct_display", since = "1.16.0")] +impl fmt::Display for EscapeUnicode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for c in self.clone() { + f.write_char(c)?; + } + Ok(()) + } +} + +/// An iterator that yields the literal escape code of a `char`. +/// +/// This `struct` is created by the [`escape_default`] method on [`char`]. See +/// its documentation for more. +/// +/// [`escape_default`]: ../../std/primitive.char.html#method.escape_default +/// [`char`]: ../../std/primitive.char.html +#[derive(Clone, Debug)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct EscapeDefault { + state: EscapeDefaultState +} + +#[derive(Clone, Debug)] +enum EscapeDefaultState { + Done, + Char(char), + Backslash(char), + Unicode(EscapeUnicode), +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for EscapeDefault { + type Item = char; + + fn next(&mut self) -> Option { + match self.state { + EscapeDefaultState::Backslash(c) => { + self.state = EscapeDefaultState::Char(c); + Some('\\') + } + EscapeDefaultState::Char(c) => { + self.state = EscapeDefaultState::Done; + Some(c) + } + EscapeDefaultState::Done => None, + EscapeDefaultState::Unicode(ref mut iter) => iter.next(), + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let n = self.len(); + (n, Some(n)) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + fn nth(&mut self, n: usize) -> Option { + match self.state { + EscapeDefaultState::Backslash(c) if n == 0 => { + self.state = EscapeDefaultState::Char(c); + Some('\\') + }, + EscapeDefaultState::Backslash(c) if n == 1 => { + self.state = EscapeDefaultState::Done; + Some(c) + }, + EscapeDefaultState::Backslash(_) => { + self.state = EscapeDefaultState::Done; + None + }, + EscapeDefaultState::Char(c) => { + self.state = EscapeDefaultState::Done; + + if n == 0 { + Some(c) + } else { + None + } + }, + EscapeDefaultState::Done => None, + EscapeDefaultState::Unicode(ref mut i) => i.nth(n), 
+ } + } + + fn last(self) -> Option { + match self.state { + EscapeDefaultState::Unicode(iter) => iter.last(), + EscapeDefaultState::Done => None, + EscapeDefaultState::Backslash(c) | EscapeDefaultState::Char(c) => Some(c), + } + } +} + +#[stable(feature = "exact_size_escape", since = "1.11.0")] +impl ExactSizeIterator for EscapeDefault { + fn len(&self) -> usize { + match self.state { + EscapeDefaultState::Done => 0, + EscapeDefaultState::Char(_) => 1, + EscapeDefaultState::Backslash(_) => 2, + EscapeDefaultState::Unicode(ref iter) => iter.len(), + } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for EscapeDefault {} + +#[stable(feature = "char_struct_display", since = "1.16.0")] +impl fmt::Display for EscapeDefault { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for c in self.clone() { + f.write_char(c)?; + } + Ok(()) + } +} + +/// An iterator that yields the literal escape code of a `char`. +/// +/// This `struct` is created by the [`escape_debug`] method on [`char`]. See its +/// documentation for more. 
+/// +/// [`escape_debug`]: ../../std/primitive.char.html#method.escape_debug +/// [`char`]: ../../std/primitive.char.html +#[stable(feature = "char_escape_debug", since = "1.20.0")] +#[derive(Clone, Debug)] +pub struct EscapeDebug(EscapeDefault); + +#[stable(feature = "char_escape_debug", since = "1.20.0")] +impl Iterator for EscapeDebug { + type Item = char; + fn next(&mut self) -> Option { self.0.next() } + fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } +} + +#[stable(feature = "char_escape_debug", since = "1.20.0")] +impl ExactSizeIterator for EscapeDebug { } + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for EscapeDebug {} + +#[stable(feature = "char_escape_debug", since = "1.20.0")] +impl fmt::Display for EscapeDebug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +/// Returns an iterator that yields the lowercase equivalent of a `char`. +/// +/// This `struct` is created by the [`to_lowercase`] method on [`char`]. See +/// its documentation for more. +/// +/// [`to_lowercase`]: ../../std/primitive.char.html#method.to_lowercase +/// [`char`]: ../../std/primitive.char.html +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug, Clone)] +pub struct ToLowercase(CaseMappingIter); + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for ToLowercase { + type Item = char; + fn next(&mut self) -> Option { + self.0.next() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for ToLowercase {} + +/// Returns an iterator that yields the uppercase equivalent of a `char`. +/// +/// This `struct` is created by the [`to_uppercase`] method on [`char`]. See +/// its documentation for more. 
+/// +/// [`to_uppercase`]: ../../std/primitive.char.html#method.to_uppercase +/// [`char`]: ../../std/primitive.char.html +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug, Clone)] +pub struct ToUppercase(CaseMappingIter); + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for ToUppercase { + type Item = char; + fn next(&mut self) -> Option { + self.0.next() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for ToUppercase {} + +#[derive(Debug, Clone)] +enum CaseMappingIter { + Three(char, char, char), + Two(char, char), + One(char), + Zero, +} + +impl CaseMappingIter { + fn new(chars: [char; 3]) -> CaseMappingIter { + if chars[2] == '\0' { + if chars[1] == '\0' { + CaseMappingIter::One(chars[0]) // Including if chars[0] == '\0' + } else { + CaseMappingIter::Two(chars[0], chars[1]) + } + } else { + CaseMappingIter::Three(chars[0], chars[1], chars[2]) + } + } +} + +impl Iterator for CaseMappingIter { + type Item = char; + fn next(&mut self) -> Option { + match *self { + CaseMappingIter::Three(a, b, c) => { + *self = CaseMappingIter::Two(b, c); + Some(a) + } + CaseMappingIter::Two(b, c) => { + *self = CaseMappingIter::One(c); + Some(b) + } + CaseMappingIter::One(c) => { + *self = CaseMappingIter::Zero; + Some(c) + } + CaseMappingIter::Zero => None, + } + } +} + +impl fmt::Display for CaseMappingIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CaseMappingIter::Three(a, b, c) => { + f.write_char(a)?; + f.write_char(b)?; + f.write_char(c) + } + CaseMappingIter::Two(b, c) => { + f.write_char(b)?; + f.write_char(c) + } + CaseMappingIter::One(c) => { + f.write_char(c) + } + CaseMappingIter::Zero => Ok(()), + } + } +} + +#[stable(feature = "char_struct_display", since = "1.16.0")] +impl fmt::Display for ToLowercase { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +#[stable(feature = "char_struct_display", since = "1.16.0")] +impl 
fmt::Display for ToUppercase { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} diff --git a/src/libcore/char_private.rs b/src/libcore/char_private.rs deleted file mode 100644 index e6803745ab54..000000000000 --- a/src/libcore/char_private.rs +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// NOTE: The following code was generated by "src/etc/char_private.py", -// do not edit directly! - -fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], - normal: &[u8]) -> bool { - let xupper = (x >> 8) as u8; - let mut lowerstart = 0; - for &(upper, lowercount) in singletonuppers { - let lowerend = lowerstart + lowercount as usize; - if xupper == upper { - for &lower in &singletonlowers[lowerstart..lowerend] { - if lower == x as u8 { - return false; - } - } - } else if xupper < upper { - break; - } - lowerstart = lowerend; - } - - let mut x = x as i32; - let mut normal = normal.iter().cloned(); - let mut current = true; - while let Some(v) = normal.next() { - let len = if v & 0x80 != 0 { - ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32 - } else { - v as i32 - }; - x -= len; - if x < 0 { - break; - } - current = !current; - } - current -} - -pub(crate) fn is_printable(x: char) -> bool { - let x = x as u32; - let lower = x as u16; - if x < 0x10000 { - check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0) - } else if x < 0x20000 { - check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1) - } else { - if 0x2a6d7 <= x && x < 0x2a700 { - return false; - } - if 0x2b735 <= x && x < 0x2b740 { - return false; - } - if 0x2b81e <= x && x < 0x2b820 { - return false; - } - if 0x2cea2 
<= x && x < 0x2ceb0 { - return false; - } - if 0x2ebe1 <= x && x < 0x2f800 { - return false; - } - if 0x2fa1e <= x && x < 0xe0100 { - return false; - } - if 0xe01f0 <= x && x < 0x110000 { - return false; - } - true - } -} - -const SINGLETONS0U: &'static [(u8, u8)] = &[ - (0x00, 1), - (0x03, 5), - (0x05, 8), - (0x06, 3), - (0x07, 4), - (0x08, 8), - (0x09, 16), - (0x0a, 27), - (0x0b, 25), - (0x0c, 22), - (0x0d, 18), - (0x0e, 22), - (0x0f, 4), - (0x10, 3), - (0x12, 18), - (0x13, 9), - (0x16, 1), - (0x17, 5), - (0x18, 2), - (0x19, 3), - (0x1a, 7), - (0x1d, 1), - (0x1f, 22), - (0x20, 3), - (0x2b, 5), - (0x2c, 2), - (0x2d, 11), - (0x2e, 1), - (0x30, 3), - (0x31, 3), - (0x32, 2), - (0xa7, 1), - (0xa8, 2), - (0xa9, 2), - (0xaa, 4), - (0xab, 8), - (0xfa, 2), - (0xfb, 5), - (0xfd, 4), - (0xfe, 3), - (0xff, 9), -]; -const SINGLETONS0L: &'static [u8] = &[ - 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57, - 0x58, 0x60, 0x88, 0x8b, 0x8c, 0x90, 0x1c, 0x1d, - 0xdd, 0x0e, 0x0f, 0x4b, 0x4c, 0x2e, 0x2f, 0x3f, - 0x5c, 0x5d, 0x5f, 0xb5, 0xe2, 0x84, 0x8d, 0x8e, - 0x91, 0x92, 0xa9, 0xb1, 0xba, 0xbb, 0xc5, 0xc6, - 0xc9, 0xca, 0xde, 0xe4, 0xe5, 0x04, 0x11, 0x12, - 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b, 0x3d, 0x49, - 0x4a, 0x5d, 0x84, 0x8e, 0x92, 0xa9, 0xb1, 0xb4, - 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf, 0xe4, 0xe5, - 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, - 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49, 0x4a, 0x5e, - 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d, 0xc9, 0xce, - 0xcf, 0x04, 0x0d, 0x11, 0x29, 0x45, 0x49, 0x57, - 0x64, 0x65, 0x84, 0x8d, 0x91, 0xa9, 0xb4, 0xba, - 0xbb, 0xc5, 0xc9, 0xdf, 0xe4, 0xe5, 0xf0, 0x04, - 0x0d, 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x81, - 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5, 0xd7, 0xf0, - 0xf1, 0x83, 0x85, 0x86, 0x89, 0x8b, 0x8c, 0x98, - 0xa0, 0xa4, 0xa6, 0xa8, 0xa9, 0xac, 0xba, 0xbe, - 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb, 0x48, - 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, 0x4e, - 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f, - 0xb1, 0xb6, 0xb7, 0xbf, 0xc1, 0xc6, 0xc7, 
0xd7, - 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, 0xfe, - 0xff, 0x80, 0x0d, 0x6d, 0x71, 0xde, 0xdf, 0x0e, - 0x0f, 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, - 0x7e, 0xae, 0xaf, 0xfa, 0x16, 0x17, 0x1e, 0x1f, - 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e, - 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0, - 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, 0x75, 0x96, - 0x97, 0xc9, 0x2f, 0x5f, 0x26, 0x2e, 0x2f, 0xa7, - 0xaf, 0xb7, 0xbf, 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, - 0x40, 0x97, 0x98, 0x2f, 0x30, 0x8f, 0x1f, 0xff, - 0xaf, 0xfe, 0xff, 0xce, 0xff, 0x4e, 0x4f, 0x5a, - 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, 0x2f, 0xee, - 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42, 0x45, - 0x90, 0x91, 0xfe, 0xff, 0x53, 0x67, 0x75, 0xc8, - 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff, -]; -const SINGLETONS1U: &'static [(u8, u8)] = &[ - (0x00, 6), - (0x01, 1), - (0x03, 1), - (0x04, 2), - (0x08, 8), - (0x09, 2), - (0x0a, 3), - (0x0b, 2), - (0x10, 1), - (0x11, 4), - (0x12, 5), - (0x13, 18), - (0x14, 2), - (0x15, 2), - (0x1a, 3), - (0x1c, 5), - (0x1d, 4), - (0x24, 1), - (0x6a, 3), - (0x6b, 2), - (0xbc, 2), - (0xd1, 2), - (0xd4, 12), - (0xd5, 9), - (0xd6, 2), - (0xd7, 2), - (0xda, 1), - (0xe0, 5), - (0xe8, 2), - (0xee, 32), - (0xf0, 4), - (0xf1, 1), - (0xf9, 1), -]; -const SINGLETONS1L: &'static [u8] = &[ - 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e, - 0x9e, 0x9f, 0x06, 0x07, 0x09, 0x36, 0x3d, 0x3e, - 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18, 0x56, - 0x57, 0xbd, 0x35, 0xce, 0xcf, 0xe0, 0x12, 0x87, - 0x89, 0x8e, 0x9e, 0x04, 0x0d, 0x0e, 0x11, 0x12, - 0x29, 0x31, 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49, - 0x4a, 0x4e, 0x4f, 0x64, 0x65, 0x5a, 0x5c, 0xb6, - 0xb7, 0x84, 0x85, 0x9d, 0x09, 0x37, 0x90, 0x91, - 0xa8, 0x07, 0x0a, 0x3b, 0x3e, 0x6f, 0x5f, 0xee, - 0xef, 0x5a, 0x62, 0x9a, 0x9b, 0x27, 0x28, 0x55, - 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8, 0xad, - 0xba, 0xbc, 0xc4, 0x06, 0x0b, 0x0c, 0x15, 0x1d, - 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, 0xcc, 0xcd, - 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0xc5, 0xc6, - 0x04, 0x20, 
0x23, 0x25, 0x26, 0x28, 0x33, 0x38, - 0x3a, 0x48, 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, - 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, - 0x6b, 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, - 0xaf, 0xb0, 0xc0, 0xd0, 0x2f, 0x3f, -]; -const NORMAL0: &'static [u8] = &[ - 0x00, 0x20, - 0x5f, 0x22, - 0x82, 0xdf, 0x04, - 0x82, 0x44, 0x08, - 0x1b, 0x05, - 0x05, 0x11, - 0x81, 0xac, 0x0e, - 0x3b, 0x05, - 0x6b, 0x35, - 0x1e, 0x16, - 0x80, 0xdf, 0x03, - 0x19, 0x08, - 0x01, 0x04, - 0x22, 0x03, - 0x0a, 0x04, - 0x34, 0x04, - 0x07, 0x03, - 0x01, 0x07, - 0x06, 0x07, - 0x10, 0x0b, - 0x50, 0x0f, - 0x12, 0x07, - 0x55, 0x08, - 0x02, 0x04, - 0x1c, 0x0a, - 0x09, 0x03, - 0x08, 0x03, - 0x07, 0x03, - 0x02, 0x03, - 0x03, 0x03, - 0x0c, 0x04, - 0x05, 0x03, - 0x0b, 0x06, - 0x01, 0x0e, - 0x15, 0x05, - 0x3a, 0x03, - 0x11, 0x07, - 0x06, 0x05, - 0x10, 0x08, - 0x56, 0x07, - 0x02, 0x07, - 0x15, 0x0d, - 0x50, 0x04, - 0x43, 0x03, - 0x2d, 0x03, - 0x01, 0x04, - 0x11, 0x06, - 0x0f, 0x0c, - 0x3a, 0x04, - 0x1d, 0x25, - 0x0d, 0x06, - 0x4c, 0x20, - 0x6d, 0x04, - 0x6a, 0x25, - 0x80, 0xc8, 0x05, - 0x82, 0xb0, 0x03, - 0x1a, 0x06, - 0x82, 0xfd, 0x03, - 0x59, 0x07, - 0x15, 0x0b, - 0x17, 0x09, - 0x14, 0x0c, - 0x14, 0x0c, - 0x6a, 0x06, - 0x0a, 0x06, - 0x1a, 0x06, - 0x58, 0x08, - 0x2b, 0x05, - 0x46, 0x0a, - 0x2c, 0x04, - 0x0c, 0x04, - 0x01, 0x03, - 0x31, 0x0b, - 0x2c, 0x04, - 0x1a, 0x06, - 0x0b, 0x03, - 0x80, 0xac, 0x06, - 0x0a, 0x06, - 0x1f, 0x41, - 0x4c, 0x04, - 0x2d, 0x03, - 0x74, 0x08, - 0x3c, 0x03, - 0x0f, 0x03, - 0x3c, 0x37, - 0x08, 0x08, - 0x2a, 0x06, - 0x82, 0xff, 0x11, - 0x18, 0x08, - 0x2f, 0x11, - 0x2d, 0x03, - 0x20, 0x10, - 0x21, 0x0f, - 0x80, 0x8c, 0x04, - 0x82, 0x97, 0x19, - 0x0b, 0x15, - 0x87, 0x5a, 0x03, - 0x16, 0x19, - 0x04, 0x10, - 0x80, 0xf4, 0x05, - 0x2f, 0x05, - 0x3b, 0x07, - 0x02, 0x0e, - 0x18, 0x09, - 0x80, 0xaa, 0x36, - 0x74, 0x0c, - 0x80, 0xd6, 0x1a, - 0x0c, 0x05, - 0x80, 0xff, 0x05, - 0x80, 0xb6, 0x05, - 0x24, 0x0c, - 0x9b, 0xc6, 0x0a, - 0xd2, 0x2b, 0x15, - 0x84, 0x8d, 0x03, - 0x37, 0x09, - 
0x81, 0x5c, 0x14, - 0x80, 0xb8, 0x08, - 0x80, 0xb8, 0x3f, - 0x35, 0x04, - 0x0a, 0x06, - 0x38, 0x08, - 0x46, 0x08, - 0x0c, 0x06, - 0x74, 0x0b, - 0x1e, 0x03, - 0x5a, 0x04, - 0x59, 0x09, - 0x80, 0x83, 0x18, - 0x1c, 0x0a, - 0x16, 0x09, - 0x46, 0x0a, - 0x80, 0x8a, 0x06, - 0xab, 0xa4, 0x0c, - 0x17, 0x04, - 0x31, 0xa1, 0x04, - 0x81, 0xda, 0x26, - 0x07, 0x0c, - 0x05, 0x05, - 0x80, 0xa5, 0x11, - 0x81, 0x6d, 0x10, - 0x78, 0x28, - 0x2a, 0x06, - 0x4c, 0x04, - 0x80, 0x8d, 0x04, - 0x80, 0xbe, 0x03, - 0x1b, 0x03, - 0x0f, 0x0d, -]; -const NORMAL1: &'static [u8] = &[ - 0x5e, 0x22, - 0x7b, 0x05, - 0x03, 0x04, - 0x2d, 0x03, - 0x65, 0x04, - 0x01, 0x2f, - 0x2e, 0x80, 0x82, - 0x1d, 0x03, - 0x31, 0x0f, - 0x1c, 0x04, - 0x24, 0x09, - 0x1e, 0x05, - 0x2b, 0x05, - 0x44, 0x04, - 0x0e, 0x2a, - 0x80, 0xaa, 0x06, - 0x24, 0x04, - 0x24, 0x04, - 0x28, 0x08, - 0x34, 0x0b, - 0x01, 0x80, 0x90, - 0x81, 0x37, 0x09, - 0x16, 0x0a, - 0x08, 0x80, 0x98, - 0x39, 0x03, - 0x63, 0x08, - 0x09, 0x30, - 0x16, 0x05, - 0x21, 0x03, - 0x1b, 0x05, - 0x01, 0x40, - 0x38, 0x04, - 0x4b, 0x05, - 0x28, 0x04, - 0x03, 0x04, - 0x09, 0x08, - 0x09, 0x07, - 0x40, 0x20, - 0x27, 0x04, - 0x0c, 0x09, - 0x36, 0x03, - 0x3a, 0x05, - 0x1a, 0x07, - 0x04, 0x0c, - 0x07, 0x50, - 0x49, 0x37, - 0x33, 0x0d, - 0x33, 0x07, - 0x06, 0x81, 0x60, - 0x1f, 0x81, 0x81, - 0x4e, 0x04, - 0x1e, 0x0f, - 0x43, 0x0e, - 0x19, 0x07, - 0x0a, 0x06, - 0x44, 0x0c, - 0x27, 0x09, - 0x75, 0x0b, - 0x3f, 0x41, - 0x2a, 0x06, - 0x3b, 0x05, - 0x0a, 0x06, - 0x51, 0x06, - 0x01, 0x05, - 0x10, 0x03, - 0x05, 0x80, 0x8b, - 0x5e, 0x22, - 0x48, 0x08, - 0x0a, 0x80, 0xa6, - 0x5e, 0x22, - 0x45, 0x0b, - 0x0a, 0x06, - 0x0d, 0x13, - 0x38, 0x08, - 0x0a, 0x36, - 0x1a, 0x03, - 0x0f, 0x04, - 0x10, 0x81, 0x60, - 0x53, 0x0c, - 0x01, 0x81, 0x00, - 0x48, 0x08, - 0x53, 0x1d, - 0x39, 0x81, 0x07, - 0x46, 0x0a, - 0x1d, 0x03, - 0x47, 0x49, - 0x37, 0x03, - 0x0e, 0x08, - 0x0a, 0x82, 0xa6, - 0x83, 0x9a, 0x66, - 0x75, 0x0b, - 0x80, 0xc4, 0x8a, 0xbc, - 0x84, 0x2f, 0x8f, 0xd1, - 0x82, 0x47, 0xa1, 0xb9, - 
0x82, 0x39, 0x07, - 0x2a, 0x04, - 0x02, 0x60, - 0x26, 0x0a, - 0x46, 0x0a, - 0x28, 0x05, - 0x13, 0x83, 0x70, - 0x45, 0x0b, - 0x2f, 0x10, - 0x11, 0x40, - 0x02, 0x1e, - 0x97, 0xed, 0x13, - 0x82, 0xf3, 0xa5, 0x0d, - 0x81, 0x1f, 0x51, - 0x81, 0x8c, 0x89, 0x04, - 0x6b, 0x05, - 0x0d, 0x03, - 0x09, 0x07, - 0x10, 0x93, 0x60, - 0x80, 0xf6, 0x0a, - 0x73, 0x08, - 0x6e, 0x17, - 0x46, 0x80, 0xba, - 0x57, 0x09, - 0x12, 0x80, 0x8e, - 0x81, 0x47, 0x03, - 0x85, 0x42, 0x0f, - 0x15, 0x85, 0x50, - 0x2b, 0x87, 0xd5, - 0x80, 0xd7, 0x29, - 0x4b, 0x05, - 0x0a, 0x04, - 0x02, 0x84, 0xa0, - 0x3c, 0x06, - 0x01, 0x04, - 0x55, 0x05, - 0x1b, 0x34, - 0x02, 0x81, 0x0e, - 0x2c, 0x04, - 0x64, 0x0c, - 0x56, 0x0a, - 0x0d, 0x03, - 0x5c, 0x04, - 0x3d, 0x39, - 0x1d, 0x0d, - 0x2c, 0x04, - 0x09, 0x07, - 0x02, 0x0e, - 0x06, 0x80, 0x9a, - 0x83, 0xd5, 0x0b, - 0x0d, 0x03, - 0x09, 0x07, - 0x74, 0x0c, - 0x55, 0x2b, - 0x0c, 0x04, - 0x38, 0x08, - 0x0a, 0x06, - 0x28, 0x08, - 0x1e, 0x52, - 0x0c, 0x04, - 0x3d, 0x03, - 0x1c, 0x14, - 0x18, 0x28, - 0x01, 0x0f, - 0x17, 0x86, 0x19, -]; diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs index 826420a0c001..3b15ba2b4ab1 100644 --- a/src/libcore/clone.rs +++ b/src/libcore/clone.rs @@ -87,6 +87,23 @@ /// fn clone(&self) -> Stats { *self } /// } /// ``` +/// +/// ## Additional implementors +/// +/// In addition to the [implementors listed below][impls], +/// the following types also implement `Clone`: +/// +/// * Function item types (i.e. the distinct types defined for each function) +/// * Function pointer types (e.g. `fn() -> i32`) +/// * Array types, for all sizes, if the item type also implements `Clone` (e.g. `[i32; 123456]`) +/// * Tuple types, if each component also implements `Clone` (e.g. `()`, `(i32, bool)`) +/// * Closure types, if they capture no value from the environment +/// or if all such captured values implement `Clone` themselves. 
+/// Note that variables captured by shared reference always implement `Clone` +/// (even if the referent doesn't), +/// while variables captured by mutable reference never implement `Clone`. +/// +/// [impls]: #implementors #[stable(feature = "rust1", since = "1.0.0")] #[lang = "clone"] pub trait Clone : Sized { @@ -100,6 +117,7 @@ pub trait Clone : Sized { /// assert_eq!("Hello", hello.clone()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "cloning is often expensive and is not expected to have side effects"] fn clone(&self) -> Self; /// Performs copy-assignment from `source`. @@ -130,3 +148,67 @@ pub struct AssertParamIsClone { _field: ::marker::PhantomData reason = "deriving hack, should not be public", issue = "0")] pub struct AssertParamIsCopy { _field: ::marker::PhantomData } + +/// Implementations of `Clone` for primitive types. +/// +/// Implementations that cannot be described in Rust +/// are implemented in `SelectionContext::copy_clone_conditions()` in librustc. +mod impls { + + use super::Clone; + + macro_rules! impl_clone { + ($($t:ty)*) => { + $( + #[stable(feature = "rust1", since = "1.0.0")] + impl Clone for $t { + #[inline] + fn clone(&self) -> Self { + *self + } + } + )* + } + } + + impl_clone! { + usize u8 u16 u32 u64 u128 + isize i8 i16 i32 i64 i128 + f32 f64 + bool char + } + + #[unstable(feature = "never_type", issue = "35121")] + impl Clone for ! { + #[inline] + fn clone(&self) -> Self { + *self + } + } + + #[stable(feature = "rust1", since = "1.0.0")] + impl Clone for *const T { + #[inline] + fn clone(&self) -> Self { + *self + } + } + + #[stable(feature = "rust1", since = "1.0.0")] + impl Clone for *mut T { + #[inline] + fn clone(&self) -> Self { + *self + } + } + + // Shared references can be cloned, but mutable references *cannot*! 
+ #[stable(feature = "rust1", since = "1.0.0")] + impl<'a, T: ?Sized> Clone for &'a T { + #[inline] + fn clone(&self) -> Self { + *self + } + } + +} diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs index 266cae3c122f..58d6c4f5e092 100644 --- a/src/libcore/cmp.rs +++ b/src/libcore/cmp.rs @@ -106,7 +106,12 @@ use self::Ordering::*; /// ``` #[lang = "eq"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "can't compare `{Self}` with `{Rhs}`"] +#[doc(alias = "==")] +#[doc(alias = "!=")] +#[rustc_on_unimplemented( + message="can't compare `{Self}` with `{Rhs}`", + label="no implementation for `{Self} == {Rhs}`", +)] pub trait PartialEq { /// This method tests for `self` and `other` values to be equal, and is used /// by `==`. @@ -160,6 +165,8 @@ pub trait PartialEq { /// } /// impl Eq for Book {} /// ``` +#[doc(alias = "==")] +#[doc(alias = "!=")] #[stable(feature = "rust1", since = "1.0.0")] pub trait Eq: PartialEq { // this method is used solely by #[deriving] to assert @@ -343,7 +350,7 @@ impl Ordering { /// v.sort_by_key(|&num| (num > 3, Reverse(num))); /// assert_eq!(v, vec![3, 2, 1, 6, 5, 4]); /// ``` -#[derive(PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug, Copy, Clone, Default, Hash)] #[stable(feature = "reverse_cmp_key", since = "1.19.0")] pub struct Reverse(#[stable(feature = "reverse_cmp_key", since = "1.19.0")] pub T); @@ -427,6 +434,11 @@ impl Ord for Reverse { /// } /// } /// ``` +#[lang = "ord"] +#[doc(alias = "<")] +#[doc(alias = ">")] +#[doc(alias = "<=")] +#[doc(alias = ">=")] #[stable(feature = "rust1", since = "1.0.0")] pub trait Ord: Eq + PartialOrd { /// This method returns an `Ordering` between `self` and `other`. 
@@ -457,6 +469,7 @@ pub trait Ord: Eq + PartialOrd { /// assert_eq!(2, 2.max(2)); /// ``` #[stable(feature = "ord_max_min", since = "1.21.0")] + #[inline] fn max(self, other: Self) -> Self where Self: Sized { if other >= self { other } else { self } @@ -473,6 +486,7 @@ pub trait Ord: Eq + PartialOrd { /// assert_eq!(2, 2.min(2)); /// ``` #[stable(feature = "ord_max_min", since = "1.21.0")] + #[inline] fn min(self, other: Self) -> Self where Self: Sized { if self <= other { self } else { other } @@ -596,9 +610,16 @@ impl PartialOrd for Ordering { /// assert_eq!(x < y, true); /// assert_eq!(x.lt(&y), true); /// ``` -#[lang = "ord"] +#[lang = "partial_ord"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "can't compare `{Self}` with `{Rhs}`"] +#[doc(alias = ">")] +#[doc(alias = "<")] +#[doc(alias = "<=")] +#[doc(alias = ">=")] +#[rustc_on_unimplemented( + message="can't compare `{Self}` with `{Rhs}`", + label="no implementation for `{Self} < {Rhs}` and `{Self} > {Rhs}`", +)] pub trait PartialOrd: PartialEq { /// This method returns an ordering between `self` and `other` values if one exists. /// diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index 41eb1eed4e46..67cb010c6b41 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -48,9 +48,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -use fmt; - -/// An identity function. Two things are important to note about this function: +/// An identity function. +/// +/// Two things are important to note about this function: /// /// - It is not always equivalent to a closure like `|x| x` since the /// closure may coerce `x` into a different type. 
@@ -62,80 +62,51 @@ use fmt; /// /// # Examples /// -/// Using `id` to do nothing among other interesting functions: +/// Using `identity` to do nothing among other interesting functions: /// /// ```rust /// #![feature(convert_id)] -/// use std::convert::id; +/// use std::convert::identity; /// /// fn manipulation(x: u32) -> u32 { /// // Let's assume that this function does something interesting. /// x + 1 /// } /// -/// let _arr = &[id, manipulation]; +/// let _arr = &[identity, manipulation]; /// ``` /// -/// Using `id` to get a function that changes nothing in a conditional: +/// Using `identity` to get a function that changes nothing in a conditional: /// /// ```rust /// #![feature(convert_id)] -/// use std::convert::id; +/// use std::convert::identity; /// /// # let condition = true; /// /// # fn manipulation(x: u32) -> u32 { x + 1 } /// -/// let do_stuff = if condition { manipulation } else { id }; +/// let do_stuff = if condition { manipulation } else { identity }; /// /// // do more interesting stuff.. 
/// /// let _results = do_stuff(42); /// ``` /// -/// Using `id` to concatenate an iterator of iterators: +/// Using `identity` to keep the `Some` variants of an iterator of `Option`: /// /// ```rust /// #![feature(convert_id)] -/// use std::convert::id; -/// -/// let vec_vec = vec![vec![1, 3, 4], vec![5, 6]]; -/// let iter_iter = vec_vec.into_iter().map(Vec::into_iter); -/// let concatenated = iter_iter.flat_map(id).collect::>(); -/// assert_eq!(vec![1, 3, 4, 5, 6], concatenated); -/// ``` -/// -/// Using `id` to keep the `Some` variants of an iterator of `Option`: -/// -/// ```rust -/// #![feature(convert_id)] -/// use std::convert::id; +/// use std::convert::identity; /// /// let iter = vec![Some(1), None, Some(3)].into_iter(); -/// let filtered = iter.filter_map(id).collect::>(); +/// let filtered = iter.filter_map(identity).collect::>(); /// assert_eq!(vec![1, 3], filtered); /// ``` #[unstable(feature = "convert_id", issue = "0")] #[inline] -pub fn id(x: T) -> T { x } +pub fn identity(x: T) -> T { x } -/// A type used as the error type for implementations of fallible conversion -/// traits in cases where conversions cannot actually fail. -/// -/// Because `Infallible` has no variants, a value of this type can never exist. -/// It is used only to satisfy trait signatures that expect an error type, and -/// signals to both the compiler and the user that the error case is impossible. -#[unstable(feature = "try_from", issue = "33417")] -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub enum Infallible {} - -#[unstable(feature = "try_from", issue = "33417")] -impl fmt::Display for Infallible { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - match *self { - } - } -} /// A cheap reference-to-reference conversion. Used to convert a value to a /// reference value within generic code. 
/// @@ -151,9 +122,9 @@ impl fmt::Display for Infallible { /// /// The key difference between the two traits is the intention: /// -/// - Use `AsRef` when goal is to simply convert into a reference -/// - Use `Borrow` when goal is related to writing code that is agnostic to the -/// type of borrow and if is reference or value +/// - Use `AsRef` when the goal is to simply convert into a reference +/// - Use `Borrow` when the goal is related to writing code that is agnostic to +/// the type of borrow and whether it is a reference or value /// /// See [the book][book] for a more detailed comparison. /// @@ -210,9 +181,9 @@ pub trait AsRef { /// /// # Generic Implementations /// -/// - `AsMut` auto-dereferences if the inner type is a reference or a mutable -/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type -/// `&mut Foo` or `&&mut Foo`) +/// - `AsMut` auto-dereferences if the inner type is a mutable reference +/// (e.g.: `foo.as_mut()` will work the same if `foo` has type `&mut Foo` +/// or `&mut &mut Foo`) /// /// # Examples /// @@ -451,7 +422,7 @@ impl<'a, T: ?Sized, U: ?Sized> AsRef for &'a mut T where T: AsRef } } -// FIXME (#23442): replace the above impls for &/&mut with the following more general one: +// FIXME (#45742): replace the above impls for &/&mut with the following more general one: // // As lifts over Deref // impl AsRef for D where D::Target: AsRef { // fn as_ref(&self) -> &U { @@ -468,7 +439,7 @@ impl<'a, T: ?Sized, U: ?Sized> AsMut for &'a mut T where T: AsMut } } -// FIXME (#23442): replace the above impl for &mut with the following more general one: +// FIXME (#45742): replace the above impl for &mut with the following more general one: // // AsMut lifts over DerefMut // impl AsMut for D where D::Target: AsMut { // fn as_mut(&mut self) -> &mut U { @@ -507,7 +478,7 @@ impl TryInto for T where U: TryFrom // with an uninhabited error type. 
#[unstable(feature = "try_from", issue = "33417")] impl TryFrom for T where T: From { - type Error = Infallible; + type Error = !; fn try_from(value: U) -> Result { Ok(T::from(value)) diff --git a/src/libcore/fmt/builders.rs b/src/libcore/fmt/builders.rs index a1f4c6995dae..3c5f934d4d8c 100644 --- a/src/libcore/fmt/builders.rs +++ b/src/libcore/fmt/builders.rs @@ -11,7 +11,7 @@ use fmt; struct PadAdapter<'a> { - buf: &'a mut (fmt::Write + 'a), + buf: &'a mut (dyn fmt::Write + 'a), on_newline: bool, } @@ -84,7 +84,7 @@ impl<'a> fmt::Write for PadAdapter<'a> { /// // prints "Foo { bar: 10, baz: "Hello World" }" /// println!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() }); /// ``` -#[must_use] +#[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugStruct<'a, 'b: 'a> { @@ -107,7 +107,7 @@ pub fn debug_struct_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, impl<'a, 'b: 'a> DebugStruct<'a, 'b> { /// Adds a new field to the generated struct output. #[stable(feature = "debug_builders", since = "1.2.0")] - pub fn field(&mut self, name: &str, value: &fmt::Debug) -> &mut DebugStruct<'a, 'b> { + pub fn field(&mut self, name: &str, value: &dyn fmt::Debug) -> &mut DebugStruct<'a, 'b> { self.result = self.result.and_then(|_| { let prefix = if self.has_fields { "," @@ -181,7 +181,7 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> { /// // prints "Foo(10, "Hello World")" /// println!("{:?}", Foo(10, "Hello World".to_string())); /// ``` -#[must_use] +#[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugTuple<'a, 'b: 'a> { @@ -204,7 +204,7 @@ pub fn debug_tuple_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, name: &str) -> D impl<'a, 'b: 'a> DebugTuple<'a, 'b> { /// Adds a new field to the generated tuple struct output. 
#[stable(feature = "debug_builders", since = "1.2.0")] - pub fn field(&mut self, value: &fmt::Debug) -> &mut DebugTuple<'a, 'b> { + pub fn field(&mut self, value: &dyn fmt::Debug) -> &mut DebugTuple<'a, 'b> { self.result = self.result.and_then(|_| { let (prefix, space) = if self.fields > 0 { (",", " ") @@ -258,7 +258,7 @@ struct DebugInner<'a, 'b: 'a> { } impl<'a, 'b: 'a> DebugInner<'a, 'b> { - fn entry(&mut self, entry: &fmt::Debug) { + fn entry(&mut self, entry: &dyn fmt::Debug) { self.result = self.result.and_then(|_| { if self.is_pretty() { let mut slot = None; @@ -319,7 +319,7 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> { /// // prints "{10, 11}" /// println!("{:?}", Foo(vec![10, 11])); /// ``` -#[must_use] +#[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugSet<'a, 'b: 'a> { @@ -340,7 +340,7 @@ pub fn debug_set_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugSet<'a, 'b impl<'a, 'b: 'a> DebugSet<'a, 'b> { /// Adds a new entry to the set output. #[stable(feature = "debug_builders", since = "1.2.0")] - pub fn entry(&mut self, entry: &fmt::Debug) -> &mut DebugSet<'a, 'b> { + pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut DebugSet<'a, 'b> { self.inner.entry(entry); self } @@ -390,7 +390,7 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> { /// // prints "[10, 11]" /// println!("{:?}", Foo(vec![10, 11])); /// ``` -#[must_use] +#[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugList<'a, 'b: 'a> { @@ -411,7 +411,7 @@ pub fn debug_list_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugList<'a, impl<'a, 'b: 'a> DebugList<'a, 'b> { /// Adds a new entry to the list output. 
#[stable(feature = "debug_builders", since = "1.2.0")] - pub fn entry(&mut self, entry: &fmt::Debug) -> &mut DebugList<'a, 'b> { + pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut DebugList<'a, 'b> { self.inner.entry(entry); self } @@ -461,7 +461,7 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> { /// // prints "{"A": 10, "B": 11}" /// println!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])); /// ``` -#[must_use] +#[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugMap<'a, 'b: 'a> { @@ -482,7 +482,7 @@ pub fn debug_map_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugMap<'a, 'b impl<'a, 'b: 'a> DebugMap<'a, 'b> { /// Adds a new entry to the map output. #[stable(feature = "debug_builders", since = "1.2.0")] - pub fn entry(&mut self, key: &fmt::Debug, value: &fmt::Debug) -> &mut DebugMap<'a, 'b> { + pub fn entry(&mut self, key: &dyn fmt::Debug, value: &dyn fmt::Debug) -> &mut DebugMap<'a, 'b> { self.result = self.result.and_then(|_| { if self.is_pretty() { let mut slot = None; diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index 65aacb23bd76..928f95e3ba2e 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -25,18 +25,19 @@ mod float; mod num; mod builders; -#[unstable(feature = "fmt_flags_align", issue = "27726")] +#[stable(feature = "fmt_flags_align", since = "1.28.0")] /// Possible alignments returned by `Formatter::align` #[derive(Debug)] pub enum Alignment { + #[stable(feature = "fmt_flags_align", since = "1.28.0")] /// Indication that contents should be left-aligned. Left, + #[stable(feature = "fmt_flags_align", since = "1.28.0")] /// Indication that contents should be right-aligned. Right, + #[stable(feature = "fmt_flags_align", since = "1.28.0")] /// Indication that contents should be center-aligned. Center, - /// No alignment was requested. 
- Unknown, } #[stable(feature = "debug_builders", since = "1.2.0")] @@ -254,7 +255,7 @@ pub struct Formatter<'a> { width: Option, precision: Option, - buf: &'a mut (Write+'a), + buf: &'a mut (dyn Write+'a), curarg: slice::Iter<'a, ArgumentV1<'a>>, args: &'a [ArgumentV1<'a>], } @@ -271,7 +272,7 @@ struct Void { /// /// It was added after #45197 showed that one could share a `!Sync` /// object across threads by passing it into `format_args!`. - _oibit_remover: PhantomData<*mut Fn()>, + _oibit_remover: PhantomData<*mut dyn Fn()>, } /// This struct represents the generic "argument" which is taken by the Xprintf @@ -333,7 +334,7 @@ impl<'a> ArgumentV1<'a> { // flags available in the v1 format of format_args #[derive(Copy, Clone)] -enum FlagV1 { SignPlus, SignMinus, Alternate, SignAwareZeroPad, } +enum FlagV1 { SignPlus, SignMinus, Alternate, SignAwareZeroPad, DebugLowerHex, DebugUpperHex } impl<'a> Arguments<'a> { /// When using the format_args!() macro, this function is used to generate the @@ -401,10 +402,21 @@ impl<'a> Arguments<'a> { /// safely be done, so no constructors are given and the fields are private /// to prevent modification. /// -/// The [`format_args!`] macro will safely create an instance of this structure -/// and pass it to a function or closure, passed as the first argument. The -/// macro validates the format string at compile-time so usage of the [`write`] -/// and [`format`] functions can be safely performed. +/// The [`format_args!`] macro will safely create an instance of this structure. +/// The macro validates the format string at compile-time so usage of the +/// [`write`] and [`format`] functions can be safely performed. +/// +/// You can use the `Arguments<'a>` that [`format_args!`] returns in `Debug` +/// and `Display` contexts as seen below. The example also shows that `Debug` +/// and `Display` format to the same thing: the interpolated format string +/// in `format_args!`. 
+/// +/// ```rust +/// let debug = format!("{:?}", format_args!("{} foo {:?}", 1, 2)); +/// let display = format!("{}", format_args!("{} foo {:?}", 1, 2)); +/// assert_eq!("1 foo 2", display); +/// assert_eq!(display, debug); +/// ``` /// /// [`format_args!`]: ../../std/macro.format_args.html /// [`format`]: ../../std/fmt/fn.format.html @@ -530,9 +542,13 @@ impl<'a> Display for Arguments<'a> { /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "`{Self}` cannot be formatted using `:?`; if it is \ - defined in your crate, add `#[derive(Debug)]` or \ - manually implement it"] +#[rustc_on_unimplemented( + on(crate_local, label="`{Self}` cannot be formatted using `{{:?}}`", + note="add `#[derive(Debug)]` or manually implement `{Debug}`"), + message="`{Self}` doesn't implement `{Debug}`", + label="`{Self}` cannot be formatted using `{{:?}}` because it doesn't implement `{Debug}`", +)] +#[doc(alias = "{:?}")] #[lang = "debug_trait"] pub trait Debug { /// Formats the value using the given formatter. @@ -593,9 +609,13 @@ pub trait Debug { /// /// println!("The origin is: {}", origin); /// ``` -#[rustc_on_unimplemented = "`{Self}` cannot be formatted with the default \ - formatter; try using `:?` instead if you are using \ - a format string"] +#[rustc_on_unimplemented( + message="`{Self}` doesn't implement `{Display}`", + label="`{Self}` cannot be formatted with the default formatter", + note="in format strings you may be able to use `{{:?}}` \ + (or {{:#?}} for pretty-print) instead", +)] +#[doc(alias = "{}")] #[stable(feature = "rust1", since = "1.0.0")] pub trait Display { /// Formats the value using the given formatter. @@ -679,18 +699,16 @@ pub trait Octal { /// /// The `Binary` trait should format its output as a number in binary. 
/// -/// For primitive signed integers (`i8` to `i128`, and `isize`), +/// For primitive signed integers ([`i8`] to [`i128`], and [`isize`]), /// negative values are formatted as the two’s complement representation. /// /// The alternate flag, `#`, adds a `0b` in front of the output. /// /// For more information on formatters, see [the module-level documentation][module]. /// -/// [module]: ../../std/fmt/index.html -/// /// # Examples /// -/// Basic usage with `i32`: +/// Basic usage with [`i32`]: /// /// ``` /// let x = 42; // 42 is '101010' in binary @@ -720,6 +738,12 @@ pub trait Octal { /// /// println!("l as binary is: {:b}", l); /// ``` +/// +/// [module]: ../../std/fmt/index.html +/// [`i8`]: ../../std/primitive.i8.html +/// [`i128`]: ../../std/primitive.i128.html +/// [`isize`]: ../../std/primitive.isize.html +/// [`i32`]: ../../std/primitive.i32.html #[stable(feature = "rust1", since = "1.0.0")] pub trait Binary { /// Formats the value using the given formatter. @@ -996,7 +1020,7 @@ pub trait UpperExp { /// /// [`write!`]: ../../std/macro.write.html #[stable(feature = "rust1", since = "1.0.0")] -pub fn write(output: &mut Write, args: Arguments) -> Result { +pub fn write(output: &mut dyn Write, args: Arguments) -> Result { let mut formatter = Formatter { flags: 0, width: None, @@ -1038,7 +1062,7 @@ pub fn write(output: &mut Write, args: Arguments) -> Result { impl<'a> Formatter<'a> { fn wrap_buf<'b, 'c, F>(&'b mut self, wrap: F) -> Formatter<'c> - where 'b: 'c, F: FnOnce(&'b mut (Write+'b)) -> &'c mut (Write+'c) + where 'b: 'c, F: FnOnce(&'b mut (dyn Write+'b)) -> &'c mut (dyn Write+'c) { Formatter { // We want to change this @@ -1179,6 +1203,23 @@ impl<'a> Formatter<'a> { /// is longer than this length /// /// Notably this function ignores the `flag` parameters. 
+ /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo; + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// formatter.pad("Foo") + /// } + /// } + /// + /// assert_eq!(&format!("{:<4}", Foo), "Foo "); + /// assert_eq!(&format!("{:0>4}", Foo), "0Foo"); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn pad(&mut self, s: &str) -> Result { // Make sure there's a fast path up front @@ -1192,7 +1233,11 @@ impl<'a> Formatter<'a> { // truncation. However other flags like `fill`, `width` and `align` // must act as always. if let Some((i, _)) = s.char_indices().skip(max).next() { - &s[..i] + // LLVM here can't prove that `..i` won't panic `&s[..i]`, but + // we know that it can't panic. Use `get` + `unwrap_or` to avoid + // `unsafe` and otherwise don't emit any panic-related code + // here. + s.get(..i).unwrap_or(&s) } else { &s } @@ -1297,7 +1342,7 @@ impl<'a> Formatter<'a> { } fn write_formatted_parts(&mut self, formatted: &flt2dec::Formatted) -> Result { - fn write_bytes(buf: &mut Write, s: &[u8]) -> Result { + fn write_bytes(buf: &mut dyn Write, s: &[u8]) -> Result { buf.write_str(unsafe { str::from_utf8_unchecked(s) }) } @@ -1341,7 +1386,7 @@ impl<'a> Formatter<'a> { self.buf.write_str(data) } - /// Writes some formatted information into this instance + /// Writes some formatted information into this instance. #[stable(feature = "rust1", since = "1.0.0")] pub fn write_fmt(&mut self, fmt: Arguments) -> Result { write(self.buf, fmt) @@ -1354,48 +1399,243 @@ impl<'a> Formatter<'a> { or `sign_aware_zero_pad` methods instead")] pub fn flags(&self) -> u32 { self.flags } - /// Character used as 'fill' whenever there is alignment + /// Character used as 'fill' whenever there is alignment. 
+ /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo; + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// let c = formatter.fill(); + /// if let Some(width) = formatter.width() { + /// for _ in 0..width { + /// write!(formatter, "{}", c)?; + /// } + /// Ok(()) + /// } else { + /// write!(formatter, "{}", c) + /// } + /// } + /// } + /// + /// // We set alignment to the left with ">". + /// assert_eq!(&format!("{:G>3}", Foo), "GGG"); + /// assert_eq!(&format!("{:t>6}", Foo), "tttttt"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn fill(&self) -> char { self.fill } - /// Flag indicating what form of alignment was requested - #[unstable(feature = "fmt_flags_align", reason = "method was just created", - issue = "27726")] - pub fn align(&self) -> Alignment { + /// Flag indicating what form of alignment was requested. + /// + /// # Examples + /// + /// ``` + /// extern crate core; + /// + /// use std::fmt::{self, Alignment}; + /// + /// struct Foo; + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// let s = if let Some(s) = formatter.align() { + /// match s { + /// Alignment::Left => "left", + /// Alignment::Right => "right", + /// Alignment::Center => "center", + /// } + /// } else { + /// "into the void" + /// }; + /// write!(formatter, "{}", s) + /// } + /// } + /// + /// fn main() { + /// assert_eq!(&format!("{:<}", Foo), "left"); + /// assert_eq!(&format!("{:>}", Foo), "right"); + /// assert_eq!(&format!("{:^}", Foo), "center"); + /// assert_eq!(&format!("{}", Foo), "into the void"); + /// } + /// ``` + #[stable(feature = "fmt_flags_align", since = "1.28.0")] + pub fn align(&self) -> Option { match self.align { - rt::v1::Alignment::Left => Alignment::Left, - rt::v1::Alignment::Right => Alignment::Right, - rt::v1::Alignment::Center => Alignment::Center, - rt::v1::Alignment::Unknown => 
Alignment::Unknown, + rt::v1::Alignment::Left => Some(Alignment::Left), + rt::v1::Alignment::Right => Some(Alignment::Right), + rt::v1::Alignment::Center => Some(Alignment::Center), + rt::v1::Alignment::Unknown => None, } } - /// Optionally specified integer width that the output should be + /// Optionally specified integer width that the output should be. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32); + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// if let Some(width) = formatter.width() { + /// // If we received a width, we use it + /// write!(formatter, "{:width$}", &format!("Foo({})", self.0), width = width) + /// } else { + /// // Otherwise we do nothing special + /// write!(formatter, "Foo({})", self.0) + /// } + /// } + /// } + /// + /// assert_eq!(&format!("{:10}", Foo(23)), "Foo(23) "); + /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn width(&self) -> Option { self.width } - /// Optionally specified precision for numeric types + /// Optionally specified precision for numeric types. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(f32); + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// if let Some(precision) = formatter.precision() { + /// // If we received a precision, we use it. + /// write!(formatter, "Foo({1:.*})", precision, self.0) + /// } else { + /// // Otherwise we default to 2. + /// write!(formatter, "Foo({:.2})", self.0) + /// } + /// } + /// } + /// + /// assert_eq!(&format!("{:.4}", Foo(23.2)), "Foo(23.2000)"); + /// assert_eq!(&format!("{}", Foo(23.2)), "Foo(23.20)"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn precision(&self) -> Option { self.precision } /// Determines if the `+` flag was specified. 
+ /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32); + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// if formatter.sign_plus() { + /// write!(formatter, + /// "Foo({}{})", + /// if self.0 < 0 { '-' } else { '+' }, + /// self.0) + /// } else { + /// write!(formatter, "Foo({})", self.0) + /// } + /// } + /// } + /// + /// assert_eq!(&format!("{:+}", Foo(23)), "Foo(+23)"); + /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn sign_plus(&self) -> bool { self.flags & (1 << FlagV1::SignPlus as u32) != 0 } /// Determines if the `-` flag was specified. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32); + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// if formatter.sign_minus() { + /// // You want a minus sign? Have one! + /// write!(formatter, "-Foo({})", self.0) + /// } else { + /// write!(formatter, "Foo({})", self.0) + /// } + /// } + /// } + /// + /// assert_eq!(&format!("{:-}", Foo(23)), "-Foo(23)"); + /// assert_eq!(&format!("{}", Foo(23)), "Foo(23)"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn sign_minus(&self) -> bool { self.flags & (1 << FlagV1::SignMinus as u32) != 0 } /// Determines if the `#` flag was specified. 
+ /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32); + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// if formatter.alternate() { + /// write!(formatter, "Foo({})", self.0) + /// } else { + /// write!(formatter, "{}", self.0) + /// } + /// } + /// } + /// + /// assert_eq!(&format!("{:#}", Foo(23)), "Foo(23)"); + /// assert_eq!(&format!("{}", Foo(23)), "23"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn alternate(&self) -> bool { self.flags & (1 << FlagV1::Alternate as u32) != 0 } /// Determines if the `0` flag was specified. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32); + /// + /// impl fmt::Display for Foo { + /// fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + /// assert!(formatter.sign_aware_zero_pad()); + /// assert_eq!(formatter.width(), Some(4)); + /// // We ignore the formatter's options. + /// write!(formatter, "{}", self.0) + /// } + /// } + /// + /// assert_eq!(&format!("{:04}", Foo(23)), "23"); + /// ``` #[stable(feature = "fmt_flags", since = "1.5.0")] pub fn sign_aware_zero_pad(&self) -> bool { self.flags & (1 << FlagV1::SignAwareZeroPad as u32) != 0 } + // FIXME: Decide what public API we want for these two flags. + // https://github.com/rust-lang/rust/issues/48584 + fn debug_lower_hex(&self) -> bool { self.flags & (1 << FlagV1::DebugLowerHex as u32) != 0 } + + fn debug_upper_hex(&self) -> bool { self.flags & (1 << FlagV1::DebugUpperHex as u32) != 0 } + /// Creates a [`DebugStruct`] builder designed to assist with creation of /// [`fmt::Debug`] implementations for structs. 
/// @@ -1406,10 +1646,12 @@ impl<'a> Formatter<'a> { /// /// ```rust /// use std::fmt; + /// use std::net::Ipv4Addr; /// /// struct Foo { /// bar: i32, /// baz: String, + /// addr: Ipv4Addr, /// } /// /// impl fmt::Debug for Foo { @@ -1417,12 +1659,19 @@ impl<'a> Formatter<'a> { /// fmt.debug_struct("Foo") /// .field("bar", &self.bar) /// .field("baz", &self.baz) + /// .field("addr", &format_args!("{}", self.addr)) /// .finish() /// } /// } /// - /// // prints "Foo { bar: 10, baz: "Hello World" }" - /// println!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() }); + /// assert_eq!( + /// "Foo { bar: 10, baz: \"Hello World\", addr: 127.0.0.1 }", + /// format!("{:?}", Foo { + /// bar: 10, + /// baz: "Hello World".to_string(), + /// addr: Ipv4Addr::new(127, 0, 0, 1), + /// }) + /// ); /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn debug_struct<'b>(&'b mut self, name: &str) -> DebugStruct<'b, 'a> { @@ -1436,20 +1685,24 @@ impl<'a> Formatter<'a> { /// /// ```rust /// use std::fmt; + /// use std::marker::PhantomData; /// - /// struct Foo(i32, String); + /// struct Foo(i32, String, PhantomData); /// - /// impl fmt::Debug for Foo { + /// impl fmt::Debug for Foo { /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { /// fmt.debug_tuple("Foo") /// .field(&self.0) /// .field(&self.1) + /// .field(&format_args!("_")) /// .finish() /// } /// } /// - /// // prints "Foo(10, "Hello World")" - /// println!("{:?}", Foo(10, "Hello World".to_string())); + /// assert_eq!( + /// "Foo(10, \"Hello\", _)", + /// format!("{:?}", Foo(10, "Hello".to_string(), PhantomData::)) + /// ); /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn debug_tuple<'b>(&'b mut self, name: &str) -> DebugTuple<'b, 'a> { @@ -1499,6 +1752,41 @@ impl<'a> Formatter<'a> { /// // prints "{10, 11}" /// println!("{:?}", Foo(vec![10, 11])); /// ``` + /// + /// [`format_args!`]: ../../std/macro.format_args.html + /// + /// In this more complex example, we use 
[`format_args!`] and `.debug_set()` + /// to build a list of match arms: + /// + /// ```rust + /// use std::fmt; + /// + /// struct Arm<'a, L: 'a, R: 'a>(&'a (L, R)); + /// struct Table<'a, K: 'a, V: 'a>(&'a [(K, V)], V); + /// + /// impl<'a, L, R> fmt::Debug for Arm<'a, L, R> + /// where + /// L: 'a + fmt::Debug, R: 'a + fmt::Debug + /// { + /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + /// L::fmt(&(self.0).0, fmt)?; + /// fmt.write_str(" => ")?; + /// R::fmt(&(self.0).1, fmt) + /// } + /// } + /// + /// impl<'a, K, V> fmt::Debug for Table<'a, K, V> + /// where + /// K: 'a + fmt::Debug, V: 'a + fmt::Debug + /// { + /// fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + /// fmt.debug_set() + /// .entries(self.0.iter().map(Arm)) + /// .entry(&Arm(&(format_args!("_"), &self.1))) + /// .finish() + /// } + /// } + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn debug_set<'b>(&'b mut self) -> DebugSet<'b, 'a> { builders::debug_set_new(self) @@ -1586,6 +1874,7 @@ impl Display for ! { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for bool { + #[inline] fn fmt(&self, f: &mut Formatter) -> Result { Display::fmt(self, f) } @@ -1748,6 +2037,7 @@ impl Debug for [T] { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for () { + #[inline] fn fmt(&self, f: &mut Formatter) -> Result { f.pad("()") } diff --git a/src/libcore/fmt/num.rs b/src/libcore/fmt/num.rs index ee989854a377..51391fa50d56 100644 --- a/src/libcore/fmt/num.rs +++ b/src/libcore/fmt/num.rs @@ -49,15 +49,13 @@ doit! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize } #[doc(hidden)] trait GenericRadix { /// The number of digits. - fn base(&self) -> u8; + const BASE: u8; /// A radix-specific prefix string. - fn prefix(&self) -> &'static str { - "" - } + const PREFIX: &'static str; /// Converts an integer to corresponding radix digit. - fn digit(&self, x: u8) -> u8; + fn digit(x: u8) -> u8; /// Format an integer using the radix using a formatter. 
fn fmt_int(&self, mut x: T, f: &mut fmt::Formatter) -> fmt::Result { @@ -65,16 +63,16 @@ trait GenericRadix { // characters for a base 2 number. let zero = T::zero(); let is_nonnegative = x >= zero; - let mut buf = [0; 128]; + let mut buf: [u8; 128] = unsafe { mem::uninitialized() }; let mut curr = buf.len(); - let base = T::from_u8(self.base()); + let base = T::from_u8(Self::BASE); if is_nonnegative { // Accumulate each digit of the number from the least significant // to the most significant figure. for byte in buf.iter_mut().rev() { - let n = x % base; // Get the current place value. - x = x / base; // Deaccumulate the number. - *byte = self.digit(n.to_u8()); // Store the digit in the buffer. + let n = x % base; // Get the current place value. + x = x / base; // Deaccumulate the number. + *byte = Self::digit(n.to_u8()); // Store the digit in the buffer. curr -= 1; if x == zero { // No more digits left to accumulate. @@ -84,9 +82,9 @@ trait GenericRadix { } else { // Do the same as above, but accounting for two's complement. for byte in buf.iter_mut().rev() { - let n = zero - (x % base); // Get the current place value. - x = x / base; // Deaccumulate the number. - *byte = self.digit(n.to_u8()); // Store the digit in the buffer. + let n = zero - (x % base); // Get the current place value. + x = x / base; // Deaccumulate the number. + *byte = Self::digit(n.to_u8()); // Store the digit in the buffer. curr -= 1; if x == zero { // No more digits left to accumulate. 
@@ -95,7 +93,7 @@ trait GenericRadix { } } let buf = unsafe { str::from_utf8_unchecked(&buf[curr..]) }; - f.pad_integral(is_nonnegative, self.prefix(), buf) + f.pad_integral(is_nonnegative, Self::PREFIX, buf) } } @@ -107,10 +105,6 @@ struct Binary; #[derive(Clone, PartialEq)] struct Octal; -/// A decimal (base 10) radix -#[derive(Clone, PartialEq)] -struct Decimal; - /// A hexadecimal (base 16) radix, formatted with lower-case characters #[derive(Clone, PartialEq)] struct LowerHex; @@ -122,25 +116,24 @@ struct UpperHex; macro_rules! radix { ($T:ident, $base:expr, $prefix:expr, $($x:pat => $conv:expr),+) => { impl GenericRadix for $T { - fn base(&self) -> u8 { $base } - fn prefix(&self) -> &'static str { $prefix } - fn digit(&self, x: u8) -> u8 { + const BASE: u8 = $base; + const PREFIX: &'static str = $prefix; + fn digit(x: u8) -> u8 { match x { $($x => $conv,)+ - x => panic!("number not in the range 0..{}: {}", self.base() - 1, x), + x => panic!("number not in the range 0..={}: {}", Self::BASE - 1, x), } } } } } -radix! { Binary, 2, "0b", x @ 0 ... 1 => b'0' + x } -radix! { Octal, 8, "0o", x @ 0 ... 7 => b'0' + x } -radix! { Decimal, 10, "", x @ 0 ... 9 => b'0' + x } -radix! { LowerHex, 16, "0x", x @ 0 ... 9 => b'0' + x, - x @ 10 ... 15 => b'a' + (x - 10) } -radix! { UpperHex, 16, "0x", x @ 0 ... 9 => b'0' + x, - x @ 10 ... 15 => b'A' + (x - 10) } +radix! { Binary, 2, "0b", x @ 0 ..= 1 => b'0' + x } +radix! { Octal, 8, "0o", x @ 0 ..= 7 => b'0' + x } +radix! { LowerHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, + x @ 10 ..= 15 => b'a' + (x - 10) } +radix! { UpperHex, 16, "0x", x @ 0 ..= 9 => b'0' + x, + x @ 10 ..= 15 => b'A' + (x - 10) } macro_rules! int_base { ($Trait:ident for $T:ident as $U:ident -> $Radix:ident) => { @@ -157,8 +150,15 @@ macro_rules! 
debug { ($T:ident) => { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for $T { + #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) + if f.debug_lower_hex() { + fmt::LowerHex::fmt(self, f) + } else if f.debug_upper_hex() { + fmt::UpperHex::fmt(self, f) + } else { + fmt::Display::fmt(self, f) + } } } } diff --git a/src/libcore/future/future.rs b/src/libcore/future/future.rs new file mode 100644 index 000000000000..10b4ca9b0b27 --- /dev/null +++ b/src/libcore/future/future.rs @@ -0,0 +1,112 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +use mem::PinMut; +use marker::Unpin; +use task::{self, Poll}; + +/// A future represents an asychronous computation. +/// +/// A future is a value that may not have finished computing yet. This kind of +/// "asynchronous value" makes it possible for a thread to continue doing useful +/// work while it waits for the value to become available. +/// +/// # The `poll` method +/// +/// The core method of future, `poll`, *attempts* to resolve the future into a +/// final value. This method does not block if the value is not ready. Instead, +/// the current task is scheduled to be woken up when it's possible to make +/// further progress by `poll`ing again. The wake up is performed using +/// `cx.waker()`, a handle for waking up the current task. +/// +/// When using a future, you generally won't call `poll` directly, but instead +/// `await!` the value. +pub trait Future { + /// The result of the `Future`. 
+ type Output; + + /// Attempt to resolve the future to a final value, registering + /// the current task for wakeup if the value is not yet available. + /// + /// # Return value + /// + /// This function returns: + /// + /// - [`Poll::Pending`] if the future is not ready yet + /// - [`Poll::Ready(val)`] with the result `val` of this future if it + /// finished successfully. + /// + /// Once a future has finished, clients should not `poll` it again. + /// + /// When a future is not ready yet, `poll` returns + /// `Poll::Pending`. The future will *also* register the + /// interest of the current task in the value being produced. For example, + /// if the future represents the availability of data on a socket, then the + /// task is recorded so that when data arrives, it is woken up (via + /// [`cx.waker()`]). Once a task has been woken up, + /// it should attempt to `poll` the future again, which may or may not + /// produce a final value. + /// + /// Note that if `Pending` is returned it only means that the *current* task + /// (represented by the argument `cx`) will receive a notification. Tasks + /// from previous calls to `poll` will *not* receive notifications. + /// + /// # Runtime characteristics + /// + /// Futures alone are *inert*; they must be *actively* `poll`ed to make + /// progress, meaning that each time the current task is woken up, it should + /// actively re-`poll` pending futures that it still has an interest in. + /// + /// The `poll` function is not called repeatedly in a tight loop for + /// futures, but only whenever the future itself is ready, as signaled via + /// the `Waker` inside `task::Context`. If you're familiar with the + /// `poll(2)` or `select(2)` syscalls on Unix it's worth noting that futures + /// typically do *not* suffer the same problems of "all wakeups must poll + /// all events"; they are more like `epoll(4)`. + /// + /// An implementation of `poll` should strive to return quickly, and must + /// *never* block. 
Returning quickly prevents unnecessarily clogging up + /// threads or event loops. If it is known ahead of time that a call to + /// `poll` may end up taking awhile, the work should be offloaded to a + /// thread pool (or something similar) to ensure that `poll` can return + /// quickly. + /// + /// # Panics + /// + /// Once a future has completed (returned `Ready` from `poll`), + /// then any future calls to `poll` may panic, block forever, or otherwise + /// cause bad behavior. The `Future` trait itself provides no guarantees + /// about the behavior of `poll` after a future has completed. + /// + /// [`Poll::Pending`]: ../task/enum.Poll.html#variant.Pending + /// [`Poll::Ready(val)`]: ../task/enum.Poll.html#variant.Ready + /// [`cx.waker()`]: ../task/struct.Context.html#method.waker + fn poll(self: PinMut, cx: &mut task::Context) -> Poll; +} + +impl<'a, F: ?Sized + Future + Unpin> Future for &'a mut F { + type Output = F::Output; + + fn poll(mut self: PinMut, cx: &mut task::Context) -> Poll { + F::poll(PinMut::new(&mut **self), cx) + } +} + +impl<'a, F: ?Sized + Future> Future for PinMut<'a, F> { + type Output = F::Output; + + fn poll(mut self: PinMut, cx: &mut task::Context) -> Poll { + F::poll((*self).reborrow(), cx) + } +} diff --git a/src/libcore/future/future_obj.rs b/src/libcore/future/future_obj.rs new file mode 100644 index 000000000000..6045fac2b4b3 --- /dev/null +++ b/src/libcore/future/future_obj.rs @@ -0,0 +1,182 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +use fmt; +use future::Future; +use marker::{PhantomData, Unpin}; +use mem::PinMut; +use task::{Context, Poll}; + +/// A custom trait object for polling futures, roughly akin to +/// `Box + 'a>`. +/// +/// This custom trait object was introduced for two reasons: +/// - Currently it is not possible to take `dyn Trait` by value and +/// `Box` is not available in no_std contexts. +/// - The `Future` trait is currently not object safe: The `Future::poll` +/// method makes uses the arbitrary self types feature and traits in which +/// this feature is used are currently not object safe due to current compiler +/// limitations. (See tracking issue for arbitray self types for more +/// information #44874) +pub struct LocalFutureObj<'a, T> { + ptr: *mut (), + poll_fn: unsafe fn(*mut (), &mut Context) -> Poll, + drop_fn: unsafe fn(*mut ()), + _marker: PhantomData<&'a ()>, +} + +impl<'a, T> Unpin for LocalFutureObj<'a, T> {} + +impl<'a, T> LocalFutureObj<'a, T> { + /// Create a `LocalFutureObj` from a custom trait object representation. + #[inline] + pub fn new + 'a>(f: F) -> LocalFutureObj<'a, T> { + LocalFutureObj { + ptr: f.into_raw(), + poll_fn: F::poll, + drop_fn: F::drop, + _marker: PhantomData, + } + } + + /// Converts the `LocalFutureObj` into a `FutureObj` + /// To make this operation safe one has to ensure that the `UnsafeFutureObj` + /// instance from which this `LocalFutureObj` was created actually + /// implements `Send`. 
+ #[inline] + pub unsafe fn into_future_obj(self) -> FutureObj<'a, T> { + FutureObj(self) + } +} + +impl<'a, T> fmt::Debug for LocalFutureObj<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("LocalFutureObj") + .finish() + } +} + +impl<'a, T> From> for LocalFutureObj<'a, T> { + #[inline] + fn from(f: FutureObj<'a, T>) -> LocalFutureObj<'a, T> { + f.0 + } +} + +impl<'a, T> Future for LocalFutureObj<'a, T> { + type Output = T; + + #[inline] + fn poll(self: PinMut, cx: &mut Context) -> Poll { + unsafe { + (self.poll_fn)(self.ptr, cx) + } + } +} + +impl<'a, T> Drop for LocalFutureObj<'a, T> { + fn drop(&mut self) { + unsafe { + (self.drop_fn)(self.ptr) + } + } +} + +/// A custom trait object for polling futures, roughly akin to +/// `Box + Send + 'a>`. +/// +/// This custom trait object was introduced for two reasons: +/// - Currently it is not possible to take `dyn Trait` by value and +/// `Box` is not available in no_std contexts. +/// - The `Future` trait is currently not object safe: The `Future::poll` +/// method makes uses the arbitrary self types feature and traits in which +/// this feature is used are currently not object safe due to current compiler +/// limitations. (See tracking issue for arbitray self types for more +/// information #44874) +pub struct FutureObj<'a, T>(LocalFutureObj<'a, T>); + +impl<'a, T> Unpin for FutureObj<'a, T> {} +unsafe impl<'a, T> Send for FutureObj<'a, T> {} + +impl<'a, T> FutureObj<'a, T> { + /// Create a `FutureObj` from a custom trait object representation. 
+ #[inline] + pub fn new + Send>(f: F) -> FutureObj<'a, T> { + FutureObj(LocalFutureObj::new(f)) + } +} + +impl<'a, T> fmt::Debug for FutureObj<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FutureObj") + .finish() + } +} + +impl<'a, T> Future for FutureObj<'a, T> { + type Output = T; + + #[inline] + fn poll(self: PinMut, cx: &mut Context) -> Poll { + let pinned_field = unsafe { PinMut::map_unchecked(self, |x| &mut x.0) }; + pinned_field.poll(cx) + } +} + +/// A custom implementation of a future trait object for `FutureObj`, providing +/// a hand-rolled vtable. +/// +/// This custom representation is typically used only in `no_std` contexts, +/// where the default `Box`-based implementation is not available. +/// +/// The implementor must guarantee that it is safe to call `poll` repeatedly (in +/// a non-concurrent fashion) with the result of `into_raw` until `drop` is +/// called. +pub unsafe trait UnsafeFutureObj<'a, T>: 'a { + /// Convert an owned instance into a (conceptually owned) void pointer. + fn into_raw(self) -> *mut (); + + /// Poll the future represented by the given void pointer. + /// + /// # Safety + /// + /// The trait implementor must guarantee that it is safe to repeatedly call + /// `poll` with the result of `into_raw` until `drop` is called; such calls + /// are not, however, allowed to race with each other or with calls to + /// `drop`. + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll; + + /// Drops the future represented by the given void pointer. + /// + /// # Safety + /// + /// The trait implementor must guarantee that it is safe to call this + /// function once per `into_raw` invocation; that call cannot race with + /// other calls to `drop` or `poll`. 
+ unsafe fn drop(ptr: *mut ()); +} + +unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for &'a mut F + where F: Future + Unpin + 'a +{ + fn into_raw(self) -> *mut () { + self as *mut F as *mut () + } + + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll { + PinMut::new_unchecked(&mut *(ptr as *mut F)).poll(cx) + } + + unsafe fn drop(_ptr: *mut ()) {} +} diff --git a/src/libcore/future/mod.rs b/src/libcore/future/mod.rs new file mode 100644 index 000000000000..f9361a0f4e7a --- /dev/null +++ b/src/libcore/future/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +//! Asynchronous values. + +mod future; +pub use self::future::Future; + +mod future_obj; +pub use self::future_obj::{FutureObj, LocalFutureObj, UnsafeFutureObj}; diff --git a/src/libcore/hash/mod.rs b/src/libcore/hash/mod.rs index 15545a04b64d..e7907e034449 100644 --- a/src/libcore/hash/mod.rs +++ b/src/libcore/hash/mod.rs @@ -99,9 +99,10 @@ use mem; #[allow(deprecated)] pub use self::sip::SipHasher; -#[unstable(feature = "sip_hash_13", issue = "34767")] +#[unstable(feature = "hashmap_internals", issue = "0")] #[allow(deprecated)] -pub use self::sip::{SipHasher13, SipHasher24}; +#[doc(hidden)] +pub use self::sip::SipHasher13; mod sip; @@ -307,7 +308,7 @@ pub trait Hasher { } /// Writes a single `u128` into this hasher. 
#[inline] - #[unstable(feature = "i128", issue = "35118")] + #[stable(feature = "i128", since = "1.26.0")] fn write_u128(&mut self, i: u128) { self.write(&unsafe { mem::transmute::<_, [u8; 16]>(i) }) } @@ -347,7 +348,7 @@ pub trait Hasher { } /// Writes a single `i128` into this hasher. #[inline] - #[unstable(feature = "i128", issue = "35118")] + #[stable(feature = "i128", since = "1.26.0")] fn write_i128(&mut self, i: i128) { self.write_u128(i as u128) } @@ -541,6 +542,16 @@ impl Default for BuildHasherDefault { } } +#[stable(since = "1.29.0", feature = "build_hasher_eq")] +impl PartialEq for BuildHasherDefault { + fn eq(&self, _other: &BuildHasherDefault) -> bool { + true + } +} + +#[stable(since = "1.29.0", feature = "build_hasher_eq")] +impl Eq for BuildHasherDefault {} + ////////////////////////////////////////////////////////////////////////////// mod impls { @@ -602,6 +613,13 @@ mod impls { } } + #[stable(feature = "never_hash", since = "1.29.0")] + impl Hash for ! { + fn hash(&self, _: &mut H) { + *self + } + } + macro_rules! impl_hash_tuple { () => ( #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs index 4e4d9b3f1e2f..e3bdecdc4b1f 100644 --- a/src/libcore/hash/sip.rs +++ b/src/libcore/hash/sip.rs @@ -23,10 +23,11 @@ use mem; /// (eg. `collections::HashMap` uses it by default). /// /// See: -#[unstable(feature = "sip_hash_13", issue = "34767")] +#[unstable(feature = "hashmap_internals", issue = "0")] #[rustc_deprecated(since = "1.13.0", reason = "use `std::collections::hash_map::DefaultHasher` instead")] #[derive(Debug, Clone, Default)] +#[doc(hidden)] pub struct SipHasher13 { hasher: Hasher, } @@ -34,11 +35,11 @@ pub struct SipHasher13 { /// An implementation of SipHash 2-4. 
/// /// See: -#[unstable(feature = "sip_hash_13", issue = "34767")] +#[unstable(feature = "hashmap_internals", issue = "0")] #[rustc_deprecated(since = "1.13.0", reason = "use `std::collections::hash_map::DefaultHasher` instead")] #[derive(Debug, Clone, Default)] -pub struct SipHasher24 { +struct SipHasher24 { hasher: Hasher, } @@ -156,14 +157,16 @@ impl SipHasher { #[rustc_deprecated(since = "1.13.0", reason = "use `std::collections::hash_map::DefaultHasher` instead")] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher { - SipHasher(SipHasher24::new_with_keys(key0, key1)) + SipHasher(SipHasher24 { + hasher: Hasher::new_with_keys(key0, key1) + }) } } impl SipHasher13 { /// Creates a new `SipHasher13` with the two initial keys set to 0. #[inline] - #[unstable(feature = "sip_hash_13", issue = "34767")] + #[unstable(feature = "hashmap_internals", issue = "0")] #[rustc_deprecated(since = "1.13.0", reason = "use `std::collections::hash_map::DefaultHasher` instead")] pub fn new() -> SipHasher13 { @@ -172,7 +175,7 @@ impl SipHasher13 { /// Creates a `SipHasher13` that is keyed off the provided keys. #[inline] - #[unstable(feature = "sip_hash_13", issue = "34767")] + #[unstable(feature = "hashmap_internals", issue = "0")] #[rustc_deprecated(since = "1.13.0", reason = "use `std::collections::hash_map::DefaultHasher` instead")] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 { @@ -182,28 +185,6 @@ impl SipHasher13 { } } -impl SipHasher24 { - /// Creates a new `SipHasher24` with the two initial keys set to 0. - #[inline] - #[unstable(feature = "sip_hash_13", issue = "34767")] - #[rustc_deprecated(since = "1.13.0", - reason = "use `std::collections::hash_map::DefaultHasher` instead")] - pub fn new() -> SipHasher24 { - SipHasher24::new_with_keys(0, 0) - } - - /// Creates a `SipHasher24` that is keyed off the provided keys. 
- #[inline] - #[unstable(feature = "sip_hash_13", issue = "34767")] - #[rustc_deprecated(since = "1.13.0", - reason = "use `std::collections::hash_map::DefaultHasher` instead")] - pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher24 { - SipHasher24 { - hasher: Hasher::new_with_keys(key0, key1) - } - } -} - impl Hasher { #[inline] fn new_with_keys(key0: u64, key1: u64) -> Hasher { @@ -271,16 +252,16 @@ impl Hasher { impl super::Hasher for SipHasher { #[inline] fn write(&mut self, msg: &[u8]) { - self.0.write(msg) + self.0.hasher.write(msg) } #[inline] fn finish(&self) -> u64 { - self.0.finish() + self.0.hasher.finish() } } -#[unstable(feature = "sip_hash_13", issue = "34767")] +#[unstable(feature = "hashmap_internals", issue = "0")] impl super::Hasher for SipHasher13 { #[inline] fn write(&mut self, msg: &[u8]) { @@ -293,19 +274,6 @@ impl super::Hasher for SipHasher13 { } } -#[unstable(feature = "sip_hash_13", issue = "34767")] -impl super::Hasher for SipHasher24 { - #[inline] - fn write(&mut self, msg: &[u8]) { - self.hasher.write(msg) - } - - #[inline] - fn finish(&self) -> u64 { - self.hasher.finish() - } -} - impl super::Hasher for Hasher { // see short_write comment for explanation #[inline] diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs new file mode 100644 index 000000000000..f4e96e67b2c6 --- /dev/null +++ b/src/libcore/hint.rs @@ -0,0 +1,61 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![stable(feature = "core_hint", since = "1.27.0")] + +//! Hints to compiler that affects how code should be emitted or optimized. 
+ +use intrinsics; + +/// Informs the compiler that this point in the code is not reachable, enabling +/// further optimizations. +/// +/// # Safety +/// +/// Reaching this function is completely *undefined behavior* (UB). In +/// particular, the compiler assumes that all UB must never happen, and +/// therefore will eliminate all branches that reach to a call to +/// `unreachable_unchecked()`. +/// +/// Like all instances of UB, if this assumption turns out to be wrong, i.e. the +/// `unreachable_unchecked()` call is actually reachable among all possible +/// control flow, the compiler will apply the wrong optimization strategy, and +/// may sometimes even corrupt seemingly unrelated code, causing +/// difficult-to-debug problems. +/// +/// Use this function only when you can prove that the code will never call it. +/// +/// The [`unreachable!()`] macro is the safe counterpart of this function, which +/// will panic instead when executed. +/// +/// [`unreachable!()`]: ../macro.unreachable.html +/// +/// # Example +/// +/// ``` +/// fn div_1(a: u32, b: u32) -> u32 { +/// use std::hint::unreachable_unchecked; +/// +/// // `b.saturating_add(1)` is always positive (not zero), +/// // hence `checked_div` will never return None. +/// // Therefore, the else branch is unreachable. +/// a.checked_div(b.saturating_add(1)) +/// .unwrap_or_else(|| unsafe { unreachable_unchecked() }) +/// } +/// +/// assert_eq!(div_1(7, 0), 7); +/// assert_eq!(div_1(9, 1), 4); +/// assert_eq!(div_1(11, std::u32::MAX), 0); +/// ``` +#[inline] +#[stable(feature = "unreachable", since = "1.27.0")] +pub unsafe fn unreachable_unchecked() -> ! { + intrinsics::unreachable() +} diff --git a/src/libcore/internal_macros.rs b/src/libcore/internal_macros.rs index cb215a38e535..db75f9bf210f 100644 --- a/src/libcore/internal_macros.rs +++ b/src/libcore/internal_macros.rs @@ -86,4 +86,3 @@ macro_rules! 
forward_ref_op_assign { } } } - diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index a611dc02469e..9ddf902349dd 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -10,7 +10,7 @@ //! rustc compiler intrinsics. //! -//! The corresponding definitions are in librustc_trans/intrinsic.rs. +//! The corresponding definitions are in librustc_codegen_llvm/intrinsic.rs. //! //! # Volatiles //! @@ -638,6 +638,9 @@ extern "rust-intrinsic" { /// NB: This is very different from the `unreachable!()` macro: Unlike the /// macro, which panics when it is executed, it is *undefined behavior* to /// reach code marked with this function. + /// + /// The stabilized version of this intrinsic is + /// [`std::hint::unreachable_unchecked`](../../std/hint/fn.unreachable_unchecked.html). pub fn unreachable() -> !; /// Informs the optimizer that a condition is always true. @@ -714,7 +717,7 @@ extern "rust-intrinsic" { /// Reinterprets the bits of a value of one type as another type. /// /// Both types must have the same size. Neither the original, nor the result, - /// may be an [invalid value](../../nomicon/meet-safe-and-unsafe.html). + /// may be an [invalid value](../../nomicon/what-unsafe-does.html). /// /// `transmute` is semantically equivalent to a bitwise move of one type /// into another. It copies the bits from the source value into the @@ -992,7 +995,7 @@ extern "rust-intrinsic" { /// ptr::copy_nonoverlapping(y, x, 1); /// ptr::copy_nonoverlapping(&t, y, 1); /// - /// // y and t now point to the same thing, but we need to completely forget `tmp` + /// // y and t now point to the same thing, but we need to completely forget `t` /// // because it's no longer relevant. /// mem::forget(t); /// } @@ -1082,6 +1085,13 @@ extern "rust-intrinsic" { /// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html). 
pub fn volatile_store(dst: *mut T, val: T); + /// Perform a volatile load from the `src` pointer + /// The pointer is not required to be aligned. + pub fn unaligned_volatile_load(src: *const T) -> T; + /// Perform a volatile store to the `dst` pointer. + /// The pointer is not required to be aligned. + pub fn unaligned_volatile_store(dst: *mut T, val: T); + /// Returns the square root of an `f32` pub fn sqrtf32(x: f32) -> f32; /// Returns the square root of an `f64` @@ -1292,6 +1302,9 @@ extern "rust-intrinsic" { /// Reverses the bytes in an integer type `T`. pub fn bswap(x: T) -> T; + /// Reverses the bits in an integer type `T`. + pub fn bitreverse(x: T) -> T; + /// Performs checked integer addition. /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `overflowing_add` method. For example, @@ -1310,6 +1323,10 @@ extern "rust-intrinsic" { /// [`std::u32::overflowing_mul`](../../std/primitive.u32.html#method.overflowing_mul) pub fn mul_with_overflow(x: T, y: T) -> (T, bool); + /// Performs an exact division, resulting in undefined behavior where + /// `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1` + pub fn exact_div(x: T, y: T) -> T; + /// Performs an unchecked division, resulting in undefined behavior /// where y = 0 or x = `T::min_value()` and y = -1 pub fn unchecked_div(x: T, y: T) -> T; @@ -1354,40 +1371,6 @@ extern "rust-intrinsic" { /// source as well as std's catch implementation. pub fn try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32; - /// Computes the byte offset that needs to be applied to `ptr` in order to - /// make it aligned to `align`. - /// If it is not possible to align `ptr`, the implementation returns - /// `usize::max_value()`. - /// - /// There are no guarantees whatsover that offsetting the pointer will not - /// overflow or go beyond the allocation that `ptr` points into. 
- /// It is up to the caller to ensure that the returned offset is correct - /// in all terms other than alignment. - /// - /// # Examples - /// - /// Accessing adjacent `u8` as `u16` - /// - /// ``` - /// # #![feature(core_intrinsics)] - /// # fn foo(n: usize) { - /// # use std::intrinsics::align_offset; - /// # use std::mem::align_of; - /// # unsafe { - /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; - /// let ptr = &x[n] as *const u8; - /// let offset = align_offset(ptr as *const (), align_of::()); - /// if offset < x.len() - n - 1 { - /// let u16_ptr = ptr.offset(offset as isize) as *const u16; - /// assert_ne!(*u16_ptr, 500); - /// } else { - /// // while the pointer can be aligned via `offset`, it would point - /// // outside the allocation - /// } - /// # } } - /// ``` - pub fn align_offset(ptr: *const (), align: usize) -> usize; - /// Emits a `!nontemporal` store according to LLVM (see their docs). /// Probably will never become stable. pub fn nontemporal_store(ptr: *mut T, val: T); diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs index 35cd7441c66b..391852910533 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/iterator.rs @@ -11,13 +11,14 @@ use cmp::Ordering; use ops::Try; -use super::{AlwaysOk, LoopState}; -use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, FlatMap, Fuse}; +use super::LoopState; +use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, Fuse}; +use super::{Flatten, FlatMap, flatten_compat}; use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, Rev}; use super::{Zip, Sum, Product}; use super::{ChainState, FromIterator, ZipImpl}; -fn _assert_is_object_safe(_: &Iterator) {} +fn _assert_is_object_safe(_: &dyn Iterator) {} /// An interface for dealing with iterators. 
/// @@ -28,8 +29,13 @@ fn _assert_is_object_safe(_: &Iterator) {} /// [module-level documentation]: index.html /// [impl]: index.html#implementing-iterator #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "`{Self}` is not an iterator; maybe try calling \ - `.iter()` or a similar method"] +#[rustc_on_unimplemented( + on( + _Self="&str", + label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" + ), + label="`{Self}` is not an iterator; maybe try calling `.iter()` or a similar method" +)] #[doc(spotlight)] pub trait Iterator { /// The type of the elements being iterated over. @@ -163,7 +169,7 @@ pub trait Iterator { /// This function might panic if the iterator has more than [`usize::MAX`] /// elements. /// - /// [`usize::MAX`]: ../../std/isize/constant.MAX.html + /// [`usize::MAX`]: ../../std/usize/constant.MAX.html /// /// # Examples /// @@ -265,9 +271,30 @@ pub trait Iterator { /// Creates an iterator starting at the same point, but stepping by /// the given amount at each iteration. /// - /// Note that it will always return the first element of the iterator, + /// Note 1: The first element of the iterator will always be returned, /// regardless of the step given. /// + /// Note 2: The time at which ignored elements are pulled is not fixed. + /// `StepBy` behaves like the sequence `next(), nth(step-1), nth(step-1), …`, + /// but is also free to behave like the sequence + /// `advance_n_and_return_first(step), advance_n_and_return_first(step), …` + /// Which way is used may change for some iterators for performance reasons. + /// The second way will advance the iterator earlier and may consume more items. 
+ /// + /// `advance_n_and_return_first` is the equivalent of: + /// ``` + /// fn advance_n_and_return_first(iter: &mut I, total_step: usize) -> Option + /// where + /// I: Iterator, + /// { + /// let next = iter.next(); + /// if total_step > 1 { + /// iter.nth(total_step-2); + /// } + /// next + /// } + /// ``` + /// /// # Panics /// /// The method will panic if the given step is `0`. @@ -277,7 +304,6 @@ pub trait Iterator { /// Basic usage: /// /// ``` - /// #![feature(iterator_step_by)] /// let a = [0, 1, 2, 3, 4, 5]; /// let mut iter = a.into_iter().step_by(2); /// @@ -287,9 +313,7 @@ pub trait Iterator { /// assert_eq!(iter.next(), None); /// ``` #[inline] - #[unstable(feature = "iterator_step_by", - reason = "unstable replacement of Range::step_by", - issue = "27741")] + #[stable(feature = "iterator_step_by", since = "1.28.0")] fn step_by(self, step: usize) -> StepBy where Self: Sized { assert!(step != 0); StepBy{iter: self, step: step - 1, first_take: true} @@ -360,8 +384,9 @@ pub trait Iterator { /// /// In other words, it zips two iterators together, into a single one. /// - /// When either iterator returns [`None`], all further calls to [`next`] - /// will return [`None`]. + /// If either iterator returns [`None`], [`next`] from the zipped iterator + /// will return [`None`]. If the first iterator returns [`None`], `zip` will + /// short-circuit and `next` will not be called on the second iterator. /// /// # Examples /// @@ -482,7 +507,7 @@ pub trait Iterator { fn map(self, f: F) -> Map where Self: Sized, F: FnMut(Self::Item) -> B, { - Map{iter: self, f: f} + Map { iter: self, f } } /// Calls a closure on each element of an iterator. @@ -593,7 +618,7 @@ pub trait Iterator { fn filter

(self, predicate: P) -> Filter where Self: Sized, P: FnMut(&Self::Item) -> bool, { - Filter{iter: self, predicate: predicate} + Filter {iter: self, predicate } } /// Creates an iterator that both filters and maps. @@ -650,7 +675,7 @@ pub trait Iterator { fn filter_map(self, f: F) -> FilterMap where Self: Sized, F: FnMut(Self::Item) -> Option, { - FilterMap { iter: self, f: f } + FilterMap { iter: self, f } } /// Creates an iterator which gives the current iteration count as well as @@ -803,7 +828,7 @@ pub trait Iterator { fn skip_while

(self, predicate: P) -> SkipWhile where Self: Sized, P: FnMut(&Self::Item) -> bool, { - SkipWhile{iter: self, flag: false, predicate: predicate} + SkipWhile { iter: self, flag: false, predicate } } /// Creates an iterator that yields elements based on a predicate. @@ -883,7 +908,7 @@ pub trait Iterator { fn take_while

(&mut self, mut predicate: P) -> Option where Self: Sized, P: FnMut(&Self::Item) -> bool @@ -595,7 +597,7 @@ impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I { /// that information can be useful. For example, if you want to iterate /// backwards, a good start is to know where the end is. /// -/// When implementing an `ExactSizeIterator`, You must also implement +/// When implementing an `ExactSizeIterator`, you must also implement /// [`Iterator`]. When doing so, the implementation of [`size_hint`] *must* /// return the exact size of the iterator. /// @@ -706,7 +708,7 @@ pub trait ExactSizeIterator: Iterator { /// ``` /// #![feature(exact_size_is_empty)] /// - /// let mut one_element = 0..1; + /// let mut one_element = std::iter::once(0); /// assert!(!one_element.is_empty()); /// /// assert_eq!(one_element.next(), Some(0)); @@ -901,6 +903,15 @@ impl Iterator for ResultShunt None => None, } } + + fn size_hint(&self) -> (usize, Option) { + if self.error.is_some() { + (0, Some(0)) + } else { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } + } } #[stable(feature = "iter_arith_traits_result", since="1.16.0")] @@ -959,10 +970,10 @@ impl Product> for Result /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse /// [`Fuse`]: ../../std/iter/struct.Fuse.html -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] pub trait FusedIterator: Iterator {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, I: FusedIterator + ?Sized> FusedIterator for &'a mut I {} /// An iterator that reports an accurate length using size_hint. @@ -970,9 +981,11 @@ impl<'a, I: FusedIterator + ?Sized> FusedIterator for &'a mut I {} /// The iterator reports a size hint where it is either exact /// (lower bound is equal to upper bound), or the upper bound is [`None`]. 
/// The upper bound must only be [`None`] if the actual iterator length is -/// larger than [`usize::MAX`]. +/// larger than [`usize::MAX`]. In that case, the lower bound must be +/// [`usize::MAX`], resulting in a [`.size_hint`] of `(usize::MAX, None)`. /// -/// The iterator must produce exactly the number of elements it reported. +/// The iterator must produce exactly the number of elements it reported +/// or diverge before reaching the end. /// /// # Safety /// diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index c49292481c3f..055da19de60d 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -41,7 +41,7 @@ //! dictate the panic message, the file at which panic was invoked, and the //! line and column inside the file. It is up to consumers of this core //! library to define this panic function; it is only required to never -//! return. This requires a `lang` attribute named `panic_fmt`. +//! return. This requires a `lang` attribute named `panic_impl`. //! //! * `rust_eh_personality` - is used by the failure mechanisms of the //! compiler. This is often mapped to GCC's personality function, but crates @@ -50,6 +50,15 @@ // Since libcore defines many fundamental lang items, all tests live in a // separate crate, libcoretest, to avoid bizarre issues. +// +// Here we explicitly #[cfg]-out this whole crate when testing. If we don't do +// this, both the generated test artifact and the linked libtest (which +// transitively includes libcore) will both define the same set of lang items, +// and this will cause the E0152 "duplicate lang item found" error. See +// discussion in #50466 for details. +// +// This cfg won't affect doc tests. 
+#![cfg(not(test))] #![stable(feature = "core", since = "1.6.0")] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", @@ -63,35 +72,56 @@ #![no_core] #![deny(missing_docs)] #![deny(missing_debug_implementations)] -#![deny(warnings)] #![feature(allow_internal_unstable)] +#![feature(arbitrary_self_types)] #![feature(asm)] #![feature(associated_type_defaults)] -#![feature(cfg_target_feature)] +#![feature(attr_literals)] #![feature(cfg_target_has_atomic)] #![feature(concat_idents)] #![feature(const_fn)] +#![feature(const_int_ops)] +#![feature(const_fn_union)] #![feature(convert_id)] #![feature(custom_attribute)] +#![feature(doc_cfg)] +#![feature(doc_spotlight)] +#![feature(extern_types)] #![feature(fundamental)] -#![feature(i128_type)] -#![feature(inclusive_range_syntax)] #![feature(intrinsics)] #![feature(lang_items)] +#![feature(link_llvm_intrinsics)] #![feature(never_type)] +#![cfg_attr(not(stage0), feature(nll))] +#![feature(exhaustive_patterns)] +#![feature(macro_at_most_once_rep)] #![feature(no_core)] #![feature(on_unimplemented)] #![feature(optin_builtin_traits)] #![feature(prelude_import)] #![feature(repr_simd, platform_intrinsics)] #![feature(rustc_attrs)] +#![feature(rustc_const_unstable)] +#![feature(simd_ffi)] #![feature(specialization)] #![feature(staged_api)] +#![feature(stmt_expr_attributes)] #![feature(unboxed_closures)] #![feature(untagged_unions)] #![feature(unwind_attributes)] -#![feature(doc_spotlight)] +#![feature(doc_alias)] +#![feature(mmx_target_feature)] +#![feature(tbm_target_feature)] +#![feature(sse4a_target_feature)] +#![feature(arm_target_feature)] +#![feature(powerpc_target_feature)] +#![feature(mips_target_feature)] +#![feature(aarch64_target_feature)] +#![feature(const_slice_len)] +#![feature(const_str_as_bytes)] +#![feature(const_str_len)] +#![feature(non_exhaustive)] #[prelude_import] #[allow(unused)] @@ -116,14 +146,14 @@ mod uint_macros; #[path = "num/i16.rs"] pub mod i16; #[path = "num/i32.rs"] 
pub mod i32; #[path = "num/i64.rs"] pub mod i64; -#[path = "num/i128.rs"] pub mod i128; +#[path = "num/i128.rs"] pub mod i128; #[path = "num/usize.rs"] pub mod usize; #[path = "num/u8.rs"] pub mod u8; #[path = "num/u16.rs"] pub mod u16; #[path = "num/u32.rs"] pub mod u32; #[path = "num/u64.rs"] pub mod u64; -#[path = "num/u128.rs"] pub mod u128; +#[path = "num/u128.rs"] pub mod u128; #[path = "num/f32.rs"] pub mod f32; #[path = "num/f64.rs"] pub mod f64; @@ -139,8 +169,8 @@ pub mod prelude; pub mod intrinsics; pub mod mem; -pub mod nonzero; pub mod ptr; +pub mod hint; /* Core language traits */ @@ -156,9 +186,11 @@ pub mod borrow; pub mod any; pub mod array; +pub mod ascii; pub mod sync; pub mod cell; pub mod char; +pub mod panic; pub mod panicking; pub mod iter; pub mod option; @@ -169,9 +201,49 @@ pub mod slice; pub mod str; pub mod hash; pub mod fmt; +pub mod time; + +pub mod unicode; + +/* Async */ +pub mod future; +pub mod task; + +/* Heap memory allocator trait */ +#[allow(missing_docs)] +pub mod alloc; // note: does not need to be public -mod char_private; mod iter_private; +mod nonzero; mod tuple; mod unit; + +// Pull in the the `coresimd` crate directly into libcore. This is where all the +// architecture-specific (and vendor-specific) intrinsics are defined. AKA +// things like SIMD and such. Note that the actual source for all this lies in a +// different repository, rust-lang-nursery/stdsimd. That's why the setup here is +// a bit wonky. +#[allow(unused_macros)] +macro_rules! test_v16 { ($item:item) => {}; } +#[allow(unused_macros)] +macro_rules! test_v32 { ($item:item) => {}; } +#[allow(unused_macros)] +macro_rules! test_v64 { ($item:item) => {}; } +#[allow(unused_macros)] +macro_rules! test_v128 { ($item:item) => {}; } +#[allow(unused_macros)] +macro_rules! test_v256 { ($item:item) => {}; } +#[allow(unused_macros)] +macro_rules! test_v512 { ($item:item) => {}; } +#[allow(unused_macros)] +macro_rules! 
vector_impl { ($([$f:ident, $($args:tt)*]),*) => { $($f!($($args)*);)* } } +#[path = "../stdsimd/coresimd/mod.rs"] +#[allow(missing_docs, missing_debug_implementations, dead_code, unused_imports)] +#[unstable(feature = "stdsimd", issue = "48556")] +#[cfg(not(stage0))] // allow changes to how stdsimd works in stage0 +mod coresimd; + +#[stable(feature = "simd_arch", since = "1.27.0")] +#[cfg(not(stage0))] +pub use coresimd::arch; diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index f00128a8147d..5b3b2d163568 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -19,73 +19,15 @@ macro_rules! panic { ($msg:expr) => ({ $crate::panicking::panic(&($msg, file!(), line!(), __rust_unstable_column!())) }); - ($fmt:expr, $($arg:tt)*) => ({ + ($msg:expr,) => ( + panic!($msg) + ); + ($fmt:expr, $($arg:tt)+) => ({ $crate::panicking::panic_fmt(format_args!($fmt, $($arg)*), &(file!(), line!(), __rust_unstable_column!())) }); } -/// Ensure that a boolean expression is `true` at runtime. -/// -/// This will invoke the [`panic!`] macro if the provided expression cannot be -/// evaluated to `true` at runtime. -/// -/// # Uses -/// -/// Assertions are always checked in both debug and release builds, and cannot -/// be disabled. See [`debug_assert!`] for assertions that are not enabled in -/// release builds by default. -/// -/// Unsafe code relies on `assert!` to enforce run-time invariants that, if -/// violated could lead to unsafety. -/// -/// Other use-cases of `assert!` include [testing] and enforcing run-time -/// invariants in safe code (whose violation cannot result in unsafety). -/// -/// # Custom Messages -/// -/// This macro has a second form, where a custom panic message can -/// be provided with or without arguments for formatting. See [`std::fmt`] -/// for syntax for this form. 
-/// -/// [`panic!`]: macro.panic.html -/// [`debug_assert!`]: macro.debug_assert.html -/// [testing]: ../book/second-edition/ch11-01-writing-tests.html#checking-results-with-the-assert-macro -/// [`std::fmt`]: ../std/fmt/index.html -/// -/// # Examples -/// -/// ``` -/// // the panic message for these assertions is the stringified value of the -/// // expression given. -/// assert!(true); -/// -/// fn some_computation() -> bool { true } // a very simple function -/// -/// assert!(some_computation()); -/// -/// // assert with a custom message -/// let x = true; -/// assert!(x, "x wasn't true!"); -/// -/// let a = 3; let b = 27; -/// assert!(a + b == 30, "a = {}, b = {}", a, b); -/// ``` -#[macro_export] -#[stable(feature = "rust1", since = "1.0.0")] -macro_rules! assert { - ($cond:expr) => ( - if !$cond { - panic!(concat!("assertion failed: ", stringify!($cond))) - } - ); - ($cond:expr, $($arg:tt)+) => ( - if !$cond { - panic!($($arg)+) - } - ); -} - /// Asserts that two expressions are equal to each other (using [`PartialEq`]). /// /// On panic, this macro will print the values of the expressions with their @@ -327,7 +269,7 @@ macro_rules! debug_assert_ne { /// } /// } /// -/// // The prefered method of quick returning Errors +/// // The preferred method of quick returning Errors /// fn write_to_file_question() -> Result<(), MyError> { /// let mut file = File::create("my_best_friends.txt")?; /// file.write_all(b"This is a list of my best friends.")?; @@ -353,13 +295,15 @@ macro_rules! debug_assert_ne { /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] +#[doc(alias = "?")] macro_rules! try { ($expr:expr) => (match $expr { $crate::result::Result::Ok(val) => val, $crate::result::Result::Err(err) => { return $crate::result::Result::Err($crate::convert::From::from(err)) } - }) + }); + ($expr:expr,) => (try!($expr)); } /// Write formatted data into a buffer. @@ -452,15 +396,16 @@ macro_rules! 
write { /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] +#[allow_internal_unstable] macro_rules! writeln { ($dst:expr) => ( write!($dst, "\n") ); - ($dst:expr, $fmt:expr) => ( - write!($dst, concat!($fmt, "\n")) + ($dst:expr,) => ( + writeln!($dst) ); - ($dst:expr, $fmt:expr, $($arg:tt)*) => ( - write!($dst, concat!($fmt, "\n"), $($arg)*) + ($dst:expr, $($arg:tt)*) => ( + $dst.write_fmt(format_args_nl!($($arg)*)) ); } @@ -474,13 +419,13 @@ macro_rules! writeln { /// * Iterators that dynamically terminate. /// /// If the determination that the code is unreachable proves incorrect, the -/// program immediately terminates with a [`panic!`]. The function [`unreachable`], -/// which belongs to the [`std::intrinsics`] module, informs the compilier to +/// program immediately terminates with a [`panic!`]. The function [`unreachable_unchecked`], +/// which belongs to the [`std::hint`] module, informs the compilier to /// optimize the code out of the release version entirely. /// /// [`panic!`]: ../std/macro.panic.html -/// [`unreachable`]: ../std/intrinsics/fn.unreachable.html -/// [`std::intrinsics`]: ../std/intrinsics/index.html +/// [`unreachable_unchecked`]: ../std/hint/fn.unreachable_unchecked.html +/// [`std::hint`]: ../std/hint/index.html /// /// # Panics /// @@ -524,6 +469,9 @@ macro_rules! unreachable { ($msg:expr) => ({ unreachable!("{}", $msg) }); + ($msg:expr,) => ({ + unreachable!($msg) + }); ($fmt:expr, $($arg:tt)*) => ({ panic!(concat!("internal error: entered unreachable code: ", $fmt), $($arg)*) }); @@ -593,6 +541,7 @@ macro_rules! unimplemented { /// into libsyntax itself. /// /// For more information, see documentation for `std`'s macros. +#[cfg(dox)] mod builtin { /// Unconditionally causes compilation to fail with the given error message when encountered. 
@@ -601,9 +550,11 @@ mod builtin { /// /// [`std::compile_error!`]: ../std/macro.compile_error.html #[stable(feature = "compile_error_macro", since = "1.20.0")] - #[macro_export] - #[cfg(dox)] - macro_rules! compile_error { ($msg:expr) => ({ /* compiler built-in */ }) } + #[rustc_doc_only_macro] + macro_rules! compile_error { + ($msg:expr) => ({ /* compiler built-in */ }); + ($msg:expr,) => ({ /* compiler built-in */ }); + } /// The core macro for formatted string creation & output. /// @@ -611,8 +562,7 @@ mod builtin { /// /// [`std::format_args!`]: ../std/macro.format_args.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! format_args { ($fmt:expr) => ({ /* compiler built-in */ }); ($fmt:expr, $($args:tt)*) => ({ /* compiler built-in */ }); @@ -624,8 +574,7 @@ mod builtin { /// /// [`std::env!`]: ../std/macro.env.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! env { ($name:expr) => ({ /* compiler built-in */ }); ($name:expr,) => ({ /* compiler built-in */ }); @@ -637,9 +586,11 @@ mod builtin { /// /// [`std::option_env!`]: ../std/macro.option_env.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] - macro_rules! option_env { ($name:expr) => ({ /* compiler built-in */ }) } + #[rustc_doc_only_macro] + macro_rules! option_env { + ($name:expr) => ({ /* compiler built-in */ }); + ($name:expr,) => ({ /* compiler built-in */ }); + } /// Concatenate identifiers into one identifier. /// @@ -647,11 +598,10 @@ mod builtin { /// /// [`std::concat_idents!`]: ../std/macro.concat_idents.html #[unstable(feature = "concat_idents_macro", issue = "29599")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! 
concat_idents { - ($($e:ident),*) => ({ /* compiler built-in */ }); - ($($e:ident,)*) => ({ /* compiler built-in */ }); + ($($e:ident),+) => ({ /* compiler built-in */ }); + ($($e:ident,)+) => ({ /* compiler built-in */ }); } /// Concatenates literals into a static string slice. @@ -660,8 +610,7 @@ mod builtin { /// /// [`std::concat!`]: ../std/macro.concat.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! concat { ($($e:expr),*) => ({ /* compiler built-in */ }); ($($e:expr,)*) => ({ /* compiler built-in */ }); @@ -673,8 +622,7 @@ mod builtin { /// /// [`std::line!`]: ../std/macro.line.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! line { () => ({ /* compiler built-in */ }) } /// A macro which expands to the column number on which it was invoked. @@ -683,8 +631,7 @@ mod builtin { /// /// [`std::column!`]: ../std/macro.column.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! column { () => ({ /* compiler built-in */ }) } /// A macro which expands to the file name from which it was invoked. @@ -693,8 +640,7 @@ mod builtin { /// /// [`std::file!`]: ../std/macro.file.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! file { () => ({ /* compiler built-in */ }) } /// A macro which stringifies its arguments. @@ -703,8 +649,7 @@ mod builtin { /// /// [`std::stringify!`]: ../std/macro.stringify.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! stringify { ($($t:tt)*) => ({ /* compiler built-in */ }) } /// Includes a utf8-encoded file as a string. 
@@ -713,9 +658,11 @@ mod builtin { /// /// [`std::include_str!`]: ../std/macro.include_str.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] - macro_rules! include_str { ($file:expr) => ({ /* compiler built-in */ }) } + #[rustc_doc_only_macro] + macro_rules! include_str { + ($file:expr) => ({ /* compiler built-in */ }); + ($file:expr,) => ({ /* compiler built-in */ }); + } /// Includes a file as a reference to a byte array. /// @@ -723,9 +670,11 @@ mod builtin { /// /// [`std::include_bytes!`]: ../std/macro.include_bytes.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] - macro_rules! include_bytes { ($file:expr) => ({ /* compiler built-in */ }) } + #[rustc_doc_only_macro] + macro_rules! include_bytes { + ($file:expr) => ({ /* compiler built-in */ }); + ($file:expr,) => ({ /* compiler built-in */ }); + } /// Expands to a string that represents the current module path. /// @@ -733,8 +682,7 @@ mod builtin { /// /// [`std::module_path!`]: ../std/macro.module_path.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! module_path { () => ({ /* compiler built-in */ }) } /// Boolean evaluation of configuration flags, at compile-time. @@ -743,8 +691,7 @@ mod builtin { /// /// [`std::cfg!`]: ../std/macro.cfg.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] + #[rustc_doc_only_macro] macro_rules! cfg { ($($cfg:tt)*) => ({ /* compiler built-in */ }) } /// Parse a file as an expression or an item according to the context. @@ -753,7 +700,22 @@ mod builtin { /// /// [`std::include!`]: ../std/macro.include.html #[stable(feature = "rust1", since = "1.0.0")] - #[macro_export] - #[cfg(dox)] - macro_rules! include { ($file:expr) => ({ /* compiler built-in */ }) } + #[rustc_doc_only_macro] + macro_rules! 
include { + ($file:expr) => ({ /* compiler built-in */ }); + ($file:expr,) => ({ /* compiler built-in */ }); + } + + /// Ensure that a boolean expression is `true` at runtime. + /// + /// For more information, see the documentation for [`std::assert!`]. + /// + /// [`std::assert!`]: ../std/macro.assert.html + #[rustc_doc_only_macro] + #[stable(feature = "rust1", since = "1.0.0")] + macro_rules! assert { + ($cond:expr) => ({ /* compiler built-in */ }); + ($cond:expr,) => ({ /* compiler built-in */ }); + ($cond:expr, $($arg:tt)+) => ({ /* compiler built-in */ }); + } } diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 3032fb2de33a..d18e167fc3fa 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -39,7 +39,10 @@ use hash::Hasher; /// [arc]: ../../std/sync/struct.Arc.html /// [ub]: ../../reference/behavior-considered-undefined.html #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "`{Self}` cannot be sent between threads safely"] +#[rustc_on_unimplemented( + message="`{Self}` cannot be sent between threads safely", + label="`{Self}` cannot be sent between threads safely" +)] pub unsafe auto trait Send { // empty. } @@ -63,9 +66,13 @@ impl !Send for *mut T { } /// struct BarUse(Bar<[i32]>); // OK /// ``` /// -/// The one exception is the implicit `Self` type of a trait, which does not -/// get an implicit `Sized` bound. This is because a `Sized` bound prevents -/// the trait from being used to form a [trait object]: +/// The one exception is the implicit `Self` type of a trait. A trait does not +/// have an implicit `Sized` bound as this is incompatible with [trait object]s +/// where, by definition, the trait needs to work with all possible implementors, +/// and thus could be any size. 
+/// +/// Although Rust will let you bind `Sized` to a trait, you won't +/// be able to use it to form a trait object later: /// /// ``` /// # #![allow(unused_variables)] @@ -84,7 +91,12 @@ impl !Send for *mut T { } /// [trait object]: ../../book/first-edition/trait-objects.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] -#[rustc_on_unimplemented = "`{Self}` does not have a constant size known at compile-time"] +#[rustc_on_unimplemented( + message="the size for values of type `{Self}` cannot be known at compilation time", + label="doesn't have a size known at compile-time", + note="to learn more, visit ", +)] #[fundamental] // for Default, for example, which requires that `[T]: !Default` be evaluatable pub trait Sized { // Empty. @@ -256,6 +268,21 @@ pub trait Unsize { /// non-`Copy` in the future, it could be prudent to omit the `Copy` implementation now, to /// avoid a breaking API change. /// +/// ## Additional implementors +/// +/// In addition to the [implementors listed below][impls], +/// the following types also implement `Copy`: +/// +/// * Function item types (i.e. the distinct types defined for each function) +/// * Function pointer types (e.g. `fn() -> i32`) +/// * Array types, for all sizes, if the item type also implements `Copy` (e.g. `[i32; 123456]`) +/// * Tuple types, if each component also implements `Copy` (e.g. `()`, `(i32, bool)`) +/// * Closure types, if they capture no value from the environment +/// or if all such captured values implement `Copy` themselves. +/// Note that variables captured by shared reference always implement `Copy` +/// (even if the referent doesn't), +/// while variables captured by mutable reference never implement `Copy`. 
+/// /// [`Vec`]: ../../std/vec/struct.Vec.html /// [`String`]: ../../std/string/struct.String.html /// [`Drop`]: ../../std/ops/trait.Drop.html @@ -263,6 +290,7 @@ pub trait Unsize { /// [`Clone`]: ../clone/trait.Clone.html /// [`String`]: ../../std/string/struct.String.html /// [`i32`]: ../../std/primitive.i32.html +/// [impls]: #implementors #[stable(feature = "rust1", since = "1.0.0")] #[lang = "copy"] pub trait Copy : Clone { @@ -274,7 +302,7 @@ pub trait Copy : Clone { /// This trait is automatically implemented when the compiler determines /// it's appropriate. /// -/// The precise definition is: a type `T` is `Sync` if `&T` is +/// The precise definition is: a type `T` is `Sync` if and only if `&T` is /// [`Send`][send]. In other words, if there is no possibility of /// [undefined behavior][ub] (including data races) when passing /// `&T` references between threads. @@ -339,8 +367,21 @@ pub trait Copy : Clone { /// [transmute]: ../../std/mem/fn.transmute.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sync"] -#[rustc_on_unimplemented = "`{Self}` cannot be shared between threads safely"] +#[rustc_on_unimplemented( + message="`{Self}` cannot be shared between threads safely", + label="`{Self}` cannot be shared between threads safely" +)] pub unsafe auto trait Sync { + // FIXME(estebank): once support to add notes in `rustc_on_unimplemented` + // lands in beta, and it has been extended to check whether a closure is + // anywhere in the requirement chain, extend it as such (#48534): + // ``` + // on( + // closure, + // note="`{Self}` cannot be shared safely, consider marking the closure `move`" + // ), + // ``` + // Empty } @@ -561,3 +602,70 @@ unsafe impl Freeze for *const T {} unsafe impl Freeze for *mut T {} unsafe impl<'a, T: ?Sized> Freeze for &'a T {} unsafe impl<'a, T: ?Sized> Freeze for &'a mut T {} + +/// Types which can be moved out of a `PinMut`. +/// +/// The `Unpin` trait is used to control the behavior of the [`PinMut`] type. 
If a +/// type implements `Unpin`, it is safe to move a value of that type out of the +/// `PinMut` pointer. +/// +/// This trait is automatically implemented for almost every type. +/// +/// [`PinMut`]: ../mem/struct.PinMut.html +#[unstable(feature = "pin", issue = "49150")] +pub auto trait Unpin {} + +/// A type which does not implement `Unpin`. +/// +/// If a type contains a `Pinned`, it will not implement `Unpin` by default. +#[unstable(feature = "pin", issue = "49150")] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct Pinned; + +#[unstable(feature = "pin", issue = "49150")] +impl !Unpin for Pinned {} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized + 'a> Unpin for &'a T {} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized + 'a> Unpin for &'a mut T {} + +/// Implementations of `Copy` for primitive types. +/// +/// Implementations that cannot be described in Rust +/// are implemented in `SelectionContext::copy_clone_conditions()` in librustc. +mod copy_impls { + + use super::Copy; + + macro_rules! impl_copy { + ($($t:ty)*) => { + $( + #[stable(feature = "rust1", since = "1.0.0")] + impl Copy for $t {} + )* + } + } + + impl_copy! { + usize u8 u16 u32 u64 u128 + isize i8 i16 i32 i64 i128 + f32 f64 + bool char + } + + #[unstable(feature = "never_type", issue = "35121")] + impl Copy for ! {} + + #[stable(feature = "rust1", since = "1.0.0")] + impl Copy for *const T {} + + #[stable(feature = "rust1", since = "1.0.0")] + impl Copy for *mut T {} + + // Shared references can be copied, but mutable references *cannot*! 
+ #[stable(feature = "rust1", since = "1.0.0")] + impl<'a, T: ?Sized> Copy for &'a T {} + +} diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 93f6a0214d77..ea711c69393a 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -18,11 +18,13 @@ use clone; use cmp; use fmt; +use future::{Future, UnsafeFutureObj}; use hash; use intrinsics; -use marker::{Copy, PhantomData, Sized}; +use marker::{Copy, PhantomData, Sized, Unpin, Unsize}; use ptr; -use ops::{Deref, DerefMut}; +use task::{Context, Poll}; +use ops::{Deref, DerefMut, CoerceUnsized}; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::transmute; @@ -189,14 +191,17 @@ pub fn forget(t: T) { /// Type | size_of::\() /// ---- | --------------- /// () | 0 +/// bool | 1 /// u8 | 1 /// u16 | 2 /// u32 | 4 /// u64 | 8 +/// u128 | 16 /// i8 | 1 /// i16 | 2 /// i32 | 4 /// i64 | 8 +/// i128 | 16 /// f32 | 4 /// f64 | 8 /// char | 4 @@ -224,6 +229,8 @@ pub fn forget(t: T) { /// 2. Round up the current size to the nearest multiple of the next field's [alignment]. /// /// Finally, round the size of the struct to the nearest multiple of its [alignment]. +/// The alignment of the struct is usually the largest alignment of all its +/// fields; this can be changed with the use of `repr(align(N))`. /// /// Unlike `C`, zero sized structs are not rounded up to one byte in size. /// @@ -278,7 +285,8 @@ pub fn forget(t: T) { /// // The size of the second field is 2, so add 2 to the size. Size is 4. /// // The alignment of the third field is 1, so add 0 to the size for padding. Size is 4. /// // The size of the third field is 1, so add 1 to the size. Size is 5. -/// // Finally, the alignment of the struct is 2, so add 1 to the size for padding. Size is 6. +/// // Finally, the alignment of the struct is 2 (because the largest alignment amongst its +/// // fields is 2), so add 1 to the size for padding. Size is 6. 
/// assert_eq!(6, mem::size_of::()); /// /// #[repr(C)] @@ -630,12 +638,13 @@ pub unsafe fn uninitialized() -> T { #[stable(feature = "rust1", since = "1.0.0")] pub fn swap(x: &mut T, y: &mut T) { unsafe { - ptr::swap_nonoverlapping(x, y, 1); + ptr::swap_nonoverlapping_one(x, y); } } -/// Replaces the value at a mutable location with a new one, returning the old value, without -/// deinitializing either one. +/// Moves `src` into the referenced `dest`, returning the previous `dest` value. +/// +/// Neither value is dropped. /// /// # Examples /// @@ -834,7 +843,9 @@ pub unsafe fn transmute_copy(src: &T) -> U { /// Opaque type representing the discriminant of an enum. /// -/// See the `discriminant` function in this module for more information. +/// See the [`discriminant`] function in this module for more information. +/// +/// [`discriminant`]: fn.discriminant.html #[stable(feature = "discriminant_value", since = "1.21.0")] pub struct Discriminant(u64, PhantomData T>); @@ -907,7 +918,6 @@ pub fn discriminant(v: &T) -> Discriminant { } } - /// A wrapper to inhibit compiler from automatically calling `T`’s destructor. /// /// This wrapper is 0-cost. @@ -944,9 +954,12 @@ pub fn discriminant(v: &T) -> Discriminant { /// } /// ``` #[stable(feature = "manually_drop", since = "1.20.0")] -#[allow(unions_with_drop_fields)] -#[derive(Copy)] -pub union ManuallyDrop{ value: T } +#[lang = "manually_drop"] +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(transparent)] +pub struct ManuallyDrop { + value: T, +} impl ManuallyDrop { /// Wrap a value to be manually dropped. 
@@ -958,9 +971,10 @@ impl ManuallyDrop { /// ManuallyDrop::new(Box::new(())); /// ``` #[stable(feature = "manually_drop", since = "1.20.0")] + #[rustc_const_unstable(feature = "const_manually_drop_new")] #[inline] - pub fn new(value: T) -> ManuallyDrop { - ManuallyDrop { value: value } + pub const fn new(value: T) -> ManuallyDrop { + ManuallyDrop { value } } /// Extract the value from the ManuallyDrop container. @@ -975,11 +989,11 @@ impl ManuallyDrop { #[stable(feature = "manually_drop", since = "1.20.0")] #[inline] pub fn into_inner(slot: ManuallyDrop) -> T { - unsafe { - slot.value - } + slot.value } +} +impl ManuallyDrop { /// Manually drops the contained value. /// /// # Safety @@ -995,112 +1009,161 @@ impl ManuallyDrop { } #[stable(feature = "manually_drop", since = "1.20.0")] -impl Deref for ManuallyDrop { +impl Deref for ManuallyDrop { type Target = T; #[inline] fn deref(&self) -> &Self::Target { - unsafe { - &self.value - } + &self.value } } #[stable(feature = "manually_drop", since = "1.20.0")] -impl DerefMut for ManuallyDrop { +impl DerefMut for ManuallyDrop { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { - &mut self.value - } + &mut self.value } } -#[stable(feature = "manually_drop", since = "1.20.0")] -impl ::fmt::Debug for ManuallyDrop { - fn fmt(&self, fmt: &mut ::fmt::Formatter) -> ::fmt::Result { - unsafe { - fmt.debug_tuple("ManuallyDrop").field(&self.value).finish() - } - } -} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl Clone for ManuallyDrop { - fn clone(&self) -> Self { - ManuallyDrop::new(self.deref().clone()) - } - - fn clone_from(&mut self, source: &Self) { - self.deref_mut().clone_from(source); - } -} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl Default for ManuallyDrop { - fn default() -> Self { - ManuallyDrop::new(Default::default()) - } -} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl PartialEq for ManuallyDrop { - fn eq(&self, other: &Self) 
-> bool { - self.deref().eq(other) - } - - fn ne(&self, other: &Self) -> bool { - self.deref().ne(other) - } -} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl Eq for ManuallyDrop {} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl PartialOrd for ManuallyDrop { - fn partial_cmp(&self, other: &Self) -> Option<::cmp::Ordering> { - self.deref().partial_cmp(other) - } - - fn lt(&self, other: &Self) -> bool { - self.deref().lt(other) - } - - fn le(&self, other: &Self) -> bool { - self.deref().le(other) - } - - fn gt(&self, other: &Self) -> bool { - self.deref().gt(other) - } - - fn ge(&self, other: &Self) -> bool { - self.deref().ge(other) - } -} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl Ord for ManuallyDrop { - fn cmp(&self, other: &Self) -> ::cmp::Ordering { - self.deref().cmp(other) - } -} - -#[stable(feature = "manually_drop_impls", since = "1.22.0")] -impl ::hash::Hash for ManuallyDrop { - fn hash(&self, state: &mut H) { - self.deref().hash(state); - } -} - -/// Tells LLVM that this point in the code is not reachable, enabling further -/// optimizations. +/// A pinned reference. /// -/// NB: This is very different from the `unreachable!()` macro: Unlike the -/// macro, which panics when it is executed, it is *undefined behavior* to -/// reach code marked with this function. -#[inline] -#[unstable(feature = "unreachable", issue = "43751")] -pub unsafe fn unreachable() -> ! { - intrinsics::unreachable() +/// A pinned reference is a lot like a mutable reference, except that it is not +/// safe to move a value out of a pinned reference unless the type of that +/// value implements the `Unpin` trait. 
+#[unstable(feature = "pin", issue = "49150")] +#[fundamental] +pub struct PinMut<'a, T: ?Sized + 'a> { + inner: &'a mut T, +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized + Unpin> PinMut<'a, T> { + /// Construct a new `PinMut` around a reference to some data of a type that + /// implements `Unpin`. + #[unstable(feature = "pin", issue = "49150")] + pub fn new(reference: &'a mut T) -> PinMut<'a, T> { + PinMut { inner: reference } + } + + /// Get a mutable reference to the data inside of this `PinMut`. + #[unstable(feature = "pin", issue = "49150")] + pub fn get_mut(this: PinMut<'a, T>) -> &'a mut T { + this.inner + } +} + + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized> PinMut<'a, T> { + /// Construct a new `PinMut` around a reference to some data of a type that + /// may or may not implement `Unpin`. + /// + /// This constructor is unsafe because we do not know what will happen with + /// that data after the reference ends. If you cannot guarantee that the + /// data will never move again, calling this constructor is invalid. + #[unstable(feature = "pin", issue = "49150")] + pub unsafe fn new_unchecked(reference: &'a mut T) -> PinMut<'a, T> { + PinMut { inner: reference } + } + + /// Reborrow a `PinMut` for a shorter lifetime. + /// + /// For example, `PinMut::get_mut(x.reborrow())` (unsafely) returns a + /// short-lived mutable reference reborrowing from `x`. + #[unstable(feature = "pin", issue = "49150")] + pub fn reborrow<'b>(&'b mut self) -> PinMut<'b, T> { + PinMut { inner: self.inner } + } + + /// Get a mutable reference to the data inside of this `PinMut`. + /// + /// This function is unsafe. You must guarantee that you will never move + /// the data out of the mutable reference you receive when you call this + /// function. 
+ #[unstable(feature = "pin", issue = "49150")] + pub unsafe fn get_mut_unchecked(this: PinMut<'a, T>) -> &'a mut T { + this.inner + } + + /// Construct a new pin by mapping the interior value. + /// + /// For example, if you wanted to get a `PinMut` of a field of something, + /// you could use this to get access to that field in one line of code. + /// + /// This function is unsafe. You must guarantee that the data you return + /// will not move so long as the argument value does not move (for example, + /// because it is one of the fields of that value), and also that you do + /// not move out of the argument you receive to the interior function. + #[unstable(feature = "pin", issue = "49150")] + pub unsafe fn map_unchecked(this: PinMut<'a, T>, f: F) -> PinMut<'a, U> where + F: FnOnce(&mut T) -> &mut U + { + PinMut { inner: f(this.inner) } + } + + /// Assign a new value to the memory behind the pinned reference. + #[unstable(feature = "pin", issue = "49150")] + pub fn set(this: PinMut<'a, T>, value: T) + where T: Sized, + { + *this.inner = value; + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized> Deref for PinMut<'a, T> { + type Target = T; + + fn deref(&self) -> &T { + &*self.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized + Unpin> DerefMut for PinMut<'a, T> { + fn deref_mut(&mut self) -> &mut T { + self.inner + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: fmt::Debug + ?Sized> fmt::Debug for PinMut<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: fmt::Display + ?Sized> fmt::Display for PinMut<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized> fmt::Pointer for PinMut<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + 
fmt::Pointer::fmt(&(&*self.inner as *const T), f) + } +} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized> for PinMut<'a, T> {} + +#[unstable(feature = "pin", issue = "49150")] +impl<'a, T: ?Sized> Unpin for PinMut<'a, T> {} + +#[unstable(feature = "futures_api", issue = "50547")] +unsafe impl<'a, T, F> UnsafeFutureObj<'a, T> for PinMut<'a, F> + where F: Future + 'a +{ + fn into_raw(self) -> *mut () { + unsafe { PinMut::get_mut_unchecked(self) as *mut F as *mut () } + } + + unsafe fn poll(ptr: *mut (), cx: &mut Context) -> Poll { + PinMut::new_unchecked(&mut *(ptr as *mut F)).poll(cx) + } + + unsafe fn drop(_ptr: *mut ()) {} } diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 2c966eb3b579..cc36ea7f7139 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -9,105 +9,14 @@ // except according to those terms. //! Exposes the NonZero lang item which provides optimization hints. -#![unstable(feature = "nonzero", - reason = "needs an RFC to flesh out the design", - issue = "27730")] use ops::CoerceUnsized; -/// Unsafe trait to indicate what types are usable with the NonZero struct -pub unsafe trait Zeroable { - /// Whether this value is zero - fn is_zero(&self) -> bool; -} - -macro_rules! impl_zeroable_for_pointer_types { - ( $( $Ptr: ty )+ ) => { - $( - /// For fat pointers to be considered "zero", only the "data" part needs to be null. - unsafe impl Zeroable for $Ptr { - #[inline] - fn is_zero(&self) -> bool { - (*self).is_null() - } - } - )+ - } -} - -macro_rules! impl_zeroable_for_integer_types { - ( $( $Int: ty )+ ) => { - $( - unsafe impl Zeroable for $Int { - #[inline] - fn is_zero(&self) -> bool { - *self == 0 - } - } - )+ - } -} - -impl_zeroable_for_pointer_types! { - *const T - *mut T -} - -impl_zeroable_for_integer_types! 
{ - usize u8 u16 u32 u64 u128 - isize i8 i16 i32 i64 i128 -} - /// A wrapper type for raw pointers and integers that will never be /// NULL or 0 that might allow certain optimizations. #[lang = "non_zero"] -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)] -pub struct NonZero(T); +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[repr(transparent)] +pub(crate) struct NonZero(pub(crate) T); -impl NonZero { - /// Creates an instance of NonZero with the provided value. - /// You must indeed ensure that the value is actually "non-zero". - #[unstable(feature = "nonzero", - reason = "needs an RFC to flesh out the design", - issue = "27730")] - #[inline] - pub const unsafe fn new_unchecked(inner: T) -> Self { - NonZero(inner) - } - - /// Creates an instance of NonZero with the provided value. - #[inline] - pub fn new(inner: T) -> Option { - if inner.is_zero() { - None - } else { - Some(NonZero(inner)) - } - } - - /// Gets the inner value. - pub fn get(self) -> T { - self.0 - } -} - -impl, U: Zeroable> CoerceUnsized> for NonZero {} - -impl<'a, T: ?Sized> From<&'a mut T> for NonZero<*mut T> { - fn from(reference: &'a mut T) -> Self { - NonZero(reference) - } -} - -impl<'a, T: ?Sized> From<&'a mut T> for NonZero<*const T> { - fn from(reference: &'a mut T) -> Self { - let ptr: *mut T = reference; - NonZero(ptr) - } -} - -impl<'a, T: ?Sized> From<&'a T> for NonZero<*const T> { - fn from(reference: &'a T) -> Self { - NonZero(reference) - } -} +impl, U> CoerceUnsized> for NonZero {} diff --git a/src/libcore/num/dec2flt/parse.rs b/src/libcore/num/dec2flt/parse.rs index d20986faa0fc..e7ed94d4d91c 100644 --- a/src/libcore/num/dec2flt/parse.rs +++ b/src/libcore/num/dec2flt/parse.rs @@ -40,7 +40,7 @@ pub struct Decimal<'a> { impl<'a> Decimal<'a> { pub fn new(integral: &'a [u8], fractional: &'a [u8], exp: i64) -> Decimal<'a> { - Decimal { integral: integral, fractional: fractional, exp: exp } + Decimal { integral, fractional, exp } } } @@ -73,7 +73,8 @@ 
pub fn parse_decimal(s: &str) -> ParseResult { } Some(&b'.') => { let (fractional, s) = eat_digits(&s[1..]); - if integral.is_empty() && fractional.is_empty() && s.is_empty() { + if integral.is_empty() && fractional.is_empty() { + // We require at least a single digit before or after the point. return Invalid; } diff --git a/src/libcore/num/dec2flt/rawfp.rs b/src/libcore/num/dec2flt/rawfp.rs index 12960fed0455..38f4e4687a99 100644 --- a/src/libcore/num/dec2flt/rawfp.rs +++ b/src/libcore/num/dec2flt/rawfp.rs @@ -27,14 +27,13 @@ //! Many functions in this module only handle normal numbers. The dec2flt routines conservatively //! take the universally-correct slow path (Algorithm M) for very small and very large numbers. //! That algorithm needs only next_float() which does handle subnormals and zeros. -use u32; use cmp::Ordering::{Less, Equal, Greater}; -use ops::{Mul, Div, Neg}; +use convert::{TryFrom, TryInto}; +use ops::{Add, Mul, Div, Neg}; use fmt::{Debug, LowerExp}; -use mem::transmute; use num::diy_float::Fp; use num::FpCategory::{Infinite, Zero, Subnormal, Normal, Nan}; -use num::Float; +use num::FpCategory; use num::dec2flt::num::{self, Big}; use num::dec2flt::table; @@ -46,7 +45,7 @@ pub struct Unpacked { impl Unpacked { pub fn new(sig: u64, k: i16) -> Self { - Unpacked { sig: sig, k: k } + Unpacked { sig, k } } } @@ -55,23 +54,33 @@ impl Unpacked { /// See the parent module's doc comment for why this is necessary. /// /// Should **never ever** be implemented for other types or be used outside the dec2flt module. -/// Inherits from `Float` because there is some overlap, but all the reused methods are trivial. -pub trait RawFloat : Float + Copy + Debug + LowerExp - + Mul + Div + Neg +pub trait RawFloat + : Copy + + Debug + + LowerExp + + Mul + + Div + + Neg { const INFINITY: Self; const NAN: Self; const ZERO: Self; + /// Type used by `to_bits` and `from_bits`. + type Bits: Add + From + TryFrom; + + /// Raw transmutation to integer. 
+ fn to_bits(self) -> Self::Bits; + + /// Raw transmutation from integer. + fn from_bits(v: Self::Bits) -> Self; + + /// Returns the category that this number falls into. + fn classify(self) -> FpCategory; + /// Returns the mantissa, exponent and sign as integers. fn integer_decode(self) -> (u64, i16, i8); - /// Get the raw binary representation of the float. - fn transmute(self) -> u64; - - /// Transmute the raw binary representation into a float. - fn from_bits(bits: u64) -> Self; - /// Decode the float. fn unpack(self) -> Unpacked; @@ -149,6 +158,8 @@ macro_rules! other_constants { } impl RawFloat for f32 { + type Bits = u32; + const SIG_BITS: u8 = 24; const EXP_BITS: u8 = 8; const CEIL_LOG5_OF_MAX_SIG: i16 = 11; @@ -159,7 +170,7 @@ impl RawFloat for f32 { /// Returns the mantissa, exponent and sign as integers. fn integer_decode(self) -> (u64, i16, i8) { - let bits: u32 = unsafe { transmute(self) }; + let bits = self.to_bits(); let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 }; let mut exponent: i16 = ((bits >> 23) & 0xff) as i16; let mantissa = if exponent == 0 { @@ -172,16 +183,6 @@ impl RawFloat for f32 { (mantissa as u64, exponent, sign) } - fn transmute(self) -> u64 { - let bits: u32 = unsafe { transmute(self) }; - bits as u64 - } - - fn from_bits(bits: u64) -> f32 { - assert!(bits < u32::MAX as u64, "f32::from_bits: too many bits"); - unsafe { transmute(bits as u32) } - } - fn unpack(self) -> Unpacked { let (sig, exp, _sig) = self.integer_decode(); Unpacked::new(sig, exp) @@ -196,10 +197,16 @@ impl RawFloat for f32 { fn short_fast_pow10(e: usize) -> Self { table::F32_SHORT_POWERS[e] } + + fn classify(self) -> FpCategory { self.classify() } + fn to_bits(self) -> Self::Bits { self.to_bits() } + fn from_bits(v: Self::Bits) -> Self { Self::from_bits(v) } } impl RawFloat for f64 { + type Bits = u64; + const SIG_BITS: u8 = 53; const EXP_BITS: u8 = 11; const CEIL_LOG5_OF_MAX_SIG: i16 = 23; @@ -210,7 +217,7 @@ impl RawFloat for f64 { /// Returns the mantissa, 
exponent and sign as integers. fn integer_decode(self) -> (u64, i16, i8) { - let bits: u64 = unsafe { transmute(self) }; + let bits = self.to_bits(); let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 }; let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16; let mantissa = if exponent == 0 { @@ -223,15 +230,6 @@ impl RawFloat for f64 { (mantissa, exponent, sign) } - fn transmute(self) -> u64 { - let bits: u64 = unsafe { transmute(self) }; - bits - } - - fn from_bits(bits: u64) -> f64 { - unsafe { transmute(bits) } - } - fn unpack(self) -> Unpacked { let (sig, exp, _sig) = self.integer_decode(); Unpacked::new(sig, exp) @@ -246,6 +244,10 @@ impl RawFloat for f64 { fn short_fast_pow10(e: usize) -> Self { table::F64_SHORT_POWERS[e] } + + fn classify(self) -> FpCategory { self.classify() } + fn to_bits(self) -> Self::Bits { self.to_bits() } + fn from_bits(v: Self::Bits) -> Self { Self::from_bits(v) } } /// Convert an Fp to the closest machine float type. @@ -296,14 +298,14 @@ pub fn encode_normal(x: Unpacked) -> T { "encode_normal: exponent out of range"); // Leave sign bit at 0 ("+"), our numbers are all positive let bits = (k_enc as u64) << T::EXPLICIT_SIG_BITS | sig_enc; - T::from_bits(bits) + T::from_bits(bits.try_into().unwrap_or_else(|_| unreachable!())) } /// Construct a subnormal. A mantissa of 0 is allowed and constructs zero. pub fn encode_subnormal(significand: u64) -> T { assert!(significand < T::MIN_SIG, "encode_subnormal: not actually subnormal"); // Encoded exponent is 0, the sign bit is 0, so we just have to reinterpret the bits. - T::from_bits(significand) + T::from_bits(significand.try_into().unwrap_or_else(|_| unreachable!())) } /// Approximate a bignum with an Fp. Rounds within 0.5 ULP with half-to-even. @@ -315,13 +317,13 @@ pub fn big_to_fp(f: &Big) -> Fp { // We cut off all bits prior to the index `start`, i.e., we effectively right-shift by // an amount of `start`, so this is also the exponent we need. 
let e = start as i16; - let rounded_down = Fp { f: leading, e: e }.normalize(); + let rounded_down = Fp { f: leading, e }.normalize(); // Round (half-to-even) depending on the truncated bits. match num::compare_with_half_ulp(f, start) { Less => rounded_down, Equal if leading % 2 == 0 => rounded_down, Equal | Greater => match leading.checked_add(1) { - Some(f) => Fp { f: f, e: e }.normalize(), + Some(f) => Fp { f, e }.normalize(), None => Fp { f: 1 << 63, e: e + 1 }, } } @@ -363,8 +365,7 @@ pub fn next_float(x: T) -> T { // too is exactly what we want! // Finally, f64::MAX + 1 = 7eff...f + 1 = 7ff0...0 = f64::INFINITY. Zero | Subnormal | Normal => { - let bits: u64 = x.transmute(); - T::from_bits(bits + 1) + T::from_bits(x.to_bits() + T::Bits::from(1u8)) } } } diff --git a/src/libcore/num/diy_float.rs b/src/libcore/num/diy_float.rs index 97bcba2f2ffb..b0561da5934c 100644 --- a/src/libcore/num/diy_float.rs +++ b/src/libcore/num/diy_float.rs @@ -42,7 +42,7 @@ impl Fp { let tmp = (bd >> 32) + (ad & MASK) + (bc & MASK) + (1 << 31) /* round */; let f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32); let e = self.e + other.e + 64; - Fp { f: f, e: e } + Fp { f, e } } /// Normalizes itself so that the resulting mantissa is at least `2^63`. @@ -74,7 +74,7 @@ impl Fp { e -= 1; } debug_assert!(f >= (1 >> 63)); - Fp { f: f, e: e } + Fp { f, e } } /// Normalizes itself to have the shared exponent. diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs index 0dc58d61e496..577c823f9a06 100644 --- a/src/libcore/num/f32.rs +++ b/src/libcore/num/f32.rs @@ -11,16 +11,14 @@ //! This module provides constants which are specific to the implementation //! of the `f32` floating point data type. //! -//! Mathematically significant numbers are provided in the `consts` sub-module. -//! //! *[See also the `f32` primitive type](../../std/primitive.f32.html).* +//! +//! Mathematically significant numbers are provided in the `consts` sub-module. 
#![stable(feature = "rust1", since = "1.0.0")] -use intrinsics; use mem; -use num::Float; -use num::FpCategory as Fp; +use num::FpCategory; /// The radix or base of the internal representation of `f32`. #[stable(feature = "rust1", since = "1.0.0")] @@ -33,7 +31,11 @@ pub const MANTISSA_DIGITS: u32 = 24; #[stable(feature = "rust1", since = "1.0.0")] pub const DIGITS: u32 = 6; -/// Difference between `1.0` and the next largest representable number. +/// [Machine epsilon] value for `f32`. +/// +/// This is the difference between `1.0` and the next largest representable number. +/// +/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon #[stable(feature = "rust1", since = "1.0.0")] pub const EPSILON: f32 = 1.19209290e-07_f32; @@ -128,10 +130,18 @@ pub mod consts { #[stable(feature = "rust1", since = "1.0.0")] pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32; + /// log2(10) + #[unstable(feature = "extra_log_consts", issue = "50540")] + pub const LOG2_10: f32 = 3.32192809488736234787031942948939018_f32; + /// log10(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32; + /// log10(2) + #[unstable(feature = "extra_log_consts", issue = "50540")] + pub const LOG10_2: f32 = 0.301029995663981195213738894724493027_f32; + /// ln(2) #[stable(feature = "rust1", since = "1.0.0")] pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32; @@ -141,120 +151,227 @@ pub mod consts { pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32; } -#[unstable(feature = "core_float", - reason = "stable interface is via `impl f{32,64}` in later crates", - issue = "32110")] -impl Float for f32 { - /// Returns `true` if the number is NaN. +#[lang = "f32"] +#[cfg(not(test))] +impl f32 { + /// Returns `true` if this value is `NaN` and false otherwise. 
+ /// + /// ``` + /// use std::f32; + /// + /// let nan = f32::NAN; + /// let f = 7.0_f32; + /// + /// assert!(nan.is_nan()); + /// assert!(!f.is_nan()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_nan(self) -> bool { + pub fn is_nan(self) -> bool { self != self } - /// Returns `true` if the number is infinite. + /// Returns `true` if this value is positive infinity or negative infinity and + /// false otherwise. + /// + /// ``` + /// use std::f32; + /// + /// let f = 7.0f32; + /// let inf = f32::INFINITY; + /// let neg_inf = f32::NEG_INFINITY; + /// let nan = f32::NAN; + /// + /// assert!(!f.is_infinite()); + /// assert!(!nan.is_infinite()); + /// + /// assert!(inf.is_infinite()); + /// assert!(neg_inf.is_infinite()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_infinite(self) -> bool { + pub fn is_infinite(self) -> bool { self == INFINITY || self == NEG_INFINITY } - /// Returns `true` if the number is neither infinite or NaN. + /// Returns `true` if this number is neither infinite nor `NaN`. + /// + /// ``` + /// use std::f32; + /// + /// let f = 7.0f32; + /// let inf = f32::INFINITY; + /// let neg_inf = f32::NEG_INFINITY; + /// let nan = f32::NAN; + /// + /// assert!(f.is_finite()); + /// + /// assert!(!nan.is_finite()); + /// assert!(!inf.is_finite()); + /// assert!(!neg_inf.is_finite()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_finite(self) -> bool { + pub fn is_finite(self) -> bool { !(self.is_nan() || self.is_infinite()) } - /// Returns `true` if the number is neither zero, infinite, subnormal or NaN. + /// Returns `true` if the number is neither zero, infinite, + /// [subnormal][subnormal], or `NaN`. 
+ /// + /// ``` + /// use std::f32; + /// + /// let min = f32::MIN_POSITIVE; // 1.17549435e-38f32 + /// let max = f32::MAX; + /// let lower_than_min = 1.0e-40_f32; + /// let zero = 0.0_f32; + /// + /// assert!(min.is_normal()); + /// assert!(max.is_normal()); + /// + /// assert!(!zero.is_normal()); + /// assert!(!f32::NAN.is_normal()); + /// assert!(!f32::INFINITY.is_normal()); + /// // Values between `0` and `min` are Subnormal. + /// assert!(!lower_than_min.is_normal()); + /// ``` + /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_normal(self) -> bool { - self.classify() == Fp::Normal + pub fn is_normal(self) -> bool { + self.classify() == FpCategory::Normal } /// Returns the floating point category of the number. If only one property /// is going to be tested, it is generally faster to use the specific /// predicate instead. - fn classify(self) -> Fp { + /// + /// ``` + /// use std::num::FpCategory; + /// use std::f32; + /// + /// let num = 12.4_f32; + /// let inf = f32::INFINITY; + /// + /// assert_eq!(num.classify(), FpCategory::Normal); + /// assert_eq!(inf.classify(), FpCategory::Infinite); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn classify(self) -> FpCategory { const EXP_MASK: u32 = 0x7f800000; const MAN_MASK: u32 = 0x007fffff; - let bits: u32 = unsafe { mem::transmute(self) }; + let bits = self.to_bits(); match (bits & MAN_MASK, bits & EXP_MASK) { - (0, 0) => Fp::Zero, - (_, 0) => Fp::Subnormal, - (0, EXP_MASK) => Fp::Infinite, - (_, EXP_MASK) => Fp::Nan, - _ => Fp::Normal, - } - } - - /// Computes the absolute value of `self`. Returns `Float::nan()` if the - /// number is `Float::nan()`. - #[inline] - fn abs(self) -> f32 { - unsafe { intrinsics::fabsf32(self) } - } - - /// Returns a number that represents the sign of `self`. 
- /// - /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()` - /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()` - /// - `Float::nan()` if the number is `Float::nan()` - #[inline] - fn signum(self) -> f32 { - if self.is_nan() { - NAN - } else { - unsafe { intrinsics::copysignf32(1.0, self) } + (0, 0) => FpCategory::Zero, + (_, 0) => FpCategory::Subnormal, + (0, EXP_MASK) => FpCategory::Infinite, + (_, EXP_MASK) => FpCategory::Nan, + _ => FpCategory::Normal, } } /// Returns `true` if and only if `self` has a positive sign, including `+0.0`, `NaN`s with /// positive sign bit and positive infinity. + /// + /// ``` + /// let f = 7.0_f32; + /// let g = -7.0_f32; + /// + /// assert!(f.is_sign_positive()); + /// assert!(!g.is_sign_positive()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_sign_positive(self) -> bool { + pub fn is_sign_positive(self) -> bool { !self.is_sign_negative() } /// Returns `true` if and only if `self` has a negative sign, including `-0.0`, `NaN`s with /// negative sign bit and negative infinity. + /// + /// ``` + /// let f = 7.0f32; + /// let g = -7.0f32; + /// + /// assert!(!f.is_sign_negative()); + /// assert!(g.is_sign_negative()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_sign_negative(self) -> bool { + pub fn is_sign_negative(self) -> bool { // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus // applies to zeros and NaNs as well. - #[repr(C)] - union F32Bytes { - f: f32, - b: u32 - } - unsafe { F32Bytes { f: self }.b & 0x8000_0000 != 0 } + self.to_bits() & 0x8000_0000 != 0 } - /// Returns the reciprocal (multiplicative inverse) of the number. + /// Takes the reciprocal (inverse) of a number, `1/x`. 
+ /// + /// ``` + /// use std::f32; + /// + /// let x = 2.0_f32; + /// let abs_difference = (x.recip() - (1.0/x)).abs(); + /// + /// assert!(abs_difference <= f32::EPSILON); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn recip(self) -> f32 { + pub fn recip(self) -> f32 { 1.0 / self } + /// Converts radians to degrees. + /// + /// ``` + /// use std::f32::{self, consts}; + /// + /// let angle = consts::PI; + /// + /// let abs_difference = (angle.to_degrees() - 180.0).abs(); + /// + /// assert!(abs_difference <= f32::EPSILON); + /// ``` + #[stable(feature = "f32_deg_rad_conversions", since="1.7.0")] #[inline] - fn powi(self, n: i32) -> f32 { - unsafe { intrinsics::powif32(self, n) } + pub fn to_degrees(self) -> f32 { + // Use a constant for better precision. + const PIS_IN_180: f32 = 57.2957795130823208767981548141051703_f32; + self * PIS_IN_180 } - /// Converts to degrees, assuming the number is in radians. + /// Converts degrees to radians. + /// + /// ``` + /// use std::f32::{self, consts}; + /// + /// let angle = 180.0f32; + /// + /// let abs_difference = (angle.to_radians() - consts::PI).abs(); + /// + /// assert!(abs_difference <= f32::EPSILON); + /// ``` + #[stable(feature = "f32_deg_rad_conversions", since="1.7.0")] #[inline] - fn to_degrees(self) -> f32 { - self * (180.0f32 / consts::PI) - } - - /// Converts to radians, assuming the number is in degrees. - #[inline] - fn to_radians(self) -> f32 { + pub fn to_radians(self) -> f32 { let value: f32 = consts::PI; self * (value / 180.0f32) } /// Returns the maximum of the two numbers. + /// + /// ``` + /// let x = 1.0f32; + /// let y = 2.0f32; + /// + /// assert_eq!(x.max(y), y); + /// ``` + /// + /// If one of the arguments is NaN, then the other argument is returned. 
+ #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn max(self, other: f32) -> f32 { + pub fn max(self, other: f32) -> f32 { // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it // is either x or y, canonicalized (this means results might differ among implementations). @@ -267,8 +384,18 @@ impl Float for f32 { } /// Returns the minimum of the two numbers. + /// + /// ``` + /// let x = 1.0f32; + /// let y = 2.0f32; + /// + /// assert_eq!(x.min(y), x); + /// ``` + /// + /// If one of the arguments is NaN, then the other argument is returned. + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn min(self, other: f32) -> f32 { + pub fn min(self, other: f32) -> f32 { // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it // is either x or y, canonicalized (this means results might differ among implementations). @@ -279,4 +406,72 @@ impl Float for f32 { // multiplying by 1.0. Should switch to the `canonicalize` when it works. (if other.is_nan() || self < other { self } else { other }) * 1.0 } + + /// Raw transmutation to `u32`. + /// + /// This is currently identical to `transmute::(self)` on all platforms. + /// + /// See `from_bits` for some discussion of the portability of this operation + /// (there are almost no issues). + /// + /// Note that this function is distinct from `as` casting, which attempts to + /// preserve the *numeric* value, and not the bitwise value. + /// + /// # Examples + /// + /// ``` + /// assert_ne!((1f32).to_bits(), 1f32 as u32); // to_bits() is not casting! 
+ /// assert_eq!((12.5f32).to_bits(), 0x41480000); + /// + /// ``` + #[stable(feature = "float_bits_conv", since = "1.20.0")] + #[inline] + pub fn to_bits(self) -> u32 { + unsafe { mem::transmute(self) } + } + + /// Raw transmutation from `u32`. + /// + /// This is currently identical to `transmute::(v)` on all platforms. + /// It turns out this is incredibly portable, for two reasons: + /// + /// * Floats and Ints have the same endianness on all supported platforms. + /// * IEEE-754 very precisely specifies the bit layout of floats. + /// + /// However there is one caveat: prior to the 2008 version of IEEE-754, how + /// to interpret the NaN signaling bit wasn't actually specified. Most platforms + /// (notably x86 and ARM) picked the interpretation that was ultimately + /// standardized in 2008, but some didn't (notably MIPS). As a result, all + /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa. + /// + /// Rather than trying to preserve signaling-ness cross-platform, this + /// implementation favours preserving the exact bits. This means that + /// any payloads encoded in NaNs will be preserved even if the result of + /// this method is sent over the network from an x86 machine to a MIPS one. + /// + /// If the results of this method are only manipulated by the same + /// architecture that produced them, then there is no portability concern. + /// + /// If the input isn't NaN, then there is no portability concern. + /// + /// If you don't care about signalingness (very likely), then there is no + /// portability concern. + /// + /// Note that this function is distinct from `as` casting, which attempts to + /// preserve the *numeric* value, and not the bitwise value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::f32; + /// let v = f32::from_bits(0x41480000); + /// let difference = (v - 12.5).abs(); + /// assert!(difference <= 1e-5); + /// ``` + #[stable(feature = "float_bits_conv", since = "1.20.0")] + #[inline] + pub fn from_bits(v: u32) -> Self { + // It turns out the safety issues with sNaN were overblown! Hooray! + unsafe { mem::transmute(v) } + } } diff --git a/src/libcore/num/f64.rs b/src/libcore/num/f64.rs index 0e76000efe9d..b8e3dd6ed646 100644 --- a/src/libcore/num/f64.rs +++ b/src/libcore/num/f64.rs @@ -11,16 +11,14 @@ //! This module provides constants which are specific to the implementation //! of the `f64` floating point data type. //! -//! Mathematically significant numbers are provided in the `consts` sub-module. -//! //! *[See also the `f64` primitive type](../../std/primitive.f64.html).* +//! +//! Mathematically significant numbers are provided in the `consts` sub-module. #![stable(feature = "rust1", since = "1.0.0")] -use intrinsics; use mem; -use num::FpCategory as Fp; -use num::Float; +use num::FpCategory; /// The radix or base of the internal representation of `f64`. #[stable(feature = "rust1", since = "1.0.0")] @@ -33,7 +31,11 @@ pub const MANTISSA_DIGITS: u32 = 53; #[stable(feature = "rust1", since = "1.0.0")] pub const DIGITS: u32 = 15; -/// Difference between `1.0` and the next largest representable number. +/// [Machine epsilon] value for `f64`. +/// +/// This is the difference between `1.0` and the next largest representable number. 
+/// +/// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon #[stable(feature = "rust1", since = "1.0.0")] pub const EPSILON: f64 = 2.2204460492503131e-16_f64; @@ -124,10 +126,18 @@ pub mod consts { #[stable(feature = "rust1", since = "1.0.0")] pub const E: f64 = 2.71828182845904523536028747135266250_f64; + /// log2(10) + #[unstable(feature = "extra_log_consts", issue = "50540")] + pub const LOG2_10: f64 = 3.32192809488736234787031942948939018_f64; + /// log2(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG2_E: f64 = 1.44269504088896340735992468100189214_f64; + /// log10(2) + #[unstable(feature = "extra_log_consts", issue = "50540")] + pub const LOG10_2: f64 = 0.301029995663981195213738894724493027_f64; + /// log10(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG10_E: f64 = 0.434294481903251827651128918916605082_f64; @@ -141,118 +151,240 @@ pub mod consts { pub const LN_10: f64 = 2.30258509299404568401799145468436421_f64; } -#[unstable(feature = "core_float", - reason = "stable interface is via `impl f{32,64}` in later crates", - issue = "32110")] -impl Float for f64 { - /// Returns `true` if the number is NaN. +#[lang = "f64"] +#[cfg(not(test))] +impl f64 { + /// Returns `true` if this value is `NaN` and false otherwise. + /// + /// ``` + /// use std::f64; + /// + /// let nan = f64::NAN; + /// let f = 7.0_f64; + /// + /// assert!(nan.is_nan()); + /// assert!(!f.is_nan()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_nan(self) -> bool { + pub fn is_nan(self) -> bool { self != self } - /// Returns `true` if the number is infinite. + /// Returns `true` if this value is positive infinity or negative infinity and + /// false otherwise. 
+ /// + /// ``` + /// use std::f64; + /// + /// let f = 7.0f64; + /// let inf = f64::INFINITY; + /// let neg_inf = f64::NEG_INFINITY; + /// let nan = f64::NAN; + /// + /// assert!(!f.is_infinite()); + /// assert!(!nan.is_infinite()); + /// + /// assert!(inf.is_infinite()); + /// assert!(neg_inf.is_infinite()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_infinite(self) -> bool { + pub fn is_infinite(self) -> bool { self == INFINITY || self == NEG_INFINITY } - /// Returns `true` if the number is neither infinite or NaN. + /// Returns `true` if this number is neither infinite nor `NaN`. + /// + /// ``` + /// use std::f64; + /// + /// let f = 7.0f64; + /// let inf: f64 = f64::INFINITY; + /// let neg_inf: f64 = f64::NEG_INFINITY; + /// let nan: f64 = f64::NAN; + /// + /// assert!(f.is_finite()); + /// + /// assert!(!nan.is_finite()); + /// assert!(!inf.is_finite()); + /// assert!(!neg_inf.is_finite()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_finite(self) -> bool { + pub fn is_finite(self) -> bool { !(self.is_nan() || self.is_infinite()) } - /// Returns `true` if the number is neither zero, infinite, subnormal or NaN. + /// Returns `true` if the number is neither zero, infinite, + /// [subnormal][subnormal], or `NaN`. + /// + /// ``` + /// use std::f64; + /// + /// let min = f64::MIN_POSITIVE; // 2.2250738585072014e-308f64 + /// let max = f64::MAX; + /// let lower_than_min = 1.0e-308_f64; + /// let zero = 0.0f64; + /// + /// assert!(min.is_normal()); + /// assert!(max.is_normal()); + /// + /// assert!(!zero.is_normal()); + /// assert!(!f64::NAN.is_normal()); + /// assert!(!f64::INFINITY.is_normal()); + /// // Values between `0` and `min` are Subnormal. 
+ /// assert!(!lower_than_min.is_normal()); + /// ``` + /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_normal(self) -> bool { - self.classify() == Fp::Normal + pub fn is_normal(self) -> bool { + self.classify() == FpCategory::Normal } /// Returns the floating point category of the number. If only one property /// is going to be tested, it is generally faster to use the specific /// predicate instead. - fn classify(self) -> Fp { + /// + /// ``` + /// use std::num::FpCategory; + /// use std::f64; + /// + /// let num = 12.4_f64; + /// let inf = f64::INFINITY; + /// + /// assert_eq!(num.classify(), FpCategory::Normal); + /// assert_eq!(inf.classify(), FpCategory::Infinite); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn classify(self) -> FpCategory { const EXP_MASK: u64 = 0x7ff0000000000000; const MAN_MASK: u64 = 0x000fffffffffffff; - let bits: u64 = unsafe { mem::transmute(self) }; + let bits = self.to_bits(); match (bits & MAN_MASK, bits & EXP_MASK) { - (0, 0) => Fp::Zero, - (_, 0) => Fp::Subnormal, - (0, EXP_MASK) => Fp::Infinite, - (_, EXP_MASK) => Fp::Nan, - _ => Fp::Normal, - } - } - - /// Computes the absolute value of `self`. Returns `Float::nan()` if the - /// number is `Float::nan()`. - #[inline] - fn abs(self) -> f64 { - unsafe { intrinsics::fabsf64(self) } - } - - /// Returns a number that represents the sign of `self`. 
- /// - /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()` - /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()` - /// - `Float::nan()` if the number is `Float::nan()` - #[inline] - fn signum(self) -> f64 { - if self.is_nan() { - NAN - } else { - unsafe { intrinsics::copysignf64(1.0, self) } + (0, 0) => FpCategory::Zero, + (_, 0) => FpCategory::Subnormal, + (0, EXP_MASK) => FpCategory::Infinite, + (_, EXP_MASK) => FpCategory::Nan, + _ => FpCategory::Normal, } } /// Returns `true` if and only if `self` has a positive sign, including `+0.0`, `NaN`s with /// positive sign bit and positive infinity. + /// + /// ``` + /// let f = 7.0_f64; + /// let g = -7.0_f64; + /// + /// assert!(f.is_sign_positive()); + /// assert!(!g.is_sign_positive()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_sign_positive(self) -> bool { + pub fn is_sign_positive(self) -> bool { !self.is_sign_negative() } + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.0.0", reason = "renamed to is_sign_positive")] + #[inline] + #[doc(hidden)] + pub fn is_positive(self) -> bool { + self.is_sign_positive() + } + /// Returns `true` if and only if `self` has a negative sign, including `-0.0`, `NaN`s with /// negative sign bit and negative infinity. + /// + /// ``` + /// let f = 7.0_f64; + /// let g = -7.0_f64; + /// + /// assert!(!f.is_sign_negative()); + /// assert!(g.is_sign_negative()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn is_sign_negative(self) -> bool { - #[repr(C)] - union F64Bytes { - f: f64, - b: u64 - } - unsafe { F64Bytes { f: self }.b & 0x8000_0000_0000_0000 != 0 } + pub fn is_sign_negative(self) -> bool { + self.to_bits() & 0x8000_0000_0000_0000 != 0 } - /// Returns the reciprocal (multiplicative inverse) of the number. 
+ #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.0.0", reason = "renamed to is_sign_negative")] #[inline] - fn recip(self) -> f64 { + #[doc(hidden)] + pub fn is_negative(self) -> bool { + self.is_sign_negative() + } + + /// Takes the reciprocal (inverse) of a number, `1/x`. + /// + /// ``` + /// let x = 2.0_f64; + /// let abs_difference = (x.recip() - (1.0/x)).abs(); + /// + /// assert!(abs_difference < 1e-10); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn recip(self) -> f64 { 1.0 / self } + /// Converts radians to degrees. + /// + /// ``` + /// use std::f64::consts; + /// + /// let angle = consts::PI; + /// + /// let abs_difference = (angle.to_degrees() - 180.0).abs(); + /// + /// assert!(abs_difference < 1e-10); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn powi(self, n: i32) -> f64 { - unsafe { intrinsics::powif64(self, n) } - } - - /// Converts to degrees, assuming the number is in radians. - #[inline] - fn to_degrees(self) -> f64 { + pub fn to_degrees(self) -> f64 { + // The division here is correctly rounded with respect to the true + // value of 180/π. (This differs from f32, where a constant must be + // used to ensure a correctly rounded result.) self * (180.0f64 / consts::PI) } - /// Converts to radians, assuming the number is in degrees. + /// Converts degrees to radians. + /// + /// ``` + /// use std::f64::consts; + /// + /// let angle = 180.0_f64; + /// + /// let abs_difference = (angle.to_radians() - consts::PI).abs(); + /// + /// assert!(abs_difference < 1e-10); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn to_radians(self) -> f64 { + pub fn to_radians(self) -> f64 { let value: f64 = consts::PI; self * (value / 180.0) } /// Returns the maximum of the two numbers. 
+ /// + /// ``` + /// let x = 1.0_f64; + /// let y = 2.0_f64; + /// + /// assert_eq!(x.max(y), y); + /// ``` + /// + /// If one of the arguments is NaN, then the other argument is returned. + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn max(self, other: f64) -> f64 { + pub fn max(self, other: f64) -> f64 { // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it // is either x or y, canonicalized (this means results might differ among implementations). @@ -265,8 +397,18 @@ impl Float for f64 { } /// Returns the minimum of the two numbers. + /// + /// ``` + /// let x = 1.0_f64; + /// let y = 2.0_f64; + /// + /// assert_eq!(x.min(y), x); + /// ``` + /// + /// If one of the arguments is NaN, then the other argument is returned. + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn min(self, other: f64) -> f64 { + pub fn min(self, other: f64) -> f64 { // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it // is either x or y, canonicalized (this means results might differ among implementations). @@ -277,4 +419,72 @@ impl Float for f64 { // multiplying by 1.0. Should switch to the `canonicalize` when it works. (if other.is_nan() || self < other { self } else { other }) * 1.0 } + + /// Raw transmutation to `u64`. + /// + /// This is currently identical to `transmute::(self)` on all platforms. + /// + /// See `from_bits` for some discussion of the portability of this operation + /// (there are almost no issues). + /// + /// Note that this function is distinct from `as` casting, which attempts to + /// preserve the *numeric* value, and not the bitwise value. + /// + /// # Examples + /// + /// ``` + /// assert!((1f64).to_bits() != 1f64 as u64); // to_bits() is not casting! 
+ /// assert_eq!((12.5f64).to_bits(), 0x4029000000000000); + /// + /// ``` + #[stable(feature = "float_bits_conv", since = "1.20.0")] + #[inline] + pub fn to_bits(self) -> u64 { + unsafe { mem::transmute(self) } + } + + /// Raw transmutation from `u64`. + /// + /// This is currently identical to `transmute::(v)` on all platforms. + /// It turns out this is incredibly portable, for two reasons: + /// + /// * Floats and Ints have the same endianness on all supported platforms. + /// * IEEE-754 very precisely specifies the bit layout of floats. + /// + /// However there is one caveat: prior to the 2008 version of IEEE-754, how + /// to interpret the NaN signaling bit wasn't actually specified. Most platforms + /// (notably x86 and ARM) picked the interpretation that was ultimately + /// standardized in 2008, but some didn't (notably MIPS). As a result, all + /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa. + /// + /// Rather than trying to preserve signaling-ness cross-platform, this + /// implementation favours preserving the exact bits. This means that + /// any payloads encoded in NaNs will be preserved even if the result of + /// this method is sent over the network from an x86 machine to a MIPS one. + /// + /// If the results of this method are only manipulated by the same + /// architecture that produced them, then there is no portability concern. + /// + /// If the input isn't NaN, then there is no portability concern. + /// + /// If you don't care about signalingness (very likely), then there is no + /// portability concern. + /// + /// Note that this function is distinct from `as` casting, which attempts to + /// preserve the *numeric* value, and not the bitwise value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::f64; + /// let v = f64::from_bits(0x4029000000000000); + /// let difference = (v - 12.5).abs(); + /// assert!(difference <= 1e-5); + /// ``` + #[stable(feature = "float_bits_conv", since = "1.20.0")] + #[inline] + pub fn from_bits(v: u64) -> Self { + // It turns out the safety issues with sNaN were overblown! Hooray! + unsafe { mem::transmute(v) } + } } diff --git a/src/libcore/num/flt2dec/decoder.rs b/src/libcore/num/flt2dec/decoder.rs index b779eefce575..c34a56f288fd 100644 --- a/src/libcore/num/flt2dec/decoder.rs +++ b/src/libcore/num/flt2dec/decoder.rs @@ -77,8 +77,8 @@ pub fn decode(v: T) -> (/*negative?*/ bool, FullDecoded) { // neighbors: (mant - 2, exp) -- (mant, exp) -- (mant + 2, exp) // Float::integer_decode always preserves the exponent, // so the mantissa is scaled for subnormals. - FullDecoded::Finite(Decoded { mant: mant, minus: 1, plus: 1, - exp: exp, inclusive: even }) + FullDecoded::Finite(Decoded { mant, minus: 1, plus: 1, + exp, inclusive: even }) } FpCategory::Normal => { let minnorm = ::min_pos_norm_value().integer_decode(); diff --git a/src/libcore/num/flt2dec/mod.rs b/src/libcore/num/flt2dec/mod.rs index beaa6e140a69..21a2e72dac8c 100644 --- a/src/libcore/num/flt2dec/mod.rs +++ b/src/libcore/num/flt2dec/mod.rs @@ -424,20 +424,20 @@ pub fn to_shortest_str<'a, T, F>(mut format_shortest: F, v: T, match full_decoded { FullDecoded::Nan => { parts[0] = Part::Copy(b"NaN"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Infinite => { parts[0] = Part::Copy(b"inf"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Zero => { if frac_digits > 0 { // [0.][0000] parts[0] = Part::Copy(b"0."); parts[1] = Part::Zero(frac_digits); - Formatted { sign: sign, parts: &parts[..2] } + Formatted { sign, parts: &parts[..2] } } else { parts[0] = Part::Copy(b"0"); - Formatted { sign: sign, parts: &parts[..1] 
} + Formatted { sign, parts: &parts[..1] } } } FullDecoded::Finite(ref decoded) => { @@ -480,11 +480,11 @@ pub fn to_shortest_exp_str<'a, T, F>(mut format_shortest: F, v: T, match full_decoded { FullDecoded::Nan => { parts[0] = Part::Copy(b"NaN"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Infinite => { parts[0] = Part::Copy(b"inf"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Zero => { parts[0] = if dec_bounds.0 <= 0 && 0 < dec_bounds.1 { @@ -492,7 +492,7 @@ pub fn to_shortest_exp_str<'a, T, F>(mut format_shortest: F, v: T, } else { Part::Copy(if upper { b"0E0" } else { b"0e0" }) }; - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Finite(ref decoded) => { let (len, exp) = format_shortest(decoded, buf); @@ -502,7 +502,7 @@ pub fn to_shortest_exp_str<'a, T, F>(mut format_shortest: F, v: T, } else { digits_to_exp_str(&buf[..len], exp, 0, upper, parts) }; - Formatted { sign: sign, parts: parts } + Formatted { sign, parts } } } } @@ -558,21 +558,21 @@ pub fn to_exact_exp_str<'a, T, F>(mut format_exact: F, v: T, match full_decoded { FullDecoded::Nan => { parts[0] = Part::Copy(b"NaN"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Infinite => { parts[0] = Part::Copy(b"inf"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Zero => { if ndigits > 1 { // [0.][0000][e0] parts[0] = Part::Copy(b"0."); parts[1] = Part::Zero(ndigits - 1); parts[2] = Part::Copy(if upper { b"E0" } else { b"e0" }); - Formatted { sign: sign, parts: &parts[..3] } + Formatted { sign, parts: &parts[..3] } } else { parts[0] = Part::Copy(if upper { b"0E0" } else { b"0e0" }); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } } FullDecoded::Finite(ref decoded) => { @@ -613,20 +613,20 @@ 
pub fn to_exact_fixed_str<'a, T, F>(mut format_exact: F, v: T, match full_decoded { FullDecoded::Nan => { parts[0] = Part::Copy(b"NaN"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Infinite => { parts[0] = Part::Copy(b"inf"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } FullDecoded::Zero => { if frac_digits > 0 { // [0.][0000] parts[0] = Part::Copy(b"0."); parts[1] = Part::Zero(frac_digits); - Formatted { sign: sign, parts: &parts[..2] } + Formatted { sign, parts: &parts[..2] } } else { parts[0] = Part::Copy(b"0"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } } FullDecoded::Finite(ref decoded) => { @@ -646,10 +646,10 @@ pub fn to_exact_fixed_str<'a, T, F>(mut format_exact: F, v: T, if frac_digits > 0 { // [0.][0000] parts[0] = Part::Copy(b"0."); parts[1] = Part::Zero(frac_digits); - Formatted { sign: sign, parts: &parts[..2] } + Formatted { sign, parts: &parts[..2] } } else { parts[0] = Part::Copy(b"0"); - Formatted { sign: sign, parts: &parts[..1] } + Formatted { sign, parts: &parts[..1] } } } else { Formatted { sign, diff --git a/src/libcore/num/flt2dec/strategy/dragon.rs b/src/libcore/num/flt2dec/strategy/dragon.rs index 6aa4f297e75b..aa6a08cb2057 100644 --- a/src/libcore/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/num/flt2dec/strategy/dragon.rs @@ -8,12 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Almost direct (but slightly optimized) Rust translation of Figure 3 of [1]. - -[1] Burger, R. G. and Dybvig, R. K. 1996. Printing floating-point numbers - quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116. -*/ +//! Almost direct (but slightly optimized) Rust translation of Figure 3 of "Printing +//! Floating-Point Numbers Quickly and Accurately"[^1]. +//! +//! [^1]: Burger, R. G. and Dybvig, R. K. 1996. 
Printing floating-point numbers +//! quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116. use cmp::Ordering; diff --git a/src/libcore/num/flt2dec/strategy/grisu.rs b/src/libcore/num/flt2dec/strategy/grisu.rs index cf70a1978f5e..effe073c3816 100644 --- a/src/libcore/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/num/flt2dec/strategy/grisu.rs @@ -8,13 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -/*! -Rust adaptation of Grisu3 algorithm described in [1]. It uses about -1KB of precomputed table, and in turn, it's very quick for most inputs. - -[1] Florian Loitsch. 2010. Printing floating-point numbers quickly and - accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243. -*/ +//! Rust adaptation of the Grisu3 algorithm described in "Printing Floating-Point Numbers Quickly +//! and Accurately with Integers"[^1]. It uses about 1KB of precomputed table, and in turn, it's +//! very quick for most inputs. +//! +//! [^1]: Florian Loitsch. 2010. Printing floating-point numbers quickly and +//! accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243. use num::diy_float::Fp; use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up}; @@ -130,7 +129,7 @@ pub fn cached_power(alpha: i16, gamma: i16) -> (i16, Fp) { let idx = ((gamma as i32) - offset) * range / domain; let (f, e, k) = CACHED_POW10[idx as usize]; debug_assert!(alpha <= e && e <= gamma); - (k, Fp { f: f, e: e }) + (k, Fp { f, e }) } /// Given `x > 0`, returns `(k, 10^k)` such that `10^k <= x < 10^(k+1)`. diff --git a/src/libcore/num/i128.rs b/src/libcore/num/i128.rs index 04354e2e33f9..989376d1ac2d 100644 --- a/src/libcore/num/i128.rs +++ b/src/libcore/num/i128.rs @@ -12,6 +12,6 @@ //! //! *[See also the `i128` primitive type](../../std/primitive.i128.html).* -#![unstable(feature = "i128", issue="35118")] +#![stable(feature = "i128", since = "1.26.0")] -int_module! 
{ i128, #[unstable(feature = "i128", issue="35118")] } +int_module! { i128, #[stable(feature = "i128", since="1.26.0")] } diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index b5d24203b5e8..37856dc54693 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -12,12 +12,93 @@ #![stable(feature = "rust1", since = "1.0.0")] -use convert::{Infallible, TryFrom}; +use convert::TryFrom; use fmt; use intrinsics; +use mem; +use nonzero::NonZero; use ops; use str::FromStr; +macro_rules! impl_nonzero_fmt { + ( ( $( $Trait: ident ),+ ) for $Ty: ident ) => { + $( + #[stable(feature = "nonzero", since = "1.28.0")] + impl fmt::$Trait for $Ty { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.get().fmt(f) + } + } + )+ + } +} + +macro_rules! nonzero_integers { + ( $( $Ty: ident($Int: ty); )+ ) => { + $( + /// An integer that is known not to equal zero. + /// + /// This enables some memory layout optimization. + /// For example, `Option` is the same size as `u32`: + /// + /// ```rust + /// use std::mem::size_of; + /// assert_eq!(size_of::>(), size_of::()); + /// ``` + #[stable(feature = "nonzero", since = "1.28.0")] + #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] + #[repr(transparent)] + pub struct $Ty(NonZero<$Int>); + + impl $Ty { + /// Create a non-zero without checking the value. + /// + /// # Safety + /// + /// The value must not be zero. + #[stable(feature = "nonzero", since = "1.28.0")] + #[inline] + pub const unsafe fn new_unchecked(n: $Int) -> Self { + $Ty(NonZero(n)) + } + + /// Create a non-zero if the given value is not zero. + #[stable(feature = "nonzero", since = "1.28.0")] + #[inline] + pub fn new(n: $Int) -> Option { + if n != 0 { + Some($Ty(NonZero(n))) + } else { + None + } + } + + /// Returns the value as a primitive type. + #[stable(feature = "nonzero", since = "1.28.0")] + #[inline] + pub fn get(self) -> $Int { + self.0 .0 + } + + } + + impl_nonzero_fmt! 
{ + (Debug, Display, Binary, Octal, LowerHex, UpperHex) for $Ty + } + )+ + } +} + +nonzero_integers! { + NonZeroU8(u8); + NonZeroU16(u16); + NonZeroU32(u32); + NonZeroU64(u64); + NonZeroU128(u128); + NonZeroUsize(usize); +} + /// Provides intentionally-wrapped arithmetic on `T`. /// /// Operations like `+` on `u32` values is intended to never overflow, @@ -43,6 +124,7 @@ use str::FromStr; /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)] +#[repr(transparent)] pub struct Wrapping(#[stable(feature = "rust1", since = "1.0.0")] pub T); @@ -88,1715 +170,2669 @@ impl fmt::UpperHex for Wrapping { } } -mod wrapping; - // All these modules are technically private and only exposed for coretests: pub mod flt2dec; pub mod dec2flt; pub mod bignum; pub mod diy_float; +macro_rules! doc_comment { + ($x:expr, $($tt:tt)*) => { + #[doc = $x] + $($tt)* + }; +} + +mod wrapping; + // `Int` + `SignedInt` implemented for signed integers macro_rules! int_impl { - ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr) => { - /// Returns the smallest value that can be represented by this integer type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(i8::min_value(), -128); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub const fn min_value() -> Self { - !0 ^ ((!0 as $UnsignedT) >> 1) as Self - } + ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $Min:expr, $Max:expr, $Feature:expr, + $EndFeature:expr, $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, + $reversed:expr) => { + doc_comment! { + concat!("Returns the smallest value that can be represented by this integer type. - /// Returns the largest value that can be represented by this integer type. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(i8::max_value(), 127); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub const fn max_value() -> Self { - !Self::min_value() - } +# Examples - /// Converts a string slice in a given base to an integer. - /// - /// The string is expected to be an optional `+` or `-` sign - /// followed by digits. - /// Leading and trailing whitespace represent an error. - /// Digits are a subset of these characters, depending on `radix`: - /// - /// * `0-9` - /// * `a-z` - /// * `A-Z` - /// - /// # Panics - /// - /// This function panics if `radix` is not in the range from 2 to 36. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(i32::from_str_radix("A", 16), Ok(10)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn from_str_radix(src: &str, radix: u32) -> Result { - from_str_radix(src, radix) - } +Basic usage: - /// Returns the number of ones in the binary representation of `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = -0b1000_0000i8; - /// - /// assert_eq!(n.count_ones(), 1); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() } - - /// Returns the number of zeros in the binary representation of `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = -0b1000_0000i8; - /// - /// assert_eq!(n.count_zeros(), 7); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn count_zeros(self) -> u32 { - (!self).count_ones() - } - - /// Returns the number of leading zeros in the binary representation - /// of `self`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = -1i16; - /// - /// assert_eq!(n.leading_zeros(), 0); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn leading_zeros(self) -> u32 { - (self as $UnsignedT).leading_zeros() - } - - /// Returns the number of trailing zeros in the binary representation - /// of `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = -4i8; - /// - /// assert_eq!(n.trailing_zeros(), 2); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn trailing_zeros(self) -> u32 { - (self as $UnsignedT).trailing_zeros() - } - - /// Shifts the bits to the left by a specified amount, `n`, - /// wrapping the truncated bits to the end of the resulting integer. - /// - /// Please note this isn't the same operation as `<<`! - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFi64; - /// let m = -0x76543210FEDCBA99i64; - /// - /// assert_eq!(n.rotate_left(32), m); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rotate_left(self, n: u32) -> Self { - (self as $UnsignedT).rotate_left(n) as Self - } - - /// Shifts the bits to the right by a specified amount, `n`, - /// wrapping the truncated bits to the beginning of the resulting - /// integer. - /// - /// Please note this isn't the same operation as `>>`! - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFi64; - /// let m = -0xFEDCBA987654322i64; - /// - /// assert_eq!(n.rotate_right(4), m); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rotate_right(self, n: u32) -> Self { - (self as $UnsignedT).rotate_right(n) as Self - } - - /// Reverses the byte order of the integer. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n: i16 = 0b0000000_01010101; - /// assert_eq!(n, 85); - /// - /// let m = n.swap_bytes(); - /// - /// assert_eq!(m, 0b01010101_00000000); - /// assert_eq!(m, 21760); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn swap_bytes(self) -> Self { - (self as $UnsignedT).swap_bytes() as Self - } - - /// Converts an integer from big endian to the target's endianness. - /// - /// On big endian this is a no-op. On little endian the bytes are - /// swapped. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFi64; - /// - /// if cfg!(target_endian = "big") { - /// assert_eq!(i64::from_be(n), n) - /// } else { - /// assert_eq!(i64::from_be(n), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn from_be(x: Self) -> Self { - if cfg!(target_endian = "big") { x } else { x.swap_bytes() } - } - - /// Converts an integer from little endian to the target's endianness. - /// - /// On little endian this is a no-op. On big endian the bytes are - /// swapped. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFi64; - /// - /// if cfg!(target_endian = "little") { - /// assert_eq!(i64::from_le(n), n) - /// } else { - /// assert_eq!(i64::from_le(n), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn from_le(x: Self) -> Self { - if cfg!(target_endian = "little") { x } else { x.swap_bytes() } - } - - /// Converts `self` to big endian from the target's endianness. - /// - /// On big endian this is a no-op. On little endian the bytes are - /// swapped. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFi64; - /// - /// if cfg!(target_endian = "big") { - /// assert_eq!(n.to_be(), n) - /// } else { - /// assert_eq!(n.to_be(), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn to_be(self) -> Self { // or not to be? - if cfg!(target_endian = "big") { self } else { self.swap_bytes() } - } - - /// Converts `self` to little endian from the target's endianness. - /// - /// On little endian this is a no-op. On big endian the bytes are - /// swapped. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFi64; - /// - /// if cfg!(target_endian = "little") { - /// assert_eq!(n.to_le(), n) - /// } else { - /// assert_eq!(n.to_le(), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn to_le(self) -> Self { - if cfg!(target_endian = "little") { self } else { self.swap_bytes() } - } - - /// Checked integer addition. Computes `self + rhs`, returning `None` - /// if overflow occurred. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(7i16.checked_add(32760), Some(32767)); - /// assert_eq!(8i16.checked_add(32760), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_add(self, rhs: Self) -> Option { - let (a, b) = self.overflowing_add(rhs); - if b {None} else {Some(a)} - } - - /// Checked integer subtraction. Computes `self - rhs`, returning - /// `None` if overflow occurred. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!((-127i8).checked_sub(1), Some(-128)); - /// assert_eq!((-128i8).checked_sub(1), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_sub(self, rhs: Self) -> Option { - let (a, b) = self.overflowing_sub(rhs); - if b {None} else {Some(a)} - } - - /// Checked integer multiplication. 
Computes `self * rhs`, returning - /// `None` if overflow occurred. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(6i8.checked_mul(21), Some(126)); - /// assert_eq!(6i8.checked_mul(22), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_mul(self, rhs: Self) -> Option { - let (a, b) = self.overflowing_mul(rhs); - if b {None} else {Some(a)} - } - - /// Checked integer division. Computes `self / rhs`, returning `None` - /// if `rhs == 0` or the division results in overflow. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!((-127i8).checked_div(-1), Some(127)); - /// assert_eq!((-128i8).checked_div(-1), None); - /// assert_eq!((1i8).checked_div(0), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_div(self, rhs: Self) -> Option { - if rhs == 0 || (self == Self::min_value() && rhs == -1) { - None - } else { - Some(unsafe { intrinsics::unchecked_div(self, rhs) }) +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::min_value(), ", stringify!($Min), ");", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub const fn min_value() -> Self { + !0 ^ ((!0 as $UnsignedT) >> 1) as Self } } - /// Checked integer remainder. Computes `self % rhs`, returning `None` - /// if `rhs == 0` or the division results in overflow. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(5i32.checked_rem(2), Some(1)); - /// assert_eq!(5i32.checked_rem(0), None); - /// assert_eq!(i32::MIN.checked_rem(-1), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_rem(self, rhs: Self) -> Option { - if rhs == 0 || (self == Self::min_value() && rhs == -1) { - None - } else { - Some(unsafe { intrinsics::unchecked_rem(self, rhs) }) + doc_comment! 
{ + concat!("Returns the largest value that can be represented by this integer type. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::max_value(), ", stringify!($Max), ");", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub const fn max_value() -> Self { + !Self::min_value() } } - /// Checked negation. Computes `-self`, returning `None` if `self == - /// MIN`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(5i32.checked_neg(), Some(-5)); - /// assert_eq!(i32::MIN.checked_neg(), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_neg(self) -> Option { - let (a, b) = self.overflowing_neg(); - if b {None} else {Some(a)} - } + doc_comment! { + concat!("Converts a string slice in a given base to an integer. - /// Checked shift left. Computes `self << rhs`, returning `None` - /// if `rhs` is larger than or equal to the number of bits in `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(0x10i32.checked_shl(4), Some(0x100)); - /// assert_eq!(0x10i32.checked_shl(33), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_shl(self, rhs: u32) -> Option { - let (a, b) = self.overflowing_shl(rhs); - if b {None} else {Some(a)} - } +The string is expected to be an optional `+` or `-` sign followed by digits. +Leading and trailing whitespace represent an error. Digits are a subset of these characters, +depending on `radix`: - /// Checked shift right. Computes `self >> rhs`, returning `None` - /// if `rhs` is larger than or equal to the number of bits in `self`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(0x10i32.checked_shr(4), Some(0x1)); - /// assert_eq!(0x10i32.checked_shr(33), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_shr(self, rhs: u32) -> Option { - let (a, b) = self.overflowing_shr(rhs); - if b {None} else {Some(a)} - } + * `0-9` + * `a-z` + * `A-Z` - /// Checked absolute value. Computes `self.abs()`, returning `None` if - /// `self == MIN`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!((-5i32).checked_abs(), Some(5)); - /// assert_eq!(i32::MIN.checked_abs(), None); - /// ``` - #[stable(feature = "no_panic_abs", since = "1.13.0")] - #[inline] - pub fn checked_abs(self) -> Option { - if self.is_negative() { - self.checked_neg() - } else { - Some(self) +# Panics + +This function panics if `radix` is not in the range from 2 to 36. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + pub fn from_str_radix(src: &str, radix: u32) -> Result { + from_str_radix(src, radix) } } - /// Saturating integer addition. Computes `self + rhs`, saturating at - /// the numeric bounds instead of overflowing. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100i8.saturating_add(1), 101); - /// assert_eq!(100i8.saturating_add(127), 127); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn saturating_add(self, rhs: Self) -> Self { - match self.checked_add(rhs) { - Some(x) => x, - None if rhs >= 0 => Self::max_value(), - None => Self::min_value(), + doc_comment! { + concat!("Returns the number of ones in the binary representation of `self`. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0b100_0000", stringify!($SelfT), "; + +assert_eq!(n.count_ones(), 1);", +$EndFeature, " +``` +"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() } + } + + doc_comment! { + concat!("Returns the number of zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::max_value().count_zeros(), 1);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn count_zeros(self) -> u32 { + (!self).count_ones() } } - /// Saturating integer subtraction. Computes `self - rhs`, saturating - /// at the numeric bounds instead of overflowing. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100i8.saturating_sub(127), -27); - /// assert_eq!((-100i8).saturating_sub(127), -128); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn saturating_sub(self, rhs: Self) -> Self { - match self.checked_sub(rhs) { - Some(x) => x, - None if rhs >= 0 => Self::min_value(), - None => Self::max_value(), + doc_comment! { + concat!("Returns the number of leading zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = -1", stringify!($SelfT), "; + +assert_eq!(n.leading_zeros(), 0);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn leading_zeros(self) -> u32 { + (self as $UnsignedT).leading_zeros() } } - /// Saturating integer multiplication. Computes `self * rhs`, - /// saturating at the numeric bounds instead of overflowing. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(100i32.saturating_mul(127), 12700); - /// assert_eq!((1i32 << 23).saturating_mul(1 << 23), i32::MAX); - /// assert_eq!((-1i32 << 23).saturating_mul(1 << 23), i32::MIN); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn saturating_mul(self, rhs: Self) -> Self { - self.checked_mul(rhs).unwrap_or_else(|| { - if (self < 0 && rhs < 0) || (self > 0 && rhs > 0) { - Self::max_value() - } else { - Self::min_value() + doc_comment! { + concat!("Returns the number of trailing zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = -4", stringify!($SelfT), "; + +assert_eq!(n.trailing_zeros(), 2);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn trailing_zeros(self) -> u32 { + (self as $UnsignedT).trailing_zeros() + } + } + + doc_comment! { + concat!("Shifts the bits to the left by a specified amount, `n`, +wrapping the truncated bits to the end of the resulting integer. + +Please note this isn't the same operation as `<<`! + +# Examples + +Basic usage: + +``` +let n = ", $rot_op, stringify!($SelfT), "; +let m = ", $rot_result, "; + +assert_eq!(n.rotate_left(", $rot, "), m); +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rotate_left(self, n: u32) -> Self { + (self as $UnsignedT).rotate_left(n) as Self + } + } + + doc_comment! { + concat!("Shifts the bits to the right by a specified amount, `n`, +wrapping the truncated bits to the beginning of the resulting +integer. + +Please note this isn't the same operation as `>>`! 
+ +# Examples + +Basic usage: + +``` +let n = ", $rot_result, stringify!($SelfT), "; +let m = ", $rot_op, "; + +assert_eq!(n.rotate_right(", $rot, "), m); +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rotate_right(self, n: u32) -> Self { + (self as $UnsignedT).rotate_right(n) as Self + } + } + doc_comment! { + concat!("Reverses the byte order of the integer. + +# Examples + +Basic usage: + +``` +let n = ", $swap_op, stringify!($SelfT), "; + +let m = n.swap_bytes(); + +assert_eq!(m, ", $swapped, "); +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn swap_bytes(self) -> Self { + (self as $UnsignedT).swap_bytes() as Self + } + } + + doc_comment! { + concat!("Reverses the bit pattern of the integer. + +# Examples + +Basic usage: + +``` +#![feature(reverse_bits)] + +let n = ", $swap_op, stringify!($SelfT), "; +let m = n.reverse_bits(); + +assert_eq!(m, ", $reversed, "); +```"), + #[unstable(feature = "reverse_bits", issue = "48763")] + #[inline] + pub fn reverse_bits(self) -> Self { + (self as $UnsignedT).reverse_bits() as Self + } + } + + doc_comment! { + concat!("Converts an integer from big endian to the target's endianness. + +On big endian this is a no-op. On little endian the bytes are swapped. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"big\") { + assert_eq!(", stringify!($SelfT), "::from_be(n), n) +} else { + assert_eq!(", stringify!($SelfT), "::from_be(n), n.swap_bytes()) +}", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn from_be(x: Self) -> Self { + #[cfg(target_endian = "big")] + { + x + } + #[cfg(not(target_endian = "big"))] + { + x.swap_bytes() } - }) - } - - /// Wrapping (modular) addition. Computes `self + rhs`, - /// wrapping around at the boundary of the type. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100i8.wrapping_add(27), 127); - /// assert_eq!(100i8.wrapping_add(127), -29); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn wrapping_add(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_add(self, rhs) } } - /// Wrapping (modular) subtraction. Computes `self - rhs`, - /// wrapping around at the boundary of the type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(0i8.wrapping_sub(127), -127); - /// assert_eq!((-2i8).wrapping_sub(127), 127); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn wrapping_sub(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_sub(self, rhs) + doc_comment! { + concat!("Converts an integer from little endian to the target's endianness. + +On little endian this is a no-op. On big endian the bytes are swapped. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"little\") { + assert_eq!(", stringify!($SelfT), "::from_le(n), n) +} else { + assert_eq!(", stringify!($SelfT), "::from_le(n), n.swap_bytes()) +}", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn from_le(x: Self) -> Self { + #[cfg(target_endian = "little")] + { + x + } + #[cfg(not(target_endian = "little"))] + { + x.swap_bytes() + } } } - /// Wrapping (modular) multiplication. Computes `self * - /// rhs`, wrapping around at the boundary of the type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(10i8.wrapping_mul(12), 120); - /// assert_eq!(11i8.wrapping_mul(12), -124); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn wrapping_mul(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_mul(self, rhs) + doc_comment! 
{ + concat!("Converts `self` to big endian from the target's endianness. + +On big endian this is a no-op. On little endian the bytes are swapped. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"big\") { + assert_eq!(n.to_be(), n) +} else { + assert_eq!(n.to_be(), n.swap_bytes()) +}", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn to_be(self) -> Self { // or not to be? + #[cfg(target_endian = "big")] + { + self + } + #[cfg(not(target_endian = "big"))] + { + self.swap_bytes() + } } } - /// Wrapping (modular) division. Computes `self / rhs`, - /// wrapping around at the boundary of the type. - /// - /// The only case where such wrapping can occur is when one - /// divides `MIN / -1` on a signed type (where `MIN` is the - /// negative minimal value for the type); this is equivalent - /// to `-MIN`, a positive value that is too large to represent - /// in the type. In such a case, this function returns `MIN` - /// itself. - /// - /// # Panics - /// - /// This function will panic if `rhs` is 0. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100u8.wrapping_div(10), 10); - /// assert_eq!((-128i8).wrapping_div(-1), -128); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_div(self, rhs: Self) -> Self { - self.overflowing_div(rhs).0 - } + doc_comment! { + concat!("Converts `self` to little endian from the target's endianness. - /// Wrapping (modular) remainder. Computes `self % rhs`, - /// wrapping around at the boundary of the type. - /// - /// Such wrap-around never actually occurs mathematically; - /// implementation artifacts make `x % y` invalid for `MIN / - /// -1` on a signed type (where `MIN` is the negative - /// minimal value). In such a case, this function returns `0`. 
- /// - /// # Panics - /// - /// This function will panic if `rhs` is 0. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100i8.wrapping_rem(10), 0); - /// assert_eq!((-128i8).wrapping_rem(-1), 0); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_rem(self, rhs: Self) -> Self { - self.overflowing_rem(rhs).0 - } +On little endian this is a no-op. On big endian the bytes are swapped. - /// Wrapping (modular) negation. Computes `-self`, - /// wrapping around at the boundary of the type. - /// - /// The only case where such wrapping can occur is when one - /// negates `MIN` on a signed type (where `MIN` is the - /// negative minimal value for the type); this is a positive - /// value that is too large to represent in the type. In such - /// a case, this function returns `MIN` itself. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100i8.wrapping_neg(), -100); - /// assert_eq!((-128i8).wrapping_neg(), -128); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_neg(self) -> Self { - self.overflowing_neg().0 - } +# Examples - /// Panic-free bitwise shift-left; yields `self << mask(rhs)`, - /// where `mask` removes any high-order bits of `rhs` that - /// would cause the shift to exceed the bitwidth of the type. - /// - /// Note that this is *not* the same as a rotate-left; the - /// RHS of a wrapping shift-left is restricted to the range - /// of the type, rather than the bits shifted out of the LHS - /// being returned to the other end. The primitive integer - /// types all implement a `rotate_left` function, which may - /// be what you want instead. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!((-1i8).wrapping_shl(7), -128); - /// assert_eq!((-1i8).wrapping_shl(8), -1); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_shl(self, rhs: u32) -> Self { - unsafe { - intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT) +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"little\") { + assert_eq!(n.to_le(), n) +} else { + assert_eq!(n.to_le(), n.swap_bytes()) +}", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn to_le(self) -> Self { + #[cfg(target_endian = "little")] + { + self + } + #[cfg(not(target_endian = "little"))] + { + self.swap_bytes() + } } } - /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`, - /// where `mask` removes any high-order bits of `rhs` that - /// would cause the shift to exceed the bitwidth of the type. - /// - /// Note that this is *not* the same as a rotate-right; the - /// RHS of a wrapping shift-right is restricted to the range - /// of the type, rather than the bits shifted out of the LHS - /// being returned to the other end. The primitive integer - /// types all implement a `rotate_right` function, which may - /// be what you want instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!((-128i8).wrapping_shr(7), -1); - /// assert_eq!((-128i8).wrapping_shr(8), -128); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_shr(self, rhs: u32) -> Self { - unsafe { - intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT) + doc_comment! { + concat!("Checked integer addition. Computes `self + rhs`, returning `None` +if overflow occurred. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!((", stringify!($SelfT), +"::max_value() - 2).checked_add(1), Some(", stringify!($SelfT), "::max_value() - 1)); +assert_eq!((", stringify!($SelfT), "::max_value() - 2).checked_add(3), None);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_add(self, rhs: Self) -> Option { + let (a, b) = self.overflowing_add(rhs); + if b {None} else {Some(a)} } } - /// Wrapping (modular) absolute value. Computes `self.abs()`, - /// wrapping around at the boundary of the type. - /// - /// The only case where such wrapping can occur is when one takes - /// the absolute value of the negative minimal value for the type - /// this is a positive value that is too large to represent in the - /// type. In such a case, this function returns `MIN` itself. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100i8.wrapping_abs(), 100); - /// assert_eq!((-100i8).wrapping_abs(), 100); - /// assert_eq!((-128i8).wrapping_abs(), -128); - /// assert_eq!((-128i8).wrapping_abs() as u8, 128); - /// ``` - #[stable(feature = "no_panic_abs", since = "1.13.0")] - #[inline] - pub fn wrapping_abs(self) -> Self { - if self.is_negative() { - self.wrapping_neg() - } else { - self + doc_comment! { + concat!("Checked integer subtraction. Computes `self - rhs`, returning `None` if +overflow occurred. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!((", stringify!($SelfT), +"::min_value() + 2).checked_sub(1), Some(", stringify!($SelfT), "::min_value() + 1)); +assert_eq!((", stringify!($SelfT), "::min_value() + 2).checked_sub(3), None);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_sub(self, rhs: Self) -> Option { + let (a, b) = self.overflowing_sub(rhs); + if b {None} else {Some(a)} } } - /// Calculates `self` + `rhs` - /// - /// Returns a tuple of the addition along with a boolean indicating - /// whether an arithmetic overflow would occur. If an overflow would - /// have occurred then the wrapped value is returned. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(5i32.overflowing_add(2), (7, false)); - /// assert_eq!(i32::MAX.overflowing_add(1), (i32::MIN, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_add(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::add_with_overflow(self as $ActualT, - rhs as $ActualT) - }; - (a as Self, b) - } + doc_comment! { + concat!("Checked integer multiplication. Computes `self * rhs`, returning `None` if +overflow occurred. - /// Calculates `self` - `rhs` - /// - /// Returns a tuple of the subtraction along with a boolean indicating - /// whether an arithmetic overflow would occur. If an overflow would - /// have occurred then the wrapped value is returned. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(5i32.overflowing_sub(2), (3, false)); - /// assert_eq!(i32::MIN.overflowing_sub(1), (i32::MAX, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_sub(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::sub_with_overflow(self as $ActualT, - rhs as $ActualT) - }; - (a as Self, b) - } +# Examples - /// Calculates the multiplication of `self` and `rhs`. - /// - /// Returns a tuple of the multiplication along with a boolean - /// indicating whether an arithmetic overflow would occur. If an - /// overflow would have occurred then the wrapped value is returned. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(5i32.overflowing_mul(2), (10, false)); - /// assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_mul(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::mul_with_overflow(self as $ActualT, - rhs as $ActualT) - }; - (a as Self, b) - } +Basic usage: - /// Calculates the divisor when `self` is divided by `rhs`. - /// - /// Returns a tuple of the divisor along with a boolean indicating - /// whether an arithmetic overflow would occur. If an overflow would - /// occur then self is returned. - /// - /// # Panics - /// - /// This function will panic if `rhs` is 0. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(5i32.overflowing_div(2), (2, false)); - /// assert_eq!(i32::MIN.overflowing_div(-1), (i32::MIN, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_div(self, rhs: Self) -> (Self, bool) { - if self == Self::min_value() && rhs == -1 { - (self, true) - } else { - (self / rhs, false) +``` +", $Feature, "assert_eq!(", stringify!($SelfT), +"::max_value().checked_mul(1), Some(", stringify!($SelfT), "::max_value())); +assert_eq!(", stringify!($SelfT), "::max_value().checked_mul(2), None);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_mul(self, rhs: Self) -> Option { + let (a, b) = self.overflowing_mul(rhs); + if b {None} else {Some(a)} } } - /// Calculates the remainder when `self` is divided by `rhs`. - /// - /// Returns a tuple of the remainder after dividing along with a boolean - /// indicating whether an arithmetic overflow would occur. If an - /// overflow would occur then 0 is returned. - /// - /// # Panics - /// - /// This function will panic if `rhs` is 0. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(5i32.overflowing_rem(2), (1, false)); - /// assert_eq!(i32::MIN.overflowing_rem(-1), (0, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_rem(self, rhs: Self) -> (Self, bool) { - if self == Self::min_value() && rhs == -1 { - (0, true) - } else { - (self % rhs, false) + doc_comment! { + concat!("Checked integer division. Computes `self / rhs`, returning `None` if `rhs == 0` +or the division results in overflow. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!((", stringify!($SelfT), +"::min_value() + 1).checked_div(-1), Some(", stringify!($Max), ")); +assert_eq!(", stringify!($SelfT), "::min_value().checked_div(-1), None); +assert_eq!((1", stringify!($SelfT), ").checked_div(0), None);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_div(self, rhs: Self) -> Option { + if rhs == 0 || (self == Self::min_value() && rhs == -1) { + None + } else { + Some(unsafe { intrinsics::unchecked_div(self, rhs) }) + } } } - /// Negates self, overflowing if this is equal to the minimum value. - /// - /// Returns a tuple of the negated version of self along with a boolean - /// indicating whether an overflow happened. If `self` is the minimum - /// value (e.g. `i32::MIN` for values of type `i32`), then the minimum - /// value will be returned again and `true` will be returned for an - /// overflow happening. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::i32; - /// - /// assert_eq!(2i32.overflowing_neg(), (-2, false)); - /// assert_eq!(i32::MIN.overflowing_neg(), (i32::MIN, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_neg(self) -> (Self, bool) { - if self == Self::min_value() { - (Self::min_value(), true) - } else { - (-self, false) + doc_comment! { + concat!("Checked Euclidean division. Computes `self.div_euc(rhs)`, +returning `None` if `rhs == 0` or the division results in overflow. 
+ +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!((", stringify!($SelfT), +"::min_value() + 1).checked_div_euc(-1), Some(", stringify!($Max), ")); +assert_eq!(", stringify!($SelfT), "::min_value().checked_div_euc(-1), None); +assert_eq!((1", stringify!($SelfT), ").checked_div_euc(0), None); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn checked_div_euc(self, rhs: Self) -> Option { + if rhs == 0 || (self == Self::min_value() && rhs == -1) { + None + } else { + Some(self.div_euc(rhs)) + } } } - /// Shifts self left by `rhs` bits. - /// - /// Returns a tuple of the shifted version of self along with a boolean - /// indicating whether the shift value was larger than or equal to the - /// number of bits. If the shift value is too large, then value is - /// masked (N-1) where N is the number of bits, and this value is then - /// used to perform the shift. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(0x10i32.overflowing_shl(4), (0x100, false)); - /// assert_eq!(0x10i32.overflowing_shl(36), (0x100, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) - } + doc_comment! { + concat!("Checked integer remainder. Computes `self % rhs`, returning `None` if +`rhs == 0` or the division results in overflow. - /// Shifts self right by `rhs` bits. - /// - /// Returns a tuple of the shifted version of self along with a boolean - /// indicating whether the shift value was larger than or equal to the - /// number of bits. If the shift value is too large, then value is - /// masked (N-1) where N is the number of bits, and this value is then - /// used to perform the shift. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(0x10i32.overflowing_shr(4), (0x1, false)); - /// assert_eq!(0x10i32.overflowing_shr(36), (0x1, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) - } +# Examples - /// Computes the absolute value of `self`. - /// - /// Returns a tuple of the absolute version of self along with a - /// boolean indicating whether an overflow happened. If self is the - /// minimum value (e.g. i32::MIN for values of type i32), then the - /// minimum value will be returned again and true will be returned for - /// an overflow happening. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(10i8.overflowing_abs(), (10,false)); - /// assert_eq!((-10i8).overflowing_abs(), (10,false)); - /// assert_eq!((-128i8).overflowing_abs(), (-128,true)); - /// ``` - #[stable(feature = "no_panic_abs", since = "1.13.0")] - #[inline] - pub fn overflowing_abs(self) -> (Self, bool) { - if self.is_negative() { - self.overflowing_neg() - } else { - (self, false) +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1)); +assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None); +assert_eq!(", stringify!($SelfT), "::MIN.checked_rem(-1), None);", +$EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_rem(self, rhs: Self) -> Option { + if rhs == 0 || (self == Self::min_value() && rhs == -1) { + None + } else { + Some(unsafe { intrinsics::unchecked_rem(self, rhs) }) + } } } - /// Raises self to the power of `exp`, using exponentiation by squaring. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let x: i32 = 2; // or any other integer type - /// - /// assert_eq!(x.pow(4), 16); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - #[rustc_inherit_overflow_checks] - pub fn pow(self, mut exp: u32) -> Self { - let mut base = self; - let mut acc = 1; + doc_comment! { + concat!("Checked Euclidean modulo. Computes `self.mod_euc(rhs)`, returning `None` if +`rhs == 0` or the division results in overflow. - while exp > 1 { - if (exp & 1) == 1 { +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(2), Some(1)); +assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(0), None); +assert_eq!(", stringify!($SelfT), "::MIN.checked_mod_euc(-1), None); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn checked_mod_euc(self, rhs: Self) -> Option { + if rhs == 0 || (self == Self::min_value() && rhs == -1) { + None + } else { + Some(self.mod_euc(rhs)) + } + } + } + + doc_comment! { + concat!("Checked negation. Computes `-self`, returning `None` if `self == MIN`. + +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".checked_neg(), Some(-5)); +assert_eq!(", stringify!($SelfT), "::MIN.checked_neg(), None);", +$EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_neg(self) -> Option { + let (a, b) = self.overflowing_neg(); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked shift left. Computes `self << rhs`, returning `None` if `rhs` is larger +than or equal to the number of bits in `self`. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0x1", stringify!($SelfT), ".checked_shl(4), Some(0x10)); +assert_eq!(0x1", stringify!($SelfT), ".checked_shl(129), None);", +$EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_shl(self, rhs: u32) -> Option { + let (a, b) = self.overflowing_shl(rhs); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is +larger than or equal to the number of bits in `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".checked_shr(4), Some(0x1)); +assert_eq!(0x10", stringify!($SelfT), ".checked_shr(128), None);", +$EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_shr(self, rhs: u32) -> Option { + let (a, b) = self.overflowing_shr(rhs); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked absolute value. Computes `self.abs()`, returning `None` if +`self == MIN`. + +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!((-5", stringify!($SelfT), ").checked_abs(), Some(5)); +assert_eq!(", stringify!($SelfT), "::MIN.checked_abs(), None);", +$EndFeature, " +```"), + #[stable(feature = "no_panic_abs", since = "1.13.0")] + #[inline] + pub fn checked_abs(self) -> Option { + if self.is_negative() { + self.checked_neg() + } else { + Some(self) + } + } + } + + doc_comment! { + concat!("Checked exponentiation. Computes `self.pow(exp)`, returning `None` if +overflow occurred. 
+ +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "assert_eq!(8", stringify!($SelfT), ".checked_pow(2), Some(64)); +assert_eq!(", stringify!($SelfT), "::max_value().checked_pow(2), None);", +$EndFeature, " +```"), + + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn checked_pow(self, mut exp: u32) -> Option { + let mut base = self; + let mut acc: Self = 1; + + while exp > 1 { + if (exp & 1) == 1 { + acc = acc.checked_mul(base)?; + } + exp /= 2; + base = base.checked_mul(base)?; + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { + acc = acc.checked_mul(base)?; + } + + Some(acc) + } + } + + doc_comment! { + concat!("Saturating integer addition. Computes `self + rhs`, saturating at the numeric +bounds instead of overflowing. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101); +assert_eq!(", stringify!($SelfT), "::max_value().saturating_add(100), ", stringify!($SelfT), +"::max_value());", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn saturating_add(self, rhs: Self) -> Self { + match self.checked_add(rhs) { + Some(x) => x, + None if rhs >= 0 => Self::max_value(), + None => Self::min_value(), + } + } + } + + doc_comment! { + concat!("Saturating integer subtraction. Computes `self - rhs`, saturating at the +numeric bounds instead of overflowing. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_sub(127), -27); +assert_eq!(", stringify!($SelfT), "::min_value().saturating_sub(100), ", stringify!($SelfT), +"::min_value());", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn saturating_sub(self, rhs: Self) -> Self { + match self.checked_sub(rhs) { + Some(x) => x, + None if rhs >= 0 => Self::min_value(), + None => Self::max_value(), + } + } + } + + doc_comment! { + concat!("Saturating integer multiplication. Computes `self * rhs`, saturating at the +numeric bounds instead of overflowing. + +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(10", stringify!($SelfT), ".saturating_mul(12), 120); +assert_eq!(", stringify!($SelfT), "::MAX.saturating_mul(10), ", stringify!($SelfT), "::MAX); +assert_eq!(", stringify!($SelfT), "::MIN.saturating_mul(10), ", stringify!($SelfT), "::MIN);", +$EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn saturating_mul(self, rhs: Self) -> Self { + self.checked_mul(rhs).unwrap_or_else(|| { + if (self < 0 && rhs < 0) || (self > 0 && rhs > 0) { + Self::max_value() + } else { + Self::min_value() + } + }) + } + } + + doc_comment! { + concat!("Saturating integer exponentiation. Computes `self.pow(exp)`, +saturating at the numeric bounds instead of overflowing. 
+ +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!((-4", stringify!($SelfT), ").saturating_pow(3), -64); +assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(2), ", stringify!($SelfT), "::MAX); +assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(3), ", stringify!($SelfT), "::MIN);", +$EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn saturating_pow(self, exp: u32) -> Self { + match self.checked_pow(exp) { + Some(x) => x, + None if self < 0 && exp % 2 == 1 => Self::min_value(), + None => Self::max_value(), + } + } + } + + doc_comment! { + concat!("Wrapping (modular) addition. Computes `self + rhs`, wrapping around at the +boundary of the type. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_add(27), 127); +assert_eq!(", stringify!($SelfT), "::max_value().wrapping_add(2), ", stringify!($SelfT), +"::min_value() + 1);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn wrapping_add(self, rhs: Self) -> Self { + unsafe { + intrinsics::overflowing_add(self, rhs) + } + } + } + + doc_comment! { + concat!("Wrapping (modular) subtraction. Computes `self - rhs`, wrapping around at the +boundary of the type. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0", stringify!($SelfT), ".wrapping_sub(127), -127); +assert_eq!((-2", stringify!($SelfT), ").wrapping_sub(", stringify!($SelfT), "::max_value()), ", +stringify!($SelfT), "::max_value());", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn wrapping_sub(self, rhs: Self) -> Self { + unsafe { + intrinsics::overflowing_sub(self, rhs) + } + } + } + + doc_comment! { + concat!("Wrapping (modular) multiplication. Computes `self * rhs`, wrapping around at +the boundary of the type. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(10", stringify!($SelfT), ".wrapping_mul(12), 120); +assert_eq!(11i8.wrapping_mul(12), -124);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn wrapping_mul(self, rhs: Self) -> Self { + unsafe { + intrinsics::overflowing_mul(self, rhs) + } + } + } + + doc_comment! { + concat!("Wrapping (modular) division. Computes `self / rhs`, wrapping around at the +boundary of the type. + +The only case where such wrapping can occur is when one divides `MIN / -1` on a signed type (where +`MIN` is the negative minimal value for the type); this is equivalent to `-MIN`, a positive value +that is too large to represent in the type. In such a case, this function returns `MIN` itself. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_div(10), 10); +assert_eq!((-128i8).wrapping_div(-1), -128);", +$EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_div(self, rhs: Self) -> Self { + self.overflowing_div(rhs).0 + } + } + + doc_comment! { + concat!("Wrapping Euclidean division. Computes `self.div_euc(rhs)`, +wrapping around at the boundary of the type. + +Wrapping will only occur in `MIN / -1` on a signed type (where `MIN` is the negative minimal value +for the type). This is equivalent to `-MIN`, a positive value that is too large to represent in the +type. In this case, this method returns `MIN` itself. + +# Panics + +This function will panic if `rhs` is 0. 
+ +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(100", stringify!($SelfT), ".wrapping_div_euc(10), 10); +assert_eq!((-128i8).wrapping_div_euc(-1), -128); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn wrapping_div_euc(self, rhs: Self) -> Self { + self.overflowing_div_euc(rhs).0 + } + } + + doc_comment! { + concat!("Wrapping (modular) remainder. Computes `self % rhs`, wrapping around at the +boundary of the type. + +Such wrap-around never actually occurs mathematically; implementation artifacts make `x % y` +invalid for `MIN / -1` on a signed type (where `MIN` is the negative minimal value). In such a case, +this function returns `0`. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_rem(10), 0); +assert_eq!((-128i8).wrapping_rem(-1), 0);", +$EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_rem(self, rhs: Self) -> Self { + self.overflowing_rem(rhs).0 + } + } + + doc_comment! { + concat!("Wrapping Euclidean modulo. Computes `self.mod_euc(rhs)`, wrapping around at the +boundary of the type. + +Wrapping will only occur in `MIN % -1` on a signed type (where `MIN` is the negative minimal value +for the type). In this case, this method returns 0. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(100", stringify!($SelfT), ".wrapping_mod_euc(10), 0); +assert_eq!((-128i8).wrapping_mod_euc(-1), 0); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn wrapping_mod_euc(self, rhs: Self) -> Self { + self.overflowing_mod_euc(rhs).0 + } + } + + doc_comment! { + concat!("Wrapping (modular) negation. Computes `-self`, wrapping around at the boundary +of the type. 
+ +The only case where such wrapping can occur is when one negates `MIN` on a signed type (where `MIN` +is the negative minimal value for the type); this is a positive value that is too large to represent +in the type. In such a case, this function returns `MIN` itself. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_neg(), -100); +assert_eq!(", stringify!($SelfT), "::min_value().wrapping_neg(), ", stringify!($SelfT), +"::min_value());", +$EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_neg(self) -> Self { + self.overflowing_neg().0 + } + } + + doc_comment! { + concat!("Panic-free bitwise shift-left; yields `self << mask(rhs)`, where `mask` removes +any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type. + +Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to +the range of the type, rather than the bits shifted out of the LHS being returned to the other end. +The primitive integer types all implement a `rotate_left` function, which may be what you want +instead. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(7), -128); +assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(128), -1);", +$EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_shl(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT) + } + } + } + + doc_comment! { + concat!("Panic-free bitwise shift-right; yields `self >> mask(rhs)`, where `mask` +removes any high-order bits of `rhs` that would cause the shift to exceed the bitwidth of the type. + +Note that this is *not* the same as a rotate-right; the RHS of a wrapping shift-right is restricted +to the range of the type, rather than the bits shifted out of the LHS being returned to the other +end. 
The primitive integer types all implement a `rotate_right` function, which may be what you want +instead. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!((-128", stringify!($SelfT), ").wrapping_shr(7), -1); +assert_eq!((-128i16).wrapping_shr(64), -128);", +$EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_shr(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT) + } + } + } + + doc_comment! { + concat!("Wrapping (modular) absolute value. Computes `self.abs()`, wrapping around at +the boundary of the type. + +The only case where such wrapping can occur is when one takes the absolute value of the negative +minimal value for the type this is a positive value that is too large to represent in the type. In +such a case, this function returns `MIN` itself. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_abs(), 100); +assert_eq!((-100", stringify!($SelfT), ").wrapping_abs(), 100); +assert_eq!(", stringify!($SelfT), "::min_value().wrapping_abs(), ", stringify!($SelfT), +"::min_value()); +assert_eq!((-128i8).wrapping_abs() as u8, 128);", +$EndFeature, " +```"), + #[stable(feature = "no_panic_abs", since = "1.13.0")] + #[inline] + pub fn wrapping_abs(self) -> Self { + if self.is_negative() { + self.wrapping_neg() + } else { + self + } + } + } + + doc_comment! { + concat!("Wrapping (modular) exponentiation. Computes `self.pow(exp)`, +wrapping around at the boundary of the type. 
+ +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "assert_eq!(3", stringify!($SelfT), ".wrapping_pow(4), 81); +assert_eq!(3i8.wrapping_pow(5), -13); +assert_eq!(3i8.wrapping_pow(6), -39);", +$EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn wrapping_pow(self, mut exp: u32) -> Self { + let mut base = self; + let mut acc: Self = 1; + + while exp > 1 { + if (exp & 1) == 1 { + acc = acc.wrapping_mul(base); + } + exp /= 2; + base = base.wrapping_mul(base); + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { + acc = acc.wrapping_mul(base); + } + + acc + } + } + + doc_comment! { + concat!("Calculates `self` + `rhs` + +Returns a tuple of the addition along with a boolean indicating whether an arithmetic overflow would +occur. If an overflow would have occurred then the wrapped value is returned. + +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false)); +assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (", stringify!($SelfT), +"::MIN, true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_add(self, rhs: Self) -> (Self, bool) { + let (a, b) = unsafe { + intrinsics::add_with_overflow(self as $ActualT, + rhs as $ActualT) + }; + (a as Self, b) + } + } + + doc_comment! { + concat!("Calculates `self` - `rhs` + +Returns a tuple of the subtraction along with a boolean indicating whether an arithmetic overflow +would occur. If an overflow would have occurred then the wrapped value is returned. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_sub(1), (", stringify!($SelfT), +"::MAX, true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_sub(self, rhs: Self) -> (Self, bool) { + let (a, b) = unsafe { + intrinsics::sub_with_overflow(self as $ActualT, + rhs as $ActualT) + }; + (a as Self, b) + } + } + + doc_comment! { + concat!("Calculates the multiplication of `self` and `rhs`. + +Returns a tuple of the multiplication along with a boolean indicating whether an arithmetic overflow +would occur. If an overflow would have occurred then the wrapped value is returned. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(5", stringify!($SelfT), ".overflowing_mul(2), (10, false)); +assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true));", +$EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_mul(self, rhs: Self) -> (Self, bool) { + let (a, b) = unsafe { + intrinsics::mul_with_overflow(self as $ActualT, + rhs as $ActualT) + }; + (a as Self, b) + } + } + + doc_comment! { + concat!("Calculates the divisor when `self` is divided by `rhs`. + +Returns a tuple of the divisor along with a boolean indicating whether an arithmetic overflow would +occur. If an overflow would occur then self is returned. + +# Panics + +This function will panic if `rhs` is 0. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div(-1), (", stringify!($SelfT), +"::MIN, true));", +$EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_div(self, rhs: Self) -> (Self, bool) { + if self == Self::min_value() && rhs == -1 { + (self, true) + } else { + (self / rhs, false) + } + } + } + + doc_comment! { + concat!("Calculates the quotient of Euclidean division `self.div_euc(rhs)`. + +Returns a tuple of the divisor along with a boolean indicating whether an arithmetic overflow would +occur. If an overflow would occur then `self` is returned. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_div_euc(2), (2, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div_euc(-1), (", stringify!($SelfT), +"::MIN, true)); +```"), + #[inline] + #[unstable(feature = "euclidean_division", issue = "49048")] + pub fn overflowing_div_euc(self, rhs: Self) -> (Self, bool) { + if self == Self::min_value() && rhs == -1 { + (self, true) + } else { + (self.div_euc(rhs), false) + } + } + } + + doc_comment! { + concat!("Calculates the remainder when `self` is divided by `rhs`. + +Returns a tuple of the remainder after dividing along with a boolean indicating whether an +arithmetic overflow would occur. If an overflow would occur then 0 is returned. + +# Panics + +This function will panic if `rhs` is 0. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem(-1), (0, true));", +$EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_rem(self, rhs: Self) -> (Self, bool) { + if self == Self::min_value() && rhs == -1 { + (0, true) + } else { + (self % rhs, false) + } + } + } + + + doc_comment! { + concat!("Calculates the remainder `self.mod_euc(rhs)` by Euclidean division. + +Returns a tuple of the remainder after dividing along with a boolean indicating whether an +arithmetic overflow would occur. If an overflow would occur then 0 is returned. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_mod_euc(2), (1, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_mod_euc(-1), (0, true)); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn overflowing_mod_euc(self, rhs: Self) -> (Self, bool) { + if self == Self::min_value() && rhs == -1 { + (0, true) + } else { + (self.mod_euc(rhs), false) + } + } + } + + + doc_comment! { + concat!("Negates self, overflowing if this is equal to the minimum value. + +Returns a tuple of the negated version of self along with a boolean indicating whether an overflow +happened. If `self` is the minimum value (e.g. `i32::MIN` for values of type `i32`), then the +minimum value will be returned again and `true` will be returned for an overflow happening. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_neg(), (", stringify!($SelfT), +"::MIN, true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_neg(self) -> (Self, bool) { + if self == Self::min_value() { + (Self::min_value(), true) + } else { + (-self, false) + } + } + } + + doc_comment! { + concat!("Shifts self left by `rhs` bits. + +Returns a tuple of the shifted version of self along with a boolean indicating whether the shift +value was larger than or equal to the number of bits. If the shift value is too large, then value is +masked (N-1) where N is the number of bits, and this value is then used to perform the shift. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0x1", stringify!($SelfT),".overflowing_shl(4), (0x10, false)); +assert_eq!(0x1i32.overflowing_shl(36), (0x10, true));", +$EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + } + } + + doc_comment! { + concat!("Shifts self right by `rhs` bits. + +Returns a tuple of the shifted version of self along with a boolean indicating whether the shift +value was larger than or equal to the number of bits. If the shift value is too large, then value is +masked (N-1) where N is the number of bits, and this value is then used to perform the shift. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(4), (0x1, false)); +assert_eq!(0x10i32.overflowing_shr(36), (0x1, true));", +$EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + } + } + + doc_comment! { + concat!("Computes the absolute value of `self`. + +Returns a tuple of the absolute version of self along with a boolean indicating whether an overflow +happened. If self is the minimum value (e.g. ", stringify!($SelfT), "::MIN for values of type + ", stringify!($SelfT), "), then the minimum value will be returned again and true will be returned +for an overflow happening. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(10", stringify!($SelfT), ".overflowing_abs(), (10, false)); +assert_eq!((-10", stringify!($SelfT), ").overflowing_abs(), (10, false)); +assert_eq!((", stringify!($SelfT), "::min_value()).overflowing_abs(), (", stringify!($SelfT), +"::min_value(), true));", +$EndFeature, " +```"), + #[stable(feature = "no_panic_abs", since = "1.13.0")] + #[inline] + pub fn overflowing_abs(self) -> (Self, bool) { + if self.is_negative() { + self.overflowing_neg() + } else { + (self, false) + } + } + } + + doc_comment! { + concat!("Raises self to the power of `exp`, using exponentiation by squaring. + +Returns a tuple of the exponentiation along with a bool indicating +whether an overflow happened. 
+ +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "assert_eq!(3", stringify!($SelfT), ".overflowing_pow(4), (81, false)); +assert_eq!(3i8.overflowing_pow(5), (-13, true));", +$EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn overflowing_pow(self, mut exp: u32) -> (Self, bool) { + let mut base = self; + let mut acc: Self = 1; + let mut overflown = false; + // Scratch space for storing results of overflowing_mul. + let mut r; + + while exp > 1 { + if (exp & 1) == 1 { + r = acc.overflowing_mul(base); + acc = r.0; + overflown |= r.1; + } + exp /= 2; + r = base.overflowing_mul(base); + base = r.0; + overflown |= r.1; + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { + r = acc.overflowing_mul(base); + acc = r.0; + overflown |= r.1; + } + + (acc, overflown) + } + } + + doc_comment! { + concat!("Raises self to the power of `exp`, using exponentiation by squaring. + +# Examples + +Basic usage: + +``` +", $Feature, "let x: ", stringify!($SelfT), " = 2; // or any other integer type + +assert_eq!(x.pow(5), 32);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + #[rustc_inherit_overflow_checks] + pub fn pow(self, mut exp: u32) -> Self { + let mut base = self; + let mut acc = 1; + + while exp > 1 { + if (exp & 1) == 1 { + acc = acc * base; + } + exp /= 2; + base = base * base; + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { acc = acc * base; } - exp /= 2; - base = base * base; - } - // Deal with the final bit of the exponent separately, since - // squaring the base afterwards is not necessary and may cause a - // needless overflow. 
- if exp == 1 { - acc = acc * base; - } - - acc - } - - /// Computes the absolute value of `self`. - /// - /// # Overflow behavior - /// - /// The absolute value of `i32::min_value()` cannot be represented as an - /// `i32`, and attempting to calculate it will cause an overflow. This - /// means that code in debug mode will trigger a panic on this case and - /// optimized code will return `i32::min_value()` without a panic. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(10i8.abs(), 10); - /// assert_eq!((-10i8).abs(), 10); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - #[rustc_inherit_overflow_checks] - pub fn abs(self) -> Self { - if self.is_negative() { - // Note that the #[inline] above means that the overflow - // semantics of this negation depend on the crate we're being - // inlined into. - -self - } else { - self + acc } } - /// Returns a number representing sign of `self`. - /// - /// - `0` if the number is zero - /// - `1` if the number is positive - /// - `-1` if the number is negative - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(10i8.signum(), 1); - /// assert_eq!(0i8.signum(), 0); - /// assert_eq!((-10i8).signum(), -1); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn signum(self) -> Self { - match self { - n if n > 0 => 1, - 0 => 0, - _ => -1, + doc_comment! { + concat!("Calculates the quotient of Euclidean division of `self` by `rhs`. + +This computes the integer `n` such that `self = n * rhs + self.mod_euc(rhs)`. +In other words, the result is `self / rhs` rounded to the integer `n` +such that `self >= n * rhs`. + +# Panics + +This function will panic if `rhs` is 0. 
+ +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +let a: ", stringify!($SelfT), " = 7; // or any other integer type +let b = 4; + +assert_eq!(a.div_euc(b), 1); // 7 >= 4 * 1 +assert_eq!(a.div_euc(-b), -1); // 7 >= -4 * -1 +assert_eq!((-a).div_euc(b), -2); // -7 >= 4 * -2 +assert_eq!((-a).div_euc(-b), 2); // -7 >= -4 * 2 +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + #[rustc_inherit_overflow_checks] + pub fn div_euc(self, rhs: Self) -> Self { + let q = self / rhs; + if self % rhs < 0 { + return if rhs > 0 { q - 1 } else { q + 1 } + } + q } } - /// Returns `true` if `self` is positive and `false` if the number - /// is zero or negative. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert!(10i8.is_positive()); - /// assert!(!(-10i8).is_positive()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn is_positive(self) -> bool { self > 0 } - /// Returns `true` if `self` is negative and `false` if the number - /// is zero or positive. + doc_comment! { + concat!("Calculates the remainder `self mod rhs` by Euclidean division. + +In particular, the result `n` satisfies `0 <= n < rhs.abs()`. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +let a: ", stringify!($SelfT), " = 7; // or any other integer type +let b = 4; + +assert_eq!(a.mod_euc(b), 3); +assert_eq!((-a).mod_euc(b), 1); +assert_eq!(a.mod_euc(-b), 3); +assert_eq!((-a).mod_euc(-b), 1); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + #[rustc_inherit_overflow_checks] + pub fn mod_euc(self, rhs: Self) -> Self { + let r = self % rhs; + if r < 0 { + if rhs < 0 { + r - rhs + } else { + r + rhs + } + } else { + r + } + } + } + + doc_comment! { + concat!("Computes the absolute value of `self`. 
+ +# Overflow behavior + +The absolute value of `", stringify!($SelfT), "::min_value()` cannot be represented as an +`", stringify!($SelfT), "`, and attempting to calculate it will cause an overflow. This means that +code in debug mode will trigger a panic on this case and optimized code will return `", +stringify!($SelfT), "::min_value()` without a panic. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(10", stringify!($SelfT), ".abs(), 10); +assert_eq!((-10", stringify!($SelfT), ").abs(), 10);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + #[rustc_inherit_overflow_checks] + pub fn abs(self) -> Self { + if self.is_negative() { + // Note that the #[inline] above means that the overflow + // semantics of this negation depend on the crate we're being + // inlined into. + -self + } else { + self + } + } + } + + doc_comment! { + concat!("Returns a number representing sign of `self`. + + - `0` if the number is zero + - `1` if the number is positive + - `-1` if the number is negative + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(10", stringify!($SelfT), ".signum(), 1); +assert_eq!(0", stringify!($SelfT), ".signum(), 0); +assert_eq!((-10", stringify!($SelfT), ").signum(), -1);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn signum(self) -> Self { + match self { + n if n > 0 => 1, + 0 => 0, + _ => -1, + } + } + } + + doc_comment! { + concat!("Returns `true` if `self` is positive and `false` if the number is zero or +negative. + +# Examples + +Basic usage: + +``` +", $Feature, "assert!(10", stringify!($SelfT), ".is_positive()); +assert!(!(-10", stringify!($SelfT), ").is_positive());", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_positive(self) -> bool { self > 0 } + } + + doc_comment! { + concat!("Returns `true` if `self` is negative and `false` if the number is zero or +positive. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert!((-10", stringify!($SelfT), ").is_negative()); +assert!(!10", stringify!($SelfT), ".is_negative());", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_negative(self) -> bool { self < 0 } + } + + /// Return the memory representation of this integer as a byte array in + /// big-endian (network) byte order. /// /// # Examples /// - /// Basic usage: + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let bytes = 0x12_34_56_78_i32.to_be_bytes(); + /// assert_eq!(bytes, [0x12, 0x34, 0x56, 0x78]); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn to_be_bytes(self) -> [u8; mem::size_of::()] { + self.to_be().to_ne_bytes() + } + + /// Return the memory representation of this integer as a byte array in + /// little-endian byte order. + /// + /// # Examples /// /// ``` - /// assert!((-10i8).is_negative()); - /// assert!(!10i8.is_negative()); + /// #![feature(int_to_from_bytes)] + /// + /// let bytes = 0x12_34_56_78_i32.to_le_bytes(); + /// assert_eq!(bytes, [0x78, 0x56, 0x34, 0x12]); /// ``` - #[stable(feature = "rust1", since = "1.0.0")] + #[unstable(feature = "int_to_from_bytes", issue = "52963")] #[inline] - pub fn is_negative(self) -> bool { self < 0 } + pub fn to_le_bytes(self) -> [u8; mem::size_of::()] { + self.to_le().to_ne_bytes() + } + + /// Return the memory representation of this integer as a byte array in + /// native byte order. + /// + /// As the target platform's native endianness is used, portable code + /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, + /// instead. 
+ /// + /// [`to_be_bytes`]: #method.to_be_bytes + /// [`to_le_bytes`]: #method.to_le_bytes + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let bytes = i32::min_value().to_be().to_ne_bytes(); + /// assert_eq!(bytes, [0x80, 0, 0, 0]); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn to_ne_bytes(self) -> [u8; mem::size_of::()] { + unsafe { mem::transmute(self) } + } + + /// Create an integer value from its representation as a byte array in + /// big endian. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let int = i32::from_be_bytes([0x12, 0x34, 0x56, 0x78]); + /// assert_eq!(int, 0x12_34_56_78); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn from_be_bytes(bytes: [u8; mem::size_of::()]) -> Self { + Self::from_be(Self::from_ne_bytes(bytes)) + } + + /// Create an integer value from its representation as a byte array in + /// little endian. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let int = i32::from_le_bytes([0x12, 0x34, 0x56, 0x78]); + /// assert_eq!(int, 0x78_56_34_12); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn from_le_bytes(bytes: [u8; mem::size_of::()]) -> Self { + Self::from_le(Self::from_ne_bytes(bytes)) + } + + /// Create an integer value from its memory representation as a byte + /// array in native endianness. + /// + /// As the target platform's native endianness is used, portable code + /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as + /// appropriate instead. 
+ /// + /// [`from_be_bytes`]: #method.from_be_bytes + /// [`from_le_bytes`]: #method.from_le_bytes + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let int = i32::from_be(i32::from_ne_bytes([0x80, 0, 0, 0])); + /// assert_eq!(int, i32::min_value()); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { + unsafe { mem::transmute(bytes) } + } } } #[lang = "i8"] impl i8 { - int_impl! { i8, i8, u8, 8 } + int_impl! { i8, i8, u8, 8, -128, 127, "", "", 2, "-0x7e", "0xa", "0x12", "0x12", "0x48" } } #[lang = "i16"] impl i16 { - int_impl! { i16, i16, u16, 16 } + int_impl! { i16, i16, u16, 16, -32768, 32767, "", "", 4, "-0x5ffd", "0x3a", "0x1234", "0x3412", + "0x2c48" } } #[lang = "i32"] impl i32 { - int_impl! { i32, i32, u32, 32 } + int_impl! { i32, i32, u32, 32, -2147483648, 2147483647, "", "", 8, "0x10000b3", "0xb301", + "0x12345678", "0x78563412", "0x1e6a2c48" } } #[lang = "i64"] impl i64 { - int_impl! { i64, i64, u64, 64 } + int_impl! { i64, i64, u64, 64, -9223372036854775808, 9223372036854775807, "", "", 12, + "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", + "0x6a2c48091e6a2c48" } } #[lang = "i128"] impl i128 { - int_impl! { i128, i128, u128, 128 } + int_impl! { i128, i128, u128, 128, -170141183460469231731687303715884105728, + 170141183460469231731687303715884105727, "", "", 16, + "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", + "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48" + } } #[cfg(target_pointer_width = "16")] #[lang = "isize"] impl isize { - int_impl! { isize, i16, u16, 16 } + int_impl! { isize, i16, u16, 16, -32768, 32767, "", "", 4, "-0x5ffd", "0x3a", "0x1234", + "0x3412", "0x2c48" } } #[cfg(target_pointer_width = "32")] #[lang = "isize"] impl isize { - int_impl! { isize, i32, u32, 32 } + int_impl! 
{ isize, i32, u32, 32, -2147483648, 2147483647, "", "", 8, "0x10000b3", "0xb301", + "0x12345678", "0x78563412", "0x1e6a2c48" } } #[cfg(target_pointer_width = "64")] #[lang = "isize"] impl isize { - int_impl! { isize, i64, u64, 64 } + int_impl! { isize, i64, u64, 64, -9223372036854775808, 9223372036854775807, "", "", + 12, "0xaa00000000006e1", "0x6e10aa", "0x1234567890123456", "0x5634129078563412", + "0x6a2c48091e6a2c48" } +} + +// Emits the correct `cttz` call, depending on the size of the type. +macro_rules! uint_cttz_call { + // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic + // emits two conditional moves on x86_64. By promoting the value to + // u16 and setting bit 8, we get better code without any conditional + // operations. + // FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284) + // pending, remove this workaround once LLVM generates better code + // for cttz8. + ($value:expr, 8) => { intrinsics::cttz($value as u16 | 0x100) }; + ($value:expr, $_BITS:expr) => { intrinsics::cttz($value) } } // `Int` + `UnsignedInt` implemented for unsigned integers macro_rules! uint_impl { - ($SelfT:ty, $ActualT:ty, $BITS:expr) => { - /// Returns the smallest value that can be represented by this integer type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(u8::min_value(), 0); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub const fn min_value() -> Self { 0 } + ($SelfT:ty, $ActualT:ty, $BITS:expr, $MaxV:expr, $Feature:expr, $EndFeature:expr, + $rot:expr, $rot_op:expr, $rot_result:expr, $swap_op:expr, $swapped:expr, + $reversed:expr ) => { + doc_comment! { + concat!("Returns the smallest value that can be represented by this integer type. - /// Returns the largest value that can be represented by this integer type. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(u8::max_value(), 255); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub const fn max_value() -> Self { !0 } +# Examples - /// Converts a string slice in a given base to an integer. - /// - /// The string is expected to be an optional `+` sign - /// followed by digits. - /// Leading and trailing whitespace represent an error. - /// Digits are a subset of these characters, depending on `radix`: - /// - /// * `0-9` - /// * `a-z` - /// * `A-Z` - /// - /// # Panics - /// - /// This function panics if `radix` is not in the range from 2 to 36. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(u32::from_str_radix("A", 16), Ok(10)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn from_str_radix(src: &str, radix: u32) -> Result { - from_str_radix(src, radix) +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::min_value(), 0);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub const fn min_value() -> Self { 0 } } - /// Returns the number of ones in the binary representation of `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0b01001100u8; - /// - /// assert_eq!(n.count_ones(), 3); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn count_ones(self) -> u32 { - unsafe { intrinsics::ctpop(self as $ActualT) as u32 } + doc_comment! { + concat!("Returns the largest value that can be represented by this integer type. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::max_value(), ", +stringify!($MaxV), ");", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub const fn max_value() -> Self { !0 } } - /// Returns the number of zeros in the binary representation of `self`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0b01001100u8; - /// - /// assert_eq!(n.count_zeros(), 5); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn count_zeros(self) -> u32 { - (!self).count_ones() + doc_comment! { + concat!("Converts a string slice in a given base to an integer. + +The string is expected to be an optional `+` sign +followed by digits. +Leading and trailing whitespace represent an error. +Digits are a subset of these characters, depending on `radix`: + +* `0-9` +* `a-z` +* `A-Z` + +# Panics + +This function panics if `radix` is not in the range from 2 to 36. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::from_str_radix(\"A\", 16), Ok(10));", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + pub fn from_str_radix(src: &str, radix: u32) -> Result { + from_str_radix(src, radix) + } } - /// Returns the number of leading zeros in the binary representation - /// of `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0b0101000u16; - /// - /// assert_eq!(n.leading_zeros(), 10); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn leading_zeros(self) -> u32 { - unsafe { intrinsics::ctlz(self as $ActualT) as u32 } + doc_comment! { + concat!("Returns the number of ones in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0b01001100", stringify!($SelfT), "; + +assert_eq!(n.count_ones(), 3);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn count_ones(self) -> u32 { + unsafe { intrinsics::ctpop(self as $ActualT) as u32 } + } } - /// Returns the number of trailing zeros in the binary representation - /// of `self`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0b0101000u16; - /// - /// assert_eq!(n.trailing_zeros(), 3); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn trailing_zeros(self) -> u32 { - // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic - // emits two conditional moves on x86_64. By promoting the value to - // u16 and setting bit 8, we get better code without any conditional - // operations. - // FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284) - // pending, remove this workaround once LLVM generates better code - // for cttz8. - unsafe { - if $BITS == 8 { - intrinsics::cttz(self as u16 | 0x100) as u32 - } else { - intrinsics::cttz(self) as u32 + doc_comment! { + concat!("Returns the number of zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(", stringify!($SelfT), "::max_value().count_zeros(), 0);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn count_zeros(self) -> u32 { + (!self).count_ones() + } + } + + doc_comment! { + concat!("Returns the number of leading zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = ", stringify!($SelfT), "::max_value() >> 2; + +assert_eq!(n.leading_zeros(), 2);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn leading_zeros(self) -> u32 { + unsafe { intrinsics::ctlz(self as $ActualT) as u32 } + } + } + + doc_comment! { + concat!("Returns the number of trailing zeros in the binary representation +of `self`. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0b0101000", stringify!($SelfT), "; + +assert_eq!(n.trailing_zeros(), 3);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn trailing_zeros(self) -> u32 { + unsafe { uint_cttz_call!(self, $BITS) as u32 } + } + } + + doc_comment! { + concat!("Shifts the bits to the left by a specified amount, `n`, +wrapping the truncated bits to the end of the resulting integer. + +Please note this isn't the same operation as `<<`! + +# Examples + +Basic usage: + +``` +let n = ", $rot_op, stringify!($SelfT), "; +let m = ", $rot_result, "; + +assert_eq!(n.rotate_left(", $rot, "), m); +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rotate_left(self, n: u32) -> Self { + // Protect against undefined behaviour for over-long bit shifts + let n = n % $BITS; + (self << n) | (self >> (($BITS - n) % $BITS)) + } + } + + doc_comment! { + concat!("Shifts the bits to the right by a specified amount, `n`, +wrapping the truncated bits to the beginning of the resulting +integer. + +Please note this isn't the same operation as `>>`! + +# Examples + +Basic usage: + +``` +let n = ", $rot_result, stringify!($SelfT), "; +let m = ", $rot_op, "; + +assert_eq!(n.rotate_right(", $rot, "), m); +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rotate_right(self, n: u32) -> Self { + // Protect against undefined behaviour for over-long bit shifts + let n = n % $BITS; + (self >> n) | (self << (($BITS - n) % $BITS)) + } + } + + doc_comment! { + concat!(" +Reverses the byte order of the integer. 
+ +# Examples + +Basic usage: + +``` +let n = ", $swap_op, stringify!($SelfT), "; +let m = n.swap_bytes(); + +assert_eq!(m, ", $swapped, "); +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn swap_bytes(self) -> Self { + unsafe { intrinsics::bswap(self as $ActualT) as Self } + } + } + + doc_comment! { + concat!("Reverses the bit pattern of the integer. + +# Examples + +Basic usage: + +``` +#![feature(reverse_bits)] + +let n = ", $swap_op, stringify!($SelfT), "; +let m = n.reverse_bits(); + +assert_eq!(m, ", $reversed, "); +```"), + #[unstable(feature = "reverse_bits", issue = "48763")] + #[inline] + pub fn reverse_bits(self) -> Self { + unsafe { intrinsics::bitreverse(self as $ActualT) as Self } + } + } + + doc_comment! { + concat!("Converts an integer from big endian to the target's endianness. + +On big endian this is a no-op. On little endian the bytes are +swapped. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"big\") { + assert_eq!(", stringify!($SelfT), "::from_be(n), n) +} else { + assert_eq!(", stringify!($SelfT), "::from_be(n), n.swap_bytes()) +}", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn from_be(x: Self) -> Self { + #[cfg(target_endian = "big")] + { + x + } + #[cfg(not(target_endian = "big"))] + { + x.swap_bytes() } } } - /// Shifts the bits to the left by a specified amount, `n`, - /// wrapping the truncated bits to the end of the resulting integer. - /// - /// Please note this isn't the same operation as `<<`! 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// let m = 0x3456789ABCDEF012u64; - /// - /// assert_eq!(n.rotate_left(12), m); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rotate_left(self, n: u32) -> Self { - // Protect against undefined behaviour for over-long bit shifts - let n = n % $BITS; - (self << n) | (self >> (($BITS - n) % $BITS)) - } + doc_comment! { + concat!("Converts an integer from little endian to the target's endianness. - /// Shifts the bits to the right by a specified amount, `n`, - /// wrapping the truncated bits to the beginning of the resulting - /// integer. - /// - /// Please note this isn't the same operation as `>>`! - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// let m = 0xDEF0123456789ABCu64; - /// - /// assert_eq!(n.rotate_right(12), m); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn rotate_right(self, n: u32) -> Self { - // Protect against undefined behaviour for over-long bit shifts - let n = n % $BITS; - (self >> n) | (self << (($BITS - n) % $BITS)) - } +On little endian this is a no-op. On big endian the bytes are +swapped. - /// Reverses the byte order of the integer. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n: u16 = 0b0000000_01010101; - /// assert_eq!(n, 85); - /// - /// let m = n.swap_bytes(); - /// - /// assert_eq!(m, 0b01010101_00000000); - /// assert_eq!(m, 21760); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn swap_bytes(self) -> Self { - unsafe { intrinsics::bswap(self as $ActualT) as Self } - } +# Examples - /// Converts an integer from big endian to the target's endianness. - /// - /// On big endian this is a no-op. On little endian the bytes are - /// swapped. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// - /// if cfg!(target_endian = "big") { - /// assert_eq!(u64::from_be(n), n) - /// } else { - /// assert_eq!(u64::from_be(n), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn from_be(x: Self) -> Self { - if cfg!(target_endian = "big") { x } else { x.swap_bytes() } - } +Basic usage: - /// Converts an integer from little endian to the target's endianness. - /// - /// On little endian this is a no-op. On big endian the bytes are - /// swapped. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// - /// if cfg!(target_endian = "little") { - /// assert_eq!(u64::from_le(n), n) - /// } else { - /// assert_eq!(u64::from_le(n), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn from_le(x: Self) -> Self { - if cfg!(target_endian = "little") { x } else { x.swap_bytes() } - } +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; - /// Converts `self` to big endian from the target's endianness. - /// - /// On big endian this is a no-op. On little endian the bytes are - /// swapped. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// - /// if cfg!(target_endian = "big") { - /// assert_eq!(n.to_be(), n) - /// } else { - /// assert_eq!(n.to_be(), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn to_be(self) -> Self { // or not to be? - if cfg!(target_endian = "big") { self } else { self.swap_bytes() } - } - - /// Converts `self` to little endian from the target's endianness. - /// - /// On little endian this is a no-op. On big endian the bytes are - /// swapped. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// - /// if cfg!(target_endian = "little") { - /// assert_eq!(n.to_le(), n) - /// } else { - /// assert_eq!(n.to_le(), n.swap_bytes()) - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn to_le(self) -> Self { - if cfg!(target_endian = "little") { self } else { self.swap_bytes() } - } - - /// Checked integer addition. Computes `self + rhs`, returning `None` - /// if overflow occurred. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(5u16.checked_add(65530), Some(65535)); - /// assert_eq!(6u16.checked_add(65530), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_add(self, rhs: Self) -> Option { - let (a, b) = self.overflowing_add(rhs); - if b {None} else {Some(a)} - } - - /// Checked integer subtraction. Computes `self - rhs`, returning - /// `None` if overflow occurred. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(1u8.checked_sub(1), Some(0)); - /// assert_eq!(0u8.checked_sub(1), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_sub(self, rhs: Self) -> Option { - let (a, b) = self.overflowing_sub(rhs); - if b {None} else {Some(a)} - } - - /// Checked integer multiplication. Computes `self * rhs`, returning - /// `None` if overflow occurred. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(5u8.checked_mul(51), Some(255)); - /// assert_eq!(5u8.checked_mul(52), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_mul(self, rhs: Self) -> Option { - let (a, b) = self.overflowing_mul(rhs); - if b {None} else {Some(a)} - } - - /// Checked integer division. Computes `self / rhs`, returning `None` - /// if `rhs == 0`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(128u8.checked_div(2), Some(64)); - /// assert_eq!(1u8.checked_div(0), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn checked_div(self, rhs: Self) -> Option { - match rhs { - 0 => None, - rhs => Some(unsafe { intrinsics::unchecked_div(self, rhs) }), +if cfg!(target_endian = \"little\") { + assert_eq!(", stringify!($SelfT), "::from_le(n), n) +} else { + assert_eq!(", stringify!($SelfT), "::from_le(n), n.swap_bytes()) +}", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn from_le(x: Self) -> Self { + #[cfg(target_endian = "little")] + { + x + } + #[cfg(not(target_endian = "little"))] + { + x.swap_bytes() + } } } - /// Checked integer remainder. Computes `self % rhs`, returning `None` - /// if `rhs == 0`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(5u32.checked_rem(2), Some(1)); - /// assert_eq!(5u32.checked_rem(0), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_rem(self, rhs: Self) -> Option { - if rhs == 0 { - None - } else { - Some(unsafe { intrinsics::unchecked_rem(self, rhs) }) + doc_comment! { + concat!("Converts `self` to big endian from the target's endianness. + +On big endian this is a no-op. On little endian the bytes are +swapped. + +# Examples + +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"big\") { + assert_eq!(n.to_be(), n) +} else { + assert_eq!(n.to_be(), n.swap_bytes()) +}", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn to_be(self) -> Self { // or not to be? 
+ #[cfg(target_endian = "big")] + { + self + } + #[cfg(not(target_endian = "big"))] + { + self.swap_bytes() + } } } - /// Checked negation. Computes `-self`, returning `None` unless `self == - /// 0`. - /// - /// Note that negating any positive integer will overflow. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(0u32.checked_neg(), Some(0)); - /// assert_eq!(1u32.checked_neg(), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_neg(self) -> Option { - let (a, b) = self.overflowing_neg(); - if b {None} else {Some(a)} - } + doc_comment! { + concat!("Converts `self` to little endian from the target's endianness. - /// Checked shift left. Computes `self << rhs`, returning `None` - /// if `rhs` is larger than or equal to the number of bits in `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(0x10u32.checked_shl(4), Some(0x100)); - /// assert_eq!(0x10u32.checked_shl(33), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_shl(self, rhs: u32) -> Option { - let (a, b) = self.overflowing_shl(rhs); - if b {None} else {Some(a)} - } +On little endian this is a no-op. On big endian the bytes are +swapped. - /// Checked shift right. Computes `self >> rhs`, returning `None` - /// if `rhs` is larger than or equal to the number of bits in `self`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(0x10u32.checked_shr(4), Some(0x1)); - /// assert_eq!(0x10u32.checked_shr(33), None); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn checked_shr(self, rhs: u32) -> Option { - let (a, b) = self.overflowing_shr(rhs); - if b {None} else {Some(a)} - } +# Examples - /// Saturating integer addition. Computes `self + rhs`, saturating at - /// the numeric bounds instead of overflowing. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100u8.saturating_add(1), 101); - /// assert_eq!(200u8.saturating_add(127), 255); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn saturating_add(self, rhs: Self) -> Self { - match self.checked_add(rhs) { - Some(x) => x, - None => Self::max_value(), +Basic usage: + +``` +", $Feature, "let n = 0x1A", stringify!($SelfT), "; + +if cfg!(target_endian = \"little\") { + assert_eq!(n.to_le(), n) +} else { + assert_eq!(n.to_le(), n.swap_bytes()) +}", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_int_ops")] + #[inline] + pub const fn to_le(self) -> Self { + #[cfg(target_endian = "little")] + { + self + } + #[cfg(not(target_endian = "little"))] + { + self.swap_bytes() + } } } - /// Saturating integer subtraction. Computes `self - rhs`, saturating - /// at the numeric bounds instead of overflowing. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100u8.saturating_sub(27), 73); - /// assert_eq!(13u8.saturating_sub(127), 0); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn saturating_sub(self, rhs: Self) -> Self { - match self.checked_sub(rhs) { - Some(x) => x, - None => Self::min_value(), + doc_comment! { + concat!("Checked integer addition. Computes `self + rhs`, returning `None` +if overflow occurred. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!((", stringify!($SelfT), "::max_value() - 2).checked_add(1), ", +"Some(", stringify!($SelfT), "::max_value() - 1)); +assert_eq!((", stringify!($SelfT), "::max_value() - 2).checked_add(3), None);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_add(self, rhs: Self) -> Option { + let (a, b) = self.overflowing_add(rhs); + if b {None} else {Some(a)} } } - /// Saturating integer multiplication. 
Computes `self * rhs`, - /// saturating at the numeric bounds instead of overflowing. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::u32; - /// - /// assert_eq!(100u32.saturating_mul(127), 12700); - /// assert_eq!((1u32 << 23).saturating_mul(1 << 23), u32::MAX); - /// ``` - #[stable(feature = "wrapping", since = "1.7.0")] - #[inline] - pub fn saturating_mul(self, rhs: Self) -> Self { - self.checked_mul(rhs).unwrap_or(Self::max_value()) - } + doc_comment! { + concat!("Checked integer subtraction. Computes `self - rhs`, returning +`None` if overflow occurred. - /// Wrapping (modular) addition. Computes `self + rhs`, - /// wrapping around at the boundary of the type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(200u8.wrapping_add(55), 255); - /// assert_eq!(200u8.wrapping_add(155), 99); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn wrapping_add(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_add(self, rhs) +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(1", stringify!($SelfT), ".checked_sub(1), Some(0)); +assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_sub(self, rhs: Self) -> Option { + let (a, b) = self.overflowing_sub(rhs); + if b {None} else {Some(a)} } } - /// Wrapping (modular) subtraction. Computes `self - rhs`, - /// wrapping around at the boundary of the type. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100u8.wrapping_sub(100), 0); - /// assert_eq!(100u8.wrapping_sub(155), 201); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn wrapping_sub(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_sub(self, rhs) + doc_comment! { + concat!("Checked integer multiplication. Computes `self * rhs`, returning +`None` if overflow occurred. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(5", stringify!($SelfT), ".checked_mul(1), Some(5)); +assert_eq!(", stringify!($SelfT), "::max_value().checked_mul(2), None);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_mul(self, rhs: Self) -> Option { + let (a, b) = self.overflowing_mul(rhs); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked integer division. Computes `self / rhs`, returning `None` +if `rhs == 0`. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(128", stringify!($SelfT), ".checked_div(2), Some(64)); +assert_eq!(1", stringify!($SelfT), ".checked_div(0), None);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn checked_div(self, rhs: Self) -> Option { + match rhs { + 0 => None, + rhs => Some(unsafe { intrinsics::unchecked_div(self, rhs) }), + } + } + } + + doc_comment! { + concat!("Checked Euclidean division. Computes `self.div_euc(rhs)`, returning `None` +if `rhs == 0`. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(128", stringify!($SelfT), ".checked_div(2), Some(64)); +assert_eq!(1", stringify!($SelfT), ".checked_div_euc(0), None); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn checked_div_euc(self, rhs: Self) -> Option { + if rhs == 0 { + None + } else { + Some(self.div_euc(rhs)) + } + } + } + + + doc_comment! { + concat!("Checked integer remainder. Computes `self % rhs`, returning `None` +if `rhs == 0`. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(5", stringify!($SelfT), ".checked_rem(2), Some(1)); +assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);", $EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_rem(self, rhs: Self) -> Option { + if rhs == 0 { + None + } else { + Some(unsafe { intrinsics::unchecked_rem(self, rhs) }) + } + } + } + + doc_comment! { + concat!("Checked Euclidean modulo. Computes `self.mod_euc(rhs)`, returning `None` +if `rhs == 0`. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(2), Some(1)); +assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(0), None); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn checked_mod_euc(self, rhs: Self) -> Option { + if rhs == 0 { + None + } else { + Some(self.mod_euc(rhs)) + } + } + } + + doc_comment! { + concat!("Checked negation. Computes `-self`, returning `None` unless `self == +0`. + +Note that negating any positive integer will overflow. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0", stringify!($SelfT), ".checked_neg(), Some(0)); +assert_eq!(1", stringify!($SelfT), ".checked_neg(), None);", $EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_neg(self) -> Option { + let (a, b) = self.overflowing_neg(); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked shift left. Computes `self << rhs`, returning `None` +if `rhs` is larger than or equal to the number of bits in `self`. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0x1", stringify!($SelfT), ".checked_shl(4), Some(0x10)); +assert_eq!(0x10", stringify!($SelfT), ".checked_shl(129), None);", $EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_shl(self, rhs: u32) -> Option { + let (a, b) = self.overflowing_shl(rhs); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked shift right. Computes `self >> rhs`, returning `None` +if `rhs` is larger than or equal to the number of bits in `self`. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".checked_shr(4), Some(0x1)); +assert_eq!(0x10", stringify!($SelfT), ".checked_shr(129), None);", $EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn checked_shr(self, rhs: u32) -> Option { + let (a, b) = self.overflowing_shr(rhs); + if b {None} else {Some(a)} + } + } + + doc_comment! { + concat!("Checked exponentiation. Computes `self.pow(exp)`, returning `None` if +overflow occurred. + +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "assert_eq!(2", stringify!($SelfT), ".checked_pow(5), Some(32)); +assert_eq!(", stringify!($SelfT), "::max_value().checked_pow(2), None);", $EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn checked_pow(self, mut exp: u32) -> Option { + let mut base = self; + let mut acc: Self = 1; + + while exp > 1 { + if (exp & 1) == 1 { + acc = acc.checked_mul(base)?; + } + exp /= 2; + base = base.checked_mul(base)?; + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { + acc = acc.checked_mul(base)?; + } + + Some(acc) + } + } + + doc_comment! { + concat!("Saturating integer addition. Computes `self + rhs`, saturating at +the numeric bounds instead of overflowing. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101); +assert_eq!(200u8.saturating_add(127), 255);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn saturating_add(self, rhs: Self) -> Self { + match self.checked_add(rhs) { + Some(x) => x, + None => Self::max_value(), + } + } + } + + doc_comment! { + concat!("Saturating integer subtraction. Computes `self - rhs`, saturating +at the numeric bounds instead of overflowing. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_sub(27), 73); +assert_eq!(13", stringify!($SelfT), ".saturating_sub(127), 0);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn saturating_sub(self, rhs: Self) -> Self { + match self.checked_sub(rhs) { + Some(x) => x, + None => Self::min_value(), + } + } + } + + doc_comment! { + concat!("Saturating integer multiplication. Computes `self * rhs`, +saturating at the numeric bounds instead of overflowing. + +# Examples + +Basic usage: + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(2", stringify!($SelfT), ".saturating_mul(10), 20); +assert_eq!((", stringify!($SelfT), "::MAX).saturating_mul(10), ", stringify!($SelfT), +"::MAX);", $EndFeature, " +```"), + #[stable(feature = "wrapping", since = "1.7.0")] + #[inline] + pub fn saturating_mul(self, rhs: Self) -> Self { + self.checked_mul(rhs).unwrap_or(Self::max_value()) + } + } + + doc_comment! { + concat!("Saturating integer exponentiation. Computes `self.pow(exp)`, +saturating at the numeric bounds instead of overflowing. 
+ +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(4", stringify!($SelfT), ".saturating_pow(3), 64); +assert_eq!(", stringify!($SelfT), "::MAX.saturating_pow(2), ", stringify!($SelfT), "::MAX);", +$EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn saturating_pow(self, exp: u32) -> Self { + match self.checked_pow(exp) { + Some(x) => x, + None => Self::max_value(), + } + } + } + + doc_comment! { + concat!("Wrapping (modular) addition. Computes `self + rhs`, +wrapping around at the boundary of the type. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(200", stringify!($SelfT), ".wrapping_add(55), 255); +assert_eq!(200", stringify!($SelfT), ".wrapping_add(", stringify!($SelfT), "::max_value()), 199);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn wrapping_add(self, rhs: Self) -> Self { + unsafe { + intrinsics::overflowing_add(self, rhs) + } + } + } + + doc_comment! { + concat!("Wrapping (modular) subtraction. Computes `self - rhs`, +wrapping around at the boundary of the type. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_sub(100), 0); +assert_eq!(100", stringify!($SelfT), ".wrapping_sub(", stringify!($SelfT), "::max_value()), 101);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn wrapping_sub(self, rhs: Self) -> Self { + unsafe { + intrinsics::overflowing_sub(self, rhs) + } } } @@ -1807,6 +2843,9 @@ macro_rules! uint_impl { /// /// Basic usage: /// + /// Please note that this example is shared between integer types. + /// Which explains why `u8` is used here. + /// /// ``` /// assert_eq!(10u8.wrapping_mul(12), 120); /// assert_eq!(25u8.wrapping_mul(12), 44); @@ -1819,43 +2858,92 @@ macro_rules! uint_impl { } } - /// Wrapping (modular) division. Computes `self / rhs`. 
- /// Wrapped division on unsigned types is just normal division. - /// There's no way wrapping could ever happen. - /// This function exists, so that all operations - /// are accounted for in the wrapping operations. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100u8.wrapping_div(10), 10); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_div(self, rhs: Self) -> Self { - self / rhs + doc_comment! { + concat!("Wrapping (modular) division. Computes `self / rhs`. +Wrapped division on unsigned types is just normal division. +There's no way wrapping could ever happen. +This function exists, so that all operations +are accounted for in the wrapping operations. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_div(10), 10);", $EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_div(self, rhs: Self) -> Self { + self / rhs + } } - /// Wrapping (modular) remainder. Computes `self % rhs`. - /// Wrapped remainder calculation on unsigned types is - /// just the regular remainder calculation. - /// There's no way wrapping could ever happen. - /// This function exists, so that all operations - /// are accounted for in the wrapping operations. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(100u8.wrapping_rem(10), 0); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_rem(self, rhs: Self) -> Self { - self % rhs + doc_comment! { + concat!("Wrapping Euclidean division. Computes `self.div_euc(rhs)`. +Wrapped division on unsigned types is just normal division. +There's no way wrapping could ever happen. +This function exists, so that all operations +are accounted for in the wrapping operations. 
+ +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(100", stringify!($SelfT), ".wrapping_div_euc(10), 10); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn wrapping_div_euc(self, rhs: Self) -> Self { + self / rhs + } + } + + doc_comment! { + concat!("Wrapping (modular) remainder. Computes `self % rhs`. +Wrapped remainder calculation on unsigned types is +just the regular remainder calculation. +There's no way wrapping could ever happen. +This function exists, so that all operations +are accounted for in the wrapping operations. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(100", stringify!($SelfT), ".wrapping_rem(10), 0);", $EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_rem(self, rhs: Self) -> Self { + self % rhs + } + } + + doc_comment! { + concat!("Wrapping Euclidean modulo. Computes `self.mod_euc(rhs)`. +Wrapped modulo calculation on unsigned types is +just the regular remainder calculation. +There's no way wrapping could ever happen. +This function exists, so that all operations +are accounted for in the wrapping operations. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(100", stringify!($SelfT), ".wrapping_mod_euc(10), 0); +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + pub fn wrapping_mod_euc(self, rhs: Self) -> Self { + self % rhs + } } /// Wrapping (modular) negation. Computes `-self`, @@ -1872,11 +2960,12 @@ macro_rules! uint_impl { /// /// Basic usage: /// + /// Please note that this example is shared between integer types. + /// Which explains why `i8` is used here. 
+ /// /// ``` - /// assert_eq!(100u8.wrapping_neg(), 156); - /// assert_eq!(0u8.wrapping_neg(), 0); - /// assert_eq!(180u8.wrapping_neg(), 76); - /// assert_eq!(180u8.wrapping_neg(), (127 + 1) - (180u8 - (127 + 1))); + /// assert_eq!(100i8.wrapping_neg(), -100); + /// assert_eq!((-128i8).wrapping_neg(), -128); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline] @@ -1884,110 +2973,157 @@ macro_rules! uint_impl { self.overflowing_neg().0 } - /// Panic-free bitwise shift-left; yields `self << mask(rhs)`, - /// where `mask` removes any high-order bits of `rhs` that - /// would cause the shift to exceed the bitwidth of the type. - /// - /// Note that this is *not* the same as a rotate-left; the - /// RHS of a wrapping shift-left is restricted to the range - /// of the type, rather than the bits shifted out of the LHS - /// being returned to the other end. The primitive integer - /// types all implement a `rotate_left` function, which may - /// be what you want instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(1u8.wrapping_shl(7), 128); - /// assert_eq!(1u8.wrapping_shl(8), 1); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_shl(self, rhs: u32) -> Self { - unsafe { - intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT) + doc_comment! { + concat!("Panic-free bitwise shift-left; yields `self << mask(rhs)`, +where `mask` removes any high-order bits of `rhs` that +would cause the shift to exceed the bitwidth of the type. + +Note that this is *not* the same as a rotate-left; the +RHS of a wrapping shift-left is restricted to the range +of the type, rather than the bits shifted out of the LHS +being returned to the other end. The primitive integer +types all implement a `rotate_left` function, which may +be what you want instead. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(1", stringify!($SelfT), ".wrapping_shl(7), 128); +assert_eq!(1", stringify!($SelfT), ".wrapping_shl(128), 1);", $EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_shl(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT) + } } } - /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`, - /// where `mask` removes any high-order bits of `rhs` that - /// would cause the shift to exceed the bitwidth of the type. - /// - /// Note that this is *not* the same as a rotate-right; the - /// RHS of a wrapping shift-right is restricted to the range - /// of the type, rather than the bits shifted out of the LHS - /// being returned to the other end. The primitive integer - /// types all implement a `rotate_right` function, which may - /// be what you want instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(128u8.wrapping_shr(7), 1); - /// assert_eq!(128u8.wrapping_shr(8), 128); - /// ``` - #[stable(feature = "num_wrapping", since = "1.2.0")] - #[inline] - pub fn wrapping_shr(self, rhs: u32) -> Self { - unsafe { - intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT) + doc_comment! { + concat!("Panic-free bitwise shift-right; yields `self >> mask(rhs)`, +where `mask` removes any high-order bits of `rhs` that +would cause the shift to exceed the bitwidth of the type. + +Note that this is *not* the same as a rotate-right; the +RHS of a wrapping shift-right is restricted to the range +of the type, rather than the bits shifted out of the LHS +being returned to the other end. The primitive integer +types all implement a `rotate_right` function, which may +be what you want instead. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(128", stringify!($SelfT), ".wrapping_shr(7), 1); +assert_eq!(128", stringify!($SelfT), ".wrapping_shr(128), 128);", $EndFeature, " +```"), + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[inline] + pub fn wrapping_shr(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT) + } } } - /// Calculates `self` + `rhs` - /// - /// Returns a tuple of the addition along with a boolean indicating - /// whether an arithmetic overflow would occur. If an overflow would - /// have occurred then the wrapped value is returned. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::u32; - /// - /// assert_eq!(5u32.overflowing_add(2), (7, false)); - /// assert_eq!(u32::MAX.overflowing_add(1), (0, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_add(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::add_with_overflow(self as $ActualT, - rhs as $ActualT) - }; - (a as Self, b) + doc_comment! { + concat!("Wrapping (modular) exponentiation. Computes `self.pow(exp)`, +wrapping around at the boundary of the type. + +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "assert_eq!(3", stringify!($SelfT), ".wrapping_pow(5), 243); +assert_eq!(3u8.wrapping_pow(6), 217);", $EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn wrapping_pow(self, mut exp: u32) -> Self { + let mut base = self; + let mut acc: Self = 1; + + while exp > 1 { + if (exp & 1) == 1 { + acc = acc.wrapping_mul(base); + } + exp /= 2; + base = base.wrapping_mul(base); + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. 
+ if exp == 1 { + acc = acc.wrapping_mul(base); + } + + acc + } } - /// Calculates `self` - `rhs` - /// - /// Returns a tuple of the subtraction along with a boolean indicating - /// whether an arithmetic overflow would occur. If an overflow would - /// have occurred then the wrapped value is returned. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// use std::u32; - /// - /// assert_eq!(5u32.overflowing_sub(2), (3, false)); - /// assert_eq!(0u32.overflowing_sub(1), (u32::MAX, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_sub(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::sub_with_overflow(self as $ActualT, - rhs as $ActualT) - }; - (a as Self, b) + doc_comment! { + concat!("Calculates `self` + `rhs` + +Returns a tuple of the addition along with a boolean indicating +whether an arithmetic overflow would occur. If an overflow would +have occurred then the wrapped value is returned. + +# Examples + +Basic usage + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false)); +assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_add(self, rhs: Self) -> (Self, bool) { + let (a, b) = unsafe { + intrinsics::add_with_overflow(self as $ActualT, + rhs as $ActualT) + }; + (a as Self, b) + } + } + + doc_comment! { + concat!("Calculates `self` - `rhs` + +Returns a tuple of the subtraction along with a boolean indicating +whether an arithmetic overflow would occur. If an overflow would +have occurred then the wrapped value is returned. 
+ +# Examples + +Basic usage + +``` +", $Feature, "use std::", stringify!($SelfT), "; + +assert_eq!(5", stringify!($SelfT), ".overflowing_sub(2), (3, false)); +assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), "::MAX, true));", +$EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_sub(self, rhs: Self) -> (Self, bool) { + let (a, b) = unsafe { + intrinsics::sub_with_overflow(self as $ActualT, + rhs as $ActualT) + }; + (a as Self, b) + } } /// Calculates the multiplication of `self` and `rhs`. @@ -1998,7 +3134,10 @@ macro_rules! uint_impl { /// /// # Examples /// - /// Basic usage + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. /// /// ``` /// assert_eq!(5u32.overflowing_mul(2), (10, false)); @@ -2014,129 +3153,243 @@ macro_rules! uint_impl { (a as Self, b) } - /// Calculates the divisor when `self` is divided by `rhs`. - /// - /// Returns a tuple of the divisor along with a boolean indicating - /// whether an arithmetic overflow would occur. Note that for unsigned - /// integers overflow never occurs, so the second value is always - /// `false`. - /// - /// # Panics - /// - /// This function will panic if `rhs` is 0. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(5u32.overflowing_div(2), (2, false)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_div(self, rhs: Self) -> (Self, bool) { - (self / rhs, false) + doc_comment! { + concat!("Calculates the divisor when `self` is divided by `rhs`. + +Returns a tuple of the divisor along with a boolean indicating +whether an arithmetic overflow would occur. Note that for unsigned +integers overflow never occurs, so the second value is always +`false`. + +# Panics + +This function will panic if `rhs` is 0. 
+ +# Examples + +Basic usage + +``` +", $Feature, "assert_eq!(5", stringify!($SelfT), ".overflowing_div(2), (2, false));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_div(self, rhs: Self) -> (Self, bool) { + (self / rhs, false) + } } - /// Calculates the remainder when `self` is divided by `rhs`. - /// - /// Returns a tuple of the remainder after dividing along with a boolean - /// indicating whether an arithmetic overflow would occur. Note that for - /// unsigned integers overflow never occurs, so the second value is - /// always `false`. - /// - /// # Panics - /// - /// This function will panic if `rhs` is 0. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(5u32.overflowing_rem(2), (1, false)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_rem(self, rhs: Self) -> (Self, bool) { - (self % rhs, false) + doc_comment! { + concat!("Calculates the quotient of Euclidean division `self.div_euc(rhs)`. + +Returns a tuple of the divisor along with a boolean indicating +whether an arithmetic overflow would occur. Note that for unsigned +integers overflow never occurs, so the second value is always +`false`. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage + +``` +#![feature(euclidean_division)] +assert_eq!(5", stringify!($SelfT), ".overflowing_div_euc(2), (2, false)); +```"), + #[inline] + #[unstable(feature = "euclidean_division", issue = "49048")] + pub fn overflowing_div_euc(self, rhs: Self) -> (Self, bool) { + (self / rhs, false) + } } - /// Negates self in an overflowing fashion. - /// - /// Returns `!self + 1` using wrapping operations to return the value - /// that represents the negation of this unsigned value. Note that for - /// positive unsigned values overflow always occurs, but negating 0 does - /// not overflow. 
- /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(0u32.overflowing_neg(), (0, false)); - /// assert_eq!(2u32.overflowing_neg(), (-2i32 as u32, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_neg(self) -> (Self, bool) { - ((!self).wrapping_add(1), self != 0) + doc_comment! { + concat!("Calculates the remainder when `self` is divided by `rhs`. + +Returns a tuple of the remainder after dividing along with a boolean +indicating whether an arithmetic overflow would occur. Note that for +unsigned integers overflow never occurs, so the second value is +always `false`. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage + +``` +", $Feature, "assert_eq!(5", stringify!($SelfT), ".overflowing_rem(2), (1, false));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_rem(self, rhs: Self) -> (Self, bool) { + (self % rhs, false) + } } - /// Shifts self left by `rhs` bits. - /// - /// Returns a tuple of the shifted version of self along with a boolean - /// indicating whether the shift value was larger than or equal to the - /// number of bits. If the shift value is too large, then value is - /// masked (N-1) where N is the number of bits, and this value is then - /// used to perform the shift. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(0x10u32.overflowing_shl(4), (0x100, false)); - /// assert_eq!(0x10u32.overflowing_shl(36), (0x100, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + doc_comment! { + concat!("Calculates the remainder `self.mod_euc(rhs)` by Euclidean division. + +Returns a tuple of the modulo after dividing along with a boolean +indicating whether an arithmetic overflow would occur. 
Note that for +unsigned integers overflow never occurs, so the second value is +always `false`. + +# Panics + +This function will panic if `rhs` is 0. + +# Examples + +Basic usage + +``` +#![feature(euclidean_division)] +assert_eq!(5", stringify!($SelfT), ".overflowing_mod_euc(2), (1, false)); +```"), + #[inline] + #[unstable(feature = "euclidean_division", issue = "49048")] + pub fn overflowing_mod_euc(self, rhs: Self) -> (Self, bool) { + (self % rhs, false) + } } - /// Shifts self right by `rhs` bits. - /// - /// Returns a tuple of the shifted version of self along with a boolean - /// indicating whether the shift value was larger than or equal to the - /// number of bits. If the shift value is too large, then value is - /// masked (N-1) where N is the number of bits, and this value is then - /// used to perform the shift. - /// - /// # Examples - /// - /// Basic usage - /// - /// ``` - /// assert_eq!(0x10u32.overflowing_shr(4), (0x1, false)); - /// assert_eq!(0x10u32.overflowing_shr(36), (0x1, true)); - /// ``` - #[inline] - #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { - (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + doc_comment! { + concat!("Negates self in an overflowing fashion. +Returns `!self + 1` using wrapping operations to return the value +that represents the negation of this unsigned value. Note that for +positive unsigned values overflow always occurs, but negating 0 does +not overflow. + +# Examples + +Basic usage + +``` +", $Feature, "assert_eq!(0", stringify!($SelfT), ".overflowing_neg(), (0, false)); +assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2i32 as ", stringify!($SelfT), +", true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_neg(self) -> (Self, bool) { + ((!self).wrapping_add(1), self != 0) + } } - /// Raises self to the power of `exp`, using exponentiation by squaring. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(2u32.pow(4), 16); - /// ``` + doc_comment! { + concat!("Shifts self left by `rhs` bits. + +Returns a tuple of the shifted version of self along with a boolean +indicating whether the shift value was larger than or equal to the +number of bits. If the shift value is too large, then value is +masked (N-1) where N is the number of bits, and this value is then +used to perform the shift. + +# Examples + +Basic usage + +``` +", $Feature, "assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(4), (0x10, false)); +assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(132), (0x10, true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + } + } + + doc_comment! { + concat!("Shifts self right by `rhs` bits. + +Returns a tuple of the shifted version of self along with a boolean +indicating whether the shift value was larger than or equal to the +number of bits. If the shift value is too large, then value is +masked (N-1) where N is the number of bits, and this value is then +used to perform the shift. + +# Examples + +Basic usage + +``` +", $Feature, "assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(4), (0x1, false)); +assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(132), (0x1, true));", $EndFeature, " +```"), + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + } + } + + doc_comment! { + concat!("Raises self to the power of `exp`, using exponentiation by squaring. + +Returns a tuple of the exponentiation along with a bool indicating +whether an overflow happened. 
+ +# Examples + +Basic usage: + +``` +#![feature(no_panic_pow)] +", $Feature, "assert_eq!(3", stringify!($SelfT), ".overflowing_pow(5), (243, false)); +assert_eq!(3u8.overflowing_pow(6), (217, true));", $EndFeature, " +```"), + #[unstable(feature = "no_panic_pow", issue = "48320")] + #[inline] + pub fn overflowing_pow(self, mut exp: u32) -> (Self, bool) { + let mut base = self; + let mut acc: Self = 1; + let mut overflown = false; + // Scratch space for storing results of overflowing_mul. + let mut r; + + while exp > 1 { + if (exp & 1) == 1 { + r = acc.overflowing_mul(base); + acc = r.0; + overflown |= r.1; + } + exp /= 2; + r = base.overflowing_mul(base); + base = r.0; + overflown |= r.1; + } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. + if exp == 1 { + r = acc.overflowing_mul(base); + acc = r.0; + overflown |= r.1; + } + + (acc, overflown) + } + } + + doc_comment! { + concat!("Raises self to the power of `exp`, using exponentiation by squaring. + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(2", stringify!($SelfT), ".pow(5), 32);", $EndFeature, " +```"), #[stable(feature = "rust1", since = "1.0.0")] #[inline] #[rustc_inherit_overflow_checks] @@ -2161,21 +3414,67 @@ macro_rules! uint_impl { acc } + } - /// Returns `true` if and only if `self == 2^k` for some `k`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert!(16u8.is_power_of_two()); - /// assert!(!10u8.is_power_of_two()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn is_power_of_two(self) -> bool { - (self.wrapping_sub(1)) & self == 0 && !(self == 0) + doc_comment! { + concat!("Performs Euclidean division. + +For unsigned types, this is just the same as `self / rhs`. 
+ +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(7", stringify!($SelfT), ".div_euc(4), 1); // or any other integer type +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + #[rustc_inherit_overflow_checks] + pub fn div_euc(self, rhs: Self) -> Self { + self / rhs + } + } + + + doc_comment! { + concat!("Calculates the remainder `self mod rhs` by Euclidean division. + +For unsigned types, this is just the same as `self % rhs`. + +# Examples + +Basic usage: + +``` +#![feature(euclidean_division)] +assert_eq!(7", stringify!($SelfT), ".mod_euc(4), 3); // or any other integer type +```"), + #[unstable(feature = "euclidean_division", issue = "49048")] + #[inline] + #[rustc_inherit_overflow_checks] + pub fn mod_euc(self, rhs: Self) -> Self { + self % rhs + } + } + + doc_comment! { + concat!("Returns `true` if and only if `self == 2^k` for some `k`. + +# Examples + +Basic usage: + +``` +", $Feature, "assert!(16", stringify!($SelfT), ".is_power_of_two()); +assert!(!10", stringify!($SelfT), ".is_power_of_two());", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn is_power_of_two(self) -> bool { + (self.wrapping_sub(1)) & self == 0 && !(self == 0) + } } // Returns one less than next power of two. @@ -2200,50 +3499,196 @@ macro_rules! uint_impl { <$SelfT>::max_value() >> z } - /// Returns the smallest power of two greater than or equal to `self`. - /// - /// When return value overflows (i.e. `self > (1 << (N-1))` for type - /// `uN`), it panics in debug mode and return value is wrapped to 0 in - /// release mode (the only situation in which method can return 0). 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// assert_eq!(2u8.next_power_of_two(), 2); - /// assert_eq!(3u8.next_power_of_two(), 4); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - pub fn next_power_of_two(self) -> Self { - // Call the trait to get overflow checks - ops::Add::add(self.one_less_than_next_power_of_two(), 1) + doc_comment! { + concat!("Returns the smallest power of two greater than or equal to `self`. + +When return value overflows (i.e. `self > (1 << (N-1))` for type +`uN`), it panics in debug mode and return value is wrapped to 0 in +release mode (the only situation in which method can return 0). + +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(2", stringify!($SelfT), ".next_power_of_two(), 2); +assert_eq!(3", stringify!($SelfT), ".next_power_of_two(), 4);", $EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn next_power_of_two(self) -> Self { + // Call the trait to get overflow checks + ops::Add::add(self.one_less_than_next_power_of_two(), 1) + } } - /// Returns the smallest power of two greater than or equal to `n`. If - /// the next power of two is greater than the type's maximum value, - /// `None` is returned, otherwise the power of two is wrapped in `Some`. + doc_comment! { + concat!("Returns the smallest power of two greater than or equal to `n`. If +the next power of two is greater than the type's maximum value, +`None` is returned, otherwise the power of two is wrapped in `Some`. 
+ +# Examples + +Basic usage: + +``` +", $Feature, "assert_eq!(2", stringify!($SelfT), +".checked_next_power_of_two(), Some(2)); +assert_eq!(3", stringify!($SelfT), ".checked_next_power_of_two(), Some(4)); +assert_eq!(", stringify!($SelfT), "::max_value().checked_next_power_of_two(), None);", +$EndFeature, " +```"), + #[stable(feature = "rust1", since = "1.0.0")] + pub fn checked_next_power_of_two(self) -> Option { + self.one_less_than_next_power_of_two().checked_add(1) + } + } + + doc_comment! { + concat!("Returns the smallest power of two greater than or equal to `n`. If +the next power of two is greater than the type's maximum value, +the return value is wrapped to `0`. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_next_power_of_two)] +", $Feature, " +assert_eq!(2", stringify!($SelfT), ".wrapping_next_power_of_two(), 2); +assert_eq!(3", stringify!($SelfT), ".wrapping_next_power_of_two(), 4); +assert_eq!(", stringify!($SelfT), "::max_value().wrapping_next_power_of_two(), 0);", +$EndFeature, " +```"), + #[unstable(feature = "wrapping_next_power_of_two", issue = "32463", + reason = "needs decision on wrapping behaviour")] + pub fn wrapping_next_power_of_two(self) -> Self { + self.one_less_than_next_power_of_two().wrapping_add(1) + } + } + + /// Return the memory representation of this integer as a byte array in + /// big-endian (network) byte order. /// /// # Examples /// - /// Basic usage: + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let bytes = 0x12_34_56_78_i32.to_be_bytes(); + /// assert_eq!(bytes, [0x12, 0x34, 0x56, 0x78]); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn to_be_bytes(self) -> [u8; mem::size_of::()] { + self.to_be().to_ne_bytes() + } + + /// Return the memory representation of this integer as a byte array in + /// little-endian byte order. 
+ /// + /// # Examples /// /// ``` - /// assert_eq!(2u8.checked_next_power_of_two(), Some(2)); - /// assert_eq!(3u8.checked_next_power_of_two(), Some(4)); - /// assert_eq!(200u8.checked_next_power_of_two(), None); + /// #![feature(int_to_from_bytes)] + /// + /// let bytes = 0x12_34_56_78_i32.to_le_bytes(); + /// assert_eq!(bytes, [0x78, 0x56, 0x34, 0x12]); /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn checked_next_power_of_two(self) -> Option { - self.one_less_than_next_power_of_two().checked_add(1) + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn to_le_bytes(self) -> [u8; mem::size_of::()] { + self.to_le().to_ne_bytes() + } + + /// Return the memory representation of this integer as a byte array in + /// native byte order. + /// + /// As the target platform's native endianness is used, portable code + /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, + /// instead. + /// + /// [`to_be_bytes`]: #method.to_be_bytes + /// [`to_le_bytes`]: #method.to_le_bytes + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let bytes = i32::min_value().to_be().to_ne_bytes(); + /// assert_eq!(bytes, [0x80, 0, 0, 0]); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn to_ne_bytes(self) -> [u8; mem::size_of::()] { + unsafe { mem::transmute(self) } + } + + /// Create an integer value from its representation as a byte array in + /// big endian. + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let int = i32::from_be_bytes([0x12, 0x34, 0x56, 0x78]); + /// assert_eq!(int, 0x12_34_56_78); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn from_be_bytes(bytes: [u8; mem::size_of::()]) -> Self { + Self::from_be(Self::from_ne_bytes(bytes)) + } + + /// Create an integer value from its representation as a byte array in + /// little endian. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let int = i32::from_le_bytes([0x12, 0x34, 0x56, 0x78]); + /// assert_eq!(int, 0x78_56_34_12); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn from_le_bytes(bytes: [u8; mem::size_of::()]) -> Self { + Self::from_le(Self::from_ne_bytes(bytes)) + } + + /// Create an integer value from its memory representation as a byte + /// array in native endianness. + /// + /// As the target platform's native endianness is used, portable code + /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as + /// appropriate instead. + /// + /// [`from_be_bytes`]: #method.from_be_bytes + /// [`from_le_bytes`]: #method.from_le_bytes + /// + /// # Examples + /// + /// ``` + /// #![feature(int_to_from_bytes)] + /// + /// let int = i32::from_be(i32::from_ne_bytes([0x80, 0, 0, 0])); + /// assert_eq!(int, i32::min_value()); + /// ``` + #[unstable(feature = "int_to_from_bytes", issue = "52963")] + #[inline] + pub fn from_ne_bytes(bytes: [u8; mem::size_of::()]) -> Self { + unsafe { mem::transmute(bytes) } } } } #[lang = "u8"] impl u8 { - uint_impl! { u8, u8, 8 } + uint_impl! { u8, u8, 8, 255, "", "", 2, "0x82", "0xa", "0x12", "0x12", "0x48" } /// Checks if the value is within the ASCII range. 
@@ -2383,8 +3828,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2421,8 +3864,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2459,8 +3900,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2500,8 +3939,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2538,8 +3975,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2579,8 +4014,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2621,8 +4054,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2654,13 +4085,11 @@ impl u8 { } /// Checks if the value is an ASCII graphic character: - /// U+0021 '@' ... U+007E '~'. + /// U+0021 '!' ... U+007E '~'. /// /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2714,8 +4143,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2754,8 +4181,6 @@ impl u8 { /// # Examples /// /// ``` - /// #![feature(ascii_ctype)] - /// /// let uppercase_a = b'A'; /// let uppercase_g = b'G'; /// let a = b'a'; @@ -2789,39 +4214,45 @@ impl u8 { #[lang = "u16"] impl u16 { - uint_impl! { u16, u16, 16 } + uint_impl! 
{ u16, u16, 16, 65535, "", "", 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48" } } #[lang = "u32"] impl u32 { - uint_impl! { u32, u32, 32 } + uint_impl! { u32, u32, 32, 4294967295, "", "", 8, "0x10000b3", "0xb301", "0x12345678", + "0x78563412", "0x1e6a2c48" } } #[lang = "u64"] impl u64 { - uint_impl! { u64, u64, 64 } + uint_impl! { u64, u64, 64, 18446744073709551615, "", "", 12, "0xaa00000000006e1", "0x6e10aa", + "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48" } } #[lang = "u128"] impl u128 { - uint_impl! { u128, u128, 128 } + uint_impl! { u128, u128, 128, 340282366920938463463374607431768211455, "", "", 16, + "0x13f40000000000000000000000004f76", "0x4f7613f4", "0x12345678901234567890123456789012", + "0x12907856341290785634129078563412", "0x48091e6a2c48091e6a2c48091e6a2c48" } } #[cfg(target_pointer_width = "16")] #[lang = "usize"] impl usize { - uint_impl! { usize, u16, 16 } + uint_impl! { usize, u16, 16, 65535, "", "", 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48" } } #[cfg(target_pointer_width = "32")] #[lang = "usize"] impl usize { - uint_impl! { usize, u32, 32 } + uint_impl! { usize, u32, 32, 4294967295, "", "", 8, "0x10000b3", "0xb301", "0x12345678", + "0x78563412", "0x1e6a2c48" } } #[cfg(target_pointer_width = "64")] #[lang = "usize"] impl usize { - uint_impl! { usize, u64, 64 } + uint_impl! { usize, u64, 64, 18446744073709551615, "", "", 12, "0xaa00000000006e1", "0x6e10aa", + "0x1234567890123456", "0x5634129078563412", "0x6a2c48091e6a2c48" } } /// A classification of floating point numbers. @@ -2874,75 +4305,6 @@ pub enum FpCategory { Normal, } -/// A built-in floating point number. -#[doc(hidden)] -#[unstable(feature = "core_float", - reason = "stable interface is via `impl f{32,64}` in later crates", - issue = "32110")] -pub trait Float: Sized { - /// Returns `true` if this value is NaN and false otherwise.
- #[stable(feature = "core", since = "1.6.0")] - fn is_nan(self) -> bool; - /// Returns `true` if this value is positive infinity or negative infinity and - /// false otherwise. - #[stable(feature = "core", since = "1.6.0")] - fn is_infinite(self) -> bool; - /// Returns `true` if this number is neither infinite nor NaN. - #[stable(feature = "core", since = "1.6.0")] - fn is_finite(self) -> bool; - /// Returns `true` if this number is neither zero, infinite, denormal, or NaN. - #[stable(feature = "core", since = "1.6.0")] - fn is_normal(self) -> bool; - /// Returns the category that this number falls into. - #[stable(feature = "core", since = "1.6.0")] - fn classify(self) -> FpCategory; - - /// Computes the absolute value of `self`. Returns `Float::nan()` if the - /// number is `Float::nan()`. - #[stable(feature = "core", since = "1.6.0")] - fn abs(self) -> Self; - /// Returns a number that represents the sign of `self`. - /// - /// - `1.0` if the number is positive, `+0.0` or `Float::infinity()` - /// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()` - /// - `Float::nan()` if the number is `Float::nan()` - #[stable(feature = "core", since = "1.6.0")] - fn signum(self) -> Self; - - /// Returns `true` if `self` is positive, including `+0.0` and - /// `Float::infinity()`. - #[stable(feature = "core", since = "1.6.0")] - fn is_sign_positive(self) -> bool; - /// Returns `true` if `self` is negative, including `-0.0` and - /// `Float::neg_infinity()`. - #[stable(feature = "core", since = "1.6.0")] - fn is_sign_negative(self) -> bool; - - /// Take the reciprocal (inverse) of a number, `1/x`. - #[stable(feature = "core", since = "1.6.0")] - fn recip(self) -> Self; - - /// Raise a number to an integer power. - /// - /// Using this function is generally faster than using `powf` - #[stable(feature = "core", since = "1.6.0")] - fn powi(self, n: i32) -> Self; - - /// Convert radians to degrees. 
- #[stable(feature = "deg_rad_conversions", since="1.7.0")] - fn to_degrees(self) -> Self; - /// Convert degrees to radians. - #[stable(feature = "deg_rad_conversions", since="1.7.0")] - fn to_radians(self) -> Self; - - /// Returns the maximum of the two numbers. - #[stable(feature = "core_float_min_max", since="1.20.0")] - fn max(self, other: Self) -> Self; - /// Returns the minimum of the two numbers. - #[stable(feature = "core_float_min_max", since="1.20.0")] - fn min(self, other: Self) -> Self; -} - macro_rules! from_str_radix_int_impl { ($($t:ty)*) => {$( #[stable(feature = "rust1", since = "1.0.0")] @@ -2980,10 +4342,9 @@ impl fmt::Display for TryFromIntError { } #[unstable(feature = "try_from", issue = "33417")] -impl From for TryFromIntError { - fn from(infallible: Infallible) -> TryFromIntError { - match infallible { - } +impl From for TryFromIntError { + fn from(never: !) -> TryFromIntError { + never } } @@ -2992,7 +4353,7 @@ macro_rules! try_from_unbounded { ($source:ty, $($target:ty),*) => {$( #[unstable(feature = "try_from", issue = "33417")] impl TryFrom<$source> for $target { - type Error = Infallible; + type Error = TryFromIntError; #[inline] fn try_from(value: $source) -> Result { @@ -3103,7 +4464,7 @@ try_from_lower_bounded!(isize, usize); #[cfg(target_pointer_width = "16")] mod ptr_try_from_impls { use super::TryFromIntError; - use convert::{Infallible, TryFrom}; + use convert::TryFrom; try_from_upper_bounded!(usize, u8); try_from_unbounded!(usize, u16, u32, u64, u128); @@ -3115,21 +4476,18 @@ mod ptr_try_from_impls { try_from_both_bounded!(isize, i8); try_from_unbounded!(isize, i16, i32, i64, i128); - rev!(try_from_unbounded, usize, u16); rev!(try_from_upper_bounded, usize, u32, u64, u128); rev!(try_from_lower_bounded, usize, i8, i16); rev!(try_from_both_bounded, usize, i32, i64, i128); - rev!(try_from_unbounded, isize, u8); rev!(try_from_upper_bounded, isize, u16, u32, u64, u128); - rev!(try_from_unbounded, isize, i16); 
rev!(try_from_both_bounded, isize, i32, i64, i128); } #[cfg(target_pointer_width = "32")] mod ptr_try_from_impls { use super::TryFromIntError; - use convert::{Infallible, TryFrom}; + use convert::TryFrom; try_from_upper_bounded!(usize, u8, u16); try_from_unbounded!(usize, u32, u64, u128); @@ -3141,21 +4499,21 @@ mod ptr_try_from_impls { try_from_both_bounded!(isize, i8, i16); try_from_unbounded!(isize, i32, i64, i128); - rev!(try_from_unbounded, usize, u16, u32); + rev!(try_from_unbounded, usize, u32); rev!(try_from_upper_bounded, usize, u64, u128); rev!(try_from_lower_bounded, usize, i8, i16, i32); rev!(try_from_both_bounded, usize, i64, i128); - rev!(try_from_unbounded, isize, u8, u16); + rev!(try_from_unbounded, isize, u16); rev!(try_from_upper_bounded, isize, u32, u64, u128); - rev!(try_from_unbounded, isize, i16, i32); + rev!(try_from_unbounded, isize, i32); rev!(try_from_both_bounded, isize, i64, i128); } #[cfg(target_pointer_width = "64")] mod ptr_try_from_impls { use super::TryFromIntError; - use convert::{Infallible, TryFrom}; + use convert::TryFrom; try_from_upper_bounded!(usize, u8, u16, u32); try_from_unbounded!(usize, u64, u128); @@ -3167,14 +4525,14 @@ mod ptr_try_from_impls { try_from_both_bounded!(isize, i8, i16, i32); try_from_unbounded!(isize, i64, i128); - rev!(try_from_unbounded, usize, u16, u32, u64); + rev!(try_from_unbounded, usize, u32, u64); rev!(try_from_upper_bounded, usize, u128); rev!(try_from_lower_bounded, usize, i8, i16, i32, i64); rev!(try_from_both_bounded, usize, i128); - rev!(try_from_unbounded, isize, u8, u16, u32); + rev!(try_from_unbounded, isize, u16, u32); rev!(try_from_upper_bounded, isize, u64, u128); - rev!(try_from_unbounded, isize, i16, i32, i64); + rev!(try_from_unbounded, isize, i32, i64); rev!(try_from_both_bounded, isize, i128); } @@ -3284,6 +4642,13 @@ fn from_str_radix(src: &str, radix: u32) -> Result T are covered by a blanket impl and therefore excluded // Some conversions from and to usize/isize are not 
implemented due to portability concerns macro_rules! impl_from { - ($Small: ty, $Large: ty, #[$attr:meta]) => { + ($Small: ty, $Large: ty, #[$attr:meta], $doc: expr) => { #[$attr] + #[doc = $doc] impl From<$Small> for $Large { #[inline] fn from(small: $Small) -> $Large { small as $Large } } + }; + ($Small: ty, $Large: ty, #[$attr:meta]) => { + impl_from!($Small, + $Large, + #[$attr], + concat!("Converts `", + stringify!($Small), + "` to `", + stringify!($Large), + "` losslessly.")); } } +macro_rules! impl_from_bool { + ($target: ty, #[$attr:meta]) => { + impl_from!(bool, $target, #[$attr], concat!("Converts a `bool` to a `", + stringify!($target), "`. The resulting value is `0` for `false` and `1` for `true` +values. + +# Examples + +``` +assert_eq!(", stringify!($target), "::from(true), 1); +assert_eq!(", stringify!($target), "::from(false), 0); +```")); + }; +} + +// Bool -> Any +impl_from_bool! { u8, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { u16, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { u32, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { u64, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { u128, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { usize, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { i8, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { i16, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { i32, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { i64, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { i128, #[stable(feature = "from_bool", since = "1.28.0")] } +impl_from_bool! { isize, #[stable(feature = "from_bool", since = "1.28.0")] } + // Unsigned -> Unsigned impl_from! { u8, u16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! 
{ u8, u32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { u8, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { u8, u128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { u8, u128, #[stable(feature = "i128", since = "1.26.0")] } impl_from! { u8, usize, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { u16, u32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { u16, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { u16, u128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { u16, u128, #[stable(feature = "i128", since = "1.26.0")] } impl_from! { u32, u64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { u32, u128, #[unstable(feature = "i128", issue = "35118")] } -impl_from! { u64, u128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { u32, u128, #[stable(feature = "i128", since = "1.26.0")] } +impl_from! { u64, u128, #[stable(feature = "i128", since = "1.26.0")] } // Signed -> Signed impl_from! { i8, i16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { i8, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { i8, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { i8, i128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { i8, i128, #[stable(feature = "i128", since = "1.26.0")] } impl_from! { i8, isize, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { i16, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { i16, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { i16, i128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { i16, i128, #[stable(feature = "i128", since = "1.26.0")] } impl_from! { i32, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! 
{ i32, i128, #[unstable(feature = "i128", issue = "35118")] } -impl_from! { i64, i128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { i32, i128, #[stable(feature = "i128", since = "1.26.0")] } +impl_from! { i64, i128, #[stable(feature = "i128", since = "1.26.0")] } // Unsigned -> Signed impl_from! { u8, i16, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { u8, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { u8, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { u8, i128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { u8, i128, #[stable(feature = "i128", since = "1.26.0")] } impl_from! { u16, i32, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } impl_from! { u16, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { u16, i128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { u16, i128, #[stable(feature = "i128", since = "1.26.0")] } impl_from! { u32, i64, #[stable(feature = "lossless_int_conv", since = "1.5.0")] } -impl_from! { u32, i128, #[unstable(feature = "i128", issue = "35118")] } -impl_from! { u64, i128, #[unstable(feature = "i128", issue = "35118")] } +impl_from! { u32, i128, #[stable(feature = "i128", since = "1.26.0")] } +impl_from! { u64, i128, #[stable(feature = "i128", since = "1.26.0")] } + +// The C99 standard defines bounds on INTPTR_MIN, INTPTR_MAX, and UINTPTR_MAX +// which imply that pointer-sized integers must be at least 16 bits: +// https://port70.net/~nsz/c/c99/n1256.html#7.18.2.4 +impl_from! { u16, usize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] } +impl_from! { u8, isize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] } +impl_from! { i16, isize, #[stable(feature = "lossless_iusize_conv", since = "1.26.0")] } + +// RISC-V defines the possibility of a 128-bit address space (RV128). + +// CHERI proposes 256-bit “capabilities”. 
Unclear if this would be relevant to usize/isize. +// https://www.cl.cam.ac.uk/research/security/ctsrd/pdfs/20171017a-cheri-poster.pdf +// http://www.csl.sri.com/users/neumann/2012resolve-cheri.pdf + // Note: integers can only be represented with full precision in a float if // they fit in the significand, which is 24 bits in f32 and 53 bits in f64. diff --git a/src/libcore/num/u128.rs b/src/libcore/num/u128.rs index 987ac3e00073..e8c783a1bb54 100644 --- a/src/libcore/num/u128.rs +++ b/src/libcore/num/u128.rs @@ -12,5 +12,5 @@ //! //! *[See also the `u128` primitive type](../../std/primitive.u128.html).* -#![unstable(feature = "i128", issue="35118")] -uint_module! { u128, #[unstable(feature = "i128", issue="35118")] } +#![stable(feature = "i128", since = "1.26.0")] +uint_module! { u128, #[stable(feature = "i128", since="1.26.0")] } diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index ae1b0b3ce11b..1c826c2fa76b 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -112,17 +112,19 @@ macro_rules! sh_impl_all { //sh_impl_unsigned! { $t, u16 } //sh_impl_unsigned! { $t, u32 } //sh_impl_unsigned! { $t, u64 } + //sh_impl_unsigned! { $t, u128 } sh_impl_unsigned! { $t, usize } //sh_impl_signed! { $t, i8 } //sh_impl_signed! { $t, i16 } //sh_impl_signed! { $t, i32 } //sh_impl_signed! { $t, i64 } + //sh_impl_signed! { $t, i128 } //sh_impl_signed! { $t, isize } )*) } -sh_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } +sh_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize } // FIXME(30524): impl Op for Wrapping, impl OpAssign for Wrapping macro_rules! wrapping_impl { @@ -317,11 +319,580 @@ macro_rules! wrapping_impl { } forward_ref_unop! { impl Neg, neg for Wrapping<$t>, #[stable(feature = "wrapping_ref", since = "1.14.0")] } + )*) } wrapping_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } +macro_rules! wrapping_int_impl { + ($($t:ty)*) => ($( + impl Wrapping<$t> { + doc_comment! 
{ + concat!("Returns the smallest value that can be represented by this integer type. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(>::min_value(), ", +"Wrapping(", stringify!($t), "::min_value())); +```"), + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + #[inline] + pub const fn min_value() -> Self { + Wrapping(<$t>::min_value()) + } + } + + doc_comment! { + concat!("Returns the largest value that can be represented by this integer type. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(>::max_value(), ", +"Wrapping(", stringify!($t), "::max_value())); +```"), + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + #[inline] + pub const fn max_value() -> Self { + Wrapping(<$t>::max_value()) + } + } + + doc_comment! { + concat!("Returns the number of ones in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(0b01001100", stringify!($t), "); + +assert_eq!(n.count_ones(), 3); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn count_ones(self) -> u32 { + self.0.count_ones() + } + } + + doc_comment! { + concat!("Returns the number of zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(Wrapping(!0", stringify!($t), ").count_zeros(), 0); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn count_zeros(self) -> u32 { + self.0.count_zeros() + } + } + + doc_comment! { + concat!("Returns the number of trailing zeros in the binary representation +of `self`. 
+ +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(0b0101000", stringify!($t), "); + +assert_eq!(n.trailing_zeros(), 3); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn trailing_zeros(self) -> u32 { + self.0.trailing_zeros() + } + } + + /// Shifts the bits to the left by a specified amount, `n`, + /// wrapping the truncated bits to the end of the resulting + /// integer. + /// + /// Please note this isn't the same operation as `<<`! + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(wrapping_int_impl)] + /// use std::num::Wrapping; + /// + /// let n: Wrapping = Wrapping(0x0123456789ABCDEF); + /// let m: Wrapping = Wrapping(-0x76543210FEDCBA99); + /// + /// assert_eq!(n.rotate_left(32), m); + /// ``` + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn rotate_left(self, n: u32) -> Self { + Wrapping(self.0.rotate_left(n)) + } + + /// Shifts the bits to the right by a specified amount, `n`, + /// wrapping the truncated bits to the beginning of the resulting + /// integer. + /// + /// Please note this isn't the same operation as `>>`! + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(wrapping_int_impl)] + /// use std::num::Wrapping; + /// + /// let n: Wrapping = Wrapping(0x0123456789ABCDEF); + /// let m: Wrapping = Wrapping(-0xFEDCBA987654322); + /// + /// assert_eq!(n.rotate_right(4), m); + /// ``` + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn rotate_right(self, n: u32) -> Self { + Wrapping(self.0.rotate_right(n)) + } + + /// Reverses the byte order of the integer.
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(wrapping_int_impl)] + /// use std::num::Wrapping; + /// + /// let n: Wrapping = Wrapping(0b0000000_01010101); + /// assert_eq!(n, Wrapping(85)); + /// + /// let m = n.swap_bytes(); + /// + /// assert_eq!(m, Wrapping(0b01010101_00000000)); + /// assert_eq!(m, Wrapping(21760)); + /// ``` + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn swap_bytes(self) -> Self { + Wrapping(self.0.swap_bytes()) + } + + /// Reverses the bit pattern of the integer. + /// + /// # Examples + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i16` is used here. + /// + /// Basic usage: + /// + /// ``` + /// #![feature(reverse_bits)] + /// use std::num::Wrapping; + /// + /// let n = Wrapping(0b0000000_01010101i16); + /// assert_eq!(n, Wrapping(85)); + /// + /// let m = n.reverse_bits(); + /// + /// assert_eq!(m.0 as u16, 0b10101010_00000000); + /// assert_eq!(m, Wrapping(-22016)); + /// ``` + #[unstable(feature = "reverse_bits", issue = "48763")] + #[inline] + pub fn reverse_bits(self) -> Self { + Wrapping(self.0.reverse_bits()) + } + + doc_comment! { + concat!("Converts an integer from big endian to the target's endianness. + +On big endian this is a no-op. On little endian the bytes are +swapped. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(0x1A", stringify!($t), "); + +if cfg!(target_endian = \"big\") { + assert_eq!(>::from_be(n), n) +} else { + assert_eq!(>::from_be(n), n.swap_bytes()) +} +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn from_be(x: Self) -> Self { + Wrapping(<$t>::from_be(x.0)) + } + } + + doc_comment! { + concat!("Converts an integer from little endian to the target's endianness. + +On little endian this is a no-op. On big endian the bytes are +swapped. 
+ +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(0x1A", stringify!($t), "); + +if cfg!(target_endian = \"little\") { + assert_eq!(>::from_le(n), n) +} else { + assert_eq!(>::from_le(n), n.swap_bytes()) +} +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn from_le(x: Self) -> Self { + Wrapping(<$t>::from_le(x.0)) + } + } + + doc_comment! { + concat!("Converts `self` to big endian from the target's endianness. + +On big endian this is a no-op. On little endian the bytes are +swapped. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(0x1A", stringify!($t), "); + +if cfg!(target_endian = \"big\") { + assert_eq!(n.to_be(), n) +} else { + assert_eq!(n.to_be(), n.swap_bytes()) +} +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn to_be(self) -> Self { + Wrapping(self.0.to_be()) + } + } + + doc_comment! { + concat!("Converts `self` to little endian from the target's endianness. + +On little endian this is a no-op. On big endian the bytes are +swapped. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(0x1A", stringify!($t), "); + +if cfg!(target_endian = \"little\") { + assert_eq!(n.to_le(), n) +} else { + assert_eq!(n.to_le(), n.swap_bytes()) +} +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn to_le(self) -> Self { + Wrapping(self.0.to_le()) + } + } + + doc_comment! { + concat!("Raises self to the power of `exp`, using exponentiation by squaring. 
+ +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(Wrapping(3", stringify!($t), ").pow(4), Wrapping(81)); +``` + +Results that are too large are wrapped: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(Wrapping(3i8).pow(5), Wrapping(-13)); +assert_eq!(Wrapping(3i8).pow(6), Wrapping(-39)); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn pow(self, exp: u32) -> Self { + Wrapping(self.0.wrapping_pow(exp)) + } + } + } + )*) +} + +wrapping_int_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } + +macro_rules! wrapping_int_impl_signed { + ($($t:ty)*) => ($( + impl Wrapping<$t> { + doc_comment! { + concat!("Returns the number of leading zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(", stringify!($t), "::max_value()) >> 2; + +assert_eq!(n.leading_zeros(), 3); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn leading_zeros(self) -> u32 { + self.0.leading_zeros() + } + } + + doc_comment! { + concat!("Computes the absolute value of `self`, wrapping around at +the boundary of the type. + +The only case where such wrapping can occur is when one takes the absolute value of the negative +minimal value for the type this is a positive value that is too large to represent in the type. In +such a case, this function returns `MIN` itself. 
+ +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(Wrapping(100", stringify!($t), ").abs(), Wrapping(100)); +assert_eq!(Wrapping(-100", stringify!($t), ").abs(), Wrapping(100)); +assert_eq!(Wrapping(", stringify!($t), "::min_value()).abs(), Wrapping(", stringify!($t), +"::min_value())); +assert_eq!(Wrapping(-128i8).abs().0 as u8, 128u8); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn abs(self) -> Wrapping<$t> { + Wrapping(self.0.wrapping_abs()) + } + } + + doc_comment! { + concat!("Returns a number representing sign of `self`. + + - `0` if the number is zero + - `1` if the number is positive + - `-1` if the number is negative + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert_eq!(Wrapping(10", stringify!($t), ").signum(), Wrapping(1)); +assert_eq!(Wrapping(0", stringify!($t), ").signum(), Wrapping(0)); +assert_eq!(Wrapping(-10", stringify!($t), ").signum(), Wrapping(-1)); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn signum(self) -> Wrapping<$t> { + Wrapping(self.0.signum()) + } + } + + doc_comment! { + concat!("Returns `true` if `self` is positive and `false` if the number is zero or +negative. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert!(Wrapping(10", stringify!($t), ").is_positive()); +assert!(!Wrapping(-10", stringify!($t), ").is_positive()); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn is_positive(self) -> bool { + self.0.is_positive() + } + } + + doc_comment! { + concat!("Returns `true` if `self` is negative and `false` if the number is zero or +positive. 
+ +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert!(Wrapping(-10", stringify!($t), ").is_negative()); +assert!(!Wrapping(10", stringify!($t), ").is_negative()); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn is_negative(self) -> bool { + self.0.is_negative() + } + } + } + )*) +} + +wrapping_int_impl_signed! { isize i8 i16 i32 i64 i128 } + +macro_rules! wrapping_int_impl_unsigned { + ($($t:ty)*) => ($( + impl Wrapping<$t> { + doc_comment! { + concat!("Returns the number of leading zeros in the binary representation of `self`. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +let n = Wrapping(", stringify!($t), "::max_value()) >> 2; + +assert_eq!(n.leading_zeros(), 2); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn leading_zeros(self) -> u32 { + self.0.leading_zeros() + } + } + + doc_comment! { + concat!("Returns `true` if and only if `self == 2^k` for some `k`. + +# Examples + +Basic usage: + +``` +#![feature(wrapping_int_impl)] +use std::num::Wrapping; + +assert!(Wrapping(16", stringify!($t), ").is_power_of_two()); +assert!(!Wrapping(10", stringify!($t), ").is_power_of_two()); +```"), + #[inline] + #[unstable(feature = "wrapping_int_impl", issue = "32463")] + pub fn is_power_of_two(self) -> bool { + self.0.is_power_of_two() + } + } + + doc_comment! { + concat!("Returns the smallest power of two greater than or equal to `self`. + +When return value overflows (i.e. `self > (1 << (N-1))` for type +`uN`), overflows to `2^N = 0`. 
+ +# Examples + +Basic usage: + +``` +#![feature(wrapping_next_power_of_two)] +use std::num::Wrapping; + +assert_eq!(Wrapping(2", stringify!($t), ").next_power_of_two(), Wrapping(2)); +assert_eq!(Wrapping(3", stringify!($t), ").next_power_of_two(), Wrapping(4)); +assert_eq!(Wrapping(200_u8).next_power_of_two(), Wrapping(0)); +```"), + #[inline] + #[unstable(feature = "wrapping_next_power_of_two", issue = "32463", + reason = "needs decision on wrapping behaviour")] + pub fn next_power_of_two(self) -> Self { + Wrapping(self.0.wrapping_next_power_of_two()) + } + } + } + )*) +} + +wrapping_int_impl_unsigned! { usize u8 u16 u32 u64 u128 } + mod shift_max { #![allow(non_upper_case_globals)] @@ -347,11 +918,13 @@ mod shift_max { pub const i16: u32 = (1 << 4) - 1; pub const i32: u32 = (1 << 5) - 1; pub const i64: u32 = (1 << 6) - 1; + pub const i128: u32 = (1 << 7) - 1; pub use self::platform::isize; pub const u8: u32 = i8; pub const u16: u32 = i16; pub const u32: u32 = i32; pub const u64: u32 = i64; + pub const u128: u32 = i128; pub use self::platform::usize; } diff --git a/src/libcore/ops/arith.rs b/src/libcore/ops/arith.rs index 8b3d662a6db7..a1bc5463f733 100644 --- a/src/libcore/ops/arith.rs +++ b/src/libcore/ops/arith.rs @@ -75,13 +75,26 @@ /// ``` #[lang = "add"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} + {RHS}`"] +#[rustc_on_unimplemented( + on( + all(_Self="{integer}", RHS="{float}"), + message="cannot add a float to an integer", + ), + on( + all(_Self="{float}", RHS="{integer}"), + message="cannot add an integer to a float", + ), + message="cannot add `{RHS}` to `{Self}`", + label="no implementation for `{Self} + {RHS}`", +)] +#[doc(alias = "+")] pub trait Add { /// The resulting type after applying the `+` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `+` operation. 
+ #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn add(self, rhs: RHS) -> Self::Output; } @@ -170,13 +183,16 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "sub"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} - {RHS}`"] +#[rustc_on_unimplemented(message="cannot subtract `{RHS}` from `{Self}`", + label="no implementation for `{Self} - {RHS}`")] +#[doc(alias = "-")] pub trait Sub { /// The resulting type after applying the `-` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `-` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn sub(self, rhs: RHS) -> Self::Output; } @@ -287,13 +303,16 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "mul"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} * {RHS}`"] +#[rustc_on_unimplemented(message="cannot multiply `{RHS}` to `{Self}`", + label="no implementation for `{Self} * {RHS}`")] +#[doc(alias = "*")] pub trait Mul { /// The resulting type after applying the `*` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `*` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn mul(self, rhs: RHS) -> Self::Output; } @@ -408,13 +427,16 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "div"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} / {RHS}`"] +#[rustc_on_unimplemented(message="cannot divide `{Self}` by `{RHS}`", + label="no implementation for `{Self} / {RHS}`")] +#[doc(alias = "/")] pub trait Div { /// The resulting type after applying the `/` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `/` operation. 
+ #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn div(self, rhs: RHS) -> Self::Output; } @@ -490,13 +512,16 @@ div_impl_float! { f32 f64 } /// ``` #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} % {RHS}`"] +#[rustc_on_unimplemented(message="cannot mod `{Self}` by `{RHS}`", + label="no implementation for `{Self} % {RHS}`")] +#[doc(alias = "%")] pub trait Rem { /// The resulting type after applying the `%` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output = Self; /// Performs the `%` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn rem(self, rhs: RHS) -> Self::Output; } @@ -574,12 +599,14 @@ rem_impl_float! { f32 f64 } /// ``` #[lang = "neg"] #[stable(feature = "rust1", since = "1.0.0")] +#[doc(alias = "-")] pub trait Neg { /// The resulting type after applying the `-` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the unary `-` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn neg(self) -> Self::Output; } @@ -647,7 +674,10 @@ neg_impl_numeric! { isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "add_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} += {Rhs}`"] +#[rustc_on_unimplemented(message="cannot add-assign `{Rhs}` to `{Self}`", + label="no implementation for `{Self} += {Rhs}`")] +#[doc(alias = "+")] +#[doc(alias = "+=")] pub trait AddAssign { /// Performs the `+=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -700,7 +730,10 @@ add_assign_impl! 
{ usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "sub_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} -= {Rhs}`"] +#[rustc_on_unimplemented(message="cannot subtract-assign `{Rhs}` from `{Self}`", + label="no implementation for `{Self} -= {Rhs}`")] +#[doc(alias = "-")] +#[doc(alias = "-=")] pub trait SubAssign { /// Performs the `-=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -744,7 +777,10 @@ sub_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "mul_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} *= {Rhs}`"] +#[rustc_on_unimplemented(message="cannot multiply-assign `{Rhs}` to `{Self}`", + label="no implementation for `{Self} *= {Rhs}`")] +#[doc(alias = "*")] +#[doc(alias = "*=")] pub trait MulAssign { /// Performs the `*=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -788,7 +824,10 @@ mul_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "div_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} /= {Rhs}`"] +#[rustc_on_unimplemented(message="cannot divide-assign `{Self}` by `{Rhs}`", + label="no implementation for `{Self} /= {Rhs}`")] +#[doc(alias = "/")] +#[doc(alias = "/=")] pub trait DivAssign { /// Performs the `/=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -835,7 +874,10 @@ div_assign_impl! 
{ usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "rem_assign"] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} %= {Rhs}`"] +#[rustc_on_unimplemented(message="cannot mod-assign `{Self}` by `{Rhs}`", + label="no implementation for `{Self} %= {Rhs}`")] +#[doc(alias = "%")] +#[doc(alias = "%=")] pub trait RemAssign { /// Performs the `%=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] diff --git a/src/libcore/ops/bit.rs b/src/libcore/ops/bit.rs index 7ac5fc4debf1..3900f365b0ab 100644 --- a/src/libcore/ops/bit.rs +++ b/src/libcore/ops/bit.rs @@ -46,6 +46,7 @@ pub trait Not { type Output; /// Performs the unary `!` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn not(self) -> Self::Output; } @@ -119,14 +120,17 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// assert_eq!(bv1 & bv2, expected); /// ``` #[lang = "bitand"] +#[doc(alias = "&")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} & {RHS}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} & {RHS}`", + label="no implementation for `{Self} & {RHS}`")] pub trait BitAnd { /// The resulting type after applying the `&` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `&` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn bitand(self, rhs: RHS) -> Self::Output; } @@ -200,14 +204,17 @@ bitand_impl!
{ bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// assert_eq!(bv1 | bv2, expected); /// ``` #[lang = "bitor"] +#[doc(alias = "|")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} | {RHS}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} | {RHS}`", + label="no implementation for `{Self} | {RHS}`")] pub trait BitOr { /// The resulting type after applying the `|` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `|` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn bitor(self, rhs: RHS) -> Self::Output; } @@ -284,14 +291,17 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// assert_eq!(bv1 ^ bv2, expected); /// ``` #[lang = "bitxor"] +#[doc(alias = "^")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} ^ {RHS}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} ^ {RHS}`", + label="no implementation for `{Self} ^ {RHS}`")] pub trait BitXor { /// The resulting type after applying the `^` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `^` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn bitxor(self, rhs: RHS) -> Self::Output; } @@ -312,7 +322,12 @@ macro_rules! bitxor_impl { bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } -/// The left shift operator `<<`. +/// The left shift operator `<<`. Note that because this trait is implemented +/// for all integer types with multiple right-hand-side types, Rust's type +/// checker has special handling for `_ << _`, setting the result type for +/// integer operations to the type of the left-hand-side operand. This means +/// that though `a << b` and `a.shl(b)` are one and the same from an evaluation +/// standpoint, they are different when it comes to type inference. 
/// /// # Examples /// @@ -364,14 +379,17 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// SpinVector { vec: vec![2, 3, 4, 0, 1] }); /// ``` #[lang = "shl"] +#[doc(alias = "<<")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} << {RHS}`"] -pub trait Shl { +#[rustc_on_unimplemented(message="no implementation for `{Self} << {RHS}`", + label="no implementation for `{Self} << {RHS}`")] +pub trait Shl { /// The resulting type after applying the `<<` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `<<` operation. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn shl(self, rhs: RHS) -> Self::Output; } @@ -413,7 +431,12 @@ macro_rules! shl_impl_all { shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 } -/// The right shift operator `>>`. +/// The right shift operator `>>`. Note that because this trait is implemented +/// for all integer types with multiple right-hand-side types, Rust's type +/// checker has special handling for `_ >> _`, setting the result type for +/// integer operations to the type of the left-hand-side operand. This means +/// that though `a >> b` and `a.shr(b)` are one and the same from an evaluation +/// standpoint, they are different when it comes to type inference. /// /// # Examples /// @@ -465,14 +488,17 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 } /// SpinVector { vec: vec![3, 4, 0, 1, 2] }); /// ``` #[lang = "shr"] +#[doc(alias = ">>")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} >> {RHS}`"] -pub trait Shr { +#[rustc_on_unimplemented(message="no implementation for `{Self} >> {RHS}`", + label="no implementation for `{Self} >> {RHS}`")] +pub trait Shr { /// The resulting type after applying the `>>` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; /// Performs the `>>` operation. 
+ #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn shr(self, rhs: RHS) -> Self::Output; } @@ -578,8 +604,10 @@ shr_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize } /// assert_eq!(bv, expected); /// ``` #[lang = "bitand_assign"] +#[doc(alias = "&=")] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} &= {Rhs}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} &= {Rhs}`", + label="no implementation for `{Self} &= {Rhs}`")] pub trait BitAndAssign { /// Performs the `&=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -625,8 +653,10 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// assert_eq!(prefs, PersonalPreferences { likes_cats: true, likes_dogs: true }); /// ``` #[lang = "bitor_assign"] +#[doc(alias = "|=")] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} |= {Rhs}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} |= {Rhs}`", + label="no implementation for `{Self} |= {Rhs}`")] pub trait BitOrAssign { /// Performs the `|=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -672,8 +702,10 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// assert_eq!(personality, Personality { has_soul: true, likes_knitting: false}); /// ``` #[lang = "bitxor_assign"] +#[doc(alias = "^=")] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} ^= {Rhs}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} ^= {Rhs}`", + label="no implementation for `{Self} ^= {Rhs}`")] pub trait BitXorAssign { /// Performs the `^=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] @@ -717,9 +749,11 @@ bitxor_assign_impl! 
{ bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// assert_eq!(scalar, Scalar(16)); /// ``` #[lang = "shl_assign"] +#[doc(alias = "<<=")] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} <<= {Rhs}`"] -pub trait ShlAssign { +#[rustc_on_unimplemented(message="no implementation for `{Self} <<= {Rhs}`", + label="no implementation for `{Self} <<= {Rhs}`")] +pub trait ShlAssign { /// Performs the `<<=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] fn shl_assign(&mut self, rhs: Rhs); @@ -783,8 +817,10 @@ shl_assign_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize } /// assert_eq!(scalar, Scalar(4)); /// ``` #[lang = "shr_assign"] +#[doc(alias = ">>=")] #[stable(feature = "op_assign_traits", since = "1.8.0")] -#[rustc_on_unimplemented = "no implementation for `{Self} >>= {Rhs}`"] +#[rustc_on_unimplemented(message="no implementation for `{Self} >>= {Rhs}`", + label="no implementation for `{Self} >>= {Rhs}`")] pub trait ShrAssign { /// Performs the `>>=` operation. #[stable(feature = "op_assign_traits", since = "1.8.0")] diff --git a/src/libcore/ops/deref.rs b/src/libcore/ops/deref.rs index 4ce0740130b9..54eecc82e19e 100644 --- a/src/libcore/ops/deref.rs +++ b/src/libcore/ops/deref.rs @@ -68,6 +68,8 @@ /// assert_eq!('a', *x); /// ``` #[lang = "deref"] +#[doc(alias = "*")] +#[doc(alias = "&*")] #[stable(feature = "rust1", since = "1.0.0")] pub trait Deref { /// The resulting type after dereferencing. @@ -75,6 +77,7 @@ pub trait Deref { type Target: ?Sized; /// Dereferences the value. + #[must_use] #[stable(feature = "rust1", since = "1.0.0")] fn deref(&self) -> &Self::Target; } @@ -162,6 +165,7 @@ impl<'a, T: ?Sized> Deref for &'a mut T { /// assert_eq!('b', *x); /// ``` #[lang = "deref_mut"] +#[doc(alias = "*")] #[stable(feature = "rust1", since = "1.0.0")] pub trait DerefMut: Deref { /// Mutably dereferences the value. 
diff --git a/src/libcore/ops/drop.rs b/src/libcore/ops/drop.rs index 70ab7b2f3b7e..474f7e34c347 100644 --- a/src/libcore/ops/drop.rs +++ b/src/libcore/ops/drop.rs @@ -95,7 +95,7 @@ pub trait Drop { /// Executes the destructor for this type. /// - /// This method is called implilcitly when the value goes out of scope, + /// This method is called implicitly when the value goes out of scope, /// and cannot be called explicitly (this is compiler error [E0040]). /// However, the [`std::mem::drop`] function in the prelude can be /// used to call the argument's `Drop` implementation. diff --git a/src/libcore/ops/generator.rs b/src/libcore/ops/generator.rs index dc7669d195c1..4b70c5398be4 100644 --- a/src/libcore/ops/generator.rs +++ b/src/libcore/ops/generator.rs @@ -56,11 +56,11 @@ pub enum GeneratorState { /// return "foo" /// }; /// -/// match generator.resume() { +/// match unsafe { generator.resume() } { /// GeneratorState::Yielded(1) => {} /// _ => panic!("unexpected return from resume"), /// } -/// match generator.resume() { +/// match unsafe { generator.resume() } { /// GeneratorState::Complete("foo") => {} /// _ => panic!("unexpected return from resume"), /// } @@ -98,6 +98,10 @@ pub trait Generator { /// generator will continue executing until it either yields or returns, at /// which point this function will return. /// + /// The function is unsafe because it can be used on an immovable generator. + /// After such a call, the immovable generator must not move again, but + /// this is not enforced by the compiler. + /// /// # Return value /// /// The `GeneratorState` enum returned from this function indicates what @@ -116,7 +120,7 @@ pub trait Generator { /// been returned previously. While generator literals in the language are /// guaranteed to panic on resuming after `Complete`, this is not guaranteed /// for all implementations of the `Generator` trait. 
- fn resume(&mut self) -> GeneratorState; + unsafe fn resume(&mut self) -> GeneratorState; } #[unstable(feature = "generator_trait", issue = "43122")] @@ -125,7 +129,7 @@ impl<'a, T> Generator for &'a mut T { type Yield = T::Yield; type Return = T::Return; - fn resume(&mut self) -> GeneratorState { + unsafe fn resume(&mut self) -> GeneratorState { (**self).resume() } } diff --git a/src/libcore/ops/index.rs b/src/libcore/ops/index.rs index d65c0aba5048..1ac80ecc96ff 100644 --- a/src/libcore/ops/index.rs +++ b/src/libcore/ops/index.rs @@ -60,8 +60,14 @@ /// assert_eq!(nucleotide_count[Nucleotide::T], 12); /// ``` #[lang = "index"] -#[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] +#[rustc_on_unimplemented( + message="the type `{Self}` cannot be indexed by `{Idx}`", + label="`{Self}` cannot be indexed by `{Idx}`", +)] #[stable(feature = "rust1", since = "1.0.0")] +#[doc(alias = "]")] +#[doc(alias = "[")] +#[doc(alias = "[]")] pub trait Index { /// The returned type after indexing. #[stable(feature = "rust1", since = "1.0.0")] @@ -144,8 +150,14 @@ pub trait Index { /// balance[Side::Left] = Weight::Kilogram(3.0); /// ``` #[lang = "index_mut"] -#[rustc_on_unimplemented = "the type `{Self}` cannot be mutably indexed by `{Idx}`"] +#[rustc_on_unimplemented( + message="the type `{Self}` cannot be mutably indexed by `{Idx}`", + label="`{Self}` cannot be mutably indexed by `{Idx}`", +)] #[stable(feature = "rust1", since = "1.0.0")] +#[doc(alias = "[")] +#[doc(alias = "]")] +#[doc(alias = "[]")] pub trait IndexMut: Index { /// Performs the mutable indexing (`container[index]`) operation. 
#[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/ops/mod.rs b/src/libcore/ops/mod.rs index 70ef4487334c..ce4f45762de4 100644 --- a/src/libcore/ops/mod.rs +++ b/src/libcore/ops/mod.rs @@ -161,7 +161,6 @@ mod drop; mod function; mod generator; mod index; -mod place; mod range; mod try; mod unsize; @@ -191,8 +190,8 @@ pub use self::index::{Index, IndexMut}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::range::{Range, RangeFrom, RangeFull, RangeTo}; -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] -pub use self::range::{RangeInclusive, RangeToInclusive}; +#[stable(feature = "inclusive_range", since = "1.26.0")] +pub use self::range::{RangeInclusive, RangeToInclusive, RangeBounds, Bound}; #[unstable(feature = "try_trait", issue = "42327")] pub use self::try::Try; @@ -200,8 +199,5 @@ pub use self::try::Try; #[unstable(feature = "generator_trait", issue = "43122")] pub use self::generator::{Generator, GeneratorState}; -#[unstable(feature = "placement_new_protocol", issue = "27779")] -pub use self::place::{Place, Placer, InPlace, Boxed, BoxPlace}; - #[unstable(feature = "coerce_unsized", issue = "27732")] pub use self::unsize::CoerceUnsized; diff --git a/src/libcore/ops/place.rs b/src/libcore/ops/place.rs deleted file mode 100644 index 9fb171e7b924..000000000000 --- a/src/libcore/ops/place.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// Both `PLACE <- EXPR` and `box EXPR` desugar into expressions -/// that allocate an intermediate "place" that holds uninitialized -/// state. 
The desugaring evaluates EXPR, and writes the result at -/// the address returned by the `pointer` method of this trait. -/// -/// A `Place` can be thought of as a special representation for a -/// hypothetical `&uninit` reference (which Rust cannot currently -/// express directly). That is, it represents a pointer to -/// uninitialized storage. -/// -/// The client is responsible for two steps: First, initializing the -/// payload (it can access its address via `pointer`). Second, -/// converting the agent to an instance of the owning pointer, via the -/// appropriate `finalize` method (see the `InPlace`. -/// -/// If evaluating EXPR fails, then it is up to the destructor for the -/// implementation of Place to clean up any intermediate state -/// (e.g. deallocate box storage, pop a stack, etc). -#[unstable(feature = "placement_new_protocol", issue = "27779")] -pub trait Place { - /// Returns the address where the input value will be written. - /// Note that the data at this address is generally uninitialized, - /// and thus one should use `ptr::write` for initializing it. - fn pointer(&mut self) -> *mut Data; -} - -/// Interface to implementations of `PLACE <- EXPR`. -/// -/// `PLACE <- EXPR` effectively desugars into: -/// -/// ``` -/// # #![feature(placement_new_protocol, box_heap)] -/// # use std::ops::{Placer, Place, InPlace}; -/// # #[allow(non_snake_case)] -/// # fn main() { -/// # let PLACE = std::boxed::HEAP; -/// # let EXPR = 1; -/// let p = PLACE; -/// let mut place = Placer::make_place(p); -/// let raw_place = Place::pointer(&mut place); -/// let value = EXPR; -/// unsafe { -/// std::ptr::write(raw_place, value); -/// InPlace::finalize(place) -/// } -/// # ; } -/// ``` -/// -/// The type of `PLACE <- EXPR` is derived from the type of `PLACE`; -/// if the type of `PLACE` is `P`, then the final type of the whole -/// expression is `P::Place::Owner` (see the `InPlace` and `Boxed` -/// traits). 
-/// -/// Values for types implementing this trait usually are transient -/// intermediate values (e.g. the return value of `Vec::emplace_back`) -/// or `Copy`, since the `make_place` method takes `self` by value. -#[unstable(feature = "placement_new_protocol", issue = "27779")] -pub trait Placer { - /// `Place` is the intermediate agent guarding the - /// uninitialized state for `Data`. - type Place: InPlace; - - /// Creates a fresh place from `self`. - fn make_place(self) -> Self::Place; -} - -/// Specialization of `Place` trait supporting `PLACE <- EXPR`. -#[unstable(feature = "placement_new_protocol", issue = "27779")] -pub trait InPlace: Place { - /// `Owner` is the type of the end value of `PLACE <- EXPR` - /// - /// Note that when `PLACE <- EXPR` is solely used for - /// side-effecting an existing data-structure, - /// e.g. `Vec::emplace_back`, then `Owner` need not carry any - /// information at all (e.g. it can be the unit type `()` in that - /// case). - type Owner; - - /// Converts self into the final value, shifting - /// deallocation/cleanup responsibilities (if any remain), over to - /// the returned instance of `Owner` and forgetting self. - unsafe fn finalize(self) -> Self::Owner; -} - -/// Core trait for the `box EXPR` form. 
-/// -/// `box EXPR` effectively desugars into: -/// -/// ``` -/// # #![feature(placement_new_protocol)] -/// # use std::ops::{BoxPlace, Place, Boxed}; -/// # #[allow(non_snake_case)] -/// # fn main() { -/// # let EXPR = 1; -/// let mut place = BoxPlace::make_place(); -/// let raw_place = Place::pointer(&mut place); -/// let value = EXPR; -/// # let _: Box<_> = -/// unsafe { -/// ::std::ptr::write(raw_place, value); -/// Boxed::finalize(place) -/// } -/// # ; } -/// ``` -/// -/// The type of `box EXPR` is supplied from its surrounding -/// context; in the above expansion, the result type `T` is used -/// to determine which implementation of `Boxed` to use, and that -/// `` in turn dictates determines which -/// implementation of `BoxPlace` to use, namely: -/// `<::Place as BoxPlace>`. -#[unstable(feature = "placement_new_protocol", issue = "27779")] -pub trait Boxed { - /// The kind of data that is stored in this kind of box. - type Data; /* (`Data` unused b/c cannot yet express below bound.) */ - /// The place that will negotiate the storage of the data. - type Place: BoxPlace; - - /// Converts filled place into final owning value, shifting - /// deallocation/cleanup responsibilities (if any remain), over to - /// returned instance of `Self` and forgetting `filled`. - unsafe fn finalize(filled: Self::Place) -> Self; -} - -/// Specialization of `Place` trait supporting `box EXPR`. -#[unstable(feature = "placement_new_protocol", issue = "27779")] -pub trait BoxPlace : Place { - /// Creates a globally fresh place. - fn make_place() -> Self; -} diff --git a/src/libcore/ops/range.rs b/src/libcore/ops/range.rs index 3f573f7c7eb6..9c635678d7aa 100644 --- a/src/libcore/ops/range.rs +++ b/src/libcore/ops/range.rs @@ -9,6 +9,7 @@ // except according to those terms. use fmt; +use hash::{Hash, Hasher}; /// An unbounded range (`..`). 
/// @@ -45,6 +46,7 @@ use fmt; /// [`IntoIterator`]: ../iter/trait.Iterator.html /// [`Iterator`]: ../iter/trait.IntoIterator.html /// [slicing index]: ../slice/trait.SliceIndex.html +#[doc(alias = "..")] #[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFull; @@ -60,7 +62,7 @@ impl fmt::Debug for RangeFull { /// (`start..end`). /// /// The `Range` `start..end` contains all values with `x >= start` and -/// `x < end`. +/// `x < end`. It is empty unless `start < end`. /// /// # Examples /// @@ -68,12 +70,13 @@ impl fmt::Debug for RangeFull { /// assert_eq!((3..5), std::ops::Range { start: 3, end: 5 }); /// assert_eq!(3 + 4 + 5, (3..6).sum()); /// -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ .. ], [0,1,2,3]); -/// assert_eq!(arr[ ..3], [0,1,2 ]); -/// assert_eq!(arr[1.. ], [ 1,2,3]); -/// assert_eq!(arr[1..3], [ 1,2 ]); // Range +/// let arr = ['a', 'b', 'c', 'd']; +/// assert_eq!(arr[ .. ], ['a', 'b', 'c', 'd']); +/// assert_eq!(arr[ ..3], ['a', 'b', 'c', ]); +/// assert_eq!(arr[1.. ], [ 'b', 'c', 'd']); +/// assert_eq!(arr[1..3], [ 'b', 'c' ]); // Range /// ``` +#[doc(alias = "..")] #[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[stable(feature = "rust1", since = "1.0.0")] pub struct Range { @@ -92,7 +95,6 @@ impl fmt::Debug for Range { } } -#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] impl> Range { /// Returns `true` if `item` is contained in the range. 
/// @@ -101,16 +103,55 @@ impl> Range { /// ``` /// #![feature(range_contains)] /// - /// assert!(!(3..5).contains(2)); - /// assert!( (3..5).contains(3)); - /// assert!( (3..5).contains(4)); - /// assert!(!(3..5).contains(5)); + /// use std::f32; /// - /// assert!(!(3..3).contains(3)); - /// assert!(!(3..2).contains(3)); + /// assert!(!(3..5).contains(&2)); + /// assert!( (3..5).contains(&3)); + /// assert!( (3..5).contains(&4)); + /// assert!(!(3..5).contains(&5)); + /// + /// assert!(!(3..3).contains(&3)); + /// assert!(!(3..2).contains(&3)); + /// + /// assert!( (0.0..1.0).contains(&0.5)); + /// assert!(!(0.0..1.0).contains(&f32::NAN)); + /// assert!(!(0.0..f32::NAN).contains(&0.5)); + /// assert!(!(f32::NAN..1.0).contains(&0.5)); /// ``` - pub fn contains(&self, item: Idx) -> bool { - (self.start <= item) && (item < self.end) + #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + pub fn contains(&self, item: &U) -> bool + where + Idx: PartialOrd, + U: ?Sized + PartialOrd, + { + >::contains(self, item) + } + + /// Returns `true` if the range contains no items. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(range_is_empty)] + /// + /// assert!(!(3..5).is_empty()); + /// assert!( (3..3).is_empty()); + /// assert!( (3..2).is_empty()); + /// ``` + /// + /// The range is empty if either side is incomparable: + /// + /// ``` + /// #![feature(range_is_empty)] + /// + /// use std::f32::NAN; + /// assert!(!(3.0..5.0).is_empty()); + /// assert!( (3.0..NAN).is_empty()); + /// assert!( (NAN..5.0).is_empty()); + /// ``` + #[unstable(feature = "range_is_empty", reason = "recently added", issue = "48111")] + pub fn is_empty(&self) -> bool { + !(self.start < self.end) } } @@ -137,6 +178,7 @@ impl> Range { /// ``` /// /// [`Iterator`]: ../iter/trait.IntoIterator.html +#[doc(alias = "..")] #[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFrom { @@ -152,7 +194,6 @@ impl fmt::Debug for RangeFrom { } } -#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] impl> RangeFrom { /// Returns `true` if `item` is contained in the range. 
/// @@ -161,12 +202,23 @@ impl> RangeFrom { /// ``` /// #![feature(range_contains)] /// - /// assert!(!(3..).contains(2)); - /// assert!( (3..).contains(3)); - /// assert!( (3..).contains(1_000_000_000)); + /// use std::f32; + /// + /// assert!(!(3..).contains(&2)); + /// assert!( (3..).contains(&3)); + /// assert!( (3..).contains(&1_000_000_000)); + /// + /// assert!( (0.0..).contains(&0.5)); + /// assert!(!(0.0..).contains(&f32::NAN)); + /// assert!(!(f32::NAN..).contains(&0.5)); /// ``` - pub fn contains(&self, item: Idx) -> bool { - (self.start <= item) + #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + pub fn contains(&self, item: &U) -> bool + where + Idx: PartialOrd, + U: ?Sized + PartialOrd, + { + >::contains(self, item) } } @@ -208,6 +260,7 @@ impl> RangeFrom { /// [`IntoIterator`]: ../iter/trait.Iterator.html /// [`Iterator`]: ../iter/trait.IntoIterator.html /// [slicing index]: ../slice/trait.SliceIndex.html +#[doc(alias = "..")] #[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeTo { @@ -223,7 +276,6 @@ impl fmt::Debug for RangeTo { } } -#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] impl> RangeTo { /// Returns `true` if `item` is contained in the range. 
/// @@ -232,74 +284,267 @@ impl> RangeTo { /// ``` /// #![feature(range_contains)] /// - /// assert!( (..5).contains(-1_000_000_000)); - /// assert!( (..5).contains(4)); - /// assert!(!(..5).contains(5)); + /// use std::f32; + /// + /// assert!( (..5).contains(&-1_000_000_000)); + /// assert!( (..5).contains(&4)); + /// assert!(!(..5).contains(&5)); + /// + /// assert!( (..1.0).contains(&0.5)); + /// assert!(!(..1.0).contains(&f32::NAN)); + /// assert!(!(..f32::NAN).contains(&0.5)); /// ``` - pub fn contains(&self, item: Idx) -> bool { - (item < self.end) + #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + pub fn contains(&self, item: &U) -> bool + where + Idx: PartialOrd, + U: ?Sized + PartialOrd, + { + >::contains(self, item) } } /// An range bounded inclusively below and above (`start..=end`). /// /// The `RangeInclusive` `start..=end` contains all values with `x >= start` -/// and `x <= end`. +/// and `x <= end`. It is empty unless `start <= end`. +/// +/// This iterator is [fused], but the specific values of `start` and `end` after +/// iteration has finished are **unspecified** other than that [`.is_empty()`] +/// will return `true` once no more values will be produced. 
+/// +/// [fused]: ../iter/trait.FusedIterator.html +/// [`.is_empty()`]: #method.is_empty /// /// # Examples /// /// ``` -/// #![feature(inclusive_range,inclusive_range_syntax)] -/// -/// assert_eq!((3..=5), std::ops::RangeInclusive { start: 3, end: 5 }); +/// assert_eq!((3..=5), std::ops::RangeInclusive::new(3, 5)); /// assert_eq!(3 + 4 + 5, (3..=5).sum()); /// /// let arr = [0, 1, 2, 3]; /// assert_eq!(arr[ ..=2], [0,1,2 ]); /// assert_eq!(arr[1..=2], [ 1,2 ]); // RangeInclusive /// ``` -#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[doc(alias = "..=")] +#[derive(Clone)] // not Copy -- see #27186 +#[stable(feature = "inclusive_range", since = "1.26.0")] pub struct RangeInclusive { - /// The lower bound of the range (inclusive). - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] - pub start: Idx, - /// The upper bound of the range (inclusive). - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] - pub end: Idx, + pub(crate) start: Idx, + pub(crate) end: Idx, + pub(crate) is_empty: Option, + // This field is: + // - `None` when next() or next_back() was never called + // - `Some(false)` when `start <= end` assuming no overflow + // - `Some(true)` otherwise + // The field cannot be a simple `bool` because the `..=` constructor can + // accept non-PartialOrd types, also we want the constructor to be const. 
} -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +trait RangeInclusiveEquality: Sized { + fn canonicalized_is_empty(range: &RangeInclusive) -> bool; +} +impl RangeInclusiveEquality for T { + #[inline] + default fn canonicalized_is_empty(range: &RangeInclusive) -> bool { + range.is_empty.unwrap_or_default() + } +} +impl RangeInclusiveEquality for T { + #[inline] + fn canonicalized_is_empty(range: &RangeInclusive) -> bool { + range.is_empty() + } +} + +#[stable(feature = "inclusive_range", since = "1.26.0")] +impl PartialEq for RangeInclusive { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.start == other.start && self.end == other.end + && RangeInclusiveEquality::canonicalized_is_empty(self) + == RangeInclusiveEquality::canonicalized_is_empty(other) + } +} + +#[stable(feature = "inclusive_range", since = "1.26.0")] +impl Eq for RangeInclusive {} + +#[stable(feature = "inclusive_range", since = "1.26.0")] +impl Hash for RangeInclusive { + fn hash(&self, state: &mut H) { + self.start.hash(state); + self.end.hash(state); + RangeInclusiveEquality::canonicalized_is_empty(self).hash(state); + } +} + +impl RangeInclusive { + /// Creates a new inclusive range. Equivalent to writing `start..=end`. + /// + /// # Examples + /// + /// ``` + /// use std::ops::RangeInclusive; + /// + /// assert_eq!(3..=5, RangeInclusive::new(3, 5)); + /// ``` + #[stable(feature = "inclusive_range_methods", since = "1.27.0")] + #[inline] + pub const fn new(start: Idx, end: Idx) -> Self { + Self { start, end, is_empty: None } + } + + /// Returns the lower bound of the range (inclusive). + /// + /// When using an inclusive range for iteration, the values of `start()` and + /// [`end()`] are unspecified after the iteration ended. To determine + /// whether the inclusive range is empty, use the [`is_empty()`] method + /// instead of comparing `start() > end()`. 
+ /// + /// Note: the value returned by this method is unspecified after the range + /// has been iterated to exhaustion. + /// + /// [`end()`]: #method.end + /// [`is_empty()`]: #method.is_empty + /// + /// # Examples + /// + /// ``` + /// assert_eq!((3..=5).start(), &3); + /// ``` + #[stable(feature = "inclusive_range_methods", since = "1.27.0")] + #[inline] + pub fn start(&self) -> &Idx { + &self.start + } + + /// Returns the upper bound of the range (inclusive). + /// + /// When using an inclusive range for iteration, the values of [`start()`] + /// and `end()` are unspecified after the iteration ended. To determine + /// whether the inclusive range is empty, use the [`is_empty()`] method + /// instead of comparing `start() > end()`. + /// + /// Note: the value returned by this method is unspecified after the range + /// has been iterated to exhaustion. + /// + /// [`start()`]: #method.start + /// [`is_empty()`]: #method.is_empty + /// + /// # Examples + /// + /// ``` + /// assert_eq!((3..=5).end(), &5); + /// ``` + #[stable(feature = "inclusive_range_methods", since = "1.27.0")] + #[inline] + pub fn end(&self) -> &Idx { + &self.end + } + + /// Destructures the `RangeInclusive` into (lower bound, upper (inclusive) bound). + /// + /// Note: the value returned by this method is unspecified after the range + /// has been iterated to exhaustion. + /// + /// # Examples + /// + /// ``` + /// assert_eq!((3..=5).into_inner(), (3, 5)); + /// ``` + #[stable(feature = "inclusive_range_methods", since = "1.27.0")] + #[inline] + pub fn into_inner(self) -> (Idx, Idx) { + (self.start, self.end) + } +} + +#[stable(feature = "inclusive_range", since = "1.26.0")] impl fmt::Debug for RangeInclusive { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{:?}..={:?}", self.start, self.end) } } -#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] impl> RangeInclusive { /// Returns `true` if `item` is contained in the range. 
/// /// # Examples /// /// ``` - /// #![feature(range_contains,inclusive_range_syntax)] + /// #![feature(range_contains)] /// - /// assert!(!(3..=5).contains(2)); - /// assert!( (3..=5).contains(3)); - /// assert!( (3..=5).contains(4)); - /// assert!( (3..=5).contains(5)); - /// assert!(!(3..=5).contains(6)); + /// use std::f32; /// - /// assert!( (3..=3).contains(3)); - /// assert!(!(3..=2).contains(3)); + /// assert!(!(3..=5).contains(&2)); + /// assert!( (3..=5).contains(&3)); + /// assert!( (3..=5).contains(&4)); + /// assert!( (3..=5).contains(&5)); + /// assert!(!(3..=5).contains(&6)); + /// + /// assert!( (3..=3).contains(&3)); + /// assert!(!(3..=2).contains(&3)); + /// + /// assert!( (0.0..=1.0).contains(&1.0)); + /// assert!(!(0.0..=1.0).contains(&f32::NAN)); + /// assert!(!(0.0..=f32::NAN).contains(&0.0)); + /// assert!(!(f32::NAN..=1.0).contains(&1.0)); /// ``` - pub fn contains(&self, item: Idx) -> bool { - self.start <= item && item <= self.end + #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + pub fn contains(&self, item: &U) -> bool + where + Idx: PartialOrd, + U: ?Sized + PartialOrd, + { + >::contains(self, item) + } + + /// Returns `true` if the range contains no items. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(range_is_empty)] + /// + /// assert!(!(3..=5).is_empty()); + /// assert!(!(3..=3).is_empty()); + /// assert!( (3..=2).is_empty()); + /// ``` + /// + /// The range is empty if either side is incomparable: + /// + /// ``` + /// #![feature(range_is_empty)] + /// + /// use std::f32::NAN; + /// assert!(!(3.0..=5.0).is_empty()); + /// assert!( (3.0..=NAN).is_empty()); + /// assert!( (NAN..=5.0).is_empty()); + /// ``` + /// + /// This method returns `true` after iteration has finished: + /// + /// ``` + /// #![feature(range_is_empty)] + /// + /// let mut r = 3..=5; + /// for _ in r.by_ref() {} + /// // Precise field values are unspecified here + /// assert!(r.is_empty()); + /// ``` + #[unstable(feature = "range_is_empty", reason = "recently added", issue = "48111")] + #[inline] + pub fn is_empty(&self) -> bool { + self.is_empty.unwrap_or_else(|| !(self.start <= self.end)) + } + + // If this range's `is_empty` is field is unknown (`None`), update it to be a concrete value. + #[inline] + pub(crate) fn compute_is_empty(&mut self) { + if self.is_empty.is_none() { + self.is_empty = Some(!(self.start <= self.end)); + } } } @@ -313,7 +558,6 @@ impl> RangeInclusive { /// The `..=end` syntax is a `RangeToInclusive`: /// /// ``` -/// #![feature(inclusive_range,inclusive_range_syntax)] /// assert_eq!((..=5), std::ops::RangeToInclusive{ end: 5 }); /// ``` /// @@ -321,8 +565,6 @@ impl> RangeInclusive { /// `for` loop directly. This won't compile: /// /// ```compile_fail,E0277 -/// #![feature(inclusive_range_syntax)] -/// /// // error[E0277]: the trait bound `std::ops::RangeToInclusive<{integer}>: /// // std::iter::Iterator` is not satisfied /// for i in ..=5 { @@ -334,8 +576,6 @@ impl> RangeInclusive { /// array elements up to and including the index indicated by `end`. 
/// /// ``` -/// #![feature(inclusive_range_syntax)] -/// /// let arr = [0, 1, 2, 3]; /// assert_eq!(arr[ ..=2], [0,1,2 ]); // RangeToInclusive /// assert_eq!(arr[1..=2], [ 1,2 ]); @@ -344,17 +584,16 @@ impl> RangeInclusive { /// [`IntoIterator`]: ../iter/trait.Iterator.html /// [`Iterator`]: ../iter/trait.IntoIterator.html /// [slicing index]: ../slice/trait.SliceIndex.html +#[doc(alias = "..=")] #[derive(Copy, Clone, PartialEq, Eq, Hash)] -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] pub struct RangeToInclusive { /// The upper bound of the range (inclusive) - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] pub end: Idx, } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl fmt::Debug for RangeToInclusive { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "..={:?}", self.end) @@ -368,16 +607,295 @@ impl> RangeToInclusive { /// # Examples /// /// ``` - /// #![feature(range_contains,inclusive_range_syntax)] + /// #![feature(range_contains)] /// - /// assert!( (..=5).contains(-1_000_000_000)); - /// assert!( (..=5).contains(5)); - /// assert!(!(..=5).contains(6)); + /// use std::f32; + /// + /// assert!( (..=5).contains(&-1_000_000_000)); + /// assert!( (..=5).contains(&5)); + /// assert!(!(..=5).contains(&6)); + /// + /// assert!( (..=1.0).contains(&1.0)); + /// assert!(!(..=1.0).contains(&f32::NAN)); + /// assert!(!(..=f32::NAN).contains(&0.5)); /// ``` - pub fn contains(&self, item: Idx) -> bool { - (item <= self.end) + #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + pub fn contains(&self, item: &U) -> bool + where + Idx: PartialOrd, + U: ?Sized + PartialOrd, + { + 
>::contains(self, item) } } // RangeToInclusive cannot impl From> // because underflow would be possible with (..0).into() + +/// An endpoint of a range of keys. +/// +/// # Examples +/// +/// `Bound`s are range endpoints: +/// +/// ``` +/// use std::ops::Bound::*; +/// use std::ops::RangeBounds; +/// +/// assert_eq!((..100).start_bound(), Unbounded); +/// assert_eq!((1..12).start_bound(), Included(&1)); +/// assert_eq!((1..12).end_bound(), Excluded(&12)); +/// ``` +/// +/// Using a tuple of `Bound`s as an argument to [`BTreeMap::range`]. +/// Note that in most cases, it's better to use range syntax (`1..5`) instead. +/// +/// ``` +/// use std::collections::BTreeMap; +/// use std::ops::Bound::{Excluded, Included, Unbounded}; +/// +/// let mut map = BTreeMap::new(); +/// map.insert(3, "a"); +/// map.insert(5, "b"); +/// map.insert(8, "c"); +/// +/// for (key, value) in map.range((Excluded(3), Included(8))) { +/// println!("{}: {}", key, value); +/// } +/// +/// assert_eq!(Some((&3, &"a")), map.range((Unbounded, Included(5))).next()); +/// ``` +/// +/// [`BTreeMap::range`]: ../../std/collections/btree_map/struct.BTreeMap.html#method.range +#[stable(feature = "collections_bound", since = "1.17.0")] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +pub enum Bound { + /// An inclusive bound. + #[stable(feature = "collections_bound", since = "1.17.0")] + Included(#[stable(feature = "collections_bound", since = "1.17.0")] T), + /// An exclusive bound. + #[stable(feature = "collections_bound", since = "1.17.0")] + Excluded(#[stable(feature = "collections_bound", since = "1.17.0")] T), + /// An infinite endpoint. Indicates that there is no bound in this direction. + #[stable(feature = "collections_bound", since = "1.17.0")] + Unbounded, +} + +#[stable(feature = "collections_range", since = "1.28.0")] +/// `RangeBounds` is implemented by Rust's built-in range types, produced +/// by range syntax like `..`, `a..`, `..b` or `c..d`. 
+pub trait RangeBounds { + /// Start index bound. + /// + /// Returns the start value as a `Bound`. + /// + /// # Examples + /// + /// ``` + /// # fn main() { + /// use std::ops::Bound::*; + /// use std::ops::RangeBounds; + /// + /// assert_eq!((..10).start_bound(), Unbounded); + /// assert_eq!((3..10).start_bound(), Included(&3)); + /// # } + /// ``` + #[stable(feature = "collections_range", since = "1.28.0")] + fn start_bound(&self) -> Bound<&T>; + + /// End index bound. + /// + /// Returns the end value as a `Bound`. + /// + /// # Examples + /// + /// ``` + /// # fn main() { + /// use std::ops::Bound::*; + /// use std::ops::RangeBounds; + /// + /// assert_eq!((3..).end_bound(), Unbounded); + /// assert_eq!((3..10).end_bound(), Excluded(&10)); + /// # } + /// ``` + #[stable(feature = "collections_range", since = "1.28.0")] + fn end_bound(&self) -> Bound<&T>; + + + /// Returns `true` if `item` is contained in the range. + /// + /// # Examples + /// + /// ``` + /// #![feature(range_contains)] + /// + /// use std::f32; + /// + /// assert!( (3..5).contains(&4)); + /// assert!(!(3..5).contains(&2)); + /// + /// assert!( (0.0..1.0).contains(&0.5)); + /// assert!(!(0.0..1.0).contains(&f32::NAN)); + /// assert!(!(0.0..f32::NAN).contains(&0.5)); + /// assert!(!(f32::NAN..1.0).contains(&0.5)); + #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + fn contains(&self, item: &U) -> bool + where + T: PartialOrd, + U: ?Sized + PartialOrd, + { + (match self.start_bound() { + Included(ref start) => *start <= item, + Excluded(ref start) => *start < item, + Unbounded => true, + }) + && + (match self.end_bound() { + Included(ref end) => item <= *end, + Excluded(ref end) => item < *end, + Unbounded => true, + }) + } +} + +use self::Bound::{Excluded, Included, Unbounded}; + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for RangeFull { + fn start_bound(&self) -> Bound<&T> { + Unbounded + } + fn end_bound(&self) 
-> Bound<&T> { + Unbounded + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for RangeFrom { + fn start_bound(&self) -> Bound<&T> { + Included(&self.start) + } + fn end_bound(&self) -> Bound<&T> { + Unbounded + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for RangeTo { + fn start_bound(&self) -> Bound<&T> { + Unbounded + } + fn end_bound(&self) -> Bound<&T> { + Excluded(&self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for Range { + fn start_bound(&self) -> Bound<&T> { + Included(&self.start) + } + fn end_bound(&self) -> Bound<&T> { + Excluded(&self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for RangeInclusive { + fn start_bound(&self) -> Bound<&T> { + Included(&self.start) + } + fn end_bound(&self) -> Bound<&T> { + Included(&self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for RangeToInclusive { + fn start_bound(&self) -> Bound<&T> { + Unbounded + } + fn end_bound(&self) -> Bound<&T> { + Included(&self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl RangeBounds for (Bound, Bound) { + fn start_bound(&self) -> Bound<&T> { + match *self { + (Included(ref start), _) => Included(start), + (Excluded(ref start), _) => Excluded(start), + (Unbounded, _) => Unbounded, + } + } + + fn end_bound(&self) -> Bound<&T> { + match *self { + (_, Included(ref end)) => Included(end), + (_, Excluded(ref end)) => Excluded(end), + (_, Unbounded) => Unbounded, + } + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl<'a, T: ?Sized + 'a> RangeBounds for (Bound<&'a T>, Bound<&'a T>) { + fn start_bound(&self) -> Bound<&T> { + self.0 + } + + fn end_bound(&self) -> Bound<&T> { + self.1 + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl<'a, T> RangeBounds for RangeFrom<&'a T> { + fn start_bound(&self) 
-> Bound<&T> { + Included(self.start) + } + fn end_bound(&self) -> Bound<&T> { + Unbounded + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl<'a, T> RangeBounds for RangeTo<&'a T> { + fn start_bound(&self) -> Bound<&T> { + Unbounded + } + fn end_bound(&self) -> Bound<&T> { + Excluded(self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl<'a, T> RangeBounds for Range<&'a T> { + fn start_bound(&self) -> Bound<&T> { + Included(self.start) + } + fn end_bound(&self) -> Bound<&T> { + Excluded(self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl<'a, T> RangeBounds for RangeInclusive<&'a T> { + fn start_bound(&self) -> Bound<&T> { + Included(self.start) + } + fn end_bound(&self) -> Bound<&T> { + Included(self.end) + } +} + +#[stable(feature = "collections_range", since = "1.28.0")] +impl<'a, T> RangeBounds for RangeToInclusive<&'a T> { + fn start_bound(&self) -> Bound<&T> { + Unbounded + } + fn end_bound(&self) -> Bound<&T> { + Included(self.end) + } +} diff --git a/src/libcore/ops/try.rs b/src/libcore/ops/try.rs index 81e5cb5c3504..4f2d30aa6a8a 100644 --- a/src/libcore/ops/try.rs +++ b/src/libcore/ops/try.rs @@ -20,7 +20,7 @@ any(from_method="from_error", from_method="from_ok"), from_desugaring="?"), message="the `?` operator can only be used in a \ - function that returns `Result` \ + function that returns `Result` or `Option` \ (or another type that implements `{Try}`)", label="cannot use the `?` operator in a function that returns `{Self}`"), on(all(from_method="into_result", from_desugaring="?"), @@ -28,6 +28,7 @@ that implement `{Try}`", label="the `?` operator cannot be applied to type `{Self}`") )] +#[doc(alias = "?")] pub trait Try { /// The type of this value when viewed as successful. 
#[unstable(feature = "try_trait", issue = "42327")] diff --git a/src/libcore/ops/unsize.rs b/src/libcore/ops/unsize.rs index cd896859b16b..da72f3748425 100644 --- a/src/libcore/ops/unsize.rs +++ b/src/libcore/ops/unsize.rs @@ -13,7 +13,7 @@ use marker::Unsize; /// Trait that indicates that this is a pointer or a wrapper for one, /// where unsizing can be performed on the pointee. /// -/// See the [DST coercion RfC][dst-coerce] and [the nomicon entry on coercion][nomicon-coerce] +/// See the [DST coercion RFC][dst-coerce] and [the nomicon entry on coercion][nomicon-coerce] /// for more details. /// /// For builtin pointer types, pointers to `T` will coerce to pointers to `U` if `T: Unsize` diff --git a/src/libcore/option.rs b/src/libcore/option.rs index 15181dab8531..f743fbfd0752 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -146,7 +146,8 @@ #![stable(feature = "rust1", since = "1.0.0")] use iter::{FromIterator, FusedIterator, TrustedLen}; -use {mem, ops}; +use {hint, mem, ops::{self, Deref}}; +use mem::PinMut; // Note that this is not a lang item per se, but it has a hidden dependency on // `Iterator`, which is one. The compiler assumes that the `next` method of @@ -233,11 +234,11 @@ impl Option { /// [`usize`]: ../../std/primitive.usize.html /// /// ``` - /// let num_as_str: Option = Some("10".to_string()); + /// let text: Option = Some("Hello, world!".to_string()); /// // First, cast `Option` to `Option<&String>` with `as_ref`, - /// // then consume *that* with `map`, leaving `num_as_str` on the stack. - /// let num_as_int: Option = num_as_str.as_ref().map(|n| n.len()); - /// println!("still can print num_as_str: {:?}", num_as_str); + /// // then consume *that* with `map`, leaving `text` on the stack. 
+ /// let text_length: Option = text.as_ref().map(|s| s.len()); + /// println!("still can print text: {:?}", text); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -269,6 +270,15 @@ impl Option { } } + /// Converts from `Option` to `Option>` + #[inline] + #[unstable(feature = "pin", issue = "49150")] + pub fn as_pin_mut<'a>(self: PinMut<'a, Self>) -> Option> { + unsafe { + PinMut::get_mut_unchecked(self).as_mut().map(|x| PinMut::new_unchecked(x)) + } + } + ///////////////////////////////////////////////////////////////////////// // Getting to contained values ///////////////////////////////////////////////////////////////////////// @@ -628,8 +638,6 @@ impl Option { /// # Examples /// /// ```rust - /// #![feature(option_filter)] - /// /// fn is_even(n: &i32) -> bool { /// n % 2 == 0 /// } @@ -639,7 +647,7 @@ impl Option { /// assert_eq!(Some(4).filter(is_even), Some(4)); /// ``` #[inline] - #[unstable(feature = "option_filter", issue = "45860")] + #[stable(feature = "option_filter", since = "1.27.0")] pub fn filter bool>(self, predicate: P) -> Self { if let Some(x) = self { if predicate(&x) { @@ -707,6 +715,42 @@ impl Option { } } + /// Returns [`Some`] if exactly one of `self`, `optb` is [`Some`], otherwise returns `None`. 
+ /// + /// [`Some`]: #variant.Some + /// [`None`]: #variant.None + /// + /// # Examples + /// + /// ``` + /// #![feature(option_xor)] + /// + /// let x = Some(2); + /// let y: Option = None; + /// assert_eq!(x.xor(y), Some(2)); + /// + /// let x: Option = None; + /// let y = Some(2); + /// assert_eq!(x.xor(y), Some(2)); + /// + /// let x = Some(2); + /// let y = Some(2); + /// assert_eq!(x.xor(y), None); + /// + /// let x: Option = None; + /// let y: Option = None; + /// assert_eq!(x.xor(y), None); + /// ``` + #[inline] + #[unstable(feature = "option_xor", issue = "50512")] + pub fn xor(self, optb: Option) -> Option { + match (self, optb) { + (Some(a), None) => Some(a), + (None, Some(b)) => Some(b), + _ => None, + } + } + ///////////////////////////////////////////////////////////////////////// // Entry-like operations to insert if None and return a reference ///////////////////////////////////////////////////////////////////////// @@ -740,7 +784,7 @@ impl Option { match *self { Some(ref mut v) => v, - _ => unreachable!(), + None => unsafe { hint::unreachable_unchecked() }, } } @@ -773,7 +817,7 @@ impl Option { match *self { Some(ref mut v) => v, - _ => unreachable!(), + None => unsafe { hint::unreachable_unchecked() }, } } @@ -789,18 +833,47 @@ impl Option { /// /// ``` /// let mut x = Some(2); - /// x.take(); + /// let y = x.take(); /// assert_eq!(x, None); + /// assert_eq!(y, Some(2)); /// /// let mut x: Option = None; - /// x.take(); + /// let y = x.take(); /// assert_eq!(x, None); + /// assert_eq!(y, None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn take(&mut self) -> Option { mem::replace(self, None) } + + /// Replaces the actual value in the option by the value given in parameter, + /// returning the old value if present, + /// leaving a [`Some`] in its place without deinitializing either one. 
+ /// + /// [`Some`]: #variant.Some + /// + /// # Examples + /// + /// ``` + /// #![feature(option_replace)] + /// + /// let mut x = Some(2); + /// let old = x.replace(5); + /// assert_eq!(x, Some(5)); + /// assert_eq!(old, Some(2)); + /// + /// let mut x = None; + /// let old = x.replace(3); + /// assert_eq!(x, Some(3)); + /// assert_eq!(old, None); + /// ``` + #[inline] + #[unstable(feature = "option_replace", issue = "51998")] + pub fn replace(&mut self, value: T) -> Option { + mem::replace(self, Some(value)) + } } impl<'a, T: Clone> Option<&'a T> { @@ -829,14 +902,13 @@ impl<'a, T: Clone> Option<&'a mut T> { /// # Examples /// /// ``` - /// #![feature(option_ref_mut_cloned)] /// let mut x = 12; /// let opt_x = Some(&mut x); /// assert_eq!(opt_x, Some(&mut 12)); /// let cloned = opt_x.cloned(); /// assert_eq!(cloned, Some(12)); /// ``` - #[unstable(feature = "option_ref_mut_cloned", issue = "43738")] + #[stable(since = "1.26.0", feature = "option_ref_mut_cloned")] pub fn cloned(self) -> Option { self.map(|t| t.clone()) } @@ -881,6 +953,46 @@ impl Option { } } +#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")] +impl Option { + /// Converts from `&Option` to `Option<&T::Target>`. + /// + /// Leaves the original Option in-place, creating a new one with a reference + /// to the original one, additionally coercing the contents via `Deref`. + pub fn deref(&self) -> Option<&T::Target> { + self.as_ref().map(|t| t.deref()) + } +} + +impl Option> { + /// Transposes an `Option` of a `Result` into a `Result` of an `Option`. + /// + /// `None` will be mapped to `Ok(None)`. + /// `Some(Ok(_))` and `Some(Err(_))` will be mapped to `Ok(Some(_))` and `Err(_)`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(transpose_result)] + /// + /// #[derive(Debug, Eq, PartialEq)] + /// struct SomeErr; + /// + /// let x: Result, SomeErr> = Ok(Some(5)); + /// let y: Option> = Some(Ok(5)); + /// assert_eq!(x, y.transpose()); + /// ``` + #[inline] + #[unstable(feature = "transpose_result", issue = "47338")] + pub fn transpose(self) -> Result, E> { + match self { + Some(Ok(x)) => Ok(Some(x)), + Some(Err(e)) => Err(e), + None => Ok(None), + } + } +} + // This is a separate function to reduce the code size of .expect() itself. #[inline(never)] #[cold] @@ -888,7 +1000,6 @@ fn expect_failed(msg: &str) -> ! { panic!("{}", msg) } - ///////////////////////////////////////////////////////////////////////////// // Trait implementations ///////////////////////////////////////////////////////////////////////////// @@ -1022,7 +1133,7 @@ impl<'a, A> DoubleEndedIterator for Iter<'a, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> ExactSizeIterator for Iter<'a, A> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, A> FusedIterator for Iter<'a, A> {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -1030,6 +1141,7 @@ unsafe impl<'a, A> TrustedLen for Iter<'a, A> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> Clone for Iter<'a, A> { + #[inline] fn clone(&self) -> Iter<'a, A> { Iter { inner: self.inner.clone() } } @@ -1067,7 +1179,7 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> ExactSizeIterator for IterMut<'a, A> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, A> FusedIterator for IterMut<'a, A> {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<'a, A> TrustedLen for IterMut<'a, A> {} @@ -1104,7 +1216,7 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl 
ExactSizeIterator for IntoIter {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -1160,6 +1272,16 @@ impl> FromIterator> for Option { None => None, } } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.found_none { + (0, Some(0)) + } else { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } + } } let mut adapter = Adapter { iter: iter.into_iter(), found_none: false }; @@ -1186,14 +1308,17 @@ impl ops::Try for Option { type Ok = T; type Error = NoneError; + #[inline] fn into_result(self) -> Result { self.ok_or(NoneError) } + #[inline] fn from_ok(v: T) -> Self { Some(v) } + #[inline] fn from_error(_: NoneError) -> Self { None } diff --git a/src/libcore/panic.rs b/src/libcore/panic.rs new file mode 100644 index 000000000000..17cac5aa0a05 --- /dev/null +++ b/src/libcore/panic.rs @@ -0,0 +1,275 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Panic support in the standard library. + +#![unstable(feature = "core_panic_info", + reason = "newly available in libcore", + issue = "44489")] + +use any::Any; +use fmt; + +/// A struct providing information about a panic. +/// +/// `PanicInfo` structure is passed to a panic hook set by the [`set_hook`] +/// function. 
+/// +/// [`set_hook`]: ../../std/panic/fn.set_hook.html +/// +/// # Examples +/// +/// ```should_panic +/// use std::panic; +/// +/// panic::set_hook(Box::new(|panic_info| { +/// if let Some(s) = panic_info.payload().downcast_ref::<&str>() { +/// println!("panic occurred: {:?}", s); +/// } else { +/// println!("panic occurred"); +/// } +/// })); +/// +/// panic!("Normal panic"); +/// ``` +#[lang = "panic_info"] +#[stable(feature = "panic_hooks", since = "1.10.0")] +#[derive(Debug)] +pub struct PanicInfo<'a> { + payload: &'a (dyn Any + Send), + message: Option<&'a fmt::Arguments<'a>>, + location: Location<'a>, +} + +impl<'a> PanicInfo<'a> { + #![unstable(feature = "panic_internals", + reason = "internal details of the implementation of the `panic!` \ + and related macros", + issue = "0")] + #[doc(hidden)] + #[inline] + pub fn internal_constructor(message: Option<&'a fmt::Arguments<'a>>, + location: Location<'a>) + -> Self { + struct NoPayload; + PanicInfo { payload: &NoPayload, location, message } + } + + #[doc(hidden)] + #[inline] + pub fn set_payload(&mut self, info: &'a (dyn Any + Send)) { + self.payload = info; + } + + /// Returns the payload associated with the panic. + /// + /// This will commonly, but not always, be a `&'static str` or [`String`]. 
+ /// + /// [`String`]: ../../std/string/struct.String.html + /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// panic::set_hook(Box::new(|panic_info| { + /// println!("panic occurred: {:?}", panic_info.payload().downcast_ref::<&str>().unwrap()); + /// })); + /// + /// panic!("Normal panic"); + /// ``` + #[stable(feature = "panic_hooks", since = "1.10.0")] + pub fn payload(&self) -> &(dyn Any + Send) { + self.payload + } + + /// If the `panic!` macro from the `core` crate (not from `std`) + /// was used with a formatting string and some additional arguments, + /// returns that message ready to be used for example with [`fmt::write`] + /// + /// [`fmt::write`]: ../fmt/fn.write.html + #[unstable(feature = "panic_info_message", issue = "44489")] + pub fn message(&self) -> Option<&fmt::Arguments> { + self.message + } + + /// Returns information about the location from which the panic originated, + /// if available. + /// + /// This method will currently always return [`Some`], but this may change + /// in future versions. + /// + /// [`Some`]: ../../std/option/enum.Option.html#variant.Some + /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// panic::set_hook(Box::new(|panic_info| { + /// if let Some(location) = panic_info.location() { + /// println!("panic occurred in file '{}' at line {}", location.file(), + /// location.line()); + /// } else { + /// println!("panic occurred but can't get location information..."); + /// } + /// })); + /// + /// panic!("Normal panic"); + /// ``` + #[stable(feature = "panic_hooks", since = "1.10.0")] + pub fn location(&self) -> Option<&Location> { + // NOTE: If this is changed to sometimes return None, + // deal with that case in std::panicking::default_hook and std::panicking::begin_panic_fmt. 
+ Some(&self.location) + } +} + +#[stable(feature = "panic_hook_display", since = "1.26.0")] +impl<'a> fmt::Display for PanicInfo<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("panicked at ")?; + if let Some(message) = self.message { + write!(formatter, "'{}', ", message)? + } else if let Some(payload) = self.payload.downcast_ref::<&'static str>() { + write!(formatter, "'{}', ", payload)? + } + // NOTE: we cannot use downcast_ref::() here + // since String is not available in libcore! + // The payload is a String when `std::panic!` is called with multiple arguments, + // but in that case the message is also available. + + self.location.fmt(formatter) + } +} + +/// A struct containing information about the location of a panic. +/// +/// This structure is created by the [`location`] method of [`PanicInfo`]. +/// +/// [`location`]: ../../std/panic/struct.PanicInfo.html#method.location +/// [`PanicInfo`]: ../../std/panic/struct.PanicInfo.html +/// +/// # Examples +/// +/// ```should_panic +/// use std::panic; +/// +/// panic::set_hook(Box::new(|panic_info| { +/// if let Some(location) = panic_info.location() { +/// println!("panic occurred in file '{}' at line {}", location.file(), location.line()); +/// } else { +/// println!("panic occurred but can't get location information..."); +/// } +/// })); +/// +/// panic!("Normal panic"); +/// ``` +#[derive(Debug)] +#[stable(feature = "panic_hooks", since = "1.10.0")] +pub struct Location<'a> { + file: &'a str, + line: u32, + col: u32, +} + +impl<'a> Location<'a> { + #![unstable(feature = "panic_internals", + reason = "internal details of the implementation of the `panic!` \ + and related macros", + issue = "0")] + #[doc(hidden)] + pub fn internal_constructor(file: &'a str, line: u32, col: u32) -> Self { + Location { file, line, col } + } + + /// Returns the name of the source file from which the panic originated. 
+ /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// panic::set_hook(Box::new(|panic_info| { + /// if let Some(location) = panic_info.location() { + /// println!("panic occurred in file '{}'", location.file()); + /// } else { + /// println!("panic occurred but can't get location information..."); + /// } + /// })); + /// + /// panic!("Normal panic"); + /// ``` + #[stable(feature = "panic_hooks", since = "1.10.0")] + pub fn file(&self) -> &str { + self.file + } + + /// Returns the line number from which the panic originated. + /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// panic::set_hook(Box::new(|panic_info| { + /// if let Some(location) = panic_info.location() { + /// println!("panic occurred at line {}", location.line()); + /// } else { + /// println!("panic occurred but can't get location information..."); + /// } + /// })); + /// + /// panic!("Normal panic"); + /// ``` + #[stable(feature = "panic_hooks", since = "1.10.0")] + pub fn line(&self) -> u32 { + self.line + } + + /// Returns the column from which the panic originated. + /// + /// # Examples + /// + /// ```should_panic + /// use std::panic; + /// + /// panic::set_hook(Box::new(|panic_info| { + /// if let Some(location) = panic_info.location() { + /// println!("panic occurred at column {}", location.column()); + /// } else { + /// println!("panic occurred but can't get location information..."); + /// } + /// })); + /// + /// panic!("Normal panic"); + /// ``` + #[stable(feature = "panic_col", since = "1.25.0")] + pub fn column(&self) -> u32 { + self.col + } +} + +#[stable(feature = "panic_hook_display", since = "1.26.0")] +impl<'a> fmt::Display for Location<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "{}:{}:{}", self.file, self.line, self.col) + } +} + +/// An internal trait used by libstd to pass data from libstd to `panic_unwind` +/// and other panic runtimes. 
Not intended to be stabilized any time soon, do +/// not use. +#[unstable(feature = "std_internals", issue = "0")] +#[doc(hidden)] +pub unsafe trait BoxMeUp { + fn box_me_up(&mut self) -> *mut (dyn Any + Send); + fn get(&mut self) -> &(dyn Any + Send); +} diff --git a/src/libcore/panicking.rs b/src/libcore/panicking.rs index 4170d91e5fce..58407de9566e 100644 --- a/src/libcore/panicking.rs +++ b/src/libcore/panicking.rs @@ -37,6 +37,7 @@ issue = "0")] use fmt; +use panic::{Location, PanicInfo}; #[cold] #[inline(never)] // this is the slow path, always #[lang = "panic"] @@ -61,12 +62,17 @@ fn panic_bounds_check(file_line_col: &(&'static str, u32, u32), #[cold] #[inline(never)] pub fn panic_fmt(fmt: fmt::Arguments, file_line_col: &(&'static str, u32, u32)) -> ! { - #[allow(improper_ctypes)] - extern { - #[lang = "panic_fmt"] - #[unwind] - fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: u32, col: u32) -> !; + // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call + #[allow(improper_ctypes)] // PanicInfo contains a trait object which is not FFI safe + extern "Rust" { + #[lang = "panic_impl"] + fn panic_impl(pi: &PanicInfo) -> !; } + let (file, line, col) = *file_line_col; - unsafe { panic_impl(fmt, file, line, col) } + let pi = PanicInfo::internal_constructor( + Some(&fmt), + Location::internal_constructor(file, line, col), + ); + unsafe { panic_impl(&pi) } } diff --git a/src/libcore/prelude/v1.rs b/src/libcore/prelude/v1.rs index 6f4f273a31f5..7c5738e94e54 100644 --- a/src/libcore/prelude/v1.rs +++ b/src/libcore/prelude/v1.rs @@ -57,14 +57,3 @@ pub use option::Option::{self, Some, None}; #[stable(feature = "core_prelude", since = "1.4.0")] #[doc(no_inline)] pub use result::Result::{self, Ok, Err}; - -// Re-exported extension traits for primitive types -#[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] -pub use slice::SliceExt; -#[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] -pub use 
str::StrExt; -#[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] -pub use char::CharExt; diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 7f7246df8f2a..61033e751125 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -166,8 +166,6 @@ pub unsafe fn swap(x: *mut T, y: *mut T) { /// Basic usage: /// /// ``` -/// #![feature(swap_nonoverlapping)] -/// /// use std::ptr; /// /// let mut x = [1, 2, 3, 4]; @@ -181,7 +179,7 @@ pub unsafe fn swap(x: *mut T, y: *mut T) { /// assert_eq!(y, [1, 2, 9]); /// ``` #[inline] -#[unstable(feature = "swap_nonoverlapping", issue = "42818")] +#[stable(feature = "swap_nonoverlapping", since = "1.27.0")] pub unsafe fn swap_nonoverlapping(x: *mut T, y: *mut T, count: usize) { let x = x as *mut u8; let y = y as *mut u8; @@ -189,6 +187,19 @@ pub unsafe fn swap_nonoverlapping(x: *mut T, y: *mut T, count: usize) { swap_nonoverlapping_bytes(x, y, len) } +#[inline] +pub(crate) unsafe fn swap_nonoverlapping_one(x: *mut T, y: *mut T) { + // For types smaller than the block optimization below, + // just swap directly to avoid pessimizing codegen. + if mem::size_of::() < 32 { + let z = read(x); + copy_nonoverlapping(y, x, 1); + write(y, z); + } else { + swap_nonoverlapping(x, y, 1); + } +} + #[inline] unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { // The approach here is to utilize simd to swap x & y efficiently. Testing reveals @@ -241,8 +252,9 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { } } -/// Replaces the value at `dest` with `src`, returning the old -/// value, without dropping either. +/// Moves `src` into the pointed `dest`, returning the previous `dest` value. +/// +/// Neither value is dropped. /// /// # Safety /// @@ -436,6 +448,12 @@ pub unsafe fn write_unaligned(dst: *mut T, src: T) { /// `write_bytes`, or `copy`). Note that `*src = foo` counts as a use /// because it will attempt to drop the value previously at `*src`. 
/// +/// Just like in C, whether an operation is volatile has no bearing whatsoever +/// on questions involving concurrent access from multiple threads. Volatile +/// accesses behave exactly like non-atomic accesses in that regard. In particular, +/// a race between a `read_volatile` and any write operation to the same location +/// is undefined behavior. +/// /// # Examples /// /// Basic usage: @@ -486,6 +504,12 @@ pub unsafe fn read_volatile(src: *const T) -> T { /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been `read` from. /// +/// Just like in C, whether an operation is volatile has no bearing whatsoever +/// on questions involving concurrent access from multiple threads. Volatile +/// accesses behave exactly like non-atomic accesses in that regard. In particular, +/// a race between a `write_volatile` and any other operation (reading or writing) +/// on the same location is undefined behavior. +/// /// # Examples /// /// Basic usage: @@ -579,7 +603,7 @@ impl *const T { /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one - /// byte past the end of an allocated object. + /// byte past the end of *the same* allocated object. /// /// * The computed offset, **in bytes**, cannot overflow an `isize`. /// @@ -631,9 +655,15 @@ impl *const T { /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). + /// In particular, the resulting pointer may *not* be used to access a + /// different allocated object than the one `self` points to. In other + /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is + /// *not* the same as `y`, and dereferencing it is undefined behavior + /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` - /// allows the compiler to optimize better. 
+ /// allows the compiler to optimize better. If you need to cross object + /// boundaries, cast the pointer to an integer and do the arithmetic there. /// /// # Examples /// @@ -665,39 +695,119 @@ impl *const T { /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::()`. /// - /// If the address different between the two pointers ia not a multiple of - /// `mem::size_of::()` then the result of the division is rounded towards - /// zero. + /// This function is the inverse of [`offset`]. /// - /// This function returns `None` if `T` is a zero-sized typed. + /// [`offset`]: #method.offset + /// [`wrapping_offset_from`]: #method.wrapping_offset_from + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and other pointer must be either in bounds or one + /// byte past the end of the same allocated object. + /// + /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. + /// + /// * The distance between the pointers, in bytes, must be an exact multiple + /// of the size of `T`. + /// + /// * The distance being in bounds cannot rely on "wrapping around" the address space. + /// + /// The compiler and standard library generally try to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. + /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. 
As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_offset_from`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). /// /// # Examples /// /// Basic usage: /// /// ``` - /// #![feature(offset_to)] + /// #![feature(ptr_offset_from)] /// - /// fn main() { - /// let a = [0; 5]; - /// let ptr1: *const i32 = &a[1]; - /// let ptr2: *const i32 = &a[3]; - /// assert_eq!(ptr1.offset_to(ptr2), Some(2)); - /// assert_eq!(ptr2.offset_to(ptr1), Some(-2)); - /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2); - /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1); + /// let a = [0; 5]; + /// let ptr1: *const i32 = &a[1]; + /// let ptr2: *const i32 = &a[3]; + /// unsafe { + /// assert_eq!(ptr2.offset_from(ptr1), 2); + /// assert_eq!(ptr1.offset_from(ptr2), -2); + /// assert_eq!(ptr1.offset(2), ptr2); + /// assert_eq!(ptr2.offset(-2), ptr1); /// } /// ``` - #[unstable(feature = "offset_to", issue = "41079")] + #[unstable(feature = "ptr_offset_from", issue = "41079")] #[inline] - pub fn offset_to(self, other: *const T) -> Option where T: Sized { - let size = mem::size_of::(); - if size == 0 { - None - } else { - let diff = (other as isize).wrapping_sub(self as isize); - Some(diff / size as isize) - } + pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { + let pointee_size = mem::size_of::(); + assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize); + + // This is the same sequence that Clang emits for pointer subtraction. + // It can be neither `nsw` nor `nuw` because the input is treated as + // unsigned but then the output is treated as signed, so neither works. 
+ let d = isize::wrapping_sub(self as _, origin as _); + intrinsics::exact_div(d, pointee_size as _) + } + + /// Calculates the distance between two pointers. The returned value is in + /// units of T: the distance in bytes is divided by `mem::size_of::()`. + /// + /// If the address different between the two pointers is not a multiple of + /// `mem::size_of::()` then the result of the division is rounded towards + /// zero. + /// + /// Though this method is safe for any two pointers, note that its result + /// will be mostly useless if the two pointers aren't into the same allocated + /// object, for example if they point to two different local variables. + /// + /// # Panics + /// + /// This function panics if `T` is a zero-sized type. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(ptr_wrapping_offset_from)] + /// + /// let a = [0; 5]; + /// let ptr1: *const i32 = &a[1]; + /// let ptr2: *const i32 = &a[3]; + /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2); + /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2); + /// assert_eq!(ptr1.wrapping_offset(2), ptr2); + /// assert_eq!(ptr2.wrapping_offset(-2), ptr1); + /// + /// let ptr1: *const i32 = 3 as _; + /// let ptr2: *const i32 = 13 as _; + /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2); + /// ``` + #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")] + #[inline] + pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized { + let pointee_size = mem::size_of::(); + assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize); + + let d = isize::wrapping_sub(self as _, origin as _); + d.wrapping_div(pointee_size as _) } /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). 
@@ -740,8 +850,6 @@ impl *const T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// @@ -750,7 +858,7 @@ impl *const T { /// println!("{}", *ptr.add(2) as char); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, @@ -799,8 +907,6 @@ impl *const T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let s: &str = "123"; /// /// unsafe { @@ -809,7 +915,7 @@ impl *const T { /// println!("{}", *end.sub(2) as char); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, @@ -836,8 +942,6 @@ impl *const T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *const u8 = data.as_ptr(); @@ -852,7 +956,7 @@ impl *const T { /// ptr = ptr.wrapping_add(step); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub fn wrapping_add(self, count: usize) -> Self where T: Sized, @@ -879,8 +983,6 @@ impl *const T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *const u8 = data.as_ptr(); @@ -895,7 +997,7 @@ impl *const T { /// ptr = ptr.wrapping_sub(step); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub fn wrapping_sub(self, count: usize) -> Self where T: Sized, @@ -922,8 +1024,6 @@ impl *const T { /// Basic 
usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let x = 12; /// let y = &x as *const i32; /// @@ -931,7 +1031,7 @@ impl *const T { /// assert_eq!(y.read(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, @@ -969,13 +1069,17 @@ impl *const T { /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// + /// Just like in C, whether an operation is volatile has no bearing whatsoever + /// on questions involving concurrent access from multiple threads. Volatile + /// accesses behave exactly like non-atomic accesses in that regard. In particular, + /// a race between a `read_volatile` and any write operation to the same location + /// is undefined behavior. + /// /// # Examples /// /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let x = 12; /// let y = &x as *const i32; /// @@ -983,7 +1087,7 @@ impl *const T { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, @@ -1010,8 +1114,6 @@ impl *const T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let x = 12; /// let y = &x as *const i32; /// @@ -1019,7 +1121,7 @@ impl *const T { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, @@ -1038,16 +1140,14 @@ impl *const T { /// /// Care must be taken with the ownership of `self` and `dest`. /// This method semantically moves the values of `self` into `dest`. 
- /// However it does not drop the contents of `self`, or prevent the contents - /// of `dest` from being dropped or used. + /// However it does not drop the contents of `dest`, or prevent the contents + /// of `self` from being dropped or used. /// /// # Examples /// /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` - /// #![feature(pointer_methods)] - /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { /// let mut dst = Vec::with_capacity(elts); @@ -1056,7 +1156,7 @@ impl *const T { /// dst /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to(self, dest: *mut T, count: usize) where T: Sized, @@ -1085,8 +1185,6 @@ impl *const T { /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` - /// #![feature(pointer_methods)] - /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { /// let mut dst = Vec::with_capacity(elts); @@ -1095,7 +1193,7 @@ impl *const T { /// dst /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) where T: Sized, @@ -1103,15 +1201,22 @@ impl *const T { copy_nonoverlapping(self, dest, count) } - /// Computes the byte offset that needs to be applied in order to - /// make the pointer aligned to `align`. + /// Computes the offset that needs to be applied to the pointer in order to make it aligned to + /// `align`. + /// /// If it is not possible to align the pointer, the implementation returns /// `usize::max_value()`. /// - /// There are no guarantees whatsover that offsetting the pointer will not - /// overflow or go beyond the allocation that the pointer points into. 
- /// It is up to the caller to ensure that the returned offset is correct - /// in all terms other than alignment. + /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be + /// used with the `offset` or `offset_to` methods. + /// + /// There are no guarantees whatsover that offsetting the pointer will not overflow or go + /// beyond the allocation that the pointer points into. It is up to the caller to ensure that + /// the returned offset is correct in all terms other than alignment. + /// + /// # Panics + /// + /// The function panics if `align` is not a power-of-two. /// /// # Examples /// @@ -1135,13 +1240,17 @@ impl *const T { /// # } } /// ``` #[unstable(feature = "align_offset", issue = "44488")] - pub fn align_offset(self, align: usize) -> usize { + pub fn align_offset(self, align: usize) -> usize where T: Sized { + if !align.is_power_of_two() { + panic!("align_offset: align is not a power-of-two"); + } unsafe { - intrinsics::align_offset(self as *const _, align) + align_offset(self, align) } } } + #[lang = "mut_ptr"] impl *mut T { /// Returns `true` if the pointer is null. @@ -1215,7 +1324,7 @@ impl *mut T { /// Behavior: /// /// * Both the starting and resulting pointer must be either in bounds or one - /// byte past the end of an allocated object. + /// byte past the end of *the same* allocated object. /// /// * The computed offset, **in bytes**, cannot overflow an `isize`. /// @@ -1266,9 +1375,15 @@ impl *mut T { /// /// The resulting pointer does not need to be in bounds, but it is /// potentially hazardous to dereference (which requires `unsafe`). + /// In particular, the resulting pointer may *not* be used to access a + /// different allocated object than the one `self` points to. In other + /// words, `x.wrapping_offset(y.wrapping_offset_from(x))` is + /// *not* the same as `y`, and dereferencing it is undefined behavior + /// unless `x` and `y` point into the same allocated object. 
/// /// Always use `.offset(count)` instead when possible, because `offset` - /// allows the compiler to optimize better. + /// allows the compiler to optimize better. If you need to cross object + /// boundaries, cast the pointer to an integer and do the arithmetic there. /// /// # Examples /// @@ -1330,77 +1445,108 @@ impl *mut T { /// Calculates the distance between two pointers. The returned value is in /// units of T: the distance in bytes is divided by `mem::size_of::()`. /// - /// If the address different between the two pointers ia not a multiple of - /// `mem::size_of::()` then the result of the division is rounded towards - /// zero. + /// This function is the inverse of [`offset`]. /// - /// This function returns `None` if `T` is a zero-sized typed. + /// [`offset`]: #method.offset-1 + /// [`wrapping_offset_from`]: #method.wrapping_offset_from-1 + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is Undefined + /// Behavior: + /// + /// * Both the starting and other pointer must be either in bounds or one + /// byte past the end of the same allocated object. + /// + /// * The distance between the pointers, **in bytes**, cannot overflow an `isize`. + /// + /// * The distance between the pointers, in bytes, must be an exact multiple + /// of the size of `T`. + /// + /// * The distance being in bounds cannot rely on "wrapping around" the address space. + /// + /// The compiler and standard library generally try to ensure allocations + /// never reach a size where an offset is a concern. For instance, `Vec` + /// and `Box` ensure they never allocate more than `isize::MAX` bytes, so + /// `ptr_into_vec.offset_from(vec.as_ptr())` is always safe. + /// + /// Most platforms fundamentally can't even construct such an allocation. + /// For instance, no known 64-bit platform can ever serve a request + /// for 263 bytes due to page-table limitations or splitting the address space. 
+ /// However, some 32-bit and 16-bit platforms may successfully serve a request for + /// more than `isize::MAX` bytes with things like Physical Address + /// Extension. As such, memory acquired directly from allocators or memory + /// mapped files *may* be too large to handle with this function. + /// + /// Consider using [`wrapping_offset_from`] instead if these constraints are + /// difficult to satisfy. The only advantage of this method is that it + /// enables more aggressive compiler optimizations. + /// + /// # Panics + /// + /// This function panics if `T` is a Zero-Sized Type ("ZST"). /// /// # Examples /// /// Basic usage: /// /// ``` - /// #![feature(offset_to)] + /// #![feature(ptr_offset_from)] /// - /// fn main() { - /// let mut a = [0; 5]; - /// let ptr1: *mut i32 = &mut a[1]; - /// let ptr2: *mut i32 = &mut a[3]; - /// assert_eq!(ptr1.offset_to(ptr2), Some(2)); - /// assert_eq!(ptr2.offset_to(ptr1), Some(-2)); - /// assert_eq!(unsafe { ptr1.offset(2) }, ptr2); - /// assert_eq!(unsafe { ptr2.offset(-2) }, ptr1); + /// let mut a = [0; 5]; + /// let ptr1: *mut i32 = &mut a[1]; + /// let ptr2: *mut i32 = &mut a[3]; + /// unsafe { + /// assert_eq!(ptr2.offset_from(ptr1), 2); + /// assert_eq!(ptr1.offset_from(ptr2), -2); + /// assert_eq!(ptr1.offset(2), ptr2); + /// assert_eq!(ptr2.offset(-2), ptr1); /// } /// ``` - #[unstable(feature = "offset_to", issue = "41079")] + #[unstable(feature = "ptr_offset_from", issue = "41079")] #[inline] - pub fn offset_to(self, other: *const T) -> Option where T: Sized { - let size = mem::size_of::(); - if size == 0 { - None - } else { - let diff = (other as isize).wrapping_sub(self as isize); - Some(diff / size as isize) - } + pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { + (self as *const T).offset_from(origin) } - /// Computes the byte offset that needs to be applied in order to - /// make the pointer aligned to `align`. 
- /// If it is not possible to align the pointer, the implementation returns - /// `usize::max_value()`. + /// Calculates the distance between two pointers. The returned value is in + /// units of T: the distance in bytes is divided by `mem::size_of::()`. /// - /// There are no guarantees whatsover that offsetting the pointer will not - /// overflow or go beyond the allocation that the pointer points into. - /// It is up to the caller to ensure that the returned offset is correct - /// in all terms other than alignment. + /// If the address different between the two pointers is not a multiple of + /// `mem::size_of::()` then the result of the division is rounded towards + /// zero. + /// + /// Though this method is safe for any two pointers, note that its result + /// will be mostly useless if the two pointers aren't into the same allocated + /// object, for example if they point to two different local variables. + /// + /// # Panics + /// + /// This function panics if `T` is a zero-sized type. 
/// /// # Examples /// - /// Accessing adjacent `u8` as `u16` + /// Basic usage: /// /// ``` - /// # #![feature(align_offset)] - /// # fn foo(n: usize) { - /// # use std::mem::align_of; - /// # unsafe { - /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; - /// let ptr = &x[n] as *const u8; - /// let offset = ptr.align_offset(align_of::()); - /// if offset < x.len() - n - 1 { - /// let u16_ptr = ptr.offset(offset as isize) as *const u16; - /// assert_ne!(*u16_ptr, 500); - /// } else { - /// // while the pointer can be aligned via `offset`, it would point - /// // outside the allocation - /// } - /// # } } + /// #![feature(ptr_wrapping_offset_from)] + /// + /// let mut a = [0; 5]; + /// let ptr1: *mut i32 = &mut a[1]; + /// let ptr2: *mut i32 = &mut a[3]; + /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2); + /// assert_eq!(ptr1.wrapping_offset_from(ptr2), -2); + /// assert_eq!(ptr1.wrapping_offset(2), ptr2); + /// assert_eq!(ptr2.wrapping_offset(-2), ptr1); + /// + /// let ptr1: *mut i32 = 3 as _; + /// let ptr2: *mut i32 = 13 as _; + /// assert_eq!(ptr2.wrapping_offset_from(ptr1), 2); /// ``` - #[unstable(feature = "align_offset", issue = "44488")] - pub fn align_offset(self, align: usize) -> usize { - unsafe { - intrinsics::align_offset(self as *const _, align) - } + #[unstable(feature = "ptr_wrapping_offset_from", issue = "41079")] + #[inline] + pub fn wrapping_offset_from(self, origin: *const T) -> isize where T: Sized { + (self as *const T).wrapping_offset_from(origin) } /// Calculates the offset from a pointer (convenience for `.offset(count as isize)`). 
@@ -1443,8 +1589,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let s: &str = "123"; /// let ptr: *const u8 = s.as_ptr(); /// @@ -1453,7 +1597,7 @@ impl *mut T { /// println!("{}", *ptr.add(2) as char); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn add(self, count: usize) -> Self where T: Sized, @@ -1502,8 +1646,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let s: &str = "123"; /// /// unsafe { @@ -1512,7 +1654,7 @@ impl *mut T { /// println!("{}", *end.sub(2) as char); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn sub(self, count: usize) -> Self where T: Sized, @@ -1539,8 +1681,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// // Iterate using a raw pointer in increments of two elements /// let data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *const u8 = data.as_ptr(); @@ -1555,7 +1695,7 @@ impl *mut T { /// ptr = ptr.wrapping_add(step); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub fn wrapping_add(self, count: usize) -> Self where T: Sized, @@ -1582,8 +1722,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// // Iterate using a raw pointer in increments of two elements (backwards) /// let data = [1u8, 2, 3, 4, 5]; /// let mut ptr: *const u8 = data.as_ptr(); @@ -1598,7 +1736,7 @@ impl *mut T { /// ptr = ptr.wrapping_sub(step); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub fn wrapping_sub(self, count: usize) -> Self where T: Sized, @@ -1625,8 +1763,6 @@ impl *mut T { /// Basic 
usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let x = 12; /// let y = &x as *const i32; /// @@ -1634,7 +1770,7 @@ impl *mut T { /// assert_eq!(y.read(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read(self) -> T where T: Sized, @@ -1668,17 +1804,21 @@ impl *mut T { /// Beyond accepting a raw pointer, this is unsafe because it semantically /// moves the value out of `self` without preventing further usage of `self`. /// If `T` is not `Copy`, then care must be taken to ensure that the value at - /// `src` is not used before the data is overwritten again (e.g. with `write`, + /// `self` is not used before the data is overwritten again (e.g. with `write`, /// `write_bytes`, or `copy`). Note that `*self = foo` counts as a use /// because it will attempt to drop the value previously at `*self`. /// + /// Just like in C, whether an operation is volatile has no bearing whatsoever + /// on questions involving concurrent access from multiple threads. Volatile + /// accesses behave exactly like non-atomic accesses in that regard. In particular, + /// a race between a `read_volatile` and any write operation to the same location + /// is undefined behavior. 
+ /// /// # Examples /// /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let x = 12; /// let y = &x as *const i32; /// @@ -1686,7 +1826,7 @@ impl *mut T { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_volatile(self) -> T where T: Sized, @@ -1713,8 +1853,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let x = 12; /// let y = &x as *const i32; /// @@ -1722,7 +1860,7 @@ impl *mut T { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn read_unaligned(self) -> T where T: Sized, @@ -1749,8 +1887,6 @@ impl *mut T { /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` - /// #![feature(pointer_methods)] - /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { /// let mut dst = Vec::with_capacity(elts); @@ -1759,7 +1895,7 @@ impl *mut T { /// dst /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to(self, dest: *mut T, count: usize) where T: Sized, @@ -1788,8 +1924,6 @@ impl *mut T { /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` - /// #![feature(pointer_methods)] - /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { /// let mut dst = Vec::with_capacity(elts); @@ -1798,7 +1932,7 @@ impl *mut T { /// dst /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_to_nonoverlapping(self, dest: *mut T, count: usize) where T: Sized, @@ -1825,8 +1959,6 @@ impl *mut T { /// Efficiently 
create a Rust vector from an unsafe buffer: /// /// ``` - /// #![feature(pointer_methods)] - /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { /// let mut dst: Vec = Vec::with_capacity(elts); @@ -1835,7 +1967,7 @@ impl *mut T { /// dst /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from(self, src: *const T, count: usize) where T: Sized, @@ -1864,8 +1996,6 @@ impl *mut T { /// Efficiently create a Rust vector from an unsafe buffer: /// /// ``` - /// #![feature(pointer_methods)] - /// /// # #[allow(dead_code)] /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { /// let mut dst: Vec = Vec::with_capacity(elts); @@ -1874,7 +2004,7 @@ impl *mut T { /// dst /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn copy_from_nonoverlapping(self, src: *const T, count: usize) where T: Sized, @@ -1899,7 +2029,7 @@ impl *mut T { /// /// This has all the same safety problems as `ptr::read` with respect to /// invalid pointers, types, and double drops. 
- #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn drop_in_place(self) { drop_in_place(self) @@ -1929,8 +2059,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; @@ -1940,7 +2068,7 @@ impl *mut T { /// assert_eq!(y.read(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write(self, val: T) where T: Sized, @@ -1954,8 +2082,6 @@ impl *mut T { /// # Examples /// /// ``` - /// #![feature(pointer_methods)] - /// /// let mut vec = vec![0; 4]; /// unsafe { /// let vec_ptr = vec.as_mut_ptr(); @@ -1963,7 +2089,7 @@ impl *mut T { /// } /// assert_eq!(vec, [b'a', b'a', 0, 0]); /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_bytes(self, val: u8, count: usize) where T: Sized, @@ -2003,13 +2129,17 @@ impl *mut T { /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been `read` from. /// + /// Just like in C, whether an operation is volatile has no bearing whatsoever + /// on questions involving concurrent access from multiple threads. Volatile + /// accesses behave exactly like non-atomic accesses in that regard. In particular, + /// a race between a `write_volatile` and any other operation (reading or writing) + /// on the same location is undefined behavior. 
+ /// /// # Examples /// /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; @@ -2019,7 +2149,7 @@ impl *mut T { /// assert_eq!(y.read_volatile(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_volatile(self, val: T) where T: Sized, @@ -2040,8 +2170,8 @@ impl *mut T { /// allocations or resources, so care must be taken not to overwrite an object /// that should be dropped. /// - /// Additionally, it does not drop `src`. Semantically, `src` is moved into the - /// location pointed to by `dst`. + /// Additionally, it does not drop `self`. Semantically, `self` is moved into the + /// location pointed to by `val`. /// /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been `read` from. @@ -2051,8 +2181,6 @@ impl *mut T { /// Basic usage: /// /// ``` - /// #![feature(pointer_methods)] - /// /// let mut x = 0; /// let y = &mut x as *mut i32; /// let z = 12; @@ -2062,7 +2190,7 @@ impl *mut T { /// assert_eq!(y.read_unaligned(), 12); /// } /// ``` - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn write_unaligned(self, val: T) where T: Sized, @@ -2077,7 +2205,7 @@ impl *mut T { /// /// This is only unsafe because it accepts a raw pointer. /// Otherwise, this operation is identical to `mem::replace`. - #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn replace(self, src: T) -> T where T: Sized, @@ -2095,15 +2223,193 @@ impl *mut T { /// as arguments. /// /// Ensure that these pointers are valid before calling `swap`. 
- #[unstable(feature = "pointer_methods", issue = "43941")] + #[stable(feature = "pointer_methods", since = "1.26.0")] #[inline] pub unsafe fn swap(self, with: *mut T) where T: Sized, { swap(self, with) } + + /// Computes the offset that needs to be applied to the pointer in order to make it aligned to + /// `align`. + /// + /// If it is not possible to align the pointer, the implementation returns + /// `usize::max_value()`. + /// + /// The offset is expressed in number of `T` elements, and not bytes. The value returned can be + /// used with the `offset` or `offset_to` methods. + /// + /// There are no guarantees whatsover that offsetting the pointer will not overflow or go + /// beyond the allocation that the pointer points into. It is up to the caller to ensure that + /// the returned offset is correct in all terms other than alignment. + /// + /// # Panics + /// + /// The function panics if `align` is not a power-of-two. + /// + /// # Examples + /// + /// Accessing adjacent `u8` as `u16` + /// + /// ``` + /// # #![feature(align_offset)] + /// # fn foo(n: usize) { + /// # use std::mem::align_of; + /// # unsafe { + /// let x = [5u8, 6u8, 7u8, 8u8, 9u8]; + /// let ptr = &x[n] as *const u8; + /// let offset = ptr.align_offset(align_of::()); + /// if offset < x.len() - n - 1 { + /// let u16_ptr = ptr.offset(offset as isize) as *const u16; + /// assert_ne!(*u16_ptr, 500); + /// } else { + /// // while the pointer can be aligned via `offset`, it would point + /// // outside the allocation + /// } + /// # } } + /// ``` + #[unstable(feature = "align_offset", issue = "44488")] + pub fn align_offset(self, align: usize) -> usize where T: Sized { + if !align.is_power_of_two() { + panic!("align_offset: align is not a power-of-two"); + } + unsafe { + align_offset(self, align) + } + } } +/// Align pointer `p`. +/// +/// Calculate offset (in terms of elements of `stride` stride) that has to be applied +/// to pointer `p` so that pointer `p` would get aligned to `a`. 
+/// +/// Note: This implementation has been carefully tailored to not panic. It is UB for this to panic. +/// The only real change that can be made here is change of `INV_TABLE_MOD_16` and associated +/// constants. +/// +/// If we ever decide to make it possible to call the intrinsic with `a` that is not a +/// power-of-two, it will probably be more prudent to just change to a naive implementation rather +/// than trying to adapt this to accomodate that change. +/// +/// Any questions go to @nagisa. +#[lang="align_offset"] +pub(crate) unsafe fn align_offset(p: *const T, a: usize) -> usize { + /// Calculate multiplicative modular inverse of `x` modulo `m`. + /// + /// This implementation is tailored for align_offset and has following preconditions: + /// + /// * `m` is a power-of-two; + /// * `x < m`; (if `x ≥ m`, pass in `x % m` instead) + /// + /// Implementation of this function shall not panic. Ever. + #[inline] + fn mod_inv(x: usize, m: usize) -> usize { + /// Multiplicative modular inverse table modulo 2⁴ = 16. + /// + /// Note, that this table does not contain values where inverse does not exist (i.e. for + /// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.) + const INV_TABLE_MOD_16: [usize; 8] = [1, 11, 13, 7, 9, 3, 5, 15]; + /// Modulo for which the `INV_TABLE_MOD_16` is intended. + const INV_TABLE_MOD: usize = 16; + /// INV_TABLE_MOD² + const INV_TABLE_MOD_SQUARED: usize = INV_TABLE_MOD * INV_TABLE_MOD; + + let table_inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1]; + if m <= INV_TABLE_MOD { + table_inverse & (m - 1) + } else { + // We iterate "up" using the following formula: + // + // $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$ + // + // until 2²ⁿ ≥ m. Then we can reduce to our desired `m` by taking the result `mod m`. + let mut inverse = table_inverse; + let mut going_mod = INV_TABLE_MOD_SQUARED; + loop { + // y = y * (2 - xy) mod n + // + // Note, that we use wrapping operations here intentionally – the original formula + // uses e.g. 
subtraction `mod n`. It is entirely fine to do them `mod + // usize::max_value()` instead, because we take the result `mod n` at the end + // anyway. + inverse = inverse.wrapping_mul( + 2usize.wrapping_sub(x.wrapping_mul(inverse)) + ) & (going_mod - 1); + if going_mod > m { + return inverse & (m - 1); + } + going_mod = going_mod.wrapping_mul(going_mod); + } + } + } + + let stride = ::mem::size_of::(); + let a_minus_one = a.wrapping_sub(1); + let pmoda = p as usize & a_minus_one; + + if pmoda == 0 { + // Already aligned. Yay! + return 0; + } + + if stride <= 1 { + return if stride == 0 { + // If the pointer is not aligned, and the element is zero-sized, then no amount of + // elements will ever align the pointer. + !0 + } else { + a.wrapping_sub(pmoda) + }; + } + + let smoda = stride & a_minus_one; + // a is power-of-two so cannot be 0. stride = 0 is handled above. + let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)); + let gcd = 1usize << gcdpow; + + if gcd == 1 { + // This branch solves for the variable $o$ in following linear congruence equation: + // + // ⎰ p + o ≡ 0 (mod a) # $p + o$ must be aligned to specified alignment $a$ + // ⎱ o ≡ 0 (mod s) # offset $o$ must be a multiple of stride $s$ + // + // where + // + // * a, s are co-prime + // + // This gives us the formula below: + // + // o = (a - (p mod a)) * (s⁻¹ mod a) * s + // + // The first term is “the relative alignment of p to a”, the second term is “how does + // incrementing p by one s change the relative alignment of p”, the third term is + // translating change in units of s to a byte count. + // + // Furthermore, the result produced by this solution is not “minimal”, so it is necessary + // to take the result $o mod lcm(s, a)$. Since $s$ and $a$ are co-prime (i.e. $gcd(s, a) = + // 1$) and $lcm(s, a) = s * a / gcd(s, a)$, we can replace $lcm(s, a)$ with just a $s * a$. 
+ // + // (Author note: we decided later on to express the offset in "elements" rather than bytes, + // which drops the multiplication by `s` on both sides of the modulo.) + return intrinsics::unchecked_rem(a.wrapping_sub(pmoda).wrapping_mul(mod_inv(smoda, a)), a); + } + + if p as usize & (gcd - 1) == 0 { + // This can be aligned, but `a` and `stride` are not co-prime, so a somewhat adapted + // formula is used. + let j = a.wrapping_sub(pmoda) >> gcdpow; + let k = smoda >> gcdpow; + return intrinsics::unchecked_rem(j.wrapping_mul(mod_inv(k, a)), a >> gcdpow); + } + + // Cannot be aligned at all. + usize::max_value() +} + + + // Equality for pointers #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for *const T { @@ -2321,7 +2627,7 @@ impl PartialOrd for *mut T { /// its owning Unique. /// /// If you're uncertain of whether it's correct to use `Unique` for your purposes, -/// consider using `Shared`, which has weaker semantics. +/// consider using `NonNull`, which has weaker semantics. /// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value @@ -2330,9 +2636,11 @@ impl PartialOrd for *mut T { /// /// Unlike `*mut T`, `Unique` is covariant over `T`. This should always be correct /// for any type which upholds Unique's aliasing requirements. 
-#[allow(missing_debug_implementations)] -#[unstable(feature = "unique", reason = "needs an RFC to flesh out design", - issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0", + reason = "use NonNull instead and consider PhantomData \ + (if you also use #[may_dangle]), Send, and/or Sync")] +#[doc(hidden)] +#[repr(transparent)] pub struct Unique { pointer: NonZero<*const T>, // NOTE: this marker has no consequences for variance, but is necessary @@ -2343,61 +2651,76 @@ pub struct Unique { _marker: PhantomData, } +#[unstable(feature = "ptr_internals", issue = "0")] +impl fmt::Debug for Unique { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + /// `Unique` pointers are `Send` if `T` is `Send` because the data they /// reference is unaliased. Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] unsafe impl Send for Unique { } /// `Unique` pointers are `Sync` if `T` is `Sync` because the data they /// reference is unaliased. Note that this aliasing invariant is /// unenforced by the type system; the abstraction using the /// `Unique` must enforce it. -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] unsafe impl Sync for Unique { } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl Unique { /// Creates a new `Unique` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. - pub fn empty() -> Self { + /// + /// Note that the pointer value may potentially represent a valid pointer to + /// a `T`, which means this must not be used as a "not yet initialized" + /// sentinel value. 
Types that lazily allocate must track initialization by + /// some other means. + // FIXME: rename to dangling() to match NonNull? + pub const fn empty() -> Self { unsafe { - let ptr = mem::align_of::() as *mut T; - Unique::new_unchecked(ptr) + Unique::new_unchecked(mem::align_of::() as *mut T) } } } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl Unique { /// Creates a new `Unique`. /// /// # Safety /// /// `ptr` must be non-null. - #[unstable(feature = "unique", issue = "27730")] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { - Unique { pointer: NonZero::new_unchecked(ptr), _marker: PhantomData } + Unique { pointer: NonZero(ptr as _), _marker: PhantomData } } /// Creates a new `Unique` if `ptr` is non-null. pub fn new(ptr: *mut T) -> Option { - NonZero::new(ptr as *const T).map(|nz| Unique { pointer: nz, _marker: PhantomData }) + if !ptr.is_null() { + Some(Unique { pointer: NonZero(ptr as _), _marker: PhantomData }) + } else { + None + } } /// Acquires the underlying `*mut` pointer. pub fn as_ptr(self) -> *mut T { - self.pointer.get() as *mut T + self.pointer.0 as *mut T } /// Dereferences the content. /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer - /// (unbound) lifetime is needed, use `&*my_ptr.ptr()`. + /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } @@ -2406,43 +2729,50 @@ impl Unique { /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer - /// (unbound) lifetime is needed, use `&mut *my_ptr.ptr()`. + /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. 
pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl Clone for Unique { fn clone(&self) -> Self { *self } } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl Copy for Unique { } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl CoerceUnsized> for Unique where T: Unsize { } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl fmt::Pointer for Unique { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<&'a mut T> for Unique { fn from(reference: &'a mut T) -> Self { - Unique { pointer: NonZero::from(reference), _marker: PhantomData } + Unique { pointer: NonZero(reference as _), _marker: PhantomData } } } -#[unstable(feature = "unique", issue = "27730")] +#[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From<&'a T> for Unique { fn from(reference: &'a T) -> Self { - Unique { pointer: NonZero::from(reference), _marker: PhantomData } + Unique { pointer: NonZero(reference as _), _marker: PhantomData } + } +} + +#[unstable(feature = "ptr_internals", issue = "0")] +impl<'a, T: ?Sized> From> for Unique { + fn from(p: NonNull) -> Self { + Unique { pointer: p.pointer, _marker: PhantomData } } } @@ -2450,77 +2780,87 @@ impl<'a, T: ?Sized> From<&'a T> for Unique { /// /// This is often the correct thing to use when building data structures using /// raw pointers, but is ultimately more dangerous to use because of its additional -/// properties. If you're not sure if you should use `Shared`, just use `*mut T`! +/// properties. If you're not sure if you should use `NonNull`, just use `*mut T`! 
/// /// Unlike `*mut T`, the pointer must always be non-null, even if the pointer /// is never dereferenced. This is so that enums may use this forbidden value -/// as a discriminant -- `Option>` has the same size as `Shared`. +/// as a discriminant -- `Option>` has the same size as `*mut T`. /// However the pointer may still dangle if it isn't dereferenced. /// -/// Unlike `*mut T`, `Shared` is covariant over `T`. If this is incorrect +/// Unlike `*mut T`, `NonNull` is covariant over `T`. If this is incorrect /// for your use case, you should include some PhantomData in your type to /// provide invariance, such as `PhantomData>` or `PhantomData<&'a mut T>`. /// Usually this won't be necessary; covariance is correct for most safe abstractions, /// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they /// provide a public API that follows the normal shared XOR mutable rules of Rust. -#[allow(missing_debug_implementations)] -#[unstable(feature = "shared", reason = "needs an RFC to flesh out design", - issue = "27730")] -pub struct Shared { +#[stable(feature = "nonnull", since = "1.25.0")] +#[repr(transparent)] +pub struct NonNull { pointer: NonZero<*const T>, } -/// `Shared` pointers are not `Send` because the data they reference may be aliased. +/// `NonNull` pointers are not `Send` because the data they reference may be aliased. // NB: This impl is unnecessary, but should provide better error messages. -#[unstable(feature = "shared", issue = "27730")] -impl !Send for Shared { } +#[stable(feature = "nonnull", since = "1.25.0")] +impl !Send for NonNull { } -/// `Shared` pointers are not `Sync` because the data they reference may be aliased. +/// `NonNull` pointers are not `Sync` because the data they reference may be aliased. // NB: This impl is unnecessary, but should provide better error messages. 
-#[unstable(feature = "shared", issue = "27730")] -impl !Sync for Shared { } +#[stable(feature = "nonnull", since = "1.25.0")] +impl !Sync for NonNull { } -#[unstable(feature = "shared", issue = "27730")] -impl Shared { - /// Creates a new `Shared` that is dangling, but well-aligned. +impl NonNull { + /// Creates a new `NonNull` that is dangling, but well-aligned. /// /// This is useful for initializing types which lazily allocate, like /// `Vec::new` does. - pub fn empty() -> Self { + /// + /// Note that the pointer value may potentially represent a valid pointer to + /// a `T`, which means this must not be used as a "not yet initialized" + /// sentinel value. Types that lazily allocate must track initialization by + /// some other means. + #[stable(feature = "nonnull", since = "1.25.0")] + pub fn dangling() -> Self { unsafe { let ptr = mem::align_of::() as *mut T; - Shared::new_unchecked(ptr) + NonNull::new_unchecked(ptr) } } } -#[unstable(feature = "shared", issue = "27730")] -impl Shared { - /// Creates a new `Shared`. +impl NonNull { + /// Creates a new `NonNull`. /// /// # Safety /// /// `ptr` must be non-null. - #[unstable(feature = "shared", issue = "27730")] + #[stable(feature = "nonnull", since = "1.25.0")] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { - Shared { pointer: NonZero::new_unchecked(ptr) } + NonNull { pointer: NonZero(ptr as _) } } - /// Creates a new `Shared` if `ptr` is non-null. + /// Creates a new `NonNull` if `ptr` is non-null. + #[stable(feature = "nonnull", since = "1.25.0")] pub fn new(ptr: *mut T) -> Option { - NonZero::new(ptr as *const T).map(|nz| Shared { pointer: nz }) + if !ptr.is_null() { + Some(NonNull { pointer: NonZero(ptr as _) }) + } else { + None + } } /// Acquires the underlying `*mut` pointer. + #[stable(feature = "nonnull", since = "1.25.0")] pub fn as_ptr(self) -> *mut T { - self.pointer.get() as *mut T + self.pointer.0 as *mut T } /// Dereferences the content. 
/// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer - /// (unbound) lifetime is needed, use `&*my_ptr.ptr()`. + /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`. + #[stable(feature = "nonnull", since = "1.25.0")] pub unsafe fn as_ref(&self) -> &T { &*self.as_ptr() } @@ -2529,56 +2869,96 @@ impl Shared { /// /// The resulting lifetime is bound to self so this behaves "as if" /// it were actually an instance of T that is getting borrowed. If a longer - /// (unbound) lifetime is needed, use `&mut *my_ptr.ptr_mut()`. + /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`. + #[stable(feature = "nonnull", since = "1.25.0")] pub unsafe fn as_mut(&mut self) -> &mut T { &mut *self.as_ptr() } - /// Acquires the underlying pointer as a `*mut` pointer. - #[rustc_deprecated(since = "1.19", reason = "renamed to `as_ptr` for ergonomics/consistency")] - #[unstable(feature = "shared", issue = "27730")] - pub unsafe fn as_mut_ptr(&self) -> *mut T { - self.as_ptr() + /// Cast to a pointer of another type + #[stable(feature = "nonnull_cast", since = "1.27.0")] + pub fn cast(self) -> NonNull { + unsafe { + NonNull::new_unchecked(self.as_ptr() as *mut U) + } } } -#[unstable(feature = "shared", issue = "27730")] -impl Clone for Shared { +#[stable(feature = "nonnull", since = "1.25.0")] +impl Clone for NonNull { fn clone(&self) -> Self { *self } } -#[unstable(feature = "shared", issue = "27730")] -impl Copy for Shared { } +#[stable(feature = "nonnull", since = "1.25.0")] +impl Copy for NonNull { } -#[unstable(feature = "shared", issue = "27730")] -impl CoerceUnsized> for Shared where T: Unsize { } +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl CoerceUnsized> for NonNull where T: Unsize { } -#[unstable(feature = "shared", issue = "27730")] -impl fmt::Pointer for Shared { +#[stable(feature = "nonnull", since = "1.25.0")] +impl fmt::Debug for NonNull { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.as_ptr(), f) } } -#[unstable(feature = "shared", issue = "27730")] -impl From> for Shared { +#[stable(feature = "nonnull", since = "1.25.0")] +impl fmt::Pointer for NonNull { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.as_ptr(), f) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl Eq for NonNull {} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl PartialEq for NonNull { + fn eq(&self, other: &Self) -> bool { + self.as_ptr() == other.as_ptr() + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl Ord for NonNull { + fn cmp(&self, other: &Self) -> Ordering { + self.as_ptr().cmp(&other.as_ptr()) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl PartialOrd for NonNull { + fn partial_cmp(&self, other: &Self) -> Option { + self.as_ptr().partial_cmp(&other.as_ptr()) + } +} + +#[stable(feature = "nonnull", since = "1.25.0")] +impl hash::Hash for NonNull { + fn hash(&self, state: &mut H) { + self.as_ptr().hash(state) + } +} + +#[unstable(feature = "ptr_internals", issue = "0")] +impl From> for NonNull { fn from(unique: Unique) -> Self { - Shared { pointer: unique.pointer } + NonNull { pointer: unique.pointer } } } -#[unstable(feature = "shared", issue = "27730")] -impl<'a, T: ?Sized> From<&'a mut T> for Shared { +#[stable(feature = "nonnull", since = "1.25.0")] +impl<'a, T: ?Sized> From<&'a mut T> for NonNull { fn from(reference: &'a mut T) -> Self { - Shared { pointer: NonZero::from(reference) } + NonNull { pointer: NonZero(reference as _) } } } -#[unstable(feature = "shared", issue = "27730")] -impl<'a, T: ?Sized> From<&'a T> for Shared { +#[stable(feature = "nonnull", since = "1.25.0")] +impl<'a, T: ?Sized> From<&'a T> for NonNull { fn from(reference: &'a T) -> Self { - Shared { pointer: NonZero::from(reference) } + NonNull { pointer: NonZero(reference as _) } } } diff --git a/src/libcore/result.rs 
b/src/libcore/result.rs index 2ace3d2aee87..ac908342655b 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -242,7 +242,7 @@ use fmt; use iter::{FromIterator, FusedIterator, TrustedLen}; -use ops; +use ops::{self, Deref}; /// `Result` is a type that represents either success ([`Ok`]) or failure ([`Err`]). /// @@ -251,7 +251,7 @@ use ops; /// [`Ok`]: enum.Result.html#variant.Ok /// [`Err`]: enum.Result.html#variant.Err #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] -#[must_use] +#[must_use = "this `Result` may be an `Err` variant, which should be handled"] #[stable(feature = "rust1", since = "1.0.0")] pub enum Result { /// Contains the success value @@ -909,6 +909,73 @@ impl Result { } } +#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")] +impl Result { + /// Converts from `&Result` to `Result<&T::Target, &E>`. + /// + /// Leaves the original Result in-place, creating a new one with a reference + /// to the original one, additionally coercing the `Ok` arm of the Result via + /// `Deref`. + pub fn deref_ok(&self) -> Result<&T::Target, &E> { + self.as_ref().map(|t| t.deref()) + } +} + +#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")] +impl Result { + /// Converts from `&Result` to `Result<&T, &E::Target>`. + /// + /// Leaves the original Result in-place, creating a new one with a reference + /// to the original one, additionally coercing the `Err` arm of the Result via + /// `Deref`. + pub fn deref_err(&self) -> Result<&T, &E::Target> + { + self.as_ref().map_err(|e| e.deref()) + } +} + +#[unstable(feature = "inner_deref", reason = "newly added", issue = "50264")] +impl Result { + /// Converts from `&Result` to `Result<&T::Target, &E::Target>`. + /// + /// Leaves the original Result in-place, creating a new one with a reference + /// to the original one, additionally coercing both the `Ok` and `Err` arms + /// of the Result via `Deref`. 
+ pub fn deref(&self) -> Result<&T::Target, &E::Target> + { + self.as_ref().map(|t| t.deref()).map_err(|e| e.deref()) + } +} + +impl Result, E> { + /// Transposes a `Result` of an `Option` into an `Option` of a `Result`. + /// + /// `Ok(None)` will be mapped to `None`. + /// `Ok(Some(_))` and `Err(_)` will be mapped to `Some(Ok(_))` and `Some(Err(_))`. + /// + /// # Examples + /// + /// ``` + /// #![feature(transpose_result)] + /// + /// #[derive(Debug, Eq, PartialEq)] + /// struct SomeErr; + /// + /// let x: Result, SomeErr> = Ok(Some(5)); + /// let y: Option> = Some(Ok(5)); + /// assert_eq!(x.transpose(), y); + /// ``` + #[inline] + #[unstable(feature = "transpose_result", issue = "47338")] + pub fn transpose(self) -> Option> { + match self { + Ok(Some(x)) => Some(Ok(x)), + Ok(None) => None, + Err(e) => Some(Err(e)), + } + } +} + // This is a separate function to reduce the code size of the methods #[inline(never)] #[cold] @@ -1009,7 +1076,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Iter<'a, T> {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -1017,6 +1084,7 @@ unsafe impl<'a, A> TrustedLen for Iter<'a, A> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { + #[inline] fn clone(&self) -> Iter<'a, T> { Iter { inner: self.inner } } } @@ -1053,7 +1121,7 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for IterMut<'a, T> {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -1096,7 +1164,7 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since 
= "1.0.0")] impl ExactSizeIterator for IntoIter {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for IntoIter {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -1168,14 +1236,17 @@ impl ops::Try for Result { type Ok = T; type Error = E; + #[inline] fn into_result(self) -> Self { self } + #[inline] fn from_ok(v: T) -> Self { Ok(v) } + #[inline] fn from_error(v: E) -> Self { Err(v) } diff --git a/src/libcore/slice/memchr.rs b/src/libcore/slice/memchr.rs index 69c9cb37dcfd..c9d3c7fea983 100644 --- a/src/libcore/slice/memchr.rs +++ b/src/libcore/slice/memchr.rs @@ -39,21 +39,10 @@ fn repeat_byte(b: u8) -> usize { (b as usize) << 8 | b as usize } -#[cfg(target_pointer_width = "32")] +#[cfg(not(target_pointer_width = "16"))] #[inline] fn repeat_byte(b: u8) -> usize { - let mut rep = (b as usize) << 8 | b as usize; - rep = rep << 16 | rep; - rep -} - -#[cfg(target_pointer_width = "64")] -#[inline] -fn repeat_byte(b: u8) -> usize { - let mut rep = (b as usize) << 8 | b as usize; - rep = rep << 16 | rep; - rep = rep << 32 | rep; - rep + (b as usize) * (::usize::MAX / 255) } /// Return the first index matching the byte `x` in `text`. @@ -111,27 +100,30 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { // - the first remaining bytes, < 2 word size let len = text.len(); let ptr = text.as_ptr(); - let usize_bytes = mem::size_of::(); + type Chunk = usize; - // search to an aligned boundary - let end_align = (ptr as usize + len) & (usize_bytes - 1); - let mut offset; - if end_align > 0 { - offset = if end_align >= len { 0 } else { len - end_align }; - if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) { - return Some(offset + index); - } - } else { - offset = len; + let (min_aligned_offset, max_aligned_offset) = { + // We call this just to obtain the length of the prefix and suffix. + // In the middle we always process two chunks at once. 
+ let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() }; + (prefix.len(), len - suffix.len()) + }; + + let mut offset = max_aligned_offset; + if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) { + return Some(offset + index); } - // search the body of the text + // search the body of the text, make sure we don't cross min_aligned_offset. + // offset is always aligned, so just testing `>` is sufficient and avoids possible + // overflow. let repeated_x = repeat_byte(x); + let chunk_bytes = mem::size_of::(); - while offset >= 2 * usize_bytes { + while offset > min_aligned_offset { unsafe { - let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize); - let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize); + let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); + let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); // break if there is a matching byte let zu = contains_zero_byte(u ^ repeated_x); @@ -140,91 +132,9 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { break; } } - offset -= 2 * usize_bytes; + offset -= 2 * chunk_bytes; } // find the byte before the point the body loop stopped text[..offset].iter().rposition(|elt| *elt == x) } - -// test fallback implementations on all platforms -#[test] -fn matches_one() { - assert_eq!(Some(0), memchr(b'a', b"a")); -} - -#[test] -fn matches_begin() { - assert_eq!(Some(0), memchr(b'a', b"aaaa")); -} - -#[test] -fn matches_end() { - assert_eq!(Some(4), memchr(b'z', b"aaaaz")); -} - -#[test] -fn matches_nul() { - assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00")); -} - -#[test] -fn matches_past_nul() { - assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z")); -} - -#[test] -fn no_match_empty() { - assert_eq!(None, memchr(b'a', b"")); -} - -#[test] -fn no_match() { - assert_eq!(None, memchr(b'a', b"xyz")); -} - -#[test] -fn matches_one_reversed() { - assert_eq!(Some(0), memrchr(b'a', b"a")); -} - 
-#[test] -fn matches_begin_reversed() { - assert_eq!(Some(3), memrchr(b'a', b"aaaa")); -} - -#[test] -fn matches_end_reversed() { - assert_eq!(Some(0), memrchr(b'z', b"zaaaa")); -} - -#[test] -fn matches_nul_reversed() { - assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00")); -} - -#[test] -fn matches_past_nul_reversed() { - assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa")); -} - -#[test] -fn no_match_empty_reversed() { - assert_eq!(None, memrchr(b'a', b"")); -} - -#[test] -fn no_match_reversed() { - assert_eq!(None, memrchr(b'a', b"xyz")); -} - -#[test] -fn each_alignment_reversed() { - let mut data = [1u8; 64]; - let needle = 2; - let pos = 40; - data[pos] = needle; - for start in 0..16 { - assert_eq!(Some(pos - start), memrchr(needle, &data[start..])); - } -} diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index 244bf476cafb..a3960556d341 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -59,510 +59,389 @@ mod rotate; mod sort; #[repr(C)] -struct Repr { - pub data: *const T, - pub len: usize, +union Repr<'a, T: 'a> { + rust: &'a [T], + rust_mut: &'a mut [T], + raw: FatPtr, +} + +#[repr(C)] +struct FatPtr { + data: *const T, + len: usize, } // // Extension traits // -/// Extension methods for slices. -#[unstable(feature = "core_slice_ext", - reason = "stable interface provided by `impl [T]` in later crates", - issue = "32110")] -#[allow(missing_docs)] // documented elsewhere -pub trait SliceExt { - type Item; - - #[stable(feature = "core", since = "1.6.0")] - fn split_at(&self, mid: usize) -> (&[Self::Item], &[Self::Item]); - - #[stable(feature = "core", since = "1.6.0")] - fn iter(&self) -> Iter; - - #[stable(feature = "core", since = "1.6.0")] - fn split

(&self, pred: P) -> Split - where P: FnMut(&Self::Item) -> bool; - - #[unstable(feature = "slice_rsplit", issue = "41020")] - fn rsplit

(&self, pred: P) -> RSplit - where P: FnMut(&Self::Item) -> bool; - - #[stable(feature = "core", since = "1.6.0")] - fn splitn

(&self, n: usize, pred: P) -> SplitN - where P: FnMut(&Self::Item) -> bool; - - #[stable(feature = "core", since = "1.6.0")] - fn rsplitn

(&self, n: usize, pred: P) -> RSplitN - where P: FnMut(&Self::Item) -> bool; - - #[stable(feature = "core", since = "1.6.0")] - fn windows(&self, size: usize) -> Windows; - - #[stable(feature = "core", since = "1.6.0")] - fn chunks(&self, size: usize) -> Chunks; - - #[unstable(feature = "exact_chunks", issue = "47115")] - fn exact_chunks(&self, size: usize) -> ExactChunks; - - #[stable(feature = "core", since = "1.6.0")] - fn get(&self, index: I) -> Option<&I::Output> - where I: SliceIndex; - #[stable(feature = "core", since = "1.6.0")] - fn first(&self) -> Option<&Self::Item>; - - #[stable(feature = "core", since = "1.6.0")] - fn split_first(&self) -> Option<(&Self::Item, &[Self::Item])>; - - #[stable(feature = "core", since = "1.6.0")] - fn split_last(&self) -> Option<(&Self::Item, &[Self::Item])>; - - #[stable(feature = "core", since = "1.6.0")] - fn last(&self) -> Option<&Self::Item>; - - #[stable(feature = "core", since = "1.6.0")] - unsafe fn get_unchecked(&self, index: I) -> &I::Output - where I: SliceIndex; - #[stable(feature = "core", since = "1.6.0")] - fn as_ptr(&self) -> *const Self::Item; - - #[stable(feature = "core", since = "1.6.0")] - fn binary_search(&self, x: &Self::Item) -> Result - where Self::Item: Ord; - - #[stable(feature = "core", since = "1.6.0")] - fn binary_search_by<'a, F>(&'a self, f: F) -> Result - where F: FnMut(&'a Self::Item) -> Ordering; - - #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] - fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result - where F: FnMut(&'a Self::Item) -> B, - B: Ord; - - #[stable(feature = "core", since = "1.6.0")] - fn len(&self) -> usize; - - #[stable(feature = "core", since = "1.6.0")] - fn is_empty(&self) -> bool { self.len() == 0 } - - #[stable(feature = "core", since = "1.6.0")] - fn get_mut(&mut self, index: I) -> Option<&mut I::Output> - where I: SliceIndex; - #[stable(feature = "core", since = "1.6.0")] - fn iter_mut(&mut self) -> IterMut; - - #[stable(feature = 
"core", since = "1.6.0")] - fn first_mut(&mut self) -> Option<&mut Self::Item>; - - #[stable(feature = "core", since = "1.6.0")] - fn split_first_mut(&mut self) -> Option<(&mut Self::Item, &mut [Self::Item])>; - - #[stable(feature = "core", since = "1.6.0")] - fn split_last_mut(&mut self) -> Option<(&mut Self::Item, &mut [Self::Item])>; - - #[stable(feature = "core", since = "1.6.0")] - fn last_mut(&mut self) -> Option<&mut Self::Item>; - - #[stable(feature = "core", since = "1.6.0")] - fn split_mut

(&mut self, pred: P) -> SplitMut - where P: FnMut(&Self::Item) -> bool; - - #[unstable(feature = "slice_rsplit", issue = "41020")] - fn rsplit_mut

(&mut self, pred: P) -> RSplitMut - where P: FnMut(&Self::Item) -> bool; - - #[stable(feature = "core", since = "1.6.0")] - fn splitn_mut

(&mut self, n: usize, pred: P) -> SplitNMut - where P: FnMut(&Self::Item) -> bool; - - #[stable(feature = "core", since = "1.6.0")] - fn rsplitn_mut

(&mut self, n: usize, pred: P) -> RSplitNMut - where P: FnMut(&Self::Item) -> bool; - - #[stable(feature = "core", since = "1.6.0")] - fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut; - - #[unstable(feature = "exact_chunks", issue = "47115")] - fn exact_chunks_mut(&mut self, size: usize) -> ExactChunksMut; - - #[stable(feature = "core", since = "1.6.0")] - fn swap(&mut self, a: usize, b: usize); - - #[stable(feature = "core", since = "1.6.0")] - fn split_at_mut(&mut self, mid: usize) -> (&mut [Self::Item], &mut [Self::Item]); - - #[stable(feature = "core", since = "1.6.0")] - fn reverse(&mut self); - - #[stable(feature = "core", since = "1.6.0")] - unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output - where I: SliceIndex; - #[stable(feature = "core", since = "1.6.0")] - fn as_mut_ptr(&mut self) -> *mut Self::Item; - - #[stable(feature = "core", since = "1.6.0")] - fn contains(&self, x: &Self::Item) -> bool where Self::Item: PartialEq; - - #[stable(feature = "core", since = "1.6.0")] - fn starts_with(&self, needle: &[Self::Item]) -> bool where Self::Item: PartialEq; - - #[stable(feature = "core", since = "1.6.0")] - fn ends_with(&self, needle: &[Self::Item]) -> bool where Self::Item: PartialEq; - - #[unstable(feature = "slice_rotate", issue = "41891")] - fn rotate_left(&mut self, mid: usize); - - #[unstable(feature = "slice_rotate", issue = "41891")] - fn rotate_right(&mut self, k: usize); - - #[stable(feature = "clone_from_slice", since = "1.7.0")] - fn clone_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Clone; - - #[stable(feature = "copy_from_slice", since = "1.9.0")] - fn copy_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Copy; - - #[unstable(feature = "swap_with_slice", issue = "44030")] - fn swap_with_slice(&mut self, src: &mut [Self::Item]); - - #[stable(feature = "sort_unstable", since = "1.20.0")] - fn sort_unstable(&mut self) - where Self::Item: Ord; - - #[stable(feature = "sort_unstable", since = 
"1.20.0")] - fn sort_unstable_by(&mut self, compare: F) - where F: FnMut(&Self::Item, &Self::Item) -> Ordering; - - #[stable(feature = "sort_unstable", since = "1.20.0")] - fn sort_unstable_by_key(&mut self, f: F) - where F: FnMut(&Self::Item) -> B, - B: Ord; -} - -// Use macros to be generic over const/mut -macro_rules! slice_offset { - ($ptr:expr, $by:expr) => {{ - let ptr = $ptr; - if size_from_ptr(ptr) == 0 { - (ptr as *mut i8).wrapping_offset($by) as _ - } else { - ptr.offset($by) - } - }}; -} - -// make a &T from a *const T -macro_rules! make_ref { - ($ptr:expr) => {{ - let ptr = $ptr; - if size_from_ptr(ptr) == 0 { - // Use a non-null pointer value - &*(1 as *mut _) - } else { - &*ptr - } - }}; -} - -// make a &mut T from a *mut T -macro_rules! make_ref_mut { - ($ptr:expr) => {{ - let ptr = $ptr; - if size_from_ptr(ptr) == 0 { - // Use a non-null pointer value - &mut *(1 as *mut _) - } else { - &mut *ptr - } - }}; -} - -#[unstable(feature = "core_slice_ext", - reason = "stable interface provided by `impl [T]` in later crates", - issue = "32110")] -impl SliceExt for [T] { - type Item = T; - +#[lang = "slice"] +#[cfg(not(test))] +impl [T] { + /// Returns the number of elements in the slice. + /// + /// # Examples + /// + /// ``` + /// let a = [1, 2, 3]; + /// assert_eq!(a.len(), 3); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn split_at(&self, mid: usize) -> (&[T], &[T]) { - (&self[..mid], &self[mid..]) - } - - #[inline] - fn iter(&self) -> Iter { + #[rustc_const_unstable(feature = "const_slice_len")] + pub const fn len(&self) -> usize { unsafe { - let p = if mem::size_of::() == 0 { - 1 as *const _ - } else { - let p = self.as_ptr(); - assume(!p.is_null()); - p - }; - - Iter { - ptr: p, - end: slice_offset!(p, self.len() as isize), - _marker: marker::PhantomData - } + Repr { rust: self }.raw.len } } + /// Returns `true` if the slice has a length of 0. 
+ /// + /// # Examples + /// + /// ``` + /// let a = [1, 2, 3]; + /// assert!(!a.is_empty()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn split

(&self, pred: P) -> Split - where P: FnMut(&T) -> bool - { - Split { - v: self, - pred, - finished: false - } + #[rustc_const_unstable(feature = "const_slice_len")] + pub const fn is_empty(&self) -> bool { + self.len() == 0 } + /// Returns the first element of the slice, or `None` if it is empty. + /// + /// # Examples + /// + /// ``` + /// let v = [10, 40, 30]; + /// assert_eq!(Some(&10), v.first()); + /// + /// let w: &[i32] = &[]; + /// assert_eq!(None, w.first()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn rsplit

(&self, pred: P) -> RSplit - where P: FnMut(&T) -> bool - { - RSplit { inner: self.split(pred) } - } - - #[inline] - fn splitn

(&self, n: usize, pred: P) -> SplitN - where P: FnMut(&T) -> bool - { - SplitN { - inner: GenericSplitN { - iter: self.split(pred), - count: n - } - } - } - - #[inline] - fn rsplitn

(&self, n: usize, pred: P) -> RSplitN - where P: FnMut(&T) -> bool - { - RSplitN { - inner: GenericSplitN { - iter: self.rsplit(pred), - count: n - } - } - } - - #[inline] - fn windows(&self, size: usize) -> Windows { - assert!(size != 0); - Windows { v: self, size: size } - } - - #[inline] - fn chunks(&self, chunk_size: usize) -> Chunks { - assert!(chunk_size != 0); - Chunks { v: self, chunk_size: chunk_size } - } - - #[inline] - fn exact_chunks(&self, chunk_size: usize) -> ExactChunks { - assert!(chunk_size != 0); - let rem = self.len() % chunk_size; - let len = self.len() - rem; - ExactChunks { v: &self[..len], chunk_size: chunk_size} - } - - #[inline] - fn get(&self, index: I) -> Option<&I::Output> - where I: SliceIndex<[T]> - { - index.get(self) - } - - #[inline] - fn first(&self) -> Option<&T> { + pub fn first(&self) -> Option<&T> { if self.is_empty() { None } else { Some(&self[0]) } } + /// Returns a mutable pointer to the first element of the slice, or `None` if it is empty. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some(first) = x.first_mut() { + /// *first = 5; + /// } + /// assert_eq!(x, &[5, 1, 2]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn split_first(&self) -> Option<(&T, &[T])> { - if self.is_empty() { None } else { Some((&self[0], &self[1..])) } - } - - #[inline] - fn split_last(&self) -> Option<(&T, &[T])> { - let len = self.len(); - if len == 0 { None } else { Some((&self[len - 1], &self[..(len - 1)])) } - } - - #[inline] - fn last(&self) -> Option<&T> { - if self.is_empty() { None } else { Some(&self[self.len() - 1]) } - } - - #[inline] - unsafe fn get_unchecked(&self, index: I) -> &I::Output - where I: SliceIndex<[T]> - { - index.get_unchecked(self) - } - - #[inline] - fn as_ptr(&self) -> *const T { - self as *const [T] as *const T - } - - fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result - where F: FnMut(&'a T) -> Ordering - { - let s = self; - let mut size = 
s.len(); - if size == 0 { - return Err(0); - } - let mut base = 0usize; - while size > 1 { - let half = size / 2; - let mid = base + half; - // mid is always in [0, size), that means mid is >= 0 and < size. - // mid >= 0: by definition - // mid < size: mid = size / 2 + size / 4 + size / 8 ... - let cmp = f(unsafe { s.get_unchecked(mid) }); - base = if cmp == Greater { base } else { mid }; - size -= half; - } - // base is always in [0, size) because base <= mid. - let cmp = f(unsafe { s.get_unchecked(base) }); - if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) } - } - - #[inline] - fn len(&self) -> usize { - unsafe { - mem::transmute::<&[T], Repr>(self).len - } - } - - #[inline] - fn get_mut(&mut self, index: I) -> Option<&mut I::Output> - where I: SliceIndex<[T]> - { - index.get_mut(self) - } - - #[inline] - fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { - let len = self.len(); - let ptr = self.as_mut_ptr(); - - unsafe { - assert!(mid <= len); - - (from_raw_parts_mut(ptr, mid), - from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) - } - } - - #[inline] - fn iter_mut(&mut self) -> IterMut { - unsafe { - let p = if mem::size_of::() == 0 { - 1 as *mut _ - } else { - let p = self.as_mut_ptr(); - assume(!p.is_null()); - p - }; - - IterMut { - ptr: p, - end: slice_offset!(p, self.len() as isize), - _marker: marker::PhantomData - } - } - } - - #[inline] - fn last_mut(&mut self) -> Option<&mut T> { - let len = self.len(); - if len == 0 { return None; } - Some(&mut self[len - 1]) - } - - #[inline] - fn first_mut(&mut self) -> Option<&mut T> { + pub fn first_mut(&mut self) -> Option<&mut T> { if self.is_empty() { None } else { Some(&mut self[0]) } } + /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. 
+ /// + /// # Examples + /// + /// ``` + /// let x = &[0, 1, 2]; + /// + /// if let Some((first, elements)) = x.split_first() { + /// assert_eq!(first, &0); + /// assert_eq!(elements, &[1, 2]); + /// } + /// ``` + #[stable(feature = "slice_splits", since = "1.5.0")] #[inline] - fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> { + pub fn split_first(&self) -> Option<(&T, &[T])> { + if self.is_empty() { None } else { Some((&self[0], &self[1..])) } + } + + /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some((first, elements)) = x.split_first_mut() { + /// *first = 3; + /// elements[0] = 4; + /// elements[1] = 5; + /// } + /// assert_eq!(x, &[3, 4, 5]); + /// ``` + #[stable(feature = "slice_splits", since = "1.5.0")] + #[inline] + pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> { if self.is_empty() { None } else { let split = self.split_at_mut(1); Some((&mut split.0[0], split.1)) } } + /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. + /// + /// # Examples + /// + /// ``` + /// let x = &[0, 1, 2]; + /// + /// if let Some((last, elements)) = x.split_last() { + /// assert_eq!(last, &2); + /// assert_eq!(elements, &[0, 1]); + /// } + /// ``` + #[stable(feature = "slice_splits", since = "1.5.0")] #[inline] - fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> { + pub fn split_last(&self) -> Option<(&T, &[T])> { + let len = self.len(); + if len == 0 { None } else { Some((&self[len - 1], &self[..(len - 1)])) } + } + + /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. 
+ /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some((last, elements)) = x.split_last_mut() { + /// *last = 3; + /// elements[0] = 4; + /// elements[1] = 5; + /// } + /// assert_eq!(x, &[4, 5, 3]); + /// ``` + #[stable(feature = "slice_splits", since = "1.5.0")] + #[inline] + pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> { let len = self.len(); if len == 0 { None } else { let split = self.split_at_mut(len - 1); Some((&mut split.1[0], split.0)) } + } + /// Returns the last element of the slice, or `None` if it is empty. + /// + /// # Examples + /// + /// ``` + /// let v = [10, 40, 30]; + /// assert_eq!(Some(&30), v.last()); + /// + /// let w: &[i32] = &[]; + /// assert_eq!(None, w.last()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn split_mut

(&mut self, pred: P) -> SplitMut - where P: FnMut(&T) -> bool + pub fn last(&self) -> Option<&T> { + if self.is_empty() { None } else { Some(&self[self.len() - 1]) } + } + + /// Returns a mutable pointer to the last item in the slice. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some(last) = x.last_mut() { + /// *last = 10; + /// } + /// assert_eq!(x, &[0, 1, 10]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn last_mut(&mut self) -> Option<&mut T> { + let len = self.len(); + if len == 0 { return None; } + Some(&mut self[len - 1]) + } + + /// Returns a reference to an element or subslice depending on the type of + /// index. + /// + /// - If given a position, returns a reference to the element at that + /// position or `None` if out of bounds. + /// - If given a range, returns the subslice corresponding to that range, + /// or `None` if out of bounds. + /// + /// # Examples + /// + /// ``` + /// let v = [10, 40, 30]; + /// assert_eq!(Some(&40), v.get(1)); + /// assert_eq!(Some(&[10, 40][..]), v.get(0..2)); + /// assert_eq!(None, v.get(3)); + /// assert_eq!(None, v.get(0..4)); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn get(&self, index: I) -> Option<&I::Output> + where I: SliceIndex { - SplitMut { v: self, pred: pred, finished: false } + index.get(self) } + /// Returns a mutable reference to an element or subslice depending on the + /// type of index (see [`get`]) or `None` if the index is out of bounds. + /// + /// [`get`]: #method.get + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some(elem) = x.get_mut(1) { + /// *elem = 42; + /// } + /// assert_eq!(x, &[0, 42, 2]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn rsplit_mut

(&mut self, pred: P) -> RSplitMut - where P: FnMut(&T) -> bool + pub fn get_mut(&mut self, index: I) -> Option<&mut I::Output> + where I: SliceIndex { - RSplitMut { inner: self.split_mut(pred) } + index.get_mut(self) } + /// Returns a reference to an element or subslice, without doing bounds + /// checking. + /// + /// This is generally not recommended, use with caution! For a safe + /// alternative see [`get`]. + /// + /// [`get`]: #method.get + /// + /// # Examples + /// + /// ``` + /// let x = &[1, 2, 4]; + /// + /// unsafe { + /// assert_eq!(x.get_unchecked(1), &2); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn splitn_mut

(&mut self, n: usize, pred: P) -> SplitNMut - where P: FnMut(&T) -> bool + pub unsafe fn get_unchecked(&self, index: I) -> &I::Output + where I: SliceIndex { - SplitNMut { - inner: GenericSplitN { - iter: self.split_mut(pred), - count: n - } - } + index.get_unchecked(self) } + /// Returns a mutable reference to an element or subslice, without doing + /// bounds checking. + /// + /// This is generally not recommended, use with caution! For a safe + /// alternative see [`get_mut`]. + /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// let x = &mut [1, 2, 4]; + /// + /// unsafe { + /// let elem = x.get_unchecked_mut(1); + /// *elem = 13; + /// } + /// assert_eq!(x, &[1, 13, 4]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn rsplitn_mut

(&mut self, n: usize, pred: P) -> RSplitNMut where - P: FnMut(&T) -> bool, + pub unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output + where I: SliceIndex { - RSplitNMut { - inner: GenericSplitN { - iter: self.rsplit_mut(pred), - count: n - } - } + index.get_unchecked_mut(self) } + /// Returns a raw pointer to the slice's buffer. + /// + /// The caller must ensure that the slice outlives the pointer this + /// function returns, or else it will end up pointing to garbage. + /// + /// Modifying the container referenced by this slice may cause its buffer + /// to be reallocated, which would also make any pointers to it invalid. + /// + /// # Examples + /// + /// ``` + /// let x = &[1, 2, 4]; + /// let x_ptr = x.as_ptr(); + /// + /// unsafe { + /// for i in 0..x.len() { + /// assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize)); + /// } + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut { - assert!(chunk_size != 0); - ChunksMut { v: self, chunk_size: chunk_size } + #[rustc_const_unstable(feature = "const_slice_as_ptr")] + pub const fn as_ptr(&self) -> *const T { + self as *const [T] as *const T } + /// Returns an unsafe mutable pointer to the slice's buffer. + /// + /// The caller must ensure that the slice outlives the pointer this + /// function returns, or else it will end up pointing to garbage. + /// + /// Modifying the container referenced by this slice may cause its buffer + /// to be reallocated, which would also make any pointers to it invalid. 
+ /// + /// # Examples + /// + /// ``` + /// let x = &mut [1, 2, 4]; + /// let x_ptr = x.as_mut_ptr(); + /// + /// unsafe { + /// for i in 0..x.len() { + /// *x_ptr.offset(i as isize) += 2; + /// } + /// } + /// assert_eq!(x, &[3, 4, 6]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn exact_chunks_mut(&mut self, chunk_size: usize) -> ExactChunksMut { - assert!(chunk_size != 0); - let rem = self.len() % chunk_size; - let len = self.len() - rem; - ExactChunksMut { v: &mut self[..len], chunk_size: chunk_size} + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut [T] as *mut T } + /// Swaps two elements in the slice. + /// + /// # Arguments + /// + /// * a - The index of the first element + /// * b - The index of the second element + /// + /// # Panics + /// + /// Panics if `a` or `b` are out of bounds. + /// + /// # Examples + /// + /// ``` + /// let mut v = ["a", "b", "c", "d"]; + /// v.swap(1, 3); + /// assert!(v == ["a", "d", "c", "b"]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn swap(&mut self, a: usize, b: usize) { + pub fn swap(&mut self, a: usize, b: usize) { unsafe { // Can't take two mutable loans from one vector, so instead just cast // them to their raw pointers to do the swap @@ -572,7 +451,18 @@ impl SliceExt for [T] { } } - fn reverse(&mut self) { + /// Reverses the order of elements in the slice, in place. + /// + /// # Examples + /// + /// ``` + /// let mut v = [1, 2, 3]; + /// v.reverse(); + /// assert!(v == [3, 2, 1]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn reverse(&mut self) { let mut i: usize = 0; let ln = self.len(); @@ -635,42 +525,920 @@ impl SliceExt for [T] { } } + /// Returns an iterator over the slice. 
+ /// + /// # Examples + /// + /// ``` + /// let x = &[1, 2, 4]; + /// let mut iterator = x.iter(); + /// + /// assert_eq!(iterator.next(), Some(&1)); + /// assert_eq!(iterator.next(), Some(&2)); + /// assert_eq!(iterator.next(), Some(&4)); + /// assert_eq!(iterator.next(), None); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output - where I: SliceIndex<[T]> + pub fn iter(&self) -> Iter { + unsafe { + let ptr = self.as_ptr(); + assume(!ptr.is_null()); + + let end = if mem::size_of::() == 0 { + (ptr as *const u8).wrapping_offset(self.len() as isize) as *const T + } else { + ptr.offset(self.len() as isize) + }; + + Iter { + ptr, + end, + _marker: marker::PhantomData + } + } + } + + /// Returns an iterator that allows modifying each value. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [1, 2, 4]; + /// for elem in x.iter_mut() { + /// *elem += 2; + /// } + /// assert_eq!(x, &[3, 4, 6]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn iter_mut(&mut self) -> IterMut { + unsafe { + let ptr = self.as_mut_ptr(); + assume(!ptr.is_null()); + + let end = if mem::size_of::() == 0 { + (ptr as *mut u8).wrapping_offset(self.len() as isize) as *mut T + } else { + ptr.offset(self.len() as isize) + }; + + IterMut { + ptr, + end, + _marker: marker::PhantomData + } + } + } + + /// Returns an iterator over all contiguous windows of length + /// `size`. The windows overlap. If the slice is shorter than + /// `size`, the iterator returns no values. + /// + /// # Panics + /// + /// Panics if `size` is 0. 
+ /// + /// # Examples + /// + /// ``` + /// let slice = ['r', 'u', 's', 't']; + /// let mut iter = slice.windows(2); + /// assert_eq!(iter.next().unwrap(), &['r', 'u']); + /// assert_eq!(iter.next().unwrap(), &['u', 's']); + /// assert_eq!(iter.next().unwrap(), &['s', 't']); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// If the slice is shorter than `size`: + /// + /// ``` + /// let slice = ['f', 'o', 'o']; + /// let mut iter = slice.windows(4); + /// assert!(iter.next().is_none()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn windows(&self, size: usize) -> Windows { + assert!(size != 0); + Windows { v: self, size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a + /// time. The chunks are slices and do not overlap. If `chunk_size` does + /// not divide the length of the slice, then the last chunk will + /// not have length `chunk_size`. + /// + /// See [`exact_chunks`] for a variant of this iterator that returns chunks + /// of always exactly `chunk_size` elements. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let slice = ['l', 'o', 'r', 'e', 'm']; + /// let mut iter = slice.chunks(2); + /// assert_eq!(iter.next().unwrap(), &['l', 'o']); + /// assert_eq!(iter.next().unwrap(), &['r', 'e']); + /// assert_eq!(iter.next().unwrap(), &['m']); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// [`exact_chunks`]: #method.exact_chunks + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn chunks(&self, chunk_size: usize) -> Chunks { + assert!(chunk_size != 0); + Chunks { v: self, chunk_size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time. + /// The chunks are mutable slices, and do not overlap. If `chunk_size` does + /// not divide the length of the slice, then the last chunk will not + /// have length `chunk_size`. 
+ /// + /// See [`exact_chunks_mut`] for a variant of this iterator that returns chunks + /// of always exactly `chunk_size` elements. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let v = &mut [0, 0, 0, 0, 0]; + /// let mut count = 1; + /// + /// for chunk in v.chunks_mut(2) { + /// for elem in chunk.iter_mut() { + /// *elem += count; + /// } + /// count += 1; + /// } + /// assert_eq!(v, &[1, 1, 2, 2, 3]); + /// ``` + /// + /// [`exact_chunks_mut`]: #method.exact_chunks_mut + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut { + assert!(chunk_size != 0); + ChunksMut { v: self, chunk_size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a + /// time. The chunks are slices and do not overlap. If `chunk_size` does + /// not divide the length of the slice, then the last up to `chunk_size-1` + /// elements will be omitted and can be retrieved from the `remainder` + /// function of the iterator. + /// + /// Due to each chunk having exactly `chunk_size` elements, the compiler + /// can often optimize the resulting code better than in the case of + /// [`chunks`]. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(exact_chunks)] + /// + /// let slice = ['l', 'o', 'r', 'e', 'm']; + /// let mut iter = slice.exact_chunks(2); + /// assert_eq!(iter.next().unwrap(), &['l', 'o']); + /// assert_eq!(iter.next().unwrap(), &['r', 'e']); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// [`chunks`]: #method.chunks + #[unstable(feature = "exact_chunks", issue = "47115")] + #[inline] + pub fn exact_chunks(&self, chunk_size: usize) -> ExactChunks { + assert!(chunk_size != 0); + let rem = self.len() % chunk_size; + let len = self.len() - rem; + let (fst, snd) = self.split_at(len); + ExactChunks { v: fst, rem: snd, chunk_size } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time. + /// The chunks are mutable slices, and do not overlap. If `chunk_size` does + /// not divide the length of the slice, then the last up to `chunk_size-1` + /// elements will be omitted and can be retrieved from the `into_remainder` + /// function of the iterator. + /// + /// Due to each chunk having exactly `chunk_size` elements, the compiler + /// can often optimize the resulting code better than in the case of + /// [`chunks_mut`]. + /// + /// # Panics + /// + /// Panics if `chunk_size` is 0. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(exact_chunks)] + /// + /// let v = &mut [0, 0, 0, 0, 0]; + /// let mut count = 1; + /// + /// for chunk in v.exact_chunks_mut(2) { + /// for elem in chunk.iter_mut() { + /// *elem += count; + /// } + /// count += 1; + /// } + /// assert_eq!(v, &[1, 1, 2, 2, 0]); + /// ``` + /// + /// [`chunks_mut`]: #method.chunks_mut + #[unstable(feature = "exact_chunks", issue = "47115")] + #[inline] + pub fn exact_chunks_mut(&mut self, chunk_size: usize) -> ExactChunksMut { + assert!(chunk_size != 0); + let rem = self.len() % chunk_size; + let len = self.len() - rem; + let (fst, snd) = self.split_at_mut(len); + ExactChunksMut { v: fst, rem: snd, chunk_size } + } + + /// Divides one slice into two at an index. + /// + /// The first will contain all indices from `[0, mid)` (excluding + /// the index `mid` itself) and the second will contain all + /// indices from `[mid, len)` (excluding the index `len` itself). + /// + /// # Panics + /// + /// Panics if `mid > len`. + /// + /// # Examples + /// + /// ``` + /// let v = [1, 2, 3, 4, 5, 6]; + /// + /// { + /// let (left, right) = v.split_at(0); + /// assert!(left == []); + /// assert!(right == [1, 2, 3, 4, 5, 6]); + /// } + /// + /// { + /// let (left, right) = v.split_at(2); + /// assert!(left == [1, 2]); + /// assert!(right == [3, 4, 5, 6]); + /// } + /// + /// { + /// let (left, right) = v.split_at(6); + /// assert!(left == [1, 2, 3, 4, 5, 6]); + /// assert!(right == []); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn split_at(&self, mid: usize) -> (&[T], &[T]) { + (&self[..mid], &self[mid..]) + } + + /// Divides one mutable slice into two at an index. + /// + /// The first will contain all indices from `[0, mid)` (excluding + /// the index `mid` itself) and the second will contain all + /// indices from `[mid, len)` (excluding the index `len` itself). + /// + /// # Panics + /// + /// Panics if `mid > len`. 
+ /// + /// # Examples + /// + /// ``` + /// let mut v = [1, 0, 3, 0, 5, 6]; + /// // scoped to restrict the lifetime of the borrows + /// { + /// let (left, right) = v.split_at_mut(2); + /// assert!(left == [1, 0]); + /// assert!(right == [3, 0, 5, 6]); + /// left[1] = 2; + /// right[1] = 4; + /// } + /// assert!(v == [1, 2, 3, 4, 5, 6]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { + let len = self.len(); + let ptr = self.as_mut_ptr(); + + unsafe { + assert!(mid <= len); + + (from_raw_parts_mut(ptr, mid), + from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`. The matched element is not contained in the subslices. + /// + /// # Examples + /// + /// ``` + /// let slice = [10, 40, 33, 20]; + /// let mut iter = slice.split(|num| num % 3 == 0); + /// + /// assert_eq!(iter.next().unwrap(), &[10, 40]); + /// assert_eq!(iter.next().unwrap(), &[20]); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// If the first element is matched, an empty slice will be the first item + /// returned by the iterator. 
Similarly, if the last element in the slice + /// is matched, an empty slice will be the last item returned by the + /// iterator: + /// + /// ``` + /// let slice = [10, 40, 33]; + /// let mut iter = slice.split(|num| num % 3 == 0); + /// + /// assert_eq!(iter.next().unwrap(), &[10, 40]); + /// assert_eq!(iter.next().unwrap(), &[]); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// If two matched elements are directly adjacent, an empty slice will be + /// present between them: + /// + /// ``` + /// let slice = [10, 6, 33, 20]; + /// let mut iter = slice.split(|num| num % 3 == 0); + /// + /// assert_eq!(iter.next().unwrap(), &[10]); + /// assert_eq!(iter.next().unwrap(), &[]); + /// assert_eq!(iter.next().unwrap(), &[20]); + /// assert!(iter.next().is_none()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn split(&self, pred: F) -> Split + where F: FnMut(&T) -> bool { - index.get_unchecked_mut(self) + Split { + v: self, + pred, + finished: false + } } + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`. The matched element is not contained in the subslices. + /// + /// # Examples + /// + /// ``` + /// let mut v = [10, 40, 30, 20, 60, 50]; + /// + /// for group in v.split_mut(|num| *num % 3 == 0) { + /// group[0] = 1; + /// } + /// assert_eq!(v, [1, 40, 30, 1, 60, 1]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn as_mut_ptr(&mut self) -> *mut T { - self as *mut [T] as *mut T + pub fn split_mut(&mut self, pred: F) -> SplitMut + where F: FnMut(&T) -> bool + { + SplitMut { v: self, pred, finished: false } } + /// Returns an iterator over subslices separated by elements that match + /// `pred`, starting at the end of the slice and working backwards. + /// The matched element is not contained in the subslices. 
+ /// + /// # Examples + /// + /// ``` + /// let slice = [11, 22, 33, 0, 44, 55]; + /// let mut iter = slice.rsplit(|num| *num == 0); + /// + /// assert_eq!(iter.next().unwrap(), &[44, 55]); + /// assert_eq!(iter.next().unwrap(), &[11, 22, 33]); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// As with `split()`, if the first or last element is matched, an empty + /// slice will be the first (or last) item returned by the iterator. + /// + /// ``` + /// let v = &[0, 1, 1, 2, 3, 5, 8]; + /// let mut it = v.rsplit(|n| *n % 2 == 0); + /// assert_eq!(it.next().unwrap(), &[]); + /// assert_eq!(it.next().unwrap(), &[3, 5]); + /// assert_eq!(it.next().unwrap(), &[1, 1]); + /// assert_eq!(it.next().unwrap(), &[]); + /// assert_eq!(it.next(), None); + /// ``` + #[stable(feature = "slice_rsplit", since = "1.27.0")] #[inline] - fn contains(&self, x: &T) -> bool where T: PartialEq { + pub fn rsplit(&self, pred: F) -> RSplit + where F: FnMut(&T) -> bool + { + RSplit { inner: self.split(pred) } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`, starting at the end of the slice and working + /// backwards. The matched element is not contained in the subslices. + /// + /// # Examples + /// + /// ``` + /// let mut v = [100, 400, 300, 200, 600, 500]; + /// + /// let mut count = 0; + /// for group in v.rsplit_mut(|num| *num % 3 == 0) { + /// count += 1; + /// group[0] = count; + /// } + /// assert_eq!(v, [3, 400, 300, 2, 600, 1]); + /// ``` + /// + #[stable(feature = "slice_rsplit", since = "1.27.0")] + #[inline] + pub fn rsplit_mut(&mut self, pred: F) -> RSplitMut + where F: FnMut(&T) -> bool + { + RSplitMut { inner: self.split_mut(pred) } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`, limited to returning at most `n` items. The matched element is + /// not contained in the subslices. + /// + /// The last element returned, if any, will contain the remainder of the + /// slice. 
+ /// + /// # Examples + /// + /// Print the slice split once by numbers divisible by 3 (i.e. `[10, 40]`, + /// `[20, 60, 50]`): + /// + /// ``` + /// let v = [10, 40, 30, 20, 60, 50]; + /// + /// for group in v.splitn(2, |num| *num % 3 == 0) { + /// println!("{:?}", group); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn splitn(&self, n: usize, pred: F) -> SplitN + where F: FnMut(&T) -> bool + { + SplitN { + inner: GenericSplitN { + iter: self.split(pred), + count: n + } + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`, limited to returning at most `n` items. The matched element is + /// not contained in the subslices. + /// + /// The last element returned, if any, will contain the remainder of the + /// slice. + /// + /// # Examples + /// + /// ``` + /// let mut v = [10, 40, 30, 20, 60, 50]; + /// + /// for group in v.splitn_mut(2, |num| *num % 3 == 0) { + /// group[0] = 1; + /// } + /// assert_eq!(v, [1, 40, 30, 1, 60, 50]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn splitn_mut(&mut self, n: usize, pred: F) -> SplitNMut + where F: FnMut(&T) -> bool + { + SplitNMut { + inner: GenericSplitN { + iter: self.split_mut(pred), + count: n + } + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred` limited to returning at most `n` items. This starts at the end of + /// the slice and works backwards. The matched element is not contained in + /// the subslices. + /// + /// The last element returned, if any, will contain the remainder of the + /// slice. + /// + /// # Examples + /// + /// Print the slice split once, starting from the end, by numbers divisible + /// by 3 (i.e. 
`[50]`, `[10, 40, 30, 20]`): + /// + /// ``` + /// let v = [10, 40, 30, 20, 60, 50]; + /// + /// for group in v.rsplitn(2, |num| *num % 3 == 0) { + /// println!("{:?}", group); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rsplitn(&self, n: usize, pred: F) -> RSplitN + where F: FnMut(&T) -> bool + { + RSplitN { + inner: GenericSplitN { + iter: self.rsplit(pred), + count: n + } + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred` limited to returning at most `n` items. This starts at the end of + /// the slice and works backwards. The matched element is not contained in + /// the subslices. + /// + /// The last element returned, if any, will contain the remainder of the + /// slice. + /// + /// # Examples + /// + /// ``` + /// let mut s = [10, 40, 30, 20, 60, 50]; + /// + /// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) { + /// group[0] = 1; + /// } + /// assert_eq!(s, [1, 40, 30, 20, 60, 1]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rsplitn_mut(&mut self, n: usize, pred: F) -> RSplitNMut + where F: FnMut(&T) -> bool + { + RSplitNMut { + inner: GenericSplitN { + iter: self.rsplit_mut(pred), + count: n + } + } + } + + /// Returns `true` if the slice contains an element with the given value. + /// + /// # Examples + /// + /// ``` + /// let v = [10, 40, 30]; + /// assert!(v.contains(&30)); + /// assert!(!v.contains(&50)); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn contains(&self, x: &T) -> bool + where T: PartialEq + { x.slice_contains(self) } - #[inline] - fn starts_with(&self, needle: &[T]) -> bool where T: PartialEq { + /// Returns `true` if `needle` is a prefix of the slice. 
+ /// + /// # Examples + /// + /// ``` + /// let v = [10, 40, 30]; + /// assert!(v.starts_with(&[10])); + /// assert!(v.starts_with(&[10, 40])); + /// assert!(!v.starts_with(&[50])); + /// assert!(!v.starts_with(&[10, 50])); + /// ``` + /// + /// Always returns `true` if `needle` is an empty slice: + /// + /// ``` + /// let v = &[10, 40, 30]; + /// assert!(v.starts_with(&[])); + /// let v: &[u8] = &[]; + /// assert!(v.starts_with(&[])); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn starts_with(&self, needle: &[T]) -> bool + where T: PartialEq + { let n = needle.len(); self.len() >= n && needle == &self[..n] } - #[inline] - fn ends_with(&self, needle: &[T]) -> bool where T: PartialEq { + /// Returns `true` if `needle` is a suffix of the slice. + /// + /// # Examples + /// + /// ``` + /// let v = [10, 40, 30]; + /// assert!(v.ends_with(&[30])); + /// assert!(v.ends_with(&[40, 30])); + /// assert!(!v.ends_with(&[50])); + /// assert!(!v.ends_with(&[50, 30])); + /// ``` + /// + /// Always returns `true` if `needle` is an empty slice: + /// + /// ``` + /// let v = &[10, 40, 30]; + /// assert!(v.ends_with(&[])); + /// let v: &[u8] = &[]; + /// assert!(v.ends_with(&[])); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn ends_with(&self, needle: &[T]) -> bool + where T: PartialEq + { let (m, n) = (self.len(), needle.len()); m >= n && needle == &self[m-n..] } - fn binary_search(&self, x: &T) -> Result + /// Binary searches this sorted slice for a given element. + /// + /// If the value is found then `Ok` is returned, containing the + /// index of the matching element; if the value is not found then + /// `Err` is returned, containing the index where a matching + /// element could be inserted while maintaining sorted order. + /// + /// # Examples + /// + /// Looks up a series of four elements. 
The first is found, with a + /// uniquely determined position; the second and third are not + /// found; the fourth could match any position in `[1, 4]`. + /// + /// ``` + /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; + /// + /// assert_eq!(s.binary_search(&13), Ok(9)); + /// assert_eq!(s.binary_search(&4), Err(7)); + /// assert_eq!(s.binary_search(&100), Err(13)); + /// let r = s.binary_search(&1); + /// assert!(match r { Ok(1..=4) => true, _ => false, }); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn binary_search(&self, x: &T) -> Result where T: Ord { self.binary_search_by(|p| p.cmp(x)) } - fn rotate_left(&mut self, mid: usize) { + /// Binary searches this sorted slice with a comparator function. + /// + /// The comparator function should implement an order consistent + /// with the sort order of the underlying slice, returning an + /// order code that indicates whether its argument is `Less`, + /// `Equal` or `Greater` the desired target. + /// + /// If a matching value is found then returns `Ok`, containing + /// the index for the matched element; if no match is found then + /// `Err` is returned, containing the index where a matching + /// element could be inserted while maintaining sorted order. + /// + /// # Examples + /// + /// Looks up a series of four elements. The first is found, with a + /// uniquely determined position; the second and third are not + /// found; the fourth could match any position in `[1, 4]`. 
+ /// + /// ``` + /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; + /// + /// let seek = 13; + /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Ok(9)); + /// let seek = 4; + /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(7)); + /// let seek = 100; + /// assert_eq!(s.binary_search_by(|probe| probe.cmp(&seek)), Err(13)); + /// let seek = 1; + /// let r = s.binary_search_by(|probe| probe.cmp(&seek)); + /// assert!(match r { Ok(1..=4) => true, _ => false, }); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result + where F: FnMut(&'a T) -> Ordering + { + let s = self; + let mut size = s.len(); + if size == 0 { + return Err(0); + } + let mut base = 0usize; + while size > 1 { + let half = size / 2; + let mid = base + half; + // mid is always in [0, size), that means mid is >= 0 and < size. + // mid >= 0: by definition + // mid < size: mid = size / 2 + size / 4 + size / 8 ... + let cmp = f(unsafe { s.get_unchecked(mid) }); + base = if cmp == Greater { base } else { mid }; + size -= half; + } + // base is always in [0, size) because base <= mid. + let cmp = f(unsafe { s.get_unchecked(base) }); + if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) } + + } + + /// Binary searches this sorted slice with a key extraction function. + /// + /// Assumes that the slice is sorted by the key, for instance with + /// [`sort_by_key`] using the same key extraction function. + /// + /// If a matching value is found then returns `Ok`, containing the + /// index for the matched element; if no match is found then `Err` + /// is returned, containing the index where a matching element could + /// be inserted while maintaining sorted order. + /// + /// [`sort_by_key`]: #method.sort_by_key + /// + /// # Examples + /// + /// Looks up a series of four elements in a slice of pairs sorted by + /// their second elements. 
The first is found, with a uniquely + /// determined position; the second and third are not found; the + /// fourth could match any position in `[1, 4]`. + /// + /// ``` + /// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1), + /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13), + /// (1, 21), (2, 34), (4, 55)]; + /// + /// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9)); + /// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7)); + /// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13)); + /// let r = s.binary_search_by_key(&1, |&(a,b)| b); + /// assert!(match r { Ok(1..=4) => true, _ => false, }); + /// ``` + #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] + #[inline] + pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result + where F: FnMut(&'a T) -> B, + B: Ord + { + self.binary_search_by(|k| f(k).cmp(b)) + } + + /// Sorts the slice, but may not preserve the order of equal elements. + /// + /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), + /// and `O(n log n)` worst-case. + /// + /// # Current implementation + /// + /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, + /// which combines the fast average case of randomized quicksort with the fast worst case of + /// heapsort, while achieving linear time on slices with certain patterns. It uses some + /// randomization to avoid degenerate cases, but with a fixed seed to always provide + /// deterministic behavior. + /// + /// It is typically faster than stable sorting, except in a few special cases, e.g. when the + /// slice consists of several concatenated sorted sequences. 
+ /// + /// # Examples + /// + /// ``` + /// let mut v = [-5, 4, 1, -3, 2]; + /// + /// v.sort_unstable(); + /// assert!(v == [-5, -3, 1, 2, 4]); + /// ``` + /// + /// [pdqsort]: https://github.com/orlp/pdqsort + #[stable(feature = "sort_unstable", since = "1.20.0")] + #[inline] + pub fn sort_unstable(&mut self) + where T: Ord + { + sort::quicksort(self, |a, b| a.lt(b)); + } + + /// Sorts the slice with a comparator function, but may not preserve the order of equal + /// elements. + /// + /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. does not allocate), + /// and `O(n log n)` worst-case. + /// + /// # Current implementation + /// + /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, + /// which combines the fast average case of randomized quicksort with the fast worst case of + /// heapsort, while achieving linear time on slices with certain patterns. It uses some + /// randomization to avoid degenerate cases, but with a fixed seed to always provide + /// deterministic behavior. + /// + /// It is typically faster than stable sorting, except in a few special cases, e.g. when the + /// slice consists of several concatenated sorted sequences. + /// + /// # Examples + /// + /// ``` + /// let mut v = [5, 4, 1, 3, 2]; + /// v.sort_unstable_by(|a, b| a.cmp(b)); + /// assert!(v == [1, 2, 3, 4, 5]); + /// + /// // reverse sorting + /// v.sort_unstable_by(|a, b| b.cmp(a)); + /// assert!(v == [5, 4, 3, 2, 1]); + /// ``` + /// + /// [pdqsort]: https://github.com/orlp/pdqsort + #[stable(feature = "sort_unstable", since = "1.20.0")] + #[inline] + pub fn sort_unstable_by(&mut self, mut compare: F) + where F: FnMut(&T, &T) -> Ordering + { + sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less); + } + + /// Sorts the slice with a key extraction function, but may not preserve the order of equal + /// elements. + /// + /// This sort is unstable (i.e. may reorder equal elements), in-place (i.e. 
does not allocate), + /// and `O(m n log(m n))` worst-case, where the key function is `O(m)`. + /// + /// # Current implementation + /// + /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters, + /// which combines the fast average case of randomized quicksort with the fast worst case of + /// heapsort, while achieving linear time on slices with certain patterns. It uses some + /// randomization to avoid degenerate cases, but with a fixed seed to always provide + /// deterministic behavior. + /// + /// # Examples + /// + /// ``` + /// let mut v = [-5i32, 4, 1, -3, 2]; + /// + /// v.sort_unstable_by_key(|k| k.abs()); + /// assert!(v == [1, 2, -3, 4, -5]); + /// ``` + /// + /// [pdqsort]: https://github.com/orlp/pdqsort + #[stable(feature = "sort_unstable", since = "1.20.0")] + #[inline] + pub fn sort_unstable_by_key(&mut self, mut f: F) + where F: FnMut(&T) -> K, K: Ord + { + sort::quicksort(self, |a, b| f(a).lt(&f(b))); + } + + /// Rotates the slice in-place such that the first `mid` elements of the + /// slice move to the end while the last `self.len() - mid` elements move to + /// the front. After calling `rotate_left`, the element previously at index + /// `mid` will become the first element in the slice. + /// + /// # Panics + /// + /// This function will panic if `mid` is greater than the length of the + /// slice. Note that `mid == self.len()` does _not_ panic and is a no-op + /// rotation. + /// + /// # Complexity + /// + /// Takes linear (in `self.len()`) time. 
+ /// + /// # Examples + /// + /// ``` + /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; + /// a.rotate_left(2); + /// assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']); + /// ``` + /// + /// Rotating a subslice: + /// + /// ``` + /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; + /// a[1..5].rotate_left(1); + /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']); + /// ``` + #[stable(feature = "slice_rotate", since = "1.26.0")] + pub fn rotate_left(&mut self, mid: usize) { assert!(mid <= self.len()); let k = self.len() - mid; @@ -680,7 +1448,38 @@ impl SliceExt for [T] { } } - fn rotate_right(&mut self, k: usize) { + /// Rotates the slice in-place such that the first `self.len() - k` + /// elements of the slice move to the end while the last `k` elements move + /// to the front. After calling `rotate_right`, the element previously at + /// index `self.len() - k` will become the first element in the slice. + /// + /// # Panics + /// + /// This function will panic if `k` is greater than the length of the + /// slice. Note that `k == self.len()` does _not_ panic and is a no-op + /// rotation. + /// + /// # Complexity + /// + /// Takes linear (in `self.len()`) time. + /// + /// # Examples + /// + /// ``` + /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; + /// a.rotate_right(2); + /// assert_eq!(a, ['e', 'f', 'a', 'b', 'c', 'd']); + /// ``` + /// + /// Rotate a subslice: + /// + /// ``` + /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; + /// a[1..5].rotate_right(1); + /// assert_eq!(a, ['a', 'e', 'b', 'c', 'd', 'f']); + /// ``` + #[stable(feature = "slice_rotate", since = "1.26.0")] + pub fn rotate_right(&mut self, k: usize) { assert!(k <= self.len()); let mid = self.len() - k; @@ -690,8 +1489,63 @@ impl SliceExt for [T] { } } - #[inline] - fn clone_from_slice(&mut self, src: &[T]) where T: Clone { + /// Copies the elements from `src` into `self`. + /// + /// The length of `src` must be the same as `self`. 
+ /// + /// If `src` implements `Copy`, it can be more performant to use + /// [`copy_from_slice`]. + /// + /// # Panics + /// + /// This function will panic if the two slices have different lengths. + /// + /// # Examples + /// + /// Cloning two elements from a slice into another: + /// + /// ``` + /// let src = [1, 2, 3, 4]; + /// let mut dst = [0, 0]; + /// + /// // Because the slices have to be the same length, + /// // we slice the source slice from four elements + /// // to two. It will panic if we don't do this. + /// dst.clone_from_slice(&src[2..]); + /// + /// assert_eq!(src, [1, 2, 3, 4]); + /// assert_eq!(dst, [3, 4]); + /// ``` + /// + /// Rust enforces that there can only be one mutable reference with no + /// immutable references to a particular piece of data in a particular + /// scope. Because of this, attempting to use `clone_from_slice` on a + /// single slice will result in a compile failure: + /// + /// ```compile_fail + /// let mut slice = [1, 2, 3, 4, 5]; + /// + /// slice[..2].clone_from_slice(&slice[3..]); // compile fail! 
+ /// ``` + /// + /// To work around this, we can use [`split_at_mut`] to create two distinct + /// sub-slices from a slice: + /// + /// ``` + /// let mut slice = [1, 2, 3, 4, 5]; + /// + /// { + /// let (left, right) = slice.split_at_mut(2); + /// left.clone_from_slice(&right[1..]); + /// } + /// + /// assert_eq!(slice, [4, 5, 3, 4, 5]); + /// ``` + /// + /// [`copy_from_slice`]: #method.copy_from_slice + /// [`split_at_mut`]: #method.split_at_mut + #[stable(feature = "clone_from_slice", since = "1.7.0")] + pub fn clone_from_slice(&mut self, src: &[T]) where T: Clone { assert!(self.len() == src.len(), "destination and source slices have different lengths"); // NOTE: We need to explicitly slice them to the same length @@ -702,57 +1556,355 @@ impl SliceExt for [T] { for i in 0..len { self[i].clone_from(&src[i]); } + } - #[inline] - fn copy_from_slice(&mut self, src: &[T]) where T: Copy { - assert!(self.len() == src.len(), - "destination and source slices have different lengths"); + /// Copies all elements from `src` into `self`, using a memcpy. + /// + /// The length of `src` must be the same as `self`. + /// + /// If `src` does not implement `Copy`, use [`clone_from_slice`]. + /// + /// # Panics + /// + /// This function will panic if the two slices have different lengths. + /// + /// # Examples + /// + /// Copying two elements from a slice into another: + /// + /// ``` + /// let src = [1, 2, 3, 4]; + /// let mut dst = [0, 0]; + /// + /// // Because the slices have to be the same length, + /// // we slice the source slice from four elements + /// // to two. It will panic if we don't do this. + /// dst.copy_from_slice(&src[2..]); + /// + /// assert_eq!(src, [1, 2, 3, 4]); + /// assert_eq!(dst, [3, 4]); + /// ``` + /// + /// Rust enforces that there can only be one mutable reference with no + /// immutable references to a particular piece of data in a particular + /// scope. 
Because of this, attempting to use `copy_from_slice` on a + /// single slice will result in a compile failure: + /// + /// ```compile_fail + /// let mut slice = [1, 2, 3, 4, 5]; + /// + /// slice[..2].copy_from_slice(&slice[3..]); // compile fail! + /// ``` + /// + /// To work around this, we can use [`split_at_mut`] to create two distinct + /// sub-slices from a slice: + /// + /// ``` + /// let mut slice = [1, 2, 3, 4, 5]; + /// + /// { + /// let (left, right) = slice.split_at_mut(2); + /// left.copy_from_slice(&right[1..]); + /// } + /// + /// assert_eq!(slice, [4, 5, 3, 4, 5]); + /// ``` + /// + /// [`clone_from_slice`]: #method.clone_from_slice + /// [`split_at_mut`]: #method.split_at_mut + #[stable(feature = "copy_from_slice", since = "1.9.0")] + pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy { + assert_eq!(self.len(), src.len(), + "destination and source slices have different lengths"); unsafe { ptr::copy_nonoverlapping( src.as_ptr(), self.as_mut_ptr(), self.len()); } } - #[inline] - fn swap_with_slice(&mut self, src: &mut [T]) { - assert!(self.len() == src.len(), + /// Swaps all elements in `self` with those in `other`. + /// + /// The length of `other` must be the same as `self`. + /// + /// # Panics + /// + /// This function will panic if the two slices have different lengths. + /// + /// # Example + /// + /// Swapping two elements across slices: + /// + /// ``` + /// let mut slice1 = [0, 0]; + /// let mut slice2 = [1, 2, 3, 4]; + /// + /// slice1.swap_with_slice(&mut slice2[2..]); + /// + /// assert_eq!(slice1, [3, 4]); + /// assert_eq!(slice2, [1, 2, 0, 0]); + /// ``` + /// + /// Rust enforces that there can only be one mutable reference to a + /// particular piece of data in a particular scope. Because of this, + /// attempting to use `swap_with_slice` on a single slice will result in + /// a compile failure: + /// + /// ```compile_fail + /// let mut slice = [1, 2, 3, 4, 5]; + /// slice[..2].swap_with_slice(&mut slice[3..]); // compile fail! 
+ /// ``` + /// + /// To work around this, we can use [`split_at_mut`] to create two distinct + /// mutable sub-slices from a slice: + /// + /// ``` + /// let mut slice = [1, 2, 3, 4, 5]; + /// + /// { + /// let (left, right) = slice.split_at_mut(2); + /// left.swap_with_slice(&mut right[1..]); + /// } + /// + /// assert_eq!(slice, [4, 5, 3, 1, 2]); + /// ``` + /// + /// [`split_at_mut`]: #method.split_at_mut + #[stable(feature = "swap_with_slice", since = "1.27.0")] + pub fn swap_with_slice(&mut self, other: &mut [T]) { + assert!(self.len() == other.len(), "destination and source slices have different lengths"); unsafe { ptr::swap_nonoverlapping( - self.as_mut_ptr(), src.as_mut_ptr(), self.len()); + self.as_mut_ptr(), other.as_mut_ptr(), self.len()); } } - #[inline] - fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result - where F: FnMut(&'a Self::Item) -> B, - B: Ord - { - self.binary_search_by(|k| f(k).cmp(b)) + /// Function to calculate lengths of the middle and trailing slice for `align_to{,_mut}`. + fn align_to_offsets(&self) -> (usize, usize) { + // What we gonna do about `rest` is figure out what multiple of `U`s we can put in a + // lowest number of `T`s. And how many `T`s we need for each such "multiple". + // + // Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider + // for example a case where size_of:: = 16, size_of:: = 24. We can put 2 Us in + // place of every 3 Ts in the `rest` slice. A bit more complicated. + // + // Formula to calculate this is: + // + // Us = lcm(size_of::, size_of::) / size_of:: + // Ts = lcm(size_of::, size_of::) / size_of:: + // + // Expanded and simplified: + // + // Us = size_of:: / gcd(size_of::, size_of::) + // Ts = size_of:: / gcd(size_of::, size_of::) + // + // Luckily since all this is constant-evaluated... performance here matters not!
+ #[inline] + fn gcd(a: usize, b: usize) -> usize { + // iterative stein’s algorithm + // We should still make this `const fn` (and revert to recursive algorithm if we do) + // because relying on llvm to consteval all this is… well, it makes me uncomfortable. + let (ctz_a, mut ctz_b) = unsafe { + if a == 0 { return b; } + if b == 0 { return a; } + (::intrinsics::cttz_nonzero(a), ::intrinsics::cttz_nonzero(b)) + }; + let k = ctz_a.min(ctz_b); + let mut a = a >> ctz_a; + let mut b = b; + loop { + // remove all factors of 2 from b + b >>= ctz_b; + if a > b { + ::mem::swap(&mut a, &mut b); + } + b = b - a; + unsafe { + if b == 0 { + break; + } + ctz_b = ::intrinsics::cttz_nonzero(b); + } + } + a << k + } + let gcd: usize = gcd(::mem::size_of::(), ::mem::size_of::()); + let ts: usize = ::mem::size_of::() / gcd; + let us: usize = ::mem::size_of::() / gcd; + + // Armed with this knowledge, we can find how many `U`s we can fit! + let us_len = self.len() / ts * us; + // And how many `T`s will be in the trailing slice! + let ts_len = self.len() % ts; + (us_len, ts_len) } - #[inline] - fn sort_unstable(&mut self) - where Self::Item: Ord - { - sort::quicksort(self, |a, b| a.lt(b)); + /// Transmute the slice to a slice of another type, ensuring alignment of the types is + /// maintained. + /// + /// This method splits the slice into three distinct slices: prefix, correctly aligned middle + /// slice of a new type, and the suffix slice. The middle slice will have the greatest length + /// possible for a given type and input slice. + /// + /// This method has no purpose when either input element `T` or output element `U` are + /// zero-sized and will return the original slice without splitting anything. + /// + /// # Unsafety + /// + /// This method is essentially a `transmute` with respect to the elements in the returned + /// middle slice, so all the usual caveats pertaining to `transmute::` also apply here.
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # #![feature(slice_align_to)] + /// unsafe { + /// let bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7]; + /// let (prefix, shorts, suffix) = bytes.align_to::(); + /// // less_efficient_algorithm_for_bytes(prefix); + /// // more_efficient_algorithm_for_aligned_shorts(shorts); + /// // less_efficient_algorithm_for_bytes(suffix); + /// } + /// ``` + #[unstable(feature = "slice_align_to", issue = "44488")] + pub unsafe fn align_to(&self) -> (&[T], &[U], &[T]) { + // Note that most of this function will be constant-evaluated, + if ::mem::size_of::() == 0 || ::mem::size_of::() == 0 { + // handle ZSTs specially, which is – don't handle them at all. + return (self, &[], &[]); + } + + // First, find at what point do we split between the first and 2nd slice. Easy with + // ptr.align_offset. + let ptr = self.as_ptr(); + let offset = ::ptr::align_offset(ptr, ::mem::align_of::()); + if offset > self.len() { + (self, &[], &[]) + } else { + let (left, rest) = self.split_at(offset); + // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay + let (us_len, ts_len) = rest.align_to_offsets::(); + (left, + from_raw_parts(rest.as_ptr() as *const U, us_len), + from_raw_parts(rest.as_ptr().offset((rest.len() - ts_len) as isize), ts_len)) + } } + /// Transmute the slice to a slice of another type, ensuring alignment of the types is + /// maintained. + /// + /// This method splits the slice into three distinct slices: prefix, correctly aligned middle + /// slice of a new type, and the suffix slice. The middle slice will have the greatest length + /// possible for a given type and input slice. + /// + /// This method has no purpose when either input element `T` or output element `U` are + /// zero-sized and will return the original slice without splitting anything.
+ /// + /// # Unsafety + /// + /// This method is essentially a `transmute` with respect to the elements in the returned + /// middle slice, so all the usual caveats pertaining to `transmute::` also apply here. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # #![feature(slice_align_to)] + /// unsafe { + /// let mut bytes: [u8; 7] = [1, 2, 3, 4, 5, 6, 7]; + /// let (prefix, shorts, suffix) = bytes.align_to_mut::(); + /// // less_efficient_algorithm_for_bytes(prefix); + /// // more_efficient_algorithm_for_aligned_shorts(shorts); + /// // less_efficient_algorithm_for_bytes(suffix); + /// } + /// ``` + #[unstable(feature = "slice_align_to", issue = "44488")] + pub unsafe fn align_to_mut(&mut self) -> (&mut [T], &mut [U], &mut [T]) { + // Note that most of this function will be constant-evaluated, + if ::mem::size_of::() == 0 || ::mem::size_of::() == 0 { + // handle ZSTs specially, which is – don't handle them at all. + return (self, &mut [], &mut []); + } + + // First, find at what point do we split between the first and 2nd slice. Easy with + // ptr.align_offset. + let ptr = self.as_ptr(); + let offset = ::ptr::align_offset(ptr, ::mem::align_of::()); + if offset > self.len() { + (self, &mut [], &mut []) + } else { + let (left, rest) = self.split_at_mut(offset); + // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay + let (us_len, ts_len) = rest.align_to_offsets::(); + let mut_ptr = rest.as_mut_ptr(); + (left, + from_raw_parts_mut(mut_ptr as *mut U, us_len), + from_raw_parts_mut(mut_ptr.offset((rest.len() - ts_len) as isize), ts_len)) + } + } +} + +#[lang = "slice_u8"] +#[cfg(not(test))] +impl [u8] { + /// Checks if all bytes in this slice are within the ASCII range. 
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] - fn sort_unstable_by(&mut self, mut compare: F) - where F: FnMut(&Self::Item, &Self::Item) -> Ordering - { - sort::quicksort(self, |a, b| compare(a, b) == Ordering::Less); + pub fn is_ascii(&self) -> bool { + self.iter().all(|b| b.is_ascii()) } + /// Checks that two slices are an ASCII case-insensitive match. + /// + /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`, + /// but without allocating and copying temporaries. + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] - fn sort_unstable_by_key(&mut self, mut f: F) - where F: FnMut(&Self::Item) -> B, - B: Ord - { - sort::quicksort(self, |a, b| f(a).lt(&f(b))); + pub fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool { + self.len() == other.len() && + self.iter().zip(other).all(|(a, b)| { + a.eq_ignore_ascii_case(b) + }) } + + /// Converts this slice to its ASCII upper case equivalent in-place. + /// + /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', + /// but non-ASCII letters are unchanged. + /// + /// To return a new uppercased value without modifying the existing one, use + /// [`to_ascii_uppercase`]. + /// + /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn make_ascii_uppercase(&mut self) { + for byte in self { + byte.make_ascii_uppercase(); + } + } + + /// Converts this slice to its ASCII lower case equivalent in-place. + /// + /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', + /// but non-ASCII letters are unchanged. + /// + /// To return a new lowercased value without modifying the existing one, use + /// [`to_ascii_lowercase`]. 
+ /// + /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + #[inline] + pub fn make_ascii_lowercase(&mut self) { + for byte in self { + byte.make_ascii_lowercase(); + } + } + } #[stable(feature = "rust1", since = "1.0.0")] @@ -791,39 +1943,73 @@ fn slice_index_order_fail(index: usize, end: usize) -> ! { panic!("slice index starts at {} but ends at {}", index, end); } +#[inline(never)] +#[cold] +fn slice_index_overflow_fail() -> ! { + panic!("attempted to index slice up to maximum usize"); +} + +mod private_slice_index { + use super::ops; + #[stable(feature = "slice_get_slice", since = "1.28.0")] + pub trait Sealed {} + + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for usize {} + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for ops::Range {} + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for ops::RangeTo {} + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for ops::RangeFrom {} + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for ops::RangeFull {} + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for ops::RangeInclusive {} + #[stable(feature = "slice_get_slice", since = "1.28.0")] + impl Sealed for ops::RangeToInclusive {} +} + /// A helper trait used for indexing operations. -#[unstable(feature = "slice_get_slice", issue = "35729")] +#[stable(feature = "slice_get_slice", since = "1.28.0")] #[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] -pub trait SliceIndex { +pub trait SliceIndex: private_slice_index::Sealed { /// The output type returned by methods. + #[stable(feature = "slice_get_slice", since = "1.28.0")] type Output: ?Sized; /// Returns a shared reference to the output at this location, if in /// bounds. 
+ #[unstable(feature = "slice_index_methods", issue = "0")] fn get(self, slice: &T) -> Option<&Self::Output>; /// Returns a mutable reference to the output at this location, if in /// bounds. + #[unstable(feature = "slice_index_methods", issue = "0")] fn get_mut(self, slice: &mut T) -> Option<&mut Self::Output>; /// Returns a shared reference to the output at this location, without /// performing any bounds checking. + #[unstable(feature = "slice_index_methods", issue = "0")] unsafe fn get_unchecked(self, slice: &T) -> &Self::Output; /// Returns a mutable reference to the output at this location, without /// performing any bounds checking. + #[unstable(feature = "slice_index_methods", issue = "0")] unsafe fn get_unchecked_mut(self, slice: &mut T) -> &mut Self::Output; /// Returns a shared reference to the output at this location, panicking /// if out of bounds. + #[unstable(feature = "slice_index_methods", issue = "0")] fn index(self, slice: &T) -> &Self::Output; /// Returns a mutable reference to the output at this location, panicking /// if out of bounds. 
+ #[unstable(feature = "slice_index_methods", issue = "0")] fn index_mut(self, slice: &mut T) -> &mut Self::Output; } -#[stable(feature = "slice-get-slice-impls", since = "1.15.0")] +#[stable(feature = "slice_get_slice_impls", since = "1.15.0")] impl SliceIndex<[T]> for usize { type Output = T; @@ -872,7 +2058,7 @@ impl SliceIndex<[T]> for usize { } } -#[stable(feature = "slice-get-slice-impls", since = "1.15.0")] +#[stable(feature = "slice_get_slice_impls", since = "1.15.0")] impl SliceIndex<[T]> for ops::Range { type Output = [T]; @@ -933,7 +2119,7 @@ impl SliceIndex<[T]> for ops::Range { } } -#[stable(feature = "slice-get-slice-impls", since = "1.15.0")] +#[stable(feature = "slice_get_slice_impls", since = "1.15.0")] impl SliceIndex<[T]> for ops::RangeTo { type Output = [T]; @@ -968,7 +2154,7 @@ impl SliceIndex<[T]> for ops::RangeTo { } } -#[stable(feature = "slice-get-slice-impls", since = "1.15.0")] +#[stable(feature = "slice_get_slice_impls", since = "1.15.0")] impl SliceIndex<[T]> for ops::RangeFrom { type Output = [T]; @@ -1003,7 +2189,7 @@ impl SliceIndex<[T]> for ops::RangeFrom { } } -#[stable(feature = "slice-get-slice-impls", since = "1.15.0")] +#[stable(feature = "slice_get_slice_impls", since = "1.15.0")] impl SliceIndex<[T]> for ops::RangeFull { type Output = [T]; @@ -1039,48 +2225,46 @@ impl SliceIndex<[T]> for ops::RangeFull { } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl SliceIndex<[T]> for ops::RangeInclusive { type Output = [T]; #[inline] fn get(self, slice: &[T]) -> Option<&[T]> { - if self.end == usize::max_value() { None } - else { (self.start..self.end + 1).get(slice) } + if *self.end() == usize::max_value() { None } + else { (*self.start()..self.end() + 1).get(slice) } } #[inline] fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { - if self.end == usize::max_value() { None } - else { (self.start..self.end + 
1).get_mut(slice) } + if *self.end() == usize::max_value() { None } + else { (*self.start()..self.end() + 1).get_mut(slice) } } #[inline] unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { - (self.start..self.end + 1).get_unchecked(slice) + (*self.start()..self.end() + 1).get_unchecked(slice) } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { - (self.start..self.end + 1).get_unchecked_mut(slice) + (*self.start()..self.end() + 1).get_unchecked_mut(slice) } #[inline] fn index(self, slice: &[T]) -> &[T] { - assert!(self.end != usize::max_value(), - "attempted to index slice up to maximum usize"); - (self.start..self.end + 1).index(slice) + if *self.end() == usize::max_value() { slice_index_overflow_fail(); } + (*self.start()..self.end() + 1).index(slice) } #[inline] fn index_mut(self, slice: &mut [T]) -> &mut [T] { - assert!(self.end != usize::max_value(), - "attempted to index slice up to maximum usize"); - (self.start..self.end + 1).index_mut(slice) + if *self.end() == usize::max_value() { slice_index_overflow_fail(); } + (*self.start()..self.end() + 1).index_mut(slice) } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl SliceIndex<[T]> for ops::RangeToInclusive { type Output = [T]; @@ -1155,14 +2339,88 @@ impl<'a, T> IntoIterator for &'a mut [T] { } } -#[inline] +// Macro helper functions +#[inline(always)] fn size_from_ptr(_: *const T) -> usize { mem::size_of::() } +// Inlining is_empty and len makes a huge performance difference +macro_rules! is_empty { + // The way we encode the length of a ZST iterator, this works both for ZST + // and non-ZST. + ($self: ident) => {$self.ptr == $self.end} +} +// To get rid of some bounds checks (see `position`), we compute the length in a somewhat +// unexpected way. (Tested by `codegen/slice-position-bounds-check`.) +macro_rules! 
len { + ($self: ident) => {{ + let start = $self.ptr; + let diff = ($self.end as usize).wrapping_sub(start as usize); + let size = size_from_ptr(start); + if size == 0 { + diff + } else { + // Using division instead of `offset_from` helps LLVM remove bounds checks + diff / size + } + }} +} + // The shared definition of the `Iter` and `IterMut` iterators macro_rules! iterator { - (struct $name:ident -> $ptr:ty, $elem:ty, $mkref:ident) => { + (struct $name:ident -> $ptr:ty, $elem:ty, $raw_mut:tt, $( $mut_:tt )*) => { + impl<'a, T> $name<'a, T> { + // Helper function for creating a slice from the iterator. + #[inline(always)] + fn make_slice(&self) -> &'a [T] { + unsafe { from_raw_parts(self.ptr, len!(self)) } + } + + // Helper function for moving the start of the iterator forwards by `offset` elements, + // returning the old start. + // Unsafe because the offset must be in-bounds or one-past-the-end. + #[inline(always)] + unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T { + if mem::size_of::() == 0 { + // This is *reducing* the length. `ptr` never changes with ZST. + self.end = (self.end as * $raw_mut u8).wrapping_offset(-offset) as * $raw_mut T; + self.ptr + } else { + let old = self.ptr; + self.ptr = self.ptr.offset(offset); + old + } + } + + // Helper function for moving the end of the iterator backwards by `offset` elements, + // returning the new end. + // Unsafe because the offset must be in-bounds or one-past-the-end. 
+ #[inline(always)] + unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T { + if mem::size_of::() == 0 { + self.end = (self.end as * $raw_mut u8).wrapping_offset(-offset) as * $raw_mut T; + self.ptr + } else { + self.end = self.end.offset(-offset); + self.end + } + } + } + + #[stable(feature = "rust1", since = "1.0.0")] + impl<'a, T> ExactSizeIterator for $name<'a, T> { + #[inline(always)] + fn len(&self) -> usize { + len!(self) + } + + #[inline(always)] + fn is_empty(&self) -> bool { + is_empty!(self) + } + } + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Iterator for $name<'a, T> { type Item = $elem; @@ -1171,33 +2429,48 @@ macro_rules! iterator { fn next(&mut self) -> Option<$elem> { // could be implemented with slices, but this avoids bounds checks unsafe { + assume(!self.ptr.is_null()); if mem::size_of::() != 0 { - assume(!self.ptr.is_null()); assume(!self.end.is_null()); } - if self.ptr == self.end { + if is_empty!(self) { None } else { - Some($mkref!(self.ptr.post_inc())) + Some(& $( $mut_ )* *self.post_inc_start(1)) } } } #[inline] fn size_hint(&self) -> (usize, Option) { - let exact = ptrdistance(self.ptr, self.end); + let exact = len!(self); (exact, Some(exact)) } #[inline] fn count(self) -> usize { - self.len() + len!(self) } #[inline] fn nth(&mut self, n: usize) -> Option<$elem> { - // Call helper method. Can't put the definition here because mut versus const. - self.iter_nth(n) + if n >= len!(self) { + // This iterator is now empty. + if mem::size_of::() == 0 { + // We have to do it this way as `ptr` may never be 0, but `end` + // could be (due to wrapping). + self.end = self.ptr; + } else { + self.ptr = self.end; + } + return None; + } + // We are in bounds. `offset` does the right thing even for ZSTs. + unsafe { + let elem = Some(& $( $mut_ )* *self.ptr.offset(n as isize)); + self.post_inc_start((n as isize).wrapping_add(1)); + elem + } } #[inline] @@ -1212,14 +2485,14 @@ macro_rules! 
iterator { // manual unrolling is needed when there are conditional exits from the loop let mut accum = init; unsafe { - while ptrdistance(self.ptr, self.end) >= 4 { - accum = f(accum, $mkref!(self.ptr.post_inc()))?; - accum = f(accum, $mkref!(self.ptr.post_inc()))?; - accum = f(accum, $mkref!(self.ptr.post_inc()))?; - accum = f(accum, $mkref!(self.ptr.post_inc()))?; + while len!(self) >= 4 { + accum = f(accum, & $( $mut_ )* *self.post_inc_start(1))?; + accum = f(accum, & $( $mut_ )* *self.post_inc_start(1))?; + accum = f(accum, & $( $mut_ )* *self.post_inc_start(1))?; + accum = f(accum, & $( $mut_ )* *self.post_inc_start(1))?; } - while self.ptr != self.end { - accum = f(accum, $mkref!(self.ptr.post_inc()))?; + while !is_empty!(self) { + accum = f(accum, & $( $mut_ )* *self.post_inc_start(1))?; } } Try::from_ok(accum) @@ -1244,8 +2517,8 @@ macro_rules! iterator { Self: Sized, P: FnMut(Self::Item) -> bool, { - // The addition might panic on overflow - let n = self.len(); + // The addition might panic on overflow. + let n = len!(self); self.try_fold(0, move |i, x| { if predicate(x) { Err(i) } else { Ok(i + 1) } @@ -1262,8 +2535,7 @@ macro_rules! iterator { Self: Sized + ExactSizeIterator + DoubleEndedIterator { // No need for an overflow check here, because `ExactSizeIterator` - // implies that the number of elements fits into a `usize`. - let n = self.len(); + let n = len!(self); self.try_rfold(n, move |i, x| { let i = i - 1; if predicate(x) { Err(i) } @@ -1282,14 +2554,14 @@ macro_rules! iterator { fn next_back(&mut self) -> Option<$elem> { // could be implemented with slices, but this avoids bounds checks unsafe { + assume(!self.ptr.is_null()); if mem::size_of::() != 0 { - assume(!self.ptr.is_null()); assume(!self.end.is_null()); } - if self.end == self.ptr { + if is_empty!(self) { None } else { - Some($mkref!(self.end.pre_dec())) + Some(& $( $mut_ )* *self.pre_dec_end(1)) } } } @@ -1301,14 +2573,15 @@ macro_rules! 
iterator { // manual unrolling is needed when there are conditional exits from the loop let mut accum = init; unsafe { - while ptrdistance(self.ptr, self.end) >= 4 { - accum = f(accum, $mkref!(self.end.pre_dec()))?; - accum = f(accum, $mkref!(self.end.pre_dec()))?; - accum = f(accum, $mkref!(self.end.pre_dec()))?; - accum = f(accum, $mkref!(self.end.pre_dec()))?; + while len!(self) >= 4 { + accum = f(accum, & $( $mut_ )* *self.pre_dec_end(1))?; + accum = f(accum, & $( $mut_ )* *self.pre_dec_end(1))?; + accum = f(accum, & $( $mut_ )* *self.pre_dec_end(1))?; + accum = f(accum, & $( $mut_ )* *self.pre_dec_end(1))?; } - while self.ptr != self.end { - accum = f(accum, $mkref!(self.end.pre_dec()))?; + // inlining is_empty everywhere makes a huge performance difference + while !is_empty!(self) { + accum = f(accum, & $( $mut_ )* *self.pre_dec_end(1))?; } } Try::from_ok(accum) @@ -1327,37 +2600,15 @@ macro_rules! iterator { accum } } + + #[stable(feature = "fused", since = "1.26.0")] + impl<'a, T> FusedIterator for $name<'a, T> {} + + #[unstable(feature = "trusted_len", issue = "37572")] + unsafe impl<'a, T> TrustedLen for $name<'a, T> {} } } -macro_rules! make_slice { - ($start: expr, $end: expr) => {{ - let start = $start; - let diff = ($end as usize).wrapping_sub(start as usize); - if size_from_ptr(start) == 0 { - // use a non-null pointer value - unsafe { from_raw_parts(1 as *const _, diff) } - } else { - let len = diff / size_from_ptr(start); - unsafe { from_raw_parts(start, len) } - } - }} -} - -macro_rules! make_mut_slice { - ($start: expr, $end: expr) => {{ - let start = $start; - let diff = ($end as usize).wrapping_sub(start as usize); - if size_from_ptr(start) == 0 { - // use a non-null pointer value - unsafe { from_raw_parts_mut(1 as *mut _, diff) } - } else { - let len = diff / size_from_ptr(start); - unsafe { from_raw_parts_mut(start, len) } - } - }} -} - /// Immutable slice iterator /// /// This struct is created by the [`iter`] method on [slices]. 
@@ -1381,7 +2632,9 @@ macro_rules! make_mut_slice { #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { ptr: *const T, - end: *const T, + end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that + // ptr == end is a quick test for the Iterator being empty, that works + // for both ZST and non-ZST. _marker: marker::PhantomData<&'a T>, } @@ -1426,38 +2679,11 @@ impl<'a, T> Iter<'a, T> { /// ``` #[stable(feature = "iter_to_slice", since = "1.4.0")] pub fn as_slice(&self) -> &'a [T] { - make_slice!(self.ptr, self.end) - } - - // Helper function for Iter::nth - fn iter_nth(&mut self, n: usize) -> Option<&'a T> { - match self.as_slice().get(n) { - Some(elem_ref) => unsafe { - self.ptr = slice_offset!(self.ptr, (n as isize).wrapping_add(1)); - Some(elem_ref) - }, - None => { - self.ptr = self.end; - None - } - } + self.make_slice() } } -iterator!{struct Iter -> *const T, &'a T, make_ref} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> { - fn is_empty(&self) -> bool { - self.ptr == self.end - } -} - -#[unstable(feature = "fused", issue = "35602")] -impl<'a, T> FusedIterator for Iter<'a, T> {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, T> TrustedLen for Iter<'a, T> {} +iterator!{struct Iter -> *const T, &'a T, const, /* no mut */} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { @@ -1498,7 +2724,9 @@ impl<'a, T> AsRef<[T]> for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { ptr: *mut T, - end: *mut T, + end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that + // ptr == end is a quick test for the Iterator being empty, that works + // for both ZST and non-ZST. 
_marker: marker::PhantomData<&'a mut T>, } @@ -1506,7 +2734,7 @@ pub struct IterMut<'a, T: 'a> { impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("IterMut") - .field(&make_slice!(self.ptr, self.end)) + .field(&self.make_slice()) .finish() } } @@ -1520,9 +2748,7 @@ impl<'a, T> IterMut<'a, T> { /// View the underlying data as a subslice of the original data. /// /// To avoid creating `&mut` references that alias, this is forced - /// to consume the iterator. Consider using the `Slice` and - /// `SliceMut` implementations for obtaining slices with more - /// restricted lifetimes that do not consume the iterator. + /// to consume the iterator. /// /// # Examples /// @@ -1554,83 +2780,11 @@ impl<'a, T> IterMut<'a, T> { /// ``` #[stable(feature = "iter_to_slice", since = "1.4.0")] pub fn into_slice(self) -> &'a mut [T] { - make_mut_slice!(self.ptr, self.end) - } - - // Helper function for IterMut::nth - fn iter_nth(&mut self, n: usize) -> Option<&'a mut T> { - match make_mut_slice!(self.ptr, self.end).get_mut(n) { - Some(elem_ref) => unsafe { - self.ptr = slice_offset!(self.ptr, (n as isize).wrapping_add(1)); - Some(elem_ref) - }, - None => { - self.ptr = self.end; - None - } - } + unsafe { from_raw_parts_mut(self.ptr, len!(self)) } } } -iterator!{struct IterMut -> *mut T, &'a mut T, make_ref_mut} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for IterMut<'a, T> { - fn is_empty(&self) -> bool { - self.ptr == self.end - } -} - -#[unstable(feature = "fused", issue = "35602")] -impl<'a, T> FusedIterator for IterMut<'a, T> {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, T> TrustedLen for IterMut<'a, T> {} - - -// Return the number of elements of `T` from `start` to `end`. -// Return the arithmetic difference if `T` is zero size. 
-#[inline(always)] -fn ptrdistance(start: *const T, end: *const T) -> usize { - match start.offset_to(end) { - Some(x) => x as usize, - None => (end as usize).wrapping_sub(start as usize), - } -} - -// Extension methods for raw pointers, used by the iterators -trait PointerExt : Copy { - unsafe fn slice_offset(self, i: isize) -> Self; - - /// Increments `self` by 1, but returns the old value. - #[inline(always)] - unsafe fn post_inc(&mut self) -> Self { - let current = *self; - *self = self.slice_offset(1); - current - } - - /// Decrements `self` by 1, and returns the new value. - #[inline(always)] - unsafe fn pre_dec(&mut self) -> Self { - *self = self.slice_offset(-1); - *self - } -} - -impl PointerExt for *const T { - #[inline(always)] - unsafe fn slice_offset(self, i: isize) -> Self { - slice_offset!(self, i) - } -} - -impl PointerExt for *mut T { - #[inline(always)] - unsafe fn slice_offset(self, i: isize) -> Self { - slice_offset!(self, i) - } -} +iterator!{struct IterMut -> *mut T, &'a mut T, mut, mut} /// An internal abstraction over the splitting iterators, so that /// splitn, splitn_mut etc can be implemented once. 
@@ -1729,7 +2883,7 @@ impl<'a, T, P> SplitIter for Split<'a, T, P> where P: FnMut(&T) -> bool { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T, P> FusedIterator for Split<'a, T, P> where P: FnMut(&T) -> bool {} /// An iterator over the subslices of the vector which are separated @@ -1827,7 +2981,7 @@ impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P> where } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T, P> FusedIterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {} /// An iterator over subslices separated by elements that match a predicate @@ -1837,13 +2991,13 @@ impl<'a, T, P> FusedIterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool { /// /// [`rsplit`]: ../../std/primitive.slice.html#method.rsplit /// [slices]: ../../std/primitive.slice.html -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] #[derive(Clone)] // Is this correct, or does it incorrectly require `T: Clone`? 
pub struct RSplit<'a, T:'a, P> where P: FnMut(&T) -> bool { inner: Split<'a, T, P> } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplit<'a, T, P> where P: FnMut(&T) -> bool { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("RSplit") @@ -1853,7 +3007,7 @@ impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplit<'a, T, P> where P: FnMut(& } } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> Iterator for RSplit<'a, T, P> where P: FnMut(&T) -> bool { type Item = &'a [T]; @@ -1868,7 +3022,7 @@ impl<'a, T, P> Iterator for RSplit<'a, T, P> where P: FnMut(&T) -> bool { } } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P> where P: FnMut(&T) -> bool { #[inline] fn next_back(&mut self) -> Option<&'a [T]> { @@ -1876,7 +3030,7 @@ impl<'a, T, P> DoubleEndedIterator for RSplit<'a, T, P> where P: FnMut(&T) -> bo } } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> SplitIter for RSplit<'a, T, P> where P: FnMut(&T) -> bool { #[inline] fn finish(&mut self) -> Option<&'a [T]> { @@ -1884,8 +3038,7 @@ impl<'a, T, P> SplitIter for RSplit<'a, T, P> where P: FnMut(&T) -> bool { } } -//#[unstable(feature = "fused", issue = "35602")] -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> FusedIterator for RSplit<'a, T, P> where P: FnMut(&T) -> bool {} /// An iterator over the subslices of the vector which are separated @@ -1895,12 +3048,12 @@ impl<'a, T, P> FusedIterator for RSplit<'a, T, P> where P: FnMut(&T) -> bool {} /// /// [`rsplit_mut`]: ../../std/primitive.slice.html#method.rsplit_mut /// [slices]: 
../../std/primitive.slice.html -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] pub struct RSplitMut<'a, T:'a, P> where P: FnMut(&T) -> bool { inner: SplitMut<'a, T, P> } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("RSplitMut") @@ -1910,7 +3063,7 @@ impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplitMut<'a, T, P> where P: FnMu } } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> SplitIter for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool { #[inline] fn finish(&mut self) -> Option<&'a mut [T]> { @@ -1918,7 +3071,7 @@ impl<'a, T, P> SplitIter for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool { } } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> Iterator for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool { type Item = &'a mut [T]; @@ -1933,7 +3086,7 @@ impl<'a, T, P> Iterator for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool { } } -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool, { @@ -1943,8 +3096,7 @@ impl<'a, T, P> DoubleEndedIterator for RSplitMut<'a, T, P> where } } -//#[unstable(feature = "fused", issue = "35602")] -#[unstable(feature = "slice_rsplit", issue = "41020")] +#[stable(feature = "slice_rsplit", since = "1.27.0")] impl<'a, T, P> FusedIterator for RSplitMut<'a, T, P> where P: FnMut(&T) -> bool {} /// An private iterator over subslices separated by elements that @@ -2080,7 +3232,7 @@ macro_rules! 
forward_iterator { } } - #[unstable(feature = "fused", issue = "35602")] + #[stable(feature = "fused", since = "1.26.0")] impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> where P: FnMut(&T) -> bool {} } @@ -2186,7 +3338,10 @@ impl<'a, T> DoubleEndedIterator for Windows<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Windows<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for Windows<'a, T> {} + +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Windows<'a, T> {} #[doc(hidden)] @@ -2305,7 +3460,10 @@ impl<'a, T> DoubleEndedIterator for Chunks<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Chunks<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for Chunks<'a, T> {} + +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for Chunks<'a, T> {} #[doc(hidden)] @@ -2421,7 +3579,10 @@ impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {} -#[unstable(feature = "fused", issue = "35602")] +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for ChunksMut<'a, T> {} + +#[stable(feature = "fused", since = "1.26.0")] impl<'a, T> FusedIterator for ChunksMut<'a, T> {} #[doc(hidden)] @@ -2441,25 +3602,39 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> { /// time). /// /// When the slice len is not evenly divided by the chunk size, the last -/// up to `chunk_size-1` elements will be omitted. +/// up to `chunk_size-1` elements will be omitted but can be retrieved from +/// the [`remainder`] function from the iterator. /// /// This struct is created by the [`exact_chunks`] method on [slices]. 
/// /// [`exact_chunks`]: ../../std/primitive.slice.html#method.exact_chunks +/// [`remainder`]: ../../std/slice/struct.ExactChunks.html#method.remainder /// [slices]: ../../std/primitive.slice.html #[derive(Debug)] #[unstable(feature = "exact_chunks", issue = "47115")] pub struct ExactChunks<'a, T:'a> { v: &'a [T], + rem: &'a [T], chunk_size: usize } +#[unstable(feature = "exact_chunks", issue = "47115")] +impl<'a, T> ExactChunks<'a, T> { + /// Return the remainder of the original slice that is not going to be + /// returned by the iterator. The returned slice has at most `chunk_size-1` + /// elements. + pub fn remainder(&self) -> &'a [T] { + self.rem + } +} + // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[unstable(feature = "exact_chunks", issue = "47115")] impl<'a, T> Clone for ExactChunks<'a, T> { fn clone(&self) -> ExactChunks<'a, T> { ExactChunks { v: self.v, + rem: self.rem, chunk_size: self.chunk_size, } } @@ -2531,7 +3706,10 @@ impl<'a, T> ExactSizeIterator for ExactChunks<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for ExactChunks<'a, T> {} + +#[unstable(feature = "exact_chunks", issue = "47115")] impl<'a, T> FusedIterator for ExactChunks<'a, T> {} #[doc(hidden)] @@ -2544,20 +3722,35 @@ unsafe impl<'a, T> TrustedRandomAccess for ExactChunks<'a, T> { } /// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` -/// elements at a time). When the slice len is not evenly divided by the chunk -/// size, the last up to `chunk_size-1` elements will be omitted. +/// elements at a time). +/// +/// When the slice len is not evenly divided by the chunk size, the last up to +/// `chunk_size-1` elements will be omitted but can be retrieved from the +/// [`into_remainder`] function from the iterator. /// /// This struct is created by the [`exact_chunks_mut`] method on [slices]. 
/// /// [`exact_chunks_mut`]: ../../std/primitive.slice.html#method.exact_chunks_mut +/// [`into_remainder`]: ../../std/slice/struct.ExactChunksMut.html#method.into_remainder /// [slices]: ../../std/primitive.slice.html #[derive(Debug)] #[unstable(feature = "exact_chunks", issue = "47115")] pub struct ExactChunksMut<'a, T:'a> { v: &'a mut [T], + rem: &'a mut [T], chunk_size: usize } +#[unstable(feature = "exact_chunks", issue = "47115")] +impl<'a, T> ExactChunksMut<'a, T> { + /// Return the remainder of the original slice that is not going to be + /// returned by the iterator. The returned slice has at most `chunk_size-1` + /// elements. + pub fn into_remainder(self) -> &'a mut [T] { + self.rem + } +} + #[unstable(feature = "exact_chunks", issue = "47115")] impl<'a, T> Iterator for ExactChunksMut<'a, T> { type Item = &'a mut [T]; @@ -2628,7 +3821,10 @@ impl<'a, T> ExactSizeIterator for ExactChunksMut<'a, T> { } } -#[unstable(feature = "fused", issue = "35602")] +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for ExactChunksMut<'a, T> {} + +#[unstable(feature = "exact_chunks", issue = "47115")] impl<'a, T> FusedIterator for ExactChunksMut<'a, T> {} #[doc(hidden)] @@ -2654,10 +3850,11 @@ unsafe impl<'a, T> TrustedRandomAccess for ExactChunksMut<'a, T> { /// valid for `len` elements, nor whether the lifetime inferred is a suitable /// lifetime for the returned slice. /// -/// `p` must be non-null, even for zero-length slices, because non-zero bits -/// are required to distinguish between a zero-length slice within `Some()` -/// from `None`. `p` can be a bogus non-dereferencable pointer, such as `0x1`, -/// for zero-length slices, though. +/// `data` must be non-null and aligned, even for zero-length slices. One +/// reason for this is that enum layout optimizations may rely on references +/// (including slices of any length) being aligned and non-null to distinguish +/// them from other data. 
You can obtain a pointer that is usable as `data` +/// for zero-length slices using [`NonNull::dangling()`]. /// /// # Caveat /// @@ -2672,17 +3869,19 @@ unsafe impl<'a, T> TrustedRandomAccess for ExactChunksMut<'a, T> { /// ``` /// use std::slice; /// -/// // manifest a slice out of thin air! -/// let ptr = 0x1234 as *const usize; -/// let amt = 10; -/// unsafe { -/// let slice = slice::from_raw_parts(ptr, amt); -/// } +/// // manifest a slice for a single element +/// let x = 42; +/// let ptr = &x as *const _; +/// let slice = unsafe { slice::from_raw_parts(ptr, 1) }; +/// assert_eq!(slice[0], 42); /// ``` +/// +/// [`NonNull::dangling()`]: ../../std/ptr/struct.NonNull.html#method.dangling #[inline] #[stable(feature = "rust1", since = "1.0.0")] -pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] { - mem::transmute(Repr { data: p, len: len }) +pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] { + debug_assert!(data as usize % mem::align_of::() == 0, "attempt to create unaligned slice"); + Repr { raw: FatPtr { data, len } }.rust } /// Performs the same functionality as `from_raw_parts`, except that a mutable @@ -2690,16 +3889,17 @@ pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] { /// /// This function is unsafe for the same reasons as `from_raw_parts`, as well /// as not being able to provide a non-aliasing guarantee of the returned -/// mutable slice. `p` must be non-null even for zero-length slices as with -/// `from_raw_parts`. +/// mutable slice. `data` must be non-null and aligned even for zero-length +/// slices as with `from_raw_parts`. 
#[inline] #[stable(feature = "rust1", since = "1.0.0")] -pub unsafe fn from_raw_parts_mut<'a, T>(p: *mut T, len: usize) -> &'a mut [T] { - mem::transmute(Repr { data: p, len: len }) +pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] { + debug_assert!(data as usize % mem::align_of::() == 0, "attempt to create unaligned slice"); + Repr { raw: FatPtr { data, len} }.rust_mut } /// Converts a reference to T into a slice of length 1 (without copying). -#[unstable(feature = "from_ref", issue = "45703")] +#[stable(feature = "from_ref", since = "1.28.0")] pub fn from_ref(s: &T) -> &[T] { unsafe { from_raw_parts(s, 1) @@ -2707,8 +3907,8 @@ pub fn from_ref(s: &T) -> &[T] { } /// Converts a reference to T into a slice of length 1 (without copying). -#[unstable(feature = "from_ref", issue = "45703")] -pub fn from_ref_mut(s: &mut T) -> &mut [T] { +#[stable(feature = "from_ref", since = "1.28.0")] +pub fn from_mut(s: &mut T) -> &mut [T] { unsafe { from_raw_parts_mut(s, 1) } diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs index e4a4e33c1729..28ef53ccb5cb 100644 --- a/src/libcore/slice/rotate.rs +++ b/src/libcore/slice/rotate.rs @@ -48,7 +48,6 @@ impl RawArray { /// # Safety /// /// The specified range must be valid for reading and writing. -/// The type `T` must have non-zero size. /// /// # Algorithm /// @@ -73,6 +72,7 @@ pub unsafe fn ptr_rotate(mut left: usize, mid: *mut T, mut right: usize) { loop { let delta = cmp::min(left, right); if delta <= RawArray::::cap() { + // We will always hit this immediately for ZST. break; } diff --git a/src/libcore/str/lossy.rs b/src/libcore/str/lossy.rs new file mode 100644 index 000000000000..186d6adbc91c --- /dev/null +++ b/src/libcore/str/lossy.rs @@ -0,0 +1,212 @@ +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use char; +use str as core_str; +use fmt; +use fmt::Write; +use mem; + +/// Lossy UTF-8 string. +#[unstable(feature = "str_internals", issue = "0")] +pub struct Utf8Lossy { + bytes: [u8] +} + +impl Utf8Lossy { + pub fn from_str(s: &str) -> &Utf8Lossy { + Utf8Lossy::from_bytes(s.as_bytes()) + } + + pub fn from_bytes(bytes: &[u8]) -> &Utf8Lossy { + unsafe { mem::transmute(bytes) } + } + + pub fn chunks(&self) -> Utf8LossyChunksIter { + Utf8LossyChunksIter { source: &self.bytes } + } +} + + +/// Iterator over lossy UTF-8 string +#[unstable(feature = "str_internals", issue = "0")] +#[allow(missing_debug_implementations)] +pub struct Utf8LossyChunksIter<'a> { + source: &'a [u8], +} + +#[unstable(feature = "str_internals", issue = "0")] +#[derive(PartialEq, Eq, Debug)] +pub struct Utf8LossyChunk<'a> { + /// Sequence of valid chars. + /// Can be empty between broken UTF-8 chars. + pub valid: &'a str, + /// Single broken char, empty if none. + /// Empty iff iterator item is last. + pub broken: &'a [u8], +} + +impl<'a> Iterator for Utf8LossyChunksIter<'a> { + type Item = Utf8LossyChunk<'a>; + + fn next(&mut self) -> Option> { + if self.source.len() == 0 { + return None; + } + + const TAG_CONT_U8: u8 = 128; + fn unsafe_get(xs: &[u8], i: usize) -> u8 { + unsafe { *xs.get_unchecked(i) } + } + fn safe_get(xs: &[u8], i: usize) -> u8 { + if i >= xs.len() { 0 } else { unsafe_get(xs, i) } + } + + let mut i = 0; + while i < self.source.len() { + let i_ = i; + + let byte = unsafe_get(self.source, i); + i += 1; + + if byte < 128 { + + } else { + let w = core_str::utf8_char_width(byte); + + macro_rules! 
error { () => ({ + unsafe { + let r = Utf8LossyChunk { + valid: core_str::from_utf8_unchecked(&self.source[0..i_]), + broken: &self.source[i_..i], + }; + self.source = &self.source[i..]; + return Some(r); + } + })} + + match w { + 2 => { + if safe_get(self.source, i) & 192 != TAG_CONT_U8 { + error!(); + } + i += 1; + } + 3 => { + match (byte, safe_get(self.source, i)) { + (0xE0, 0xA0 ..= 0xBF) => (), + (0xE1 ..= 0xEC, 0x80 ..= 0xBF) => (), + (0xED, 0x80 ..= 0x9F) => (), + (0xEE ..= 0xEF, 0x80 ..= 0xBF) => (), + _ => { + error!(); + } + } + i += 1; + if safe_get(self.source, i) & 192 != TAG_CONT_U8 { + error!(); + } + i += 1; + } + 4 => { + match (byte, safe_get(self.source, i)) { + (0xF0, 0x90 ..= 0xBF) => (), + (0xF1 ..= 0xF3, 0x80 ..= 0xBF) => (), + (0xF4, 0x80 ..= 0x8F) => (), + _ => { + error!(); + } + } + i += 1; + if safe_get(self.source, i) & 192 != TAG_CONT_U8 { + error!(); + } + i += 1; + if safe_get(self.source, i) & 192 != TAG_CONT_U8 { + error!(); + } + i += 1; + } + _ => { + error!(); + } + } + } + } + + let r = Utf8LossyChunk { + valid: unsafe { core_str::from_utf8_unchecked(self.source) }, + broken: &[], + }; + self.source = &[]; + Some(r) + } +} + + +impl fmt::Display for Utf8Lossy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // If we're the empty string then our iterator won't actually yield + // anything, so perform the formatting manually + if self.bytes.len() == 0 { + return "".fmt(f) + } + + for Utf8LossyChunk { valid, broken } in self.chunks() { + // If we successfully decoded the whole chunk as a valid string then + // we can return a direct formatting of the string which will also + // respect various formatting flags if possible. 
+ if valid.len() == self.bytes.len() { + assert!(broken.is_empty()); + return valid.fmt(f) + } + + f.write_str(valid)?; + if !broken.is_empty() { + f.write_char(char::REPLACEMENT_CHARACTER)?; + } + } + Ok(()) + } +} + +impl fmt::Debug for Utf8Lossy { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_char('"')?; + + for Utf8LossyChunk { valid, broken } in self.chunks() { + + // Valid part. + // Here we partially parse UTF-8 again which is suboptimal. + { + let mut from = 0; + for (i, c) in valid.char_indices() { + let esc = c.escape_debug(); + // If char needs escaping, flush backlog so far and write, else skip + if esc.len() != 1 { + f.write_str(&valid[from..i])?; + for c in esc { + f.write_char(c)?; + } + from = i + c.len_utf8(); + } + } + f.write_str(&valid[from..])?; + } + + // Broken parts of string as hex escape. + for &b in broken { + write!(f, "\\x{:02x}", b)?; + } + } + + f.write_char('"') + } +} diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index 765b369e4b25..810d19df0c5b 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -19,13 +19,17 @@ use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use char; use fmt; -use iter::{Map, Cloned, FusedIterator, TrustedLen}; +use iter::{Map, Cloned, FusedIterator, TrustedLen, Filter}; use iter_private::TrustedRandomAccess; -use slice::{self, SliceIndex}; +use slice::{self, SliceIndex, Split as SliceSplit}; use mem; pub mod pattern; +#[unstable(feature = "str_internals", issue = "0")] +#[allow(missing_docs)] +pub mod lossy; + /// A trait to abstract the idea of creating a new instance of a type from a /// string. 
/// @@ -165,6 +169,37 @@ Section: Creating a string /// /// [`String`]: ../../std/string/struct.String.html#method.from_utf8 /// [`&str`]: ../../std/str/fn.from_utf8.html +/// +/// # Examples +/// +/// This error type’s methods can be used to create functionality +/// similar to `String::from_utf8_lossy` without allocating heap memory: +/// +/// ``` +/// fn from_utf8_lossy(mut input: &[u8], mut push: F) where F: FnMut(&str) { +/// loop { +/// match ::std::str::from_utf8(input) { +/// Ok(valid) => { +/// push(valid); +/// break +/// } +/// Err(error) => { +/// let (valid, after_valid) = input.split_at(error.valid_up_to()); +/// unsafe { +/// push(::std::str::from_utf8_unchecked(valid)) +/// } +/// push("\u{FFFD}"); +/// +/// if let Some(invalid_sequence_length) = error.error_len() { +/// input = &after_valid[invalid_sequence_length..] +/// } else { +/// break +/// } +/// } +/// } +/// } +/// } +/// ``` #[derive(Copy, Eq, PartialEq, Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Utf8Error { @@ -209,7 +244,10 @@ impl Utf8Error { /// The length provided is that of the invalid byte sequence /// that starts at the index given by `valid_up_to()`. /// Decoding should resume after that sequence - /// (after inserting a U+FFFD REPLACEMENT CHARACTER) in case of lossy decoding. + /// (after inserting a [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD]) in case of + /// lossy decoding. + /// + /// [U+FFFD]: ../../std/char/constant.REPLACEMENT_CHARACTER.html #[stable(feature = "utf8_error_error_len", since = "1.20.0")] pub fn error_len(&self) -> Option { self.error_len.map(|len| len as usize) @@ -341,37 +379,6 @@ pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> { Ok(unsafe { from_utf8_unchecked_mut(v) }) } -/// Forms a str from a pointer and a length. -/// -/// The `len` argument is the number of bytes in the string. 
-/// -/// # Safety -/// -/// This function is unsafe as there is no guarantee that the given pointer is -/// valid for `len` bytes, nor whether the lifetime inferred is a suitable -/// lifetime for the returned str. -/// -/// The data must be valid UTF-8 -/// -/// `p` must be non-null, even for zero-length strs, because non-zero bits -/// are required to distinguish between a zero-length str within `Some()` -/// from `None`. `p` can be a bogus non-dereferencable pointer, such as `0x1`, -/// for zero-length strs, though. -/// -/// # Caveat -/// -/// The lifetime for the returned str is inferred from its usage. To -/// prevent accidental misuse, it's suggested to tie the lifetime to whichever -/// source lifetime is safe in the context, such as by providing a helper -/// function taking the lifetime of a host value for the str, or by explicit -/// annotation. -/// Performs the same functionality as `from_raw_parts`, except that a mutable -/// str is returned. -/// -unsafe fn from_raw_parts_mut<'a>(p: *mut u8, len: usize) -> &'a mut str { - from_utf8_unchecked_mut(slice::from_raw_parts_mut(p, len)) -} - /// Converts a slice of bytes to a string slice without checking /// that the string contains valid UTF-8. 
/// @@ -609,7 +616,7 @@ impl<'a> DoubleEndedIterator for Chars<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for Chars<'a> {} impl<'a> Chars<'a> { @@ -692,17 +699,14 @@ impl<'a> Iterator for CharIndices<'a> { impl<'a> DoubleEndedIterator for CharIndices<'a> { #[inline] fn next_back(&mut self) -> Option<(usize, char)> { - match self.iter.next_back() { - None => None, - Some(ch) => { - let index = self.front_offset + self.iter.iter.len(); - Some((index, ch)) - } - } + self.iter.next_back().map(|ch| { + let index = self.front_offset + self.iter.iter.len(); + (index, ch) + }) } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for CharIndices<'a> {} impl<'a> CharIndices<'a> { @@ -817,7 +821,7 @@ impl<'a> ExactSizeIterator for Bytes<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for Bytes<'a> {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -977,10 +981,10 @@ macro_rules! 
generate_pattern_iterators { } } - #[unstable(feature = "fused", issue = "35602")] + #[stable(feature = "fused", since = "1.26.0")] impl<'a, P: Pattern<'a>> FusedIterator for $forward_iterator<'a, P> {} - #[unstable(feature = "fused", issue = "35602")] + #[stable(feature = "fused", since = "1.26.0")] impl<'a, P: Pattern<'a>> FusedIterator for $reverse_iterator<'a, P> where P::Searcher: ReverseSearcher<'a> {} @@ -1051,7 +1055,7 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { if !self.finished && (self.allow_trailing_empty || self.end - self.start > 0) { self.finished = true; unsafe { - let string = self.matcher.haystack().slice_unchecked(self.start, self.end); + let string = self.matcher.haystack().get_unchecked(self.start..self.end); Some(string) } } else { @@ -1066,7 +1070,7 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { let haystack = self.matcher.haystack(); match self.matcher.next_match() { Some((a, b)) => unsafe { - let elt = haystack.slice_unchecked(self.start, a); + let elt = haystack.get_unchecked(self.start..a); self.start = b; Some(elt) }, @@ -1091,13 +1095,13 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { let haystack = self.matcher.haystack(); match self.matcher.next_match_back() { Some((a, b)) => unsafe { - let elt = haystack.slice_unchecked(b, self.end); + let elt = haystack.get_unchecked(b..self.end); self.end = a; Some(elt) }, None => unsafe { self.finished = true; - Some(haystack.slice_unchecked(self.start, self.end)) + Some(haystack.get_unchecked(self.start..self.end)) }, } } @@ -1218,7 +1222,7 @@ impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> { #[inline] fn next(&mut self) -> Option<(usize, &'a str)> { self.0.next_match().map(|(start, end)| unsafe { - (start, self.0.haystack().slice_unchecked(start, end)) + (start, self.0.haystack().get_unchecked(start..end)) }) } @@ -1227,7 +1231,7 @@ impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> { where P::Searcher: ReverseSearcher<'a> { self.0.next_match_back().map(|(start, end)| unsafe 
{ - (start, self.0.haystack().slice_unchecked(start, end)) + (start, self.0.haystack().get_unchecked(start..end)) }) } } @@ -1270,7 +1274,7 @@ impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> { fn next(&mut self) -> Option<&'a str> { self.0.next_match().map(|(a, b)| unsafe { // Indices are known to be on utf8 boundaries - self.0.haystack().slice_unchecked(a, b) + self.0.haystack().get_unchecked(a..b) }) } @@ -1280,7 +1284,7 @@ impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> { { self.0.next_match_back().map(|(a, b)| unsafe { // Indices are known to be on utf8 boundaries - self.0.haystack().slice_unchecked(a, b) + self.0.haystack().get_unchecked(a..b) }) } } @@ -1337,7 +1341,7 @@ impl<'a> DoubleEndedIterator for Lines<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a> FusedIterator for Lines<'a> {} /// Created with the method [`lines_any`]. @@ -1403,7 +1407,7 @@ impl<'a> DoubleEndedIterator for LinesAny<'a> { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] #[allow(deprecated)] impl<'a> FusedIterator for LinesAny<'a> {} @@ -1480,10 +1484,10 @@ fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { }, 3 => { match (first, next!()) { - (0xE0 , 0xA0 ... 0xBF) | - (0xE1 ... 0xEC, 0x80 ... 0xBF) | - (0xED , 0x80 ... 0x9F) | - (0xEE ... 0xEF, 0x80 ... 0xBF) => {} + (0xE0 , 0xA0 ..= 0xBF) | + (0xE1 ..= 0xEC, 0x80 ..= 0xBF) | + (0xED , 0x80 ..= 0x9F) | + (0xEE ..= 0xEF, 0x80 ..= 0xBF) => {} _ => err!(Some(1)) } if next!() & !CONT_MASK != TAG_CONT_U8 { @@ -1492,9 +1496,9 @@ fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { } 4 => { match (first, next!()) { - (0xF0 , 0x90 ... 0xBF) | - (0xF1 ... 0xF3, 0x80 ... 0xBF) | - (0xF4 , 0x80 ... 
0x8F) => {} + (0xF0 , 0x90 ..= 0xBF) | + (0xF1 ..= 0xF3, 0x80 ..= 0xBF) | + (0xF4 , 0x80 ..= 0x8F) => {} _ => err!(Some(1)) } if next!() & !CONT_MASK != TAG_CONT_U8 { @@ -1566,7 +1570,7 @@ static UTF8_CHAR_WIDTH: [u8; 256] = [ #[unstable(feature = "str_internals", issue = "0")] #[inline] pub fn utf8_char_width(b: u8) -> usize { - return UTF8_CHAR_WIDTH[b as usize] as usize; + UTF8_CHAR_WIDTH[b as usize] as usize } /// Mask of the value bits of a continuation byte. @@ -1779,9 +1783,7 @@ mod traits { } } - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index> for str { type Output = str; @@ -1791,9 +1793,7 @@ mod traits { } } - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::Index> for str { type Output = str; @@ -1803,18 +1803,14 @@ mod traits { } } - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut> for str { #[inline] fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut str { index.index_mut(self) } } - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl ops::IndexMut> for str { #[inline] fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut str { @@ -1822,6 +1818,12 @@ mod traits { } } + #[inline(never)] + #[cold] + fn str_index_overflow_fail() -> ! 
{ + panic!("attempted to index str up to maximum usize"); + } + #[stable(feature = "str_checked_slicing", since = "1.20.0")] impl SliceIndex for ops::RangeFull { type Output = str; @@ -1997,202 +1999,73 @@ mod traits { } } - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl SliceIndex for ops::RangeInclusive { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { - if let Some(end) = self.end.checked_add(1) { - (self.start..end).get(slice) - } else { - None - } + if *self.end() == usize::max_value() { None } + else { (*self.start()..self.end()+1).get(slice) } } #[inline] fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> { - if let Some(end) = self.end.checked_add(1) { - (self.start..end).get_mut(slice) - } else { - None - } + if *self.end() == usize::max_value() { None } + else { (*self.start()..self.end()+1).get_mut(slice) } } #[inline] unsafe fn get_unchecked(self, slice: &str) -> &Self::Output { - (self.start..self.end+1).get_unchecked(slice) + (*self.start()..self.end()+1).get_unchecked(slice) } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output { - (self.start..self.end+1).get_unchecked_mut(slice) + (*self.start()..self.end()+1).get_unchecked_mut(slice) } #[inline] fn index(self, slice: &str) -> &Self::Output { - assert!(self.end != usize::max_value(), - "attempted to index str up to maximum usize"); - (self.start..self.end+1).index(slice) + if *self.end() == usize::max_value() { str_index_overflow_fail(); } + (*self.start()..self.end()+1).index(slice) } #[inline] fn index_mut(self, slice: &mut str) -> &mut Self::Output { - assert!(self.end != usize::max_value(), - "attempted to index str up to maximum usize"); - (self.start..self.end+1).index_mut(slice) + if *self.end() == usize::max_value() { str_index_overflow_fail(); } + (*self.start()..self.end()+1).index_mut(slice) } } 
- #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl SliceIndex for ops::RangeToInclusive { type Output = str; #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { - if self.end < usize::max_value() && slice.is_char_boundary(self.end + 1) { - Some(unsafe { self.get_unchecked(slice) }) - } else { - None - } + if self.end == usize::max_value() { None } + else { (..self.end+1).get(slice) } } #[inline] fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> { - if self.end < usize::max_value() && slice.is_char_boundary(self.end + 1) { - Some(unsafe { self.get_unchecked_mut(slice) }) - } else { - None - } + if self.end == usize::max_value() { None } + else { (..self.end+1).get_mut(slice) } } #[inline] unsafe fn get_unchecked(self, slice: &str) -> &Self::Output { - let ptr = slice.as_ptr(); - super::from_utf8_unchecked(slice::from_raw_parts(ptr, self.end + 1)) + (..self.end+1).get_unchecked(slice) } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output { - let ptr = slice.as_ptr(); - super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, self.end + 1)) + (..self.end+1).get_unchecked_mut(slice) } #[inline] fn index(self, slice: &str) -> &Self::Output { - assert!(self.end != usize::max_value(), - "attempted to index str up to maximum usize"); - let end = self.end + 1; - self.get(slice).unwrap_or_else(|| super::slice_error_fail(slice, 0, end)) + if self.end == usize::max_value() { str_index_overflow_fail(); } + (..self.end+1).index(slice) } #[inline] fn index_mut(self, slice: &mut str) -> &mut Self::Output { - assert!(self.end != usize::max_value(), - "attempted to index str up to maximum usize"); - if slice.is_char_boundary(self.end) { - unsafe { self.get_unchecked_mut(slice) } - } else { - super::slice_error_fail(slice, 0, self.end + 1) - } + if self.end == usize::max_value() { 
str_index_overflow_fail(); } + (..self.end+1).index_mut(slice) } } - -} - - -/// Methods for string slices -#[allow(missing_docs)] -#[doc(hidden)] -#[unstable(feature = "core_str_ext", - reason = "stable interface provided by `impl str` in later crates", - issue = "32110")] -pub trait StrExt { - // NB there are no docs here are they're all located on the StrExt trait in - // liballoc, not here. - - #[stable(feature = "core", since = "1.6.0")] - fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool; - #[stable(feature = "core", since = "1.6.0")] - fn chars(&self) -> Chars; - #[stable(feature = "core", since = "1.6.0")] - fn bytes(&self) -> Bytes; - #[stable(feature = "core", since = "1.6.0")] - fn char_indices(&self) -> CharIndices; - #[stable(feature = "core", since = "1.6.0")] - fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P>; - #[stable(feature = "core", since = "1.6.0")] - fn rsplit<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplit<'a, P> - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn splitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> SplitN<'a, P>; - #[stable(feature = "core", since = "1.6.0")] - fn rsplitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> RSplitN<'a, P> - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P>; - #[stable(feature = "core", since = "1.6.0")] - fn rsplit_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplitTerminator<'a, P> - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P>; - #[stable(feature = "core", since = "1.6.0")] - fn rmatches<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatches<'a, P> - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn match_indices<'a, P: Pattern<'a>>(&'a self, 
pat: P) -> MatchIndices<'a, P>; - #[stable(feature = "core", since = "1.6.0")] - fn rmatch_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatchIndices<'a, P> - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn lines(&self) -> Lines; - #[stable(feature = "core", since = "1.6.0")] - #[rustc_deprecated(since = "1.6.0", reason = "use lines() instead now")] - #[allow(deprecated)] - fn lines_any(&self) -> LinesAny; - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - fn get>(&self, i: I) -> Option<&I::Output>; - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - fn get_mut>(&mut self, i: I) -> Option<&mut I::Output>; - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - unsafe fn get_unchecked>(&self, i: I) -> &I::Output; - #[stable(feature = "str_checked_slicing", since = "1.20.0")] - unsafe fn get_unchecked_mut>(&mut self, i: I) -> &mut I::Output; - #[stable(feature = "core", since = "1.6.0")] - unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str; - #[stable(feature = "core", since = "1.6.0")] - unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str; - #[stable(feature = "core", since = "1.6.0")] - fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool; - #[stable(feature = "core", since = "1.6.0")] - fn ends_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn trim_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str - where P::Searcher: DoubleEndedSearcher<'a>; - #[stable(feature = "core", since = "1.6.0")] - fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str; - #[stable(feature = "core", since = "1.6.0")] - fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str - where P::Searcher: ReverseSearcher<'a>; - #[stable(feature = "is_char_boundary", since = "1.9.0")] - fn is_char_boundary(&self, index: usize) -> bool; - 
#[stable(feature = "core", since = "1.6.0")] - fn as_bytes(&self) -> &[u8]; - #[stable(feature = "str_mut_extras", since = "1.20.0")] - unsafe fn as_bytes_mut(&mut self) -> &mut [u8]; - #[stable(feature = "core", since = "1.6.0")] - fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option; - #[stable(feature = "core", since = "1.6.0")] - fn rfind<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option - where P::Searcher: ReverseSearcher<'a>; - fn find_str<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option; - #[stable(feature = "core", since = "1.6.0")] - fn split_at(&self, mid: usize) -> (&str, &str); - #[stable(feature = "core", since = "1.6.0")] - fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str); - #[stable(feature = "core", since = "1.6.0")] - fn as_ptr(&self) -> *const u8; - #[stable(feature = "core", since = "1.6.0")] - fn len(&self) -> usize; - #[stable(feature = "core", since = "1.6.0")] - fn is_empty(&self) -> bool; - #[stable(feature = "core", since = "1.6.0")] - fn parse(&self) -> Result; } // truncate `&str` to length at most equal to `max` @@ -2239,30 +2112,1030 @@ fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! { index, ch, char_range, s_trunc, ellipsis); } -#[stable(feature = "core", since = "1.6.0")] -impl StrExt for str { +#[lang = "str"] +#[cfg(not(test))] +impl str { + /// Returns the length of `self`. + /// + /// This length is in bytes, not [`char`]s or graphemes. In other words, + /// it may not be what a human considers the length of the string. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let len = "foo".len(); + /// assert_eq!(3, len); + /// + /// let len = "ƒoo".len(); // fancy f! 
+ /// assert_eq!(4, len); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { - pat.is_contained_in(self) + #[rustc_const_unstable(feature = "const_str_len")] + pub const fn len(&self) -> usize { + self.as_bytes().len() } + /// Returns `true` if `self` has a length of zero bytes. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = ""; + /// assert!(s.is_empty()); + /// + /// let s = "not empty"; + /// assert!(!s.is_empty()); + /// ``` #[inline] - fn chars(&self) -> Chars { + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_str_len")] + pub const fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Checks that `index`-th byte lies at the start and/or end of a + /// UTF-8 code point sequence. + /// + /// The start and end of the string (when `index == self.len()`) are + /// considered to be + /// boundaries. + /// + /// Returns `false` if `index` is greater than `self.len()`. + /// + /// # Examples + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// assert!(s.is_char_boundary(0)); + /// // start of `老` + /// assert!(s.is_char_boundary(6)); + /// assert!(s.is_char_boundary(s.len())); + /// + /// // second byte of `ö` + /// assert!(!s.is_char_boundary(2)); + /// + /// // third byte of `老` + /// assert!(!s.is_char_boundary(8)); + /// ``` + #[stable(feature = "is_char_boundary", since = "1.9.0")] + #[inline] + pub fn is_char_boundary(&self, index: usize) -> bool { + // 0 and len are always ok. + // Test for 0 explicitly so that it can optimize out the check + // easily and skip reading string data for that case. + if index == 0 || index == self.len() { return true; } + match self.as_bytes().get(index) { + None => false, + // This is bit magic equivalent to: b < 128 || b >= 192 + Some(&b) => (b as i8) >= -0x40, + } + } + + /// Converts a string slice to a byte slice. 
To convert the byte slice back + /// into a string slice, use the [`str::from_utf8`] function. + /// + /// [`str::from_utf8`]: ./str/fn.from_utf8.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let bytes = "bors".as_bytes(); + /// assert_eq!(b"bors", bytes); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline(always)] + #[rustc_const_unstable(feature="const_str_as_bytes")] + pub const fn as_bytes(&self) -> &[u8] { + union Slices<'a> { + str: &'a str, + slice: &'a [u8], + } + unsafe { Slices { str: self }.slice } + } + + /// Converts a mutable string slice to a mutable byte slice. To convert the + /// mutable byte slice back into a mutable string slice, use the + /// [`str::from_utf8_mut`] function. + /// + /// [`str::from_utf8_mut`]: ./str/fn.from_utf8_mut.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = String::from("Hello"); + /// let bytes = unsafe { s.as_bytes_mut() }; + /// + /// assert_eq!(b"Hello", bytes); + /// ``` + /// + /// Mutability: + /// + /// ``` + /// let mut s = String::from("🗻∈🌏"); + /// + /// unsafe { + /// let bytes = s.as_bytes_mut(); + /// + /// bytes[0] = 0xF0; + /// bytes[1] = 0x9F; + /// bytes[2] = 0x8D; + /// bytes[3] = 0x94; + /// } + /// + /// assert_eq!("🍔∈🌏", s); + /// ``` + #[stable(feature = "str_mut_extras", since = "1.20.0")] + #[inline(always)] + pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { + &mut *(self as *mut str as *mut [u8]) + } + + /// Converts a string slice to a raw pointer. + /// + /// As string slices are a slice of bytes, the raw pointer points to a + /// [`u8`]. This pointer will be pointing to the first byte of the string + /// slice. 
+ /// + /// [`u8`]: primitive.u8.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = "Hello"; + /// let ptr = s.as_ptr(); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + #[rustc_const_unstable(feature = "const_str_as_ptr")] + pub const fn as_ptr(&self) -> *const u8 { + self as *const str as *const u8 + } + + /// Returns a subslice of `str`. + /// + /// This is the non-panicking alternative to indexing the `str`. Returns + /// [`None`] whenever equivalent indexing operation would panic. + /// + /// [`None`]: option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// ``` + /// let v = String::from("🗻∈🌏"); + /// + /// assert_eq!(Some("🗻"), v.get(0..4)); + /// + /// // indices not on UTF-8 sequence boundaries + /// assert!(v.get(1..).is_none()); + /// assert!(v.get(..8).is_none()); + /// + /// // out of bounds + /// assert!(v.get(..42).is_none()); + /// ``` + #[stable(feature = "str_checked_slicing", since = "1.20.0")] + #[inline] + pub fn get>(&self, i: I) -> Option<&I::Output> { + i.get(self) + } + + /// Returns a mutable subslice of `str`. + /// + /// This is the non-panicking alternative to indexing the `str`. Returns + /// [`None`] whenever equivalent indexing operation would panic. 
+ /// + /// [`None`]: option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// ``` + /// let mut v = String::from("hello"); + /// // correct length + /// assert!(v.get_mut(0..5).is_some()); + /// // out of bounds + /// assert!(v.get_mut(..42).is_none()); + /// assert_eq!(Some("he"), v.get_mut(0..2).map(|v| &*v)); + /// + /// assert_eq!("hello", v); + /// { + /// let s = v.get_mut(0..2); + /// let s = s.map(|s| { + /// s.make_ascii_uppercase(); + /// &*s + /// }); + /// assert_eq!(Some("HE"), s); + /// } + /// assert_eq!("HEllo", v); + /// ``` + #[stable(feature = "str_checked_slicing", since = "1.20.0")] + #[inline] + pub fn get_mut>(&mut self, i: I) -> Option<&mut I::Output> { + i.get_mut(self) + } + + /// Returns a unchecked subslice of `str`. + /// + /// This is the unchecked alternative to indexing the `str`. + /// + /// # Safety + /// + /// Callers of this function are responsible that these preconditions are + /// satisfied: + /// + /// * The starting index must come before the ending index; + /// * Indexes must be within bounds of the original slice; + /// * Indexes must lie on UTF-8 sequence boundaries. + /// + /// Failing that, the returned string slice may reference invalid memory or + /// violate the invariants communicated by the `str` type. + /// + /// # Examples + /// + /// ``` + /// let v = "🗻∈🌏"; + /// unsafe { + /// assert_eq!("🗻", v.get_unchecked(0..4)); + /// assert_eq!("∈", v.get_unchecked(4..7)); + /// assert_eq!("🌏", v.get_unchecked(7..11)); + /// } + /// ``` + #[stable(feature = "str_checked_slicing", since = "1.20.0")] + #[inline] + pub unsafe fn get_unchecked>(&self, i: I) -> &I::Output { + i.get_unchecked(self) + } + + /// Returns a mutable, unchecked subslice of `str`. + /// + /// This is the unchecked alternative to indexing the `str`. 
+ /// + /// # Safety + /// + /// Callers of this function are responsible that these preconditions are + /// satisfied: + /// + /// * The starting index must come before the ending index; + /// * Indexes must be within bounds of the original slice; + /// * Indexes must lie on UTF-8 sequence boundaries. + /// + /// Failing that, the returned string slice may reference invalid memory or + /// violate the invariants communicated by the `str` type. + /// + /// # Examples + /// + /// ``` + /// let mut v = String::from("🗻∈🌏"); + /// unsafe { + /// assert_eq!("🗻", v.get_unchecked_mut(0..4)); + /// assert_eq!("∈", v.get_unchecked_mut(4..7)); + /// assert_eq!("🌏", v.get_unchecked_mut(7..11)); + /// } + /// ``` + #[stable(feature = "str_checked_slicing", since = "1.20.0")] + #[inline] + pub unsafe fn get_unchecked_mut>(&mut self, i: I) -> &mut I::Output { + i.get_unchecked_mut(self) + } + + /// Creates a string slice from another string slice, bypassing safety + /// checks. + /// + /// This is generally not recommended, use with caution! For a safe + /// alternative see [`str`] and [`Index`]. + /// + /// [`str`]: primitive.str.html + /// [`Index`]: ops/trait.Index.html + /// + /// This new slice goes from `begin` to `end`, including `begin` but + /// excluding `end`. + /// + /// To get a mutable string slice instead, see the + /// [`slice_mut_unchecked`] method. + /// + /// [`slice_mut_unchecked`]: #method.slice_mut_unchecked + /// + /// # Safety + /// + /// Callers of this function are responsible that three preconditions are + /// satisfied: + /// + /// * `begin` must come before `end`. + /// * `begin` and `end` must be byte positions within the string slice. + /// * `begin` and `end` must lie on UTF-8 sequence boundaries. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// + /// unsafe { + /// assert_eq!("Löwe 老虎 Léopard", s.slice_unchecked(0, 21)); + /// } + /// + /// let s = "Hello, world!"; + /// + /// unsafe { + /// assert_eq!("world", s.slice_unchecked(7, 12)); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked(begin..end)` instead")] + #[inline] + pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str { + (begin..end).get_unchecked(self) + } + + /// Creates a string slice from another string slice, bypassing safety + /// checks. + /// This is generally not recommended, use with caution! For a safe + /// alternative see [`str`] and [`IndexMut`]. + /// + /// [`str`]: primitive.str.html + /// [`IndexMut`]: ops/trait.IndexMut.html + /// + /// This new slice goes from `begin` to `end`, including `begin` but + /// excluding `end`. + /// + /// To get an immutable string slice instead, see the + /// [`slice_unchecked`] method. + /// + /// [`slice_unchecked`]: #method.slice_unchecked + /// + /// # Safety + /// + /// Callers of this function are responsible that three preconditions are + /// satisfied: + /// + /// * `begin` must come before `end`. + /// * `begin` and `end` must be byte positions within the string slice. + /// * `begin` and `end` must lie on UTF-8 sequence boundaries. + #[stable(feature = "str_slice_mut", since = "1.5.0")] + #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked_mut(begin..end)` instead")] + #[inline] + pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str { + (begin..end).get_unchecked_mut(self) + } + + /// Divide one string slice into two at an index. + /// + /// The argument, `mid`, should be a byte offset from the start of the + /// string. It must also be on the boundary of a UTF-8 code point. 
+ /// + /// The two slices returned go from the start of the string slice to `mid`, + /// and from `mid` to the end of the string slice. + /// + /// To get mutable string slices instead, see the [`split_at_mut`] + /// method. + /// + /// [`split_at_mut`]: #method.split_at_mut + /// + /// # Panics + /// + /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is + /// beyond the last code point of the string slice. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = "Per Martin-Löf"; + /// + /// let (first, last) = s.split_at(3); + /// + /// assert_eq!("Per", first); + /// assert_eq!(" Martin-Löf", last); + /// ``` + #[inline] + #[stable(feature = "str_split_at", since = "1.4.0")] + pub fn split_at(&self, mid: usize) -> (&str, &str) { + // is_char_boundary checks that the index is in [0, .len()] + if self.is_char_boundary(mid) { + unsafe { + (self.get_unchecked(0..mid), + self.get_unchecked(mid..self.len())) + } + } else { + slice_error_fail(self, 0, mid) + } + } + + /// Divide one mutable string slice into two at an index. + /// + /// The argument, `mid`, should be a byte offset from the start of the + /// string. It must also be on the boundary of a UTF-8 code point. + /// + /// The two slices returned go from the start of the string slice to `mid`, + /// and from `mid` to the end of the string slice. + /// + /// To get immutable string slices instead, see the [`split_at`] method. + /// + /// [`split_at`]: #method.split_at + /// + /// # Panics + /// + /// Panics if `mid` is not on a UTF-8 code point boundary, or if it is + /// beyond the last code point of the string slice. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = "Per Martin-Löf".to_string(); + /// { + /// let (first, last) = s.split_at_mut(3); + /// first.make_ascii_uppercase(); + /// assert_eq!("PER", first); + /// assert_eq!(" Martin-Löf", last); + /// } + /// assert_eq!("PER Martin-Löf", s); + /// ``` + #[inline] + #[stable(feature = "str_split_at", since = "1.4.0")] + pub fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) { + // is_char_boundary checks that the index is in [0, .len()] + if self.is_char_boundary(mid) { + let len = self.len(); + let ptr = self.as_ptr() as *mut u8; + unsafe { + (from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)), + from_utf8_unchecked_mut(slice::from_raw_parts_mut( + ptr.offset(mid as isize), + len - mid + ))) + } + } else { + slice_error_fail(self, 0, mid) + } + } + + /// Returns an iterator over the [`char`]s of a string slice. + /// + /// As a string slice consists of valid UTF-8, we can iterate through a + /// string slice by [`char`]. This method returns such an iterator. + /// + /// It's important to remember that [`char`] represents a Unicode Scalar + /// Value, and may not match your idea of what a 'character' is. Iteration + /// over grapheme clusters may be what you actually want. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let word = "goodbye"; + /// + /// let count = word.chars().count(); + /// assert_eq!(7, count); + /// + /// let mut chars = word.chars(); + /// + /// assert_eq!(Some('g'), chars.next()); + /// assert_eq!(Some('o'), chars.next()); + /// assert_eq!(Some('o'), chars.next()); + /// assert_eq!(Some('d'), chars.next()); + /// assert_eq!(Some('b'), chars.next()); + /// assert_eq!(Some('y'), chars.next()); + /// assert_eq!(Some('e'), chars.next()); + /// + /// assert_eq!(None, chars.next()); + /// ``` + /// + /// Remember, [`char`]s may not match your human intuition about characters: + /// + /// ``` + /// let y = "y̆"; + /// + /// let mut chars = y.chars(); + /// + /// assert_eq!(Some('y'), chars.next()); // not 'y̆' + /// assert_eq!(Some('\u{0306}'), chars.next()); + /// + /// assert_eq!(None, chars.next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn chars(&self) -> Chars { Chars{iter: self.as_bytes().iter()} } + /// Returns an iterator over the [`char`]s of a string slice, and their + /// positions. + /// + /// As a string slice consists of valid UTF-8, we can iterate through a + /// string slice by [`char`]. This method returns an iterator of both + /// these [`char`]s, as well as their byte positions. + /// + /// The iterator yields tuples. The position is first, the [`char`] is + /// second. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let word = "goodbye"; + /// + /// let count = word.char_indices().count(); + /// assert_eq!(7, count); + /// + /// let mut char_indices = word.char_indices(); + /// + /// assert_eq!(Some((0, 'g')), char_indices.next()); + /// assert_eq!(Some((1, 'o')), char_indices.next()); + /// assert_eq!(Some((2, 'o')), char_indices.next()); + /// assert_eq!(Some((3, 'd')), char_indices.next()); + /// assert_eq!(Some((4, 'b')), char_indices.next()); + /// assert_eq!(Some((5, 'y')), char_indices.next()); + /// assert_eq!(Some((6, 'e')), char_indices.next()); + /// + /// assert_eq!(None, char_indices.next()); + /// ``` + /// + /// Remember, [`char`]s may not match your human intuition about characters: + /// + /// ``` + /// let yes = "y̆es"; + /// + /// let mut char_indices = yes.char_indices(); + /// + /// assert_eq!(Some((0, 'y')), char_indices.next()); // not (0, 'y̆') + /// assert_eq!(Some((1, '\u{0306}')), char_indices.next()); + /// + /// // note the 3 here - the last character took up two bytes + /// assert_eq!(Some((3, 'e')), char_indices.next()); + /// assert_eq!(Some((4, 's')), char_indices.next()); + /// + /// assert_eq!(None, char_indices.next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn bytes(&self) -> Bytes { - Bytes(self.as_bytes().iter().cloned()) - } - - #[inline] - fn char_indices(&self) -> CharIndices { + pub fn char_indices(&self) -> CharIndices { CharIndices { front_offset: 0, iter: self.chars() } } + /// An iterator over the bytes of a string slice. + /// + /// As a string slice consists of a sequence of bytes, we can iterate + /// through a string slice by byte. This method returns such an iterator. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut bytes = "bors".bytes(); + /// + /// assert_eq!(Some(b'b'), bytes.next()); + /// assert_eq!(Some(b'o'), bytes.next()); + /// assert_eq!(Some(b'r'), bytes.next()); + /// assert_eq!(Some(b's'), bytes.next()); + /// + /// assert_eq!(None, bytes.next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> { + pub fn bytes(&self) -> Bytes { + Bytes(self.as_bytes().iter().cloned()) + } + + /// Split a string slice by whitespace. + /// + /// The iterator returned will return string slices that are sub-slices of + /// the original string slice, separated by any amount of whitespace. + /// + /// 'Whitespace' is defined according to the terms of the Unicode Derived + /// Core Property `White_Space`. If you only want to split on ASCII whitespace + /// instead, use [`split_ascii_whitespace`]. + /// + /// [`split_ascii_whitespace`]: #method.split_ascii_whitespace + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut iter = "A few words".split_whitespace(); + /// + /// assert_eq!(Some("A"), iter.next()); + /// assert_eq!(Some("few"), iter.next()); + /// assert_eq!(Some("words"), iter.next()); + /// + /// assert_eq!(None, iter.next()); + /// ``` + /// + /// All kinds of whitespace are considered: + /// + /// ``` + /// let mut iter = " Mary had\ta\u{2009}little \n\t lamb".split_whitespace(); + /// assert_eq!(Some("Mary"), iter.next()); + /// assert_eq!(Some("had"), iter.next()); + /// assert_eq!(Some("a"), iter.next()); + /// assert_eq!(Some("little"), iter.next()); + /// assert_eq!(Some("lamb"), iter.next()); + /// + /// assert_eq!(None, iter.next()); + /// ``` + #[stable(feature = "split_whitespace", since = "1.1.0")] + #[inline] + pub fn split_whitespace(&self) -> SplitWhitespace { + SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) } + } + + /// Split a string slice by ASCII 
whitespace. + /// + /// The iterator returned will return string slices that are sub-slices of + /// the original string slice, separated by any amount of ASCII whitespace. + /// + /// To split by Unicode `Whitespace` instead, use [`split_whitespace`]. + /// + /// [`split_whitespace`]: #method.split_whitespace + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(split_ascii_whitespace)] + /// let mut iter = "A few words".split_ascii_whitespace(); + /// + /// assert_eq!(Some("A"), iter.next()); + /// assert_eq!(Some("few"), iter.next()); + /// assert_eq!(Some("words"), iter.next()); + /// + /// assert_eq!(None, iter.next()); + /// ``` + /// + /// All kinds of ASCII whitespace are considered: + /// + /// ``` + /// #![feature(split_ascii_whitespace)] + /// let mut iter = " Mary had\ta little \n\t lamb".split_ascii_whitespace(); + /// assert_eq!(Some("Mary"), iter.next()); + /// assert_eq!(Some("had"), iter.next()); + /// assert_eq!(Some("a"), iter.next()); + /// assert_eq!(Some("little"), iter.next()); + /// assert_eq!(Some("lamb"), iter.next()); + /// + /// assert_eq!(None, iter.next()); + /// ``` + #[unstable(feature = "split_ascii_whitespace", issue = "48656")] + #[inline] + pub fn split_ascii_whitespace(&self) -> SplitAsciiWhitespace { + let inner = self + .as_bytes() + .split(IsAsciiWhitespace) + .filter(IsNotEmpty) + .map(UnsafeBytesToStr); + SplitAsciiWhitespace { inner } + } + + /// An iterator over the lines of a string, as string slices. + /// + /// Lines are ended with either a newline (`\n`) or a carriage return with + /// a line feed (`\r\n`). + /// + /// The final line ending is optional.
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let text = "foo\r\nbar\n\nbaz\n"; + /// let mut lines = text.lines(); + /// + /// assert_eq!(Some("foo"), lines.next()); + /// assert_eq!(Some("bar"), lines.next()); + /// assert_eq!(Some(""), lines.next()); + /// assert_eq!(Some("baz"), lines.next()); + /// + /// assert_eq!(None, lines.next()); + /// ``` + /// + /// The final line ending isn't required: + /// + /// ``` + /// let text = "foo\nbar\n\r\nbaz"; + /// let mut lines = text.lines(); + /// + /// assert_eq!(Some("foo"), lines.next()); + /// assert_eq!(Some("bar"), lines.next()); + /// assert_eq!(Some(""), lines.next()); + /// assert_eq!(Some("baz"), lines.next()); + /// + /// assert_eq!(None, lines.next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn lines(&self) -> Lines { + Lines(self.split_terminator('\n').map(LinesAnyMap)) + } + + /// An iterator over the lines of a string. + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.4.0", reason = "use lines() instead now")] + #[inline] + #[allow(deprecated)] + pub fn lines_any(&self) -> LinesAny { + LinesAny(self.lines()) + } + + /// Returns an iterator of `u16` over the string encoded as UTF-16. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let text = "Zażółć gęślą jaźń"; + /// + /// let utf8_len = text.len(); + /// let utf16_len = text.encode_utf16().count(); + /// + /// assert!(utf16_len <= utf8_len); + /// ``` + #[stable(feature = "encode_utf16", since = "1.8.0")] + pub fn encode_utf16(&self) -> EncodeUtf16 { + EncodeUtf16 { chars: self.chars(), extra: 0 } + } + + /// Returns `true` if the given pattern matches a sub-slice of + /// this string slice. + /// + /// Returns `false` if it does not. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let bananas = "bananas"; + /// + /// assert!(bananas.contains("nana")); + /// assert!(!bananas.contains("apples")); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn contains<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { + pat.is_contained_in(self) + } + + /// Returns `true` if the given pattern matches a prefix of this + /// string slice. + /// + /// Returns `false` if it does not. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let bananas = "bananas"; + /// + /// assert!(bananas.starts_with("bana")); + /// assert!(!bananas.starts_with("nana")); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { + pat.is_prefix_of(self) + } + + /// Returns `true` if the given pattern matches a suffix of this + /// string slice. + /// + /// Returns `false` if it does not. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let bananas = "bananas"; + /// + /// assert!(bananas.ends_with("anas")); + /// assert!(!bananas.ends_with("nana")); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn ends_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool + where P::Searcher: ReverseSearcher<'a> + { + pat.is_suffix_of(self) + } + + /// Returns the byte index of the first character of this string slice that + /// matches the pattern. + /// + /// Returns [`None`] if the pattern doesn't match. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines if + /// a character matches. 
+ /// + /// [`None`]: option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// + /// assert_eq!(s.find('L'), Some(0)); + /// assert_eq!(s.find('é'), Some(14)); + /// assert_eq!(s.find("Léopard"), Some(13)); + /// ``` + /// + /// More complex patterns using point-free style and closures: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// + /// assert_eq!(s.find(char::is_whitespace), Some(5)); + /// assert_eq!(s.find(char::is_lowercase), Some(1)); + /// assert_eq!(s.find(|c: char| c.is_whitespace() || c.is_lowercase()), Some(1)); + /// assert_eq!(s.find(|c: char| (c < 'o') && (c > 'a')), Some(4)); + /// ``` + /// + /// Not finding the pattern: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// let x: &[_] = &['1', '2']; + /// + /// assert_eq!(s.find(x), None); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> { + pat.into_searcher(self).next_match().map(|(i, _)| i) + } + + /// Returns the byte index of the last character of this string slice that + /// matches the pattern. + /// + /// Returns [`None`] if the pattern doesn't match. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines if + /// a character matches.
+ /// + /// [`None`]: option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// + /// assert_eq!(s.rfind('L'), Some(13)); + /// assert_eq!(s.rfind('é'), Some(14)); + /// ``` + /// + /// More complex patterns with closures: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// + /// assert_eq!(s.rfind(char::is_whitespace), Some(12)); + /// assert_eq!(s.rfind(char::is_lowercase), Some(20)); + /// ``` + /// + /// Not finding the pattern: + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// let x: &[_] = &['1', '2']; + /// + /// assert_eq!(s.rfind(x), None); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rfind<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option<usize> + where P::Searcher: ReverseSearcher<'a> + { + pat.into_searcher(self).next_match_back().map(|(i, _)| i) + } + + /// An iterator over substrings of this string slice, separated by + /// characters matched by a pattern. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines the + /// split. + /// + /// # Iterator behavior + /// + /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern + /// allows a reverse search and forward/reverse search yields the same + /// elements. This is true for, eg, [`char`] but not for `&str`. + /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// If the pattern allows a reverse search but its results might differ + /// from a forward search, the [`rsplit`] method can be used.
+ /// + /// [`rsplit`]: #method.rsplit + /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect(); + /// assert_eq!(v, ["Mary", "had", "a", "little", "lamb"]); + /// + /// let v: Vec<&str> = "".split('X').collect(); + /// assert_eq!(v, [""]); + /// + /// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect(); + /// assert_eq!(v, ["lion", "", "tiger", "leopard"]); + /// + /// let v: Vec<&str> = "lion::tiger::leopard".split("::").collect(); + /// assert_eq!(v, ["lion", "tiger", "leopard"]); + /// + /// let v: Vec<&str> = "abc1def2ghi".split(char::is_numeric).collect(); + /// assert_eq!(v, ["abc", "def", "ghi"]); + /// + /// let v: Vec<&str> = "lionXtigerXleopard".split(char::is_uppercase).collect(); + /// assert_eq!(v, ["lion", "tiger", "leopard"]); + /// ``` + /// + /// A more complex pattern, using a closure: + /// + /// ``` + /// let v: Vec<&str> = "abc1defXghi".split(|c| c == '1' || c == 'X').collect(); + /// assert_eq!(v, ["abc", "def", "ghi"]); + /// ``` + /// + /// If a string contains multiple contiguous separators, you will end up + /// with empty strings in the output: + /// + /// ``` + /// let x = "||||a||b|c".to_string(); + /// let d: Vec<_> = x.split('|').collect(); + /// + /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); + /// ``` + /// + /// Contiguous separators are separated by the empty string. + /// + /// ``` + /// let x = "(///)".to_string(); + /// let d: Vec<_> = x.split('/').collect(); + /// + /// assert_eq!(d, &["(", "", "", ")"]); + /// ``` + /// + /// Separators at the start or end of a string are neighbored + /// by empty strings. + /// + /// ``` + /// let d: Vec<_> = "010".split("0").collect(); + /// assert_eq!(d, &["", "1", ""]); + /// ``` + /// + /// When the empty string is used as a separator, it separates + /// every character in the string, along with the beginning + /// and end of the string. 
+ /// + /// ``` + /// let f: Vec<_> = "rust".split("").collect(); + /// assert_eq!(f, &["", "r", "u", "s", "t", ""]); + /// ``` + /// + /// Contiguous separators can lead to possibly surprising behavior + /// when whitespace is used as the separator. This code is correct: + /// + /// ``` + /// let x = " a b c".to_string(); + /// let d: Vec<_> = x.split(' ').collect(); + /// + /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); + /// ``` + /// + /// It does _not_ give you: + /// + /// ```,ignore + /// assert_eq!(d, &["a", "b", "c"]); + /// ``` + /// + /// Use [`split_whitespace`] for this behavior. + /// + /// [`split_whitespace`]: #method.split_whitespace + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> { Split(SplitInternal { start: 0, end: self.len(), @@ -2272,121 +3145,521 @@ impl StrExt for str { }) } + /// An iterator over substrings of the given string slice, separated by + /// characters matched by a pattern and yielded in reverse order. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines the + /// split. + /// + /// # Iterator behavior + /// + /// The returned iterator requires that the pattern supports a reverse + /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse + /// search yields the same elements. + /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// For iterating from the front, the [`split`] method can be used. 
+ /// + /// [`split`]: #method.split + /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// let v: Vec<&str> = "Mary had a little lamb".rsplit(' ').collect(); + /// assert_eq!(v, ["lamb", "little", "a", "had", "Mary"]); + /// + /// let v: Vec<&str> = "".rsplit('X').collect(); + /// assert_eq!(v, [""]); + /// + /// let v: Vec<&str> = "lionXXtigerXleopard".rsplit('X').collect(); + /// assert_eq!(v, ["leopard", "tiger", "", "lion"]); + /// + /// let v: Vec<&str> = "lion::tiger::leopard".rsplit("::").collect(); + /// assert_eq!(v, ["leopard", "tiger", "lion"]); + /// ``` + /// + /// A more complex pattern, using a closure: + /// + /// ``` + /// let v: Vec<&str> = "abc1defXghi".rsplit(|c| c == '1' || c == 'X').collect(); + /// assert_eq!(v, ["ghi", "def", "abc"]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn rsplit<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplit<'a, P> + pub fn rsplit<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplit<'a, P> where P::Searcher: ReverseSearcher<'a> { RSplit(self.split(pat).0) } + /// An iterator over substrings of the given string slice, separated by + /// characters matched by a pattern. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines the + /// split. + /// + /// Equivalent to [`split`], except that the trailing substring + /// is skipped if empty. + /// + /// [`split`]: #method.split + /// + /// This method can be used for string data that is _terminated_, + /// rather than _separated_ by a pattern. + /// + /// # Iterator behavior + /// + /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern + /// allows a reverse search and forward/reverse search yields the same + /// elements. This is true for, eg, [`char`] but not for `&str`. 
+ /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// If the pattern allows a reverse search but its results might differ + /// from a forward search, the [`rsplit_terminator`] method can be used. + /// + /// [`rsplit_terminator`]: #method.rsplit_terminator + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v: Vec<&str> = "A.B.".split_terminator('.').collect(); + /// assert_eq!(v, ["A", "B"]); + /// + /// let v: Vec<&str> = "A..B..".split_terminator(".").collect(); + /// assert_eq!(v, ["A", "", "B", ""]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn splitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> SplitN<'a, P> { - SplitN(SplitNInternal { - iter: self.split(pat).0, - count, - }) - } - - #[inline] - fn rsplitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> RSplitN<'a, P> - where P::Searcher: ReverseSearcher<'a> - { - RSplitN(self.splitn(count, pat).0) - } - - #[inline] - fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P> { + pub fn split_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> SplitTerminator<'a, P> { SplitTerminator(SplitInternal { allow_trailing_empty: false, ..self.split(pat).0 }) } + /// An iterator over substrings of `self`, separated by characters + /// matched by a pattern and yielded in reverse order. + /// + /// The pattern can be a simple `&str`, [`char`], or a closure that + /// determines the split. + /// Additional libraries might provide more complex patterns like + /// regular expressions. + /// + /// Equivalent to [`split`], except that the trailing substring is + /// skipped if empty. + /// + /// [`split`]: #method.split + /// + /// This method can be used for string data that is _terminated_, + /// rather than _separated_ by a pattern. 
+ /// + /// # Iterator behavior + /// + /// The returned iterator requires that the pattern supports a + /// reverse search, and it will be double ended if a forward/reverse + /// search yields the same elements. + /// + /// For iterating from the front, the [`split_terminator`] method can be + /// used. + /// + /// [`split_terminator`]: #method.split_terminator + /// + /// # Examples + /// + /// ``` + /// let v: Vec<&str> = "A.B.".rsplit_terminator('.').collect(); + /// assert_eq!(v, ["B", "A"]); + /// + /// let v: Vec<&str> = "A..B..".rsplit_terminator(".").collect(); + /// assert_eq!(v, ["", "B", "", "A"]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn rsplit_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplitTerminator<'a, P> + pub fn rsplit_terminator<'a, P: Pattern<'a>>(&'a self, pat: P) -> RSplitTerminator<'a, P> where P::Searcher: ReverseSearcher<'a> { RSplitTerminator(self.split_terminator(pat).0) } + /// An iterator over substrings of the given string slice, separated by a + /// pattern, restricted to returning at most `n` items. + /// + /// If `n` substrings are returned, the last substring (the `n`th substring) + /// will contain the remainder of the string. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines the + /// split. + /// + /// # Iterator behavior + /// + /// The returned iterator will not be double ended, because it is + /// not efficient to support. + /// + /// If the pattern allows a reverse search, the [`rsplitn`] method can be + /// used. 
+ /// + /// [`rsplitn`]: #method.rsplitn + /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// let v: Vec<&str> = "Mary had a little lambda".splitn(3, ' ').collect(); + /// assert_eq!(v, ["Mary", "had", "a little lambda"]); + /// + /// let v: Vec<&str> = "lionXXtigerXleopard".splitn(3, "X").collect(); + /// assert_eq!(v, ["lion", "", "tigerXleopard"]); + /// + /// let v: Vec<&str> = "abcXdef".splitn(1, 'X').collect(); + /// assert_eq!(v, ["abcXdef"]); + /// + /// let v: Vec<&str> = "".splitn(1, 'X').collect(); + /// assert_eq!(v, [""]); + /// ``` + /// + /// A more complex pattern, using a closure: + /// + /// ``` + /// let v: Vec<&str> = "abc1defXghi".splitn(2, |c| c == '1' || c == 'X').collect(); + /// assert_eq!(v, ["abc", "defXghi"]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] #[inline] - fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P> { + pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> { + SplitN(SplitNInternal { + iter: self.split(pat).0, + count: n, + }) + } + + /// An iterator over substrings of this string slice, separated by a + /// pattern, starting from the end of the string, restricted to returning + /// at most `n` items. + /// + /// If `n` substrings are returned, the last substring (the `n`th substring) + /// will contain the remainder of the string. + /// + /// The pattern can be a `&str`, [`char`], or a closure that + /// determines the split. + /// + /// # Iterator behavior + /// + /// The returned iterator will not be double ended, because it is not + /// efficient to support. + /// + /// For splitting from the front, the [`splitn`] method can be used. 
+ /// + /// [`splitn`]: #method.splitn + /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// let v: Vec<&str> = "Mary had a little lamb".rsplitn(3, ' ').collect(); + /// assert_eq!(v, ["lamb", "little", "Mary had a"]); + /// + /// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(3, 'X').collect(); + /// assert_eq!(v, ["leopard", "tiger", "lionX"]); + /// + /// let v: Vec<&str> = "lion::tiger::leopard".rsplitn(2, "::").collect(); + /// assert_eq!(v, ["leopard", "lion::tiger"]); + /// ``` + /// + /// A more complex pattern, using a closure: + /// + /// ``` + /// let v: Vec<&str> = "abc1defXghi".rsplitn(2, |c| c == '1' || c == 'X').collect(); + /// assert_eq!(v, ["ghi", "abc1def"]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> RSplitN<'a, P> + where P::Searcher: ReverseSearcher<'a> + { + RSplitN(self.splitn(n, pat).0) + } + + /// An iterator over the disjoint matches of a pattern within the given string + /// slice. + /// + /// The pattern can be a `&str`, [`char`], or a closure that + /// determines if a character matches. + /// + /// # Iterator behavior + /// + /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern + /// allows a reverse search and forward/reverse search yields the same + /// elements. This is true for, eg, [`char`] but not for `&str`. + /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// If the pattern allows a reverse search but its results might differ + /// from a forward search, the [`rmatches`] method can be used. 
+ /// + /// [`rmatches`]: #method.rmatches + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v: Vec<&str> = "abcXXXabcYYYabc".matches("abc").collect(); + /// assert_eq!(v, ["abc", "abc", "abc"]); + /// + /// let v: Vec<&str> = "1abc2abc3".matches(char::is_numeric).collect(); + /// assert_eq!(v, ["1", "2", "3"]); + /// ``` + #[stable(feature = "str_matches", since = "1.2.0")] + #[inline] + pub fn matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> Matches<'a, P> { Matches(MatchesInternal(pat.into_searcher(self))) } + /// An iterator over the disjoint matches of a pattern within this string slice, + /// yielded in reverse order. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines if + /// a character matches. + /// + /// # Iterator behavior + /// + /// The returned iterator requires that the pattern supports a reverse + /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse + /// search yields the same elements. + /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// For iterating from the front, the [`matches`] method can be used. + /// + /// [`matches`]: #method.matches + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v: Vec<&str> = "abcXXXabcYYYabc".rmatches("abc").collect(); + /// assert_eq!(v, ["abc", "abc", "abc"]); + /// + /// let v: Vec<&str> = "1abc2abc3".rmatches(char::is_numeric).collect(); + /// assert_eq!(v, ["3", "2", "1"]); + /// ``` + #[stable(feature = "str_matches", since = "1.2.0")] #[inline] - fn rmatches<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatches<'a, P> + pub fn rmatches<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatches<'a, P> where P::Searcher: ReverseSearcher<'a> { RMatches(self.matches(pat).0) } + /// An iterator over the disjoint matches of a pattern within this string + /// slice as well as the index that the match starts at. 
+ /// + /// For matches of `pat` within `self` that overlap, only the indices + /// corresponding to the first match are returned. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines + /// if a character matches. + /// + /// # Iterator behavior + /// + /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern + /// allows a reverse search and forward/reverse search yields the same + /// elements. This is true for, eg, [`char`] but not for `&str`. + /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// If the pattern allows a reverse search but its results might differ + /// from a forward search, the [`rmatch_indices`] method can be used. + /// + /// [`rmatch_indices`]: #method.rmatch_indices + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v: Vec<_> = "abcXXXabcYYYabc".match_indices("abc").collect(); + /// assert_eq!(v, [(0, "abc"), (6, "abc"), (12, "abc")]); + /// + /// let v: Vec<_> = "1abcabc2".match_indices("abc").collect(); + /// assert_eq!(v, [(1, "abc"), (4, "abc")]); + /// + /// let v: Vec<_> = "ababa".match_indices("aba").collect(); + /// assert_eq!(v, [(0, "aba")]); // only the first `aba` + /// ``` + #[stable(feature = "str_match_indices", since = "1.5.0")] #[inline] - fn match_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> MatchIndices<'a, P> { + pub fn match_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> MatchIndices<'a, P> { MatchIndices(MatchIndicesInternal(pat.into_searcher(self))) } + /// An iterator over the disjoint matches of a pattern within `self`, + /// yielded in reverse order along with the index of the match. + /// + /// For matches of `pat` within `self` that overlap, only the indices + /// corresponding to the last match are returned. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines if a + /// character matches. 
+ /// + /// # Iterator behavior + /// + /// The returned iterator requires that the pattern supports a reverse + /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse + /// search yields the same elements. + /// + /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html + /// + /// For iterating from the front, the [`match_indices`] method can be used. + /// + /// [`match_indices`]: #method.match_indices + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v: Vec<_> = "abcXXXabcYYYabc".rmatch_indices("abc").collect(); + /// assert_eq!(v, [(12, "abc"), (6, "abc"), (0, "abc")]); + /// + /// let v: Vec<_> = "1abcabc2".rmatch_indices("abc").collect(); + /// assert_eq!(v, [(4, "abc"), (1, "abc")]); + /// + /// let v: Vec<_> = "ababa".rmatch_indices("aba").collect(); + /// assert_eq!(v, [(2, "aba")]); // only the last `aba` + /// ``` + #[stable(feature = "str_match_indices", since = "1.5.0")] #[inline] - fn rmatch_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatchIndices<'a, P> + pub fn rmatch_indices<'a, P: Pattern<'a>>(&'a self, pat: P) -> RMatchIndices<'a, P> where P::Searcher: ReverseSearcher<'a> { RMatchIndices(self.match_indices(pat).0) } - #[inline] - fn lines(&self) -> Lines { - Lines(self.split_terminator('\n').map(LinesAnyMap)) + + /// Returns a string slice with leading and trailing whitespace removed. + /// + /// 'Whitespace' is defined according to the terms of the Unicode Derived + /// Core Property `White_Space`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = " Hello\tworld\t"; + /// + /// assert_eq!("Hello\tworld", s.trim()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn trim(&self) -> &str { + self.trim_matches(|c: char| c.is_whitespace()) } - #[inline] - #[allow(deprecated)] - fn lines_any(&self) -> LinesAny { - LinesAny(self.lines()) + /// Returns a string slice with leading whitespace removed. 
+ /// + /// 'Whitespace' is defined according to the terms of the Unicode Derived + /// Core Property `White_Space`. + /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Left' in this context means the first + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _right_ side, not the left. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = " Hello\tworld\t"; + /// + /// assert_eq!("Hello\tworld\t", s.trim_left()); + /// ``` + /// + /// Directionality: + /// + /// ``` + /// let s = " English"; + /// assert!(Some('E') == s.trim_left().chars().next()); + /// + /// let s = " עברית"; + /// assert!(Some('ע') == s.trim_left().chars().next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn trim_left(&self) -> &str { + self.trim_left_matches(|c: char| c.is_whitespace()) } - #[inline] - fn get<I: SliceIndex<str>>(&self, i: I) -> Option<&I::Output> { - i.get(self) + /// Returns a string slice with trailing whitespace removed. + /// + /// 'Whitespace' is defined according to the terms of the Unicode Derived + /// Core Property `White_Space`. + /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Right' in this context means the last + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _left_ side, not the right.
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = " Hello\tworld\t"; + /// + /// assert_eq!(" Hello\tworld", s.trim_right()); + /// ``` + /// + /// Directionality: + /// + /// ``` + /// let s = "English "; + /// assert!(Some('h') == s.trim_right().chars().rev().next()); + /// + /// let s = "עברית "; + /// assert!(Some('ת') == s.trim_right().chars().rev().next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn trim_right(&self) -> &str { + self.trim_right_matches(|c: char| c.is_whitespace()) } - #[inline] - fn get_mut>(&mut self, i: I) -> Option<&mut I::Output> { - i.get_mut(self) - } - - #[inline] - unsafe fn get_unchecked>(&self, i: I) -> &I::Output { - i.get_unchecked(self) - } - - #[inline] - unsafe fn get_unchecked_mut>(&mut self, i: I) -> &mut I::Output { - i.get_unchecked_mut(self) - } - - #[inline] - unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str { - (begin..end).get_unchecked(self) - } - - #[inline] - unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str { - (begin..end).get_unchecked_mut(self) - } - - #[inline] - fn starts_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool { - pat.is_prefix_of(self) - } - - #[inline] - fn ends_with<'a, P: Pattern<'a>>(&'a self, pat: P) -> bool - where P::Searcher: ReverseSearcher<'a> - { - pat.is_suffix_of(self) - } - - #[inline] - fn trim_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str + /// Returns a string slice with all prefixes and suffixes that match a + /// pattern repeatedly removed. + /// + /// The pattern can be a [`char`] or a closure that determines if a + /// character matches. 
+ /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar"); + /// assert_eq!("123foo1bar123".trim_matches(char::is_numeric), "foo1bar"); + /// + /// let x: &[_] = &['1', '2']; + /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar"); + /// ``` + /// + /// A more complex pattern, using a closure: + /// + /// ``` + /// assert_eq!("1foo1barXX".trim_matches(|c| c == '1' || c == 'X'), "foo1bar"); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn trim_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str where P::Searcher: DoubleEndedSearcher<'a> { let mut i = 0; @@ -2402,12 +3675,36 @@ impl StrExt for str { } unsafe { // Searcher is known to return valid indices - self.slice_unchecked(i, j) + self.get_unchecked(i..j) } } - #[inline] - fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str { + /// Returns a string slice with all prefixes that match a pattern + /// repeatedly removed. + /// + /// The pattern can be a `&str`, [`char`], or a closure that determines if + /// a character matches. + /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Left' in this context means the first + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _right_ side, not the left. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11"); + /// assert_eq!("123foo1bar123".trim_left_matches(char::is_numeric), "foo1bar123"); + /// + /// let x: &[_] = &['1', '2']; + /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12"); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str { let mut i = self.len(); let mut matcher = pat.into_searcher(self); if let Some((a, _)) = matcher.next_reject() { @@ -2415,12 +3712,42 @@ impl StrExt for str { } unsafe { // Searcher is known to return valid indices - self.slice_unchecked(i, self.len()) + self.get_unchecked(i..self.len()) } } - #[inline] - fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str + /// Returns a string slice with all suffixes that match a pattern + /// repeatedly removed. + /// + /// The pattern can be a `&str`, [`char`], or a closure that + /// determines if a character matches. + /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Right' in this context means the last + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _left_ side, not the right. 
+ /// + /// # Examples + /// + /// Simple patterns: + /// + /// ``` + /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar"); + /// assert_eq!("123foo1bar123".trim_right_matches(char::is_numeric), "123foo1bar"); + /// + /// let x: &[_] = &['1', '2']; + /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar"); + /// ``` + /// + /// A more complex pattern, using a closure: + /// + /// ``` + /// assert_eq!("1fooX".trim_right_matches(|c| c == '1' || c == 'X'), "1foo"); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str where P::Searcher: ReverseSearcher<'a> { let mut j = 0; @@ -2430,89 +3757,127 @@ impl StrExt for str { } unsafe { // Searcher is known to return valid indices - self.slice_unchecked(0, j) + self.get_unchecked(0..j) } } + /// Parses this string slice into another type. + /// + /// Because `parse` is so general, it can cause problems with type + /// inference. As such, `parse` is one of the few times you'll see + /// the syntax affectionately known as the 'turbofish': `::<>`. This + /// helps the inference algorithm understand specifically which type + /// you're trying to parse into. + /// + /// `parse` can parse any type that implements the [`FromStr`] trait. + /// + /// [`FromStr`]: str/trait.FromStr.html + /// + /// # Errors + /// + /// Will return [`Err`] if it's not possible to parse this string slice into + /// the desired type. 
+ /// + /// [`Err`]: str/trait.FromStr.html#associatedtype.Err + /// + /// # Examples + /// + /// Basic usage + /// + /// ``` + /// let four: u32 = "4".parse().unwrap(); + /// + /// assert_eq!(4, four); + /// ``` + /// + /// Using the 'turbofish' instead of annotating `four`: + /// + /// ``` + /// let four = "4".parse::(); + /// + /// assert_eq!(Ok(4), four); + /// ``` + /// + /// Failing to parse: + /// + /// ``` + /// let nope = "j".parse::(); + /// + /// assert!(nope.is_err()); + /// ``` #[inline] - fn is_char_boundary(&self, index: usize) -> bool { - // 0 and len are always ok. - // Test for 0 explicitly so that it can optimize out the check - // easily and skip reading string data for that case. - if index == 0 || index == self.len() { return true; } - match self.as_bytes().get(index) { - None => false, - // This is bit magic equivalent to: b < 128 || b >= 192 - Some(&b) => (b as i8) >= -0x40, - } + #[stable(feature = "rust1", since = "1.0.0")] + pub fn parse(&self) -> Result { + FromStr::from_str(self) } + /// Checks if all characters in this string are within the ASCII range. + /// + /// # Examples + /// + /// ``` + /// let ascii = "hello!\n"; + /// let non_ascii = "Grüße, Jürgen ❤"; + /// + /// assert!(ascii.is_ascii()); + /// assert!(!non_ascii.is_ascii()); + /// ``` + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { &*(self as *const str as *const [u8]) } + pub fn is_ascii(&self) -> bool { + // We can treat each byte as character here: all multibyte characters + // start with a byte that is not in the ascii range, so we will stop + // there already. + self.bytes().all(|b| b.is_ascii()) } + /// Checks that two strings are an ASCII case-insensitive match. + /// + /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`, + /// but without allocating and copying temporaries. 
+ /// + /// # Examples + /// + /// ``` + /// assert!("Ferris".eq_ignore_ascii_case("FERRIS")); + /// assert!("Ferrös".eq_ignore_ascii_case("FERRöS")); + /// assert!(!"Ferrös".eq_ignore_ascii_case("FERRÖS")); + /// ``` + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] - unsafe fn as_bytes_mut(&mut self) -> &mut [u8] { - &mut *(self as *mut str as *mut [u8]) + pub fn eq_ignore_ascii_case(&self, other: &str) -> bool { + self.as_bytes().eq_ignore_ascii_case(other.as_bytes()) } - fn find<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option { - pat.into_searcher(self).next_match().map(|(i, _)| i) + /// Converts this string to its ASCII upper case equivalent in-place. + /// + /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z', + /// but non-ASCII letters are unchanged. + /// + /// To return a new uppercased value without modifying the existing one, use + /// [`to_ascii_uppercase`]. + /// + /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + pub fn make_ascii_uppercase(&mut self) { + let me = unsafe { self.as_bytes_mut() }; + me.make_ascii_uppercase() } - fn rfind<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option - where P::Searcher: ReverseSearcher<'a> - { - pat.into_searcher(self).next_match_back().map(|(i, _)| i) + /// Converts this string to its ASCII lower case equivalent in-place. + /// + /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', + /// but non-ASCII letters are unchanged. + /// + /// To return a new lowercased value without modifying the existing one, use + /// [`to_ascii_lowercase`]. 
+ /// + /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase + #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] + pub fn make_ascii_lowercase(&mut self) { + let me = unsafe { self.as_bytes_mut() }; + me.make_ascii_lowercase() } - - fn find_str<'a, P: Pattern<'a>>(&'a self, pat: P) -> Option { - self.find(pat) - } - - #[inline] - fn split_at(&self, mid: usize) -> (&str, &str) { - // is_char_boundary checks that the index is in [0, .len()] - if self.is_char_boundary(mid) { - unsafe { - (self.slice_unchecked(0, mid), - self.slice_unchecked(mid, self.len())) - } - } else { - slice_error_fail(self, 0, mid) - } - } - - fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str) { - // is_char_boundary checks that the index is in [0, .len()] - if self.is_char_boundary(mid) { - let len = self.len(); - let ptr = self.as_ptr() as *mut u8; - unsafe { - (from_raw_parts_mut(ptr, mid), - from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) - } - } else { - slice_error_fail(self, 0, mid) - } - } - - #[inline] - fn as_ptr(&self) -> *const u8 { - self as *const str as *const u8 - } - - #[inline] - fn len(&self) -> usize { - self.as_bytes().len() - } - - #[inline] - fn is_empty(&self) -> bool { self.len() == 0 } - - #[inline] - fn parse(&self) -> Result { FromStr::from_str(self) } } #[stable(feature = "rust1", since = "1.0.0")] @@ -2528,3 +3893,240 @@ impl<'a> Default for &'a str { /// Creates an empty str fn default() -> &'a str { "" } } + +#[stable(feature = "default_mut_str", since = "1.28.0")] +impl<'a> Default for &'a mut str { + /// Creates an empty mutable str + fn default() -> &'a mut str { unsafe { from_utf8_unchecked_mut(&mut []) } } +} + +/// An iterator over the non-whitespace substrings of a string, +/// separated by any amount of whitespace. +/// +/// This struct is created by the [`split_whitespace`] method on [`str`]. +/// See its documentation for more. 
+/// +/// [`split_whitespace`]: ../../std/primitive.str.html#method.split_whitespace +/// [`str`]: ../../std/primitive.str.html +#[stable(feature = "split_whitespace", since = "1.1.0")] +#[derive(Clone, Debug)] +pub struct SplitWhitespace<'a> { + inner: Filter, IsNotEmpty>, +} + +/// An iterator over the non-ASCII-whitespace substrings of a string, +/// separated by any amount of ASCII whitespace. +/// +/// This struct is created by the [`split_ascii_whitespace`] method on [`str`]. +/// See its documentation for more. +/// +/// [`split_ascii_whitespace`]: ../../std/primitive.str.html#method.split_ascii_whitespace +/// [`str`]: ../../std/primitive.str.html +#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +#[derive(Clone, Debug)] +pub struct SplitAsciiWhitespace<'a> { + inner: Map, IsNotEmpty>, UnsafeBytesToStr>, +} + +#[derive(Clone)] +struct IsWhitespace; + +impl FnOnce<(char, )> for IsWhitespace { + type Output = bool; + + #[inline] + extern "rust-call" fn call_once(mut self, arg: (char, )) -> bool { + self.call_mut(arg) + } +} + +impl FnMut<(char, )> for IsWhitespace { + #[inline] + extern "rust-call" fn call_mut(&mut self, arg: (char, )) -> bool { + arg.0.is_whitespace() + } +} + +#[derive(Clone)] +struct IsAsciiWhitespace; + +impl<'a> FnOnce<(&'a u8, )> for IsAsciiWhitespace { + type Output = bool; + + #[inline] + extern "rust-call" fn call_once(mut self, arg: (&u8, )) -> bool { + self.call_mut(arg) + } +} + +impl<'a> FnMut<(&'a u8, )> for IsAsciiWhitespace { + #[inline] + extern "rust-call" fn call_mut(&mut self, arg: (&u8, )) -> bool { + arg.0.is_ascii_whitespace() + } +} + +#[derive(Clone)] +struct IsNotEmpty; + +impl<'a, 'b> FnOnce<(&'a &'b str, )> for IsNotEmpty { + type Output = bool; + + #[inline] + extern "rust-call" fn call_once(mut self, arg: (&'a &'b str, )) -> bool { + self.call_mut(arg) + } +} + +impl<'a, 'b> FnMut<(&'a &'b str, )> for IsNotEmpty { + #[inline] + extern "rust-call" fn call_mut(&mut self, arg: (&'a &'b str, )) -> 
bool { + !arg.0.is_empty() + } +} + +impl<'a, 'b> FnOnce<(&'a &'b [u8], )> for IsNotEmpty { + type Output = bool; + + #[inline] + extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u8], )) -> bool { + self.call_mut(arg) + } +} + +impl<'a, 'b> FnMut<(&'a &'b [u8], )> for IsNotEmpty { + #[inline] + extern "rust-call" fn call_mut(&mut self, arg: (&'a &'b [u8], )) -> bool { + !arg.0.is_empty() + } +} + +#[derive(Clone)] +struct UnsafeBytesToStr; + +impl<'a> FnOnce<(&'a [u8], )> for UnsafeBytesToStr { + type Output = &'a str; + + #[inline] + extern "rust-call" fn call_once(mut self, arg: (&'a [u8], )) -> &'a str { + self.call_mut(arg) + } +} + +impl<'a> FnMut<(&'a [u8], )> for UnsafeBytesToStr { + #[inline] + extern "rust-call" fn call_mut(&mut self, arg: (&'a [u8], )) -> &'a str { + unsafe { from_utf8_unchecked(arg.0) } + } +} + + +#[stable(feature = "split_whitespace", since = "1.1.0")] +impl<'a> Iterator for SplitWhitespace<'a> { + type Item = &'a str; + + #[inline] + fn next(&mut self) -> Option<&'a str> { + self.inner.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +#[stable(feature = "split_whitespace", since = "1.1.0")] +impl<'a> DoubleEndedIterator for SplitWhitespace<'a> { + #[inline] + fn next_back(&mut self) -> Option<&'a str> { + self.inner.next_back() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl<'a> FusedIterator for SplitWhitespace<'a> {} + +#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +impl<'a> Iterator for SplitAsciiWhitespace<'a> { + type Item = &'a str; + + #[inline] + fn next(&mut self) -> Option<&'a str> { + self.inner.next() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> { + #[inline] + fn next_back(&mut self) -> Option<&'a str> { + self.inner.next_back() + } +} + +#[unstable(feature 
= "split_ascii_whitespace", issue = "48656")] +impl<'a> FusedIterator for SplitAsciiWhitespace<'a> {} + +/// An iterator of [`u16`] over the string encoded as UTF-16. +/// +/// [`u16`]: ../../std/primitive.u16.html +/// +/// This struct is created by the [`encode_utf16`] method on [`str`]. +/// See its documentation for more. +/// +/// [`encode_utf16`]: ../../std/primitive.str.html#method.encode_utf16 +/// [`str`]: ../../std/primitive.str.html +#[derive(Clone)] +#[stable(feature = "encode_utf16", since = "1.8.0")] +pub struct EncodeUtf16<'a> { + chars: Chars<'a>, + extra: u16, +} + +#[stable(feature = "collection_debug", since = "1.17.0")] +impl<'a> fmt::Debug for EncodeUtf16<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("EncodeUtf16 { .. }") + } +} + +#[stable(feature = "encode_utf16", since = "1.8.0")] +impl<'a> Iterator for EncodeUtf16<'a> { + type Item = u16; + + #[inline] + fn next(&mut self) -> Option { + if self.extra != 0 { + let tmp = self.extra; + self.extra = 0; + return Some(tmp); + } + + let mut buf = [0; 2]; + self.chars.next().map(|ch| { + let n = ch.encode_utf16(&mut buf).len(); + if n == 2 { + self.extra = buf[1]; + } + buf[0] + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (low, high) = self.chars.size_hint(); + // every char gets either one u16 or two u16, + // so this iterator is between 1 or 2 times as + // long as the underlying iterator. + (low, high.and_then(|n| n.checked_mul(2))) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl<'a> FusedIterator for EncodeUtf16<'a> {} diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs index 089d691773a1..5e63fa9ff354 100644 --- a/src/libcore/str/pattern.rs +++ b/src/libcore/str/pattern.rs @@ -258,7 +258,7 @@ pub struct CharSearcher<'a> { /// `finger` is the current byte index of the forward search. /// Imagine that it exists before the byte at its index, i.e. 
- /// haystack[finger] is the first byte of the slice we must inspect during + /// `haystack[finger]` is the first byte of the slice we must inspect during /// forward searching finger: usize, /// `finger_back` is the current byte index of the reverse search. @@ -324,7 +324,7 @@ unsafe impl<'a> Searcher<'a> for CharSearcher<'a> { // the second byte when searching for the third. // // However, this is totally okay. While we have the invariant that - // self.finger is on a UTF8 boundary, this invariant is not relid upon + // self.finger is on a UTF8 boundary, this invariant is not relied upon // within this method (it is relied upon in CharSearcher::next()). // // We only exit this method when we reach the end of the string, or if we @@ -354,7 +354,7 @@ unsafe impl<'a> ReverseSearcher<'a> for CharSearcher<'a> { #[inline] fn next_back(&mut self) -> SearchStep { let old_finger = self.finger_back; - let slice = unsafe { self.haystack.slice_unchecked(self.finger, old_finger) }; + let slice = unsafe { self.haystack.get_unchecked(self.finger..old_finger) }; let mut iter = slice.chars(); let old_len = iter.iter.len(); if let Some(ch) = iter.next_back() { diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index 3da9e9c87dde..617e067e0787 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -29,7 +29,7 @@ //! //! [`Ordering`]: enum.Ordering.html //! -//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations +//! [1]: https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations //! [2]: ../../../nomicon/atomics.html //! //! Atomic variables are safe to share between threads (they implement [`Sync`]) @@ -97,9 +97,10 @@ use fmt; /// Save power or switch hyperthreads in a busy-wait spin-loop. /// /// This function is deliberately more primitive than -/// `std::thread::yield_now` and does not directly yield to the -/// system's scheduler. 
In some cases it might be useful to use a -/// combination of both functions. Careful benchmarking is advised. +/// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and +/// does not directly yield to the system's scheduler. +/// In some cases it might be useful to use a combination of both functions. +/// Careful benchmarking is advised. /// /// On some platforms this function may not do anything at all. #[inline] @@ -123,6 +124,7 @@ pub fn spin_loop_hint() { /// [`bool`]: ../../../std/primitive.bool.html #[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] +#[repr(transparent)] pub struct AtomicBool { v: UnsafeCell, } @@ -146,6 +148,7 @@ unsafe impl Sync for AtomicBool {} /// This type has the same in-memory representation as a `*mut T`. #[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] +#[repr(transparent)] pub struct AtomicPtr { p: UnsafeCell<*mut T>, } @@ -175,50 +178,85 @@ unsafe impl Sync for AtomicPtr {} /// "relaxed" atomics allow all reorderings. /// /// Rust's memory orderings are [the same as -/// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations). +/// LLVM's](https://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations). /// /// For more information see the [nomicon]. /// /// [nomicon]: ../../../nomicon/atomics.html #[stable(feature = "rust1", since = "1.0.0")] #[derive(Copy, Clone, Debug)] +#[non_exhaustive] pub enum Ordering { /// No ordering constraints, only atomic operations. /// /// Corresponds to LLVM's [`Monotonic`] ordering. /// - /// [`Monotonic`]: http://llvm.org/docs/Atomics.html#monotonic + /// [`Monotonic`]: https://llvm.org/docs/Atomics.html#monotonic #[stable(feature = "rust1", since = "1.0.0")] Relaxed, - /// When coupled with a store, all previous writes become visible - /// to the other threads that perform a load with [`Acquire`] ordering - /// on the same value. 
+ /// When coupled with a store, all previous operations become ordered + /// before any load of this value with [`Acquire`] (or stronger) ordering. + /// In particular, all previous writes become visible to all threads + /// that perform an [`Acquire`] (or stronger) load of this value. /// - /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire + /// Notice that using this ordering for an operation that combines loads + /// and stores leads to a [`Relaxed`] load operation! + /// + /// This ordering is only applicable for operations that can perform a store. + /// + /// Corresponds to LLVM's [`Release`] ordering. + /// + /// [`Release`]: https://llvm.org/docs/Atomics.html#release + /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire + /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic #[stable(feature = "rust1", since = "1.0.0")] Release, - /// When coupled with a load, all subsequent loads will see data - /// written before a store with [`Release`] ordering on the same value - /// in other threads. + /// When coupled with a load, if the loaded value was written by a store operation with + /// [`Release`] (or stronger) ordering, then all subsequent operations + /// become ordered after that store. In particular, all subsequent loads will see data + /// written before the store. /// - /// [`Release`]: http://llvm.org/docs/Atomics.html#release + /// Notice that using this ordering for an operation that combines loads + /// and stores leads to a [`Relaxed`] store operation! + /// + /// This ordering is only applicable for operations that can perform a load. + /// + /// Corresponds to LLVM's [`Acquire`] ordering. 
+ /// + /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire + /// [`Release`]: https://llvm.org/docs/Atomics.html#release + /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic #[stable(feature = "rust1", since = "1.0.0")] Acquire, - /// When coupled with a load, uses [`Acquire`] ordering, and with a store - /// [`Release`] ordering. + /// Has the effects of both [`Acquire`] and [`Release`] together: + /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering. /// - /// [`Acquire`]: http://llvm.org/docs/Atomics.html#acquire - /// [`Release`]: http://llvm.org/docs/Atomics.html#release + /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up + /// not performing any store and hence it has just `Acquire` ordering. However, + /// `AcqRel` will never perform [`Relaxed`] accesses. + /// + /// This ordering is only applicable for operations that combine both loads and stores. + /// + /// Corresponds to LLVM's [`AcquireRelease`] ordering. + /// + /// [`AcquireRelease`]: https://llvm.org/docs/Atomics.html#acquirerelease + /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire + /// [`Release`]: https://llvm.org/docs/Atomics.html#release + /// [`Relaxed`]: https://llvm.org/docs/Atomics.html#monotonic #[stable(feature = "rust1", since = "1.0.0")] AcqRel, - /// Like `AcqRel` with the additional guarantee that all threads see all + /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store + /// operations, respectively) with the additional guarantee that all threads see all /// sequentially consistent operations in the same order. + /// + /// Corresponds to LLVM's [`SequentiallyConsistent`] ordering. 
+ /// + /// [`SequentiallyConsistent`]: https://llvm.org/docs/Atomics.html#sequentiallyconsistent + /// [`Acquire`]: https://llvm.org/docs/Atomics.html#acquire + /// [`Release`]: https://llvm.org/docs/Atomics.html#release + /// [`AcqRel`]: https://llvm.org/docs/Atomics.html#acquirerelease #[stable(feature = "rust1", since = "1.0.0")] SeqCst, - // Prevent exhaustive matching to allow for future extension - #[doc(hidden)] - #[unstable(feature = "future_atomic_orderings", issue = "0")] - __Nonexhaustive, } /// An [`AtomicBool`] initialized to `false`. @@ -285,21 +323,24 @@ impl AtomicBool { #[inline] #[stable(feature = "atomic_access", since = "1.15.0")] pub fn into_inner(self) -> bool { - unsafe { self.v.into_inner() != 0 } + self.v.into_inner() != 0 } /// Loads a value from the bool. /// /// `load` takes an [`Ordering`] argument which describes the memory ordering - /// of this operation. + /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. /// /// # Panics /// /// Panics if `order` is [`Release`] or [`AcqRel`]. /// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -319,9 +360,18 @@ impl AtomicBool { /// Stores a value into the bool. /// /// `store` takes an [`Ordering`] argument which describes the memory ordering - /// of this operation. + /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. + /// + /// # Panics + /// + /// Panics if `order` is [`Acquire`] or [`AcqRel`]. 
/// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -333,13 +383,6 @@ impl AtomicBool { /// some_bool.store(false, Ordering::Relaxed); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// ``` - /// - /// # Panics - /// - /// Panics if `order` is [`Acquire`] or [`AcqRel`]. - /// - /// [`Acquire`]: enum.Ordering.html#variant.Acquire - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, val: bool, order: Ordering) { @@ -351,9 +394,14 @@ impl AtomicBool { /// Stores a value into the bool, returning the previous value. /// /// `swap` takes an [`Ordering`] argument which describes the memory ordering - /// of this operation. + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. /// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire /// /// # Examples /// @@ -367,6 +415,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn swap(&self, val: bool, order: Ordering) -> bool { unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } } @@ -377,9 +426,16 @@ impl AtomicBool { /// was updated. /// /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory - /// ordering of this operation. + /// ordering of this operation. 
Notice that even when using [`AcqRel`], the operation + /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. + /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it + /// happens, and using [`Release`] makes the load part [`Relaxed`]. /// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel /// [`bool`]: ../../../std/primitive.bool.html /// /// # Examples @@ -397,6 +453,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool { match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { Ok(x) => x, @@ -412,13 +469,18 @@ impl AtomicBool { /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory /// ordering of this operation. The first describes the required ordering if the /// operation succeeds while the second describes the required ordering when the - /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and must - /// be equivalent or weaker than the success ordering. + /// operation fails. Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the successful load + /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. 
+ /// /// /// [`bool`]: ../../../std/primitive.bool.html /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -442,6 +504,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] + #[cfg(target_has_atomic = "cas")] pub fn compare_exchange(&self, current: bool, new: bool, @@ -464,16 +527,20 @@ impl AtomicBool { /// previous value. /// /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory - /// ordering of this operation. The first describes the required ordering if the operation - /// succeeds while the second describes the required ordering when the operation fails. The - /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or - /// weaker than the success ordering. + /// ordering of this operation. The first describes the required ordering if the + /// operation succeeds while the second describes the required ordering when the + /// operation fails. Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the successful load + /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. 
/// /// [`bool`]: ../../../std/primitive.bool.html /// [`compare_exchange`]: #method.compare_exchange /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -514,6 +581,16 @@ impl AtomicBool { /// /// Returns the previous value. /// + /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + /// + /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// /// # Examples /// /// ``` @@ -533,6 +610,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { unsafe { atomic_and(self.v.get(), val as u8, order) != 0 } } @@ -544,6 +622,16 @@ impl AtomicBool { /// /// Returns the previous value. /// + /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. 
+ /// + /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// /// # Examples /// /// ``` @@ -564,6 +652,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { // We can't use atomic_nand here because it can result in a bool with // an invalid value. This happens because the atomic operation is done @@ -587,6 +676,16 @@ impl AtomicBool { /// /// Returns the previous value. /// + /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. + /// + /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// /// # Examples /// /// ``` @@ -606,6 +705,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { unsafe { atomic_or(self.v.get(), val as u8, order) != 0 } } @@ -617,6 +717,16 @@ impl AtomicBool { /// /// Returns the previous value. /// + /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. 
+ /// + /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// /// # Examples /// /// ``` @@ -636,6 +746,7 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 } } @@ -695,21 +806,24 @@ impl AtomicPtr { #[inline] #[stable(feature = "atomic_access", since = "1.15.0")] pub fn into_inner(self) -> *mut T { - unsafe { self.p.into_inner() } + self.p.into_inner() } /// Loads a value from the pointer. /// /// `load` takes an [`Ordering`] argument which describes the memory ordering - /// of this operation. + /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. /// /// # Panics /// /// Panics if `order` is [`Release`] or [`AcqRel`]. /// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -730,9 +844,18 @@ impl AtomicPtr { /// Stores a value into the pointer. /// /// `store` takes an [`Ordering`] argument which describes the memory ordering - /// of this operation. + /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. + /// + /// # Panics + /// + /// Panics if `order` is [`Acquire`] or [`AcqRel`]. 
/// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -746,13 +869,6 @@ impl AtomicPtr { /// /// some_ptr.store(other_ptr, Ordering::Relaxed); /// ``` - /// - /// # Panics - /// - /// Panics if `order` is [`Acquire`] or [`AcqRel`]. - /// - /// [`Acquire`]: enum.Ordering.html#variant.Acquire - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, ptr: *mut T, order: Ordering) { @@ -764,9 +880,14 @@ impl AtomicPtr { /// Stores a value into the pointer, returning the previous value. /// /// `swap` takes an [`Ordering`] argument which describes the memory ordering - /// of this operation. + /// of this operation. All ordering modes are possible. Note that using + /// [`Acquire`] makes the store part of this operation [`Relaxed`], and + /// using [`Release`] makes the load part [`Relaxed`]. /// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire /// /// # Examples /// @@ -782,6 +903,7 @@ impl AtomicPtr { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } } @@ -792,9 +914,16 @@ impl AtomicPtr { /// was updated. /// /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory - /// ordering of this operation. + /// ordering of this operation. 
Notice that even when using [`AcqRel`], the operation + /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. + /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it + /// happens, and using [`Release`] makes the load part [`Relaxed`]. /// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed + /// [`Release`]: enum.Ordering.html#variant.Release + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel /// /// # Examples /// @@ -811,6 +940,7 @@ impl AtomicPtr { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[cfg(target_has_atomic = "cas")] pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { Ok(x) => x, @@ -824,14 +954,18 @@ impl AtomicPtr { /// the previous value. On success this value is guaranteed to be equal to `current`. /// /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory - /// ordering of this operation. The first describes the required ordering if - /// the operation succeeds while the second describes the required ordering when - /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] - /// and must be equivalent or weaker than the success ordering. + /// ordering of this operation. The first describes the required ordering if the + /// operation succeeds while the second describes the required ordering when the + /// operation fails. Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the successful load + /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. 
/// /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -849,6 +983,7 @@ impl AtomicPtr { /// ``` #[inline] #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] + #[cfg(target_has_atomic = "cas")] pub fn compare_exchange(&self, current: *mut T, new: *mut T, @@ -876,15 +1011,19 @@ impl AtomicPtr { /// previous value. /// /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory - /// ordering of this operation. The first describes the required ordering if the operation - /// succeeds while the second describes the required ordering when the operation fails. The - /// failure ordering can't be [`Release`] or [`AcqRel`] and must be equivalent or - /// weaker than the success ordering. + /// ordering of this operation. The first describes the required ordering if the + /// operation succeeds while the second describes the required ordering when the + /// operation fails. Using [`Acquire`] as success ordering makes the store part + /// of this operation [`Relaxed`], and using [`Release`] makes the successful load + /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] + /// and must be equivalent to or weaker than the success ordering. /// /// [`compare_exchange`]: #method.compare_exchange /// [`Ordering`]: enum.Ordering.html + /// [`Relaxed`]: enum.Ordering.html#variant.Relaxed /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel + /// [`Acquire`]: enum.Ordering.html#variant.Acquire + /// [`SeqCst`]: enum.Ordering.html#variant.SeqCst /// /// # Examples /// @@ -945,7 +1084,10 @@ macro_rules! 
atomic_int { $stable_debug:meta, $stable_access:meta, $stable_from:meta, + $stable_nand:meta, $s_int_type:expr, $int_ref:expr, + $extra_feature:expr, + $min_fn:ident, $max_fn:ident, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. /// @@ -957,13 +1099,9 @@ macro_rules! atomic_int { /// ). For more about the differences between atomic types and /// non-atomic types, please see the [module-level documentation]. /// - /// Please note that examples are shared between atomic variants of - /// primitive integer types, so it's normal that they are all - /// demonstrating [`AtomicIsize`]. - /// /// [module-level documentation]: index.html - /// [`AtomicIsize`]: struct.AtomicIsize.html #[$stable] + #[repr(transparent)] pub struct $atomic_type { v: UnsafeCell<$int_type>, } @@ -988,9 +1126,7 @@ macro_rules! atomic_int { #[$stable_debug] impl fmt::Debug for $atomic_type { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple(stringify!($atomic_type)) - .field(&self.load(Ordering::SeqCst)) - .finish() + fmt::Debug::fmt(&self.load(Ordering::SeqCst), f) } } @@ -999,373 +1135,678 @@ macro_rules! atomic_int { unsafe impl Sync for $atomic_type {} impl $atomic_type { - /// Creates a new atomic integer. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::AtomicIsize; - /// - /// let atomic_forty_two = AtomicIsize::new(42); - /// ``` - #[inline] - #[$stable] - pub const fn new(v: $int_type) -> Self { - $atomic_type {v: UnsafeCell::new(v)} - } + doc_comment! { + concat!("Creates a new atomic integer. - /// Returns a mutable reference to the underlying integer. - /// - /// This is safe because the mutable reference guarantees that no other threads are - /// concurrently accessing the atomic data. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let mut some_isize = AtomicIsize::new(10); - /// assert_eq!(*some_isize.get_mut(), 10); - /// *some_isize.get_mut() = 5; - /// assert_eq!(some_isize.load(Ordering::SeqCst), 5); - /// ``` - #[inline] - #[$stable_access] - pub fn get_mut(&mut self) -> &mut $int_type { - unsafe { &mut *self.v.get() } - } +# Examples - /// Consumes the atomic and returns the contained value. - /// - /// This is safe because passing `self` by value guarantees that no other threads are - /// concurrently accessing the atomic data. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::AtomicIsize; - /// - /// let some_isize = AtomicIsize::new(5); - /// assert_eq!(some_isize.into_inner(), 5); - /// ``` - #[inline] - #[$stable_access] - pub fn into_inner(self) -> $int_type { - unsafe { self.v.into_inner() } - } +``` +", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), "; - /// Loads a value from the atomic integer. - /// - /// `load` takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. - /// - /// # Panics - /// - /// Panics if `order` is [`Release`] or [`AcqRel`]. - /// - /// [`Ordering`]: enum.Ordering.html - /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.load(Ordering::Relaxed), 5); - /// ``` - #[inline] - #[$stable] - pub fn load(&self, order: Ordering) -> $int_type { - unsafe { atomic_load(self.v.get(), order) } - } - - /// Stores a value into the atomic integer. - /// - /// `store` takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. 
- /// - /// [`Ordering`]: enum.Ordering.html - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// some_isize.store(10, Ordering::Relaxed); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// ``` - /// - /// # Panics - /// - /// Panics if `order` is [`Acquire`] or [`AcqRel`]. - /// - /// [`Acquire`]: enum.Ordering.html#variant.Acquire - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel - #[inline] - #[$stable] - pub fn store(&self, val: $int_type, order: Ordering) { - unsafe { atomic_store(self.v.get(), val, order); } - } - - /// Stores a value into the atomic integer, returning the previous value. - /// - /// `swap` takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. - /// - /// [`Ordering`]: enum.Ordering.html - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5); - /// ``` - #[inline] - #[$stable] - pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type { - unsafe { atomic_swap(self.v.get(), val, order) } - } - - /// Stores a value into the atomic integer if the current value is the same as the - /// `current` value. - /// - /// The return value is always the previous value. If it is equal to `current`, then the - /// value was updated. - /// - /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory - /// ordering of this operation. 
- /// - /// [`Ordering`]: enum.Ordering.html - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// - /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// ``` - #[inline] - #[$stable] - pub fn compare_and_swap(&self, - current: $int_type, - new: $int_type, - order: Ordering) -> $int_type { - match self.compare_exchange(current, - new, - order, - strongest_failure_ordering(order)) { - Ok(x) => x, - Err(x) => x, +let atomic_forty_two = ", stringify!($atomic_type), "::new(42); +```"), + #[inline] + #[$stable] + pub const fn new(v: $int_type) -> Self { + $atomic_type {v: UnsafeCell::new(v)} } } - /// Stores a value into the atomic integer if the current value is the same as the - /// `current` value. - /// - /// The return value is a result indicating whether the new value was written and - /// containing the previous value. On success this value is guaranteed to be equal to - /// `current`. - /// - /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory - /// ordering of this operation. The first describes the required ordering if - /// the operation succeeds while the second describes the required ordering when - /// the operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and - /// must be equivalent or weaker than the success ordering. 
- /// - /// [`Ordering`]: enum.Ordering.html - /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.compare_exchange(5, 10, - /// Ordering::Acquire, - /// Ordering::Relaxed), - /// Ok(5)); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// - /// assert_eq!(some_isize.compare_exchange(6, 12, - /// Ordering::SeqCst, - /// Ordering::Acquire), - /// Err(10)); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// ``` - #[inline] - #[$stable_cxchg] - pub fn compare_exchange(&self, - current: $int_type, - new: $int_type, - success: Ordering, - failure: Ordering) -> Result<$int_type, $int_type> { - unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } - } + doc_comment! { + concat!("Returns a mutable reference to the underlying integer. - /// Stores a value into the atomic integer if the current value is the same as the - /// `current` value. - /// - /// Unlike [`compare_exchange`], this function is allowed to spuriously fail even - /// when the comparison succeeds, which can result in more efficient code on some - /// platforms. The return value is a result indicating whether the new value was - /// written and containing the previous value. - /// - /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory - /// ordering of this operation. The first describes the required ordering if the - /// operation succeeds while the second describes the required ordering when the - /// operation fails. The failure ordering can't be [`Release`] or [`AcqRel`] and - /// must be equivalent or weaker than the success ordering. 
- /// - /// [`compare_exchange`]: #method.compare_exchange - /// [`Ordering`]: enum.Ordering.html - /// [`Release`]: enum.Ordering.html#variant.Release - /// [`AcqRel`]: enum.Ordering.html#variant.AcqRel - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let val = AtomicIsize::new(4); - /// - /// let mut old = val.load(Ordering::Relaxed); - /// loop { - /// let new = old * 2; - /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { - /// Ok(_) => break, - /// Err(x) => old = x, - /// } - /// } - /// ``` - #[inline] - #[$stable_cxchg] - pub fn compare_exchange_weak(&self, - current: $int_type, - new: $int_type, - success: Ordering, - failure: Ordering) -> Result<$int_type, $int_type> { - unsafe { - atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) +This is safe because the mutable reference guarantees that no other threads are +concurrently accessing the atomic data. + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let mut some_var = ", stringify!($atomic_type), "::new(10); +assert_eq!(*some_var.get_mut(), 10); +*some_var.get_mut() = 5; +assert_eq!(some_var.load(Ordering::SeqCst), 5); +```"), + #[inline] + #[$stable_access] + pub fn get_mut(&mut self) -> &mut $int_type { + unsafe { &mut *self.v.get() } } } - /// Adds to the current value, returning the previous value. - /// - /// This operation wraps around on overflow. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0); - /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); - /// assert_eq!(foo.load(Ordering::SeqCst), 10); - /// ``` - #[inline] - #[$stable] - pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type { - unsafe { atomic_add(self.v.get(), val, order) } + doc_comment! { + concat!("Consumes the atomic and returns the contained value. 
+ +This is safe because passing `self` by value guarantees that no other threads are +concurrently accessing the atomic data. + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), "; + +let some_var = ", stringify!($atomic_type), "::new(5); +assert_eq!(some_var.into_inner(), 5); +```"), + #[inline] + #[$stable_access] + pub fn into_inner(self) -> $int_type { + self.v.into_inner() + } } - /// Subtracts from the current value, returning the previous value. - /// - /// This operation wraps around on overflow. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0); - /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0); - /// assert_eq!(foo.load(Ordering::SeqCst), -10); - /// ``` - #[inline] - #[$stable] - pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type { - unsafe { atomic_sub(self.v.get(), val, order) } + doc_comment! { + concat!("Loads a value from the atomic integer. + +`load` takes an [`Ordering`] argument which describes the memory ordering of this operation. +Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. + +# Panics + +Panics if `order` is [`Release`] or [`AcqRel`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire +[`AcqRel`]: enum.Ordering.html#variant.AcqRel +[`SeqCst`]: enum.Ordering.html#variant.SeqCst + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let some_var = ", stringify!($atomic_type), "::new(5); + +assert_eq!(some_var.load(Ordering::Relaxed), 5); +```"), + #[inline] + #[$stable] + pub fn load(&self, order: Ordering) -> $int_type { + unsafe { atomic_load(self.v.get(), order) } + } } - /// Bitwise "and" with the current value. 
- /// - /// Performs a bitwise "and" operation on the current value and the argument `val`, and - /// sets the new value to the result. - /// - /// Returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0b101101); - /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001); - #[inline] - #[$stable] - pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type { - unsafe { atomic_and(self.v.get(), val, order) } + doc_comment! { + concat!("Stores a value into the atomic integer. + +`store` takes an [`Ordering`] argument which describes the memory ordering of this operation. + Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. + +# Panics + +Panics if `order` is [`Acquire`] or [`AcqRel`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire +[`AcqRel`]: enum.Ordering.html#variant.AcqRel +[`SeqCst`]: enum.Ordering.html#variant.SeqCst + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let some_var = ", stringify!($atomic_type), "::new(5); + +some_var.store(10, Ordering::Relaxed); +assert_eq!(some_var.load(Ordering::Relaxed), 10); +```"), + #[inline] + #[$stable] + pub fn store(&self, val: $int_type, order: Ordering) { + unsafe { atomic_store(self.v.get(), val, order); } + } } - /// Bitwise "or" with the current value. - /// - /// Performs a bitwise "or" operation on the current value and the argument `val`, and - /// sets the new value to the result. - /// - /// Returns the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0b101101); - /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111); - #[inline] - #[$stable] - pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type { - unsafe { atomic_or(self.v.get(), val, order) } + doc_comment! { + concat!("Stores a value into the atomic integer, returning the previous value. + +`swap` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let some_var = ", stringify!($atomic_type), "::new(5); + +assert_eq!(some_var.swap(10, Ordering::Relaxed), 5); +```"), + #[inline] + #[$stable] + #[cfg(target_has_atomic = "cas")] + pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_swap(self.v.get(), val, order) } + } } - /// Bitwise "xor" with the current value. - /// - /// Performs a bitwise "xor" operation on the current value and the argument `val`, and - /// sets the new value to the result. - /// - /// Returns the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0b101101); - /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110); - #[inline] - #[$stable] - pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type { - unsafe { atomic_xor(self.v.get(), val, order) } + doc_comment! { + concat!("Stores a value into the atomic integer if the current value is the same as +the `current` value. + +The return value is always the previous value. If it is equal to `current`, then the +value was updated. + +`compare_and_swap` also takes an [`Ordering`] argument which describes the memory +ordering of this operation. Notice that even when using [`AcqRel`], the operation +might fail and hence just perform an `Acquire` load, but not have `Release` semantics. +Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it +happens, and using [`Release`] makes the load part [`Relaxed`]. 
+ +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire +[`AcqRel`]: enum.Ordering.html#variant.AcqRel + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let some_var = ", stringify!($atomic_type), "::new(5); + +assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5); +assert_eq!(some_var.load(Ordering::Relaxed), 10); + +assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10); +assert_eq!(some_var.load(Ordering::Relaxed), 10); +```"), + #[inline] + #[$stable] + #[cfg(target_has_atomic = "cas")] + pub fn compare_and_swap(&self, + current: $int_type, + new: $int_type, + order: Ordering) -> $int_type { + match self.compare_exchange(current, + new, + order, + strongest_failure_ordering(order)) { + Ok(x) => x, + Err(x) => x, + } + } } + + doc_comment! { + concat!("Stores a value into the atomic integer if the current value is the same as +the `current` value. + +The return value is a result indicating whether the new value was written and +containing the previous value. On success this value is guaranteed to be equal to +`current`. + +`compare_exchange` takes two [`Ordering`] arguments to describe the memory +ordering of this operation. The first describes the required ordering if the +operation succeeds while the second describes the required ordering when the +operation fails. Using [`Acquire`] as success ordering makes the store part +of this operation [`Relaxed`], and using [`Release`] makes the successful load +[`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] +and must be equivalent to or weaker than the success ordering. 
+ +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire +[`SeqCst`]: enum.Ordering.html#variant.SeqCst + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let some_var = ", stringify!($atomic_type), "::new(5); + +assert_eq!(some_var.compare_exchange(5, 10, + Ordering::Acquire, + Ordering::Relaxed), + Ok(5)); +assert_eq!(some_var.load(Ordering::Relaxed), 10); + +assert_eq!(some_var.compare_exchange(6, 12, + Ordering::SeqCst, + Ordering::Acquire), + Err(10)); +assert_eq!(some_var.load(Ordering::Relaxed), 10); +```"), + #[inline] + #[$stable_cxchg] + #[cfg(target_has_atomic = "cas")] + pub fn compare_exchange(&self, + current: $int_type, + new: $int_type, + success: Ordering, + failure: Ordering) -> Result<$int_type, $int_type> { + unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } + } + } + + doc_comment! { + concat!("Stores a value into the atomic integer if the current value is the same as +the `current` value. + +Unlike [`compare_exchange`], this function is allowed to spuriously fail even +when the comparison succeeds, which can result in more efficient code on some +platforms. The return value is a result indicating whether the new value was +written and containing the previous value. + +`compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory +ordering of this operation. The first describes the required ordering if the +operation succeeds while the second describes the required ordering when the +operation fails. Using [`Acquire`] as success ordering makes the store part +of this operation [`Relaxed`], and using [`Release`] makes the successful load +[`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] +and must be equivalent to or weaker than the success ordering. 
+ +[`compare_exchange`]: #method.compare_exchange +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire +[`SeqCst`]: enum.Ordering.html#variant.SeqCst + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let val = ", stringify!($atomic_type), "::new(4); + +let mut old = val.load(Ordering::Relaxed); +loop { + let new = old * 2; + match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { + Ok(_) => break, + Err(x) => old = x, + } +} +```"), + #[inline] + #[$stable_cxchg] + pub fn compare_exchange_weak(&self, + current: $int_type, + new: $int_type, + success: Ordering, + failure: Ordering) -> Result<$int_type, $int_type> { + unsafe { + atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) + } + } + } + + doc_comment! { + concat!("Adds to the current value, returning the previous value. + +This operation wraps around on overflow. + +`fetch_add` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0); +assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); +assert_eq!(foo.load(Ordering::SeqCst), 10); +```"), + #[inline] + #[$stable] + pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_add(self.v.get(), val, order) } + } + } + + doc_comment! 
{ + concat!("Subtracts from the current value, returning the previous value. + +This operation wraps around on overflow. + +`fetch_sub` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(20); +assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20); +assert_eq!(foo.load(Ordering::SeqCst), 10); +```"), + #[inline] + #[$stable] + pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_sub(self.v.get(), val, order) } + } + } + + doc_comment! { + concat!("Bitwise \"and\" with the current value. + +Performs a bitwise \"and\" operation on the current value and the argument `val`, and +sets the new value to the result. + +Returns the previous value. + +`fetch_and` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. 
+ +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0b101101); +assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); +assert_eq!(foo.load(Ordering::SeqCst), 0b100001); +```"), + #[inline] + #[$stable] + pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_and(self.v.get(), val, order) } + } + } + + doc_comment! { + concat!("Bitwise \"nand\" with the current value. + +Performs a bitwise \"nand\" operation on the current value and the argument `val`, and +sets the new value to the result. + +Returns the previous value. + +`fetch_nand` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, " +use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0x13); +assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13); +assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31)); +```"), + #[inline] + #[$stable_nand] + pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_nand(self.v.get(), val, order) } + } + } + + doc_comment! { + concat!("Bitwise \"or\" with the current value. + +Performs a bitwise \"or\" operation on the current value and the argument `val`, and +sets the new value to the result. + +Returns the previous value. 
+ +`fetch_or` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. + +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0b101101); +assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); +assert_eq!(foo.load(Ordering::SeqCst), 0b111111); +```"), + #[inline] + #[$stable] + pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_or(self.v.get(), val, order) } + } + } + + doc_comment! { + concat!("Bitwise \"xor\" with the current value. + +Performs a bitwise \"xor\" operation on the current value and the argument `val`, and +sets the new value to the result. + +Returns the previous value. + +`fetch_xor` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. 
+ +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(0b101101); +assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); +assert_eq!(foo.load(Ordering::SeqCst), 0b011110); +```"), + #[inline] + #[$stable] + pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_xor(self.v.get(), val, order) } + } + } + + doc_comment! { + concat!("Fetches the value, and applies a function to it that returns an optional +new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else +`Err(previous_value)`. + +Note: This may call the function multiple times if the value has been changed from other threads in +the meantime, as long as the function returns `Some(_)`, but the function will have been applied +but once to the stored value. + +`fetch_update` takes two [`Ordering`] arguments to describe the memory +ordering of this operation. The first describes the required ordering for loads +and failed updates while the second describes the required ordering when the +operation finally succeeds. Beware that this is different from the two +modes in [`compare_exchange`]! + +Using [`Acquire`] as success ordering makes the store part +of this operation [`Relaxed`], and using [`Release`] makes the final successful load +[`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] +and must be equivalent to or weaker than the success ordering. 
+ +[`bool`]: ../../../std/primitive.bool.html +[`compare_exchange`]: #method.compare_exchange +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire +[`SeqCst`]: enum.Ordering.html#variant.SeqCst + +# Examples + +```rust +#![feature(no_more_cas)] +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let x = ", stringify!($atomic_type), "::new(7); +assert_eq!(x.fetch_update(|_| None, Ordering::SeqCst, Ordering::SeqCst), Err(7)); +assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(7)); +assert_eq!(x.fetch_update(|x| Some(x + 1), Ordering::SeqCst, Ordering::SeqCst), Ok(8)); +assert_eq!(x.load(Ordering::SeqCst), 9); +```"), + #[inline] + #[unstable(feature = "no_more_cas", + reason = "no more CAS loops in user code", + issue = "48655")] + pub fn fetch_update(&self, + mut f: F, + fetch_order: Ordering, + set_order: Ordering) -> Result<$int_type, $int_type> + where F: FnMut($int_type) -> Option<$int_type> { + let mut prev = self.load(fetch_order); + while let Some(next) = f(prev) { + match self.compare_exchange_weak(prev, next, set_order, fetch_order) { + x @ Ok(_) => return x, + Err(next_prev) => prev = next_prev + } + } + Err(prev) + } + } + + doc_comment! { + concat!("Maximum with the current value. + +Finds the maximum of the current value and the argument `val`, and +sets the new value to the result. + +Returns the previous value. + +`fetch_max` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. 
+ +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +#![feature(atomic_min_max)] +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(23); +assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23); +assert_eq!(foo.load(Ordering::SeqCst), 42); +``` + +If you want to obtain the maximum value in one step, you can use the following: + +``` +#![feature(atomic_min_max)] +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(23); +let bar = 42; +let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar); +assert!(max_foo == 42); +```"), + #[inline] + #[unstable(feature = "atomic_min_max", + reason = "easier and faster min/max than writing manual CAS loop", + issue = "48655")] + pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { $max_fn(self.v.get(), val, order) } + } + } + + doc_comment! { + concat!("Minimum with the current value. + +Finds the minimum of the current value and the argument `val`, and +sets the new value to the result. + +Returns the previous value. + +`fetch_min` takes an [`Ordering`] argument which describes the memory ordering +of this operation. All ordering modes are possible. Note that using +[`Acquire`] makes the store part of this operation [`Relaxed`], and +using [`Release`] makes the load part [`Relaxed`]. 
+ +[`Ordering`]: enum.Ordering.html +[`Relaxed`]: enum.Ordering.html#variant.Relaxed +[`Release`]: enum.Ordering.html#variant.Release +[`Acquire`]: enum.Ordering.html#variant.Acquire + +# Examples + +``` +#![feature(atomic_min_max)] +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(23); +assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23); +assert_eq!(foo.load(Ordering::Relaxed), 23); +assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23); +assert_eq!(foo.load(Ordering::Relaxed), 22); +``` + +If you want to obtain the minimum value in one step, you can use the following: + +``` +#![feature(atomic_min_max)] +", $extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering}; + +let foo = ", stringify!($atomic_type), "::new(23); +let bar = 12; +let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar); +assert_eq!(min_foo, 12); +```"), + #[inline] + #[unstable(feature = "atomic_min_max", + reason = "easier and faster min/max than writing manual CAS loop", + issue = "48655")] + pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { $min_fn(self.v.get(), val, order) } + } + } + } } } @@ -1377,7 +1818,10 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "i8", "../../../std/primitive.i8.html", + "#![feature(integer_atomics)]\n\n", + atomic_min, atomic_max, i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic = "8")] @@ -1387,7 +1831,10 @@ atomic_int! 
{ unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "u8", "../../../std/primitive.u8.html", + "#![feature(integer_atomics)]\n\n", + atomic_umin, atomic_umax, u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic = "16")] @@ -1397,7 +1844,10 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "i16", "../../../std/primitive.i16.html", + "#![feature(integer_atomics)]\n\n", + atomic_min, atomic_max, i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic = "16")] @@ -1407,7 +1857,10 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "u16", "../../../std/primitive.u16.html", + "#![feature(integer_atomics)]\n\n", + atomic_umin, atomic_umax, u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic = "32")] @@ -1417,7 +1870,10 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "i32", "../../../std/primitive.i32.html", + "#![feature(integer_atomics)]\n\n", + atomic_min, atomic_max, i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic = "32")] @@ -1427,7 +1883,10 @@ atomic_int! 
{ unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "u32", "../../../std/primitive.u32.html", + "#![feature(integer_atomics)]\n\n", + atomic_umin, atomic_umax, u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic = "64")] @@ -1437,7 +1896,10 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "i64", "../../../std/primitive.i64.html", + "#![feature(integer_atomics)]\n\n", + atomic_min, atomic_max, i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic = "64")] @@ -1447,7 +1909,10 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "u64", "../../../std/primitive.u64.html", + "#![feature(integer_atomics)]\n\n", + atomic_umin, atomic_umax, u64 AtomicU64 ATOMIC_U64_INIT } #[cfg(target_has_atomic = "ptr")] @@ -1457,7 +1922,10 @@ atomic_int!{ stable(feature = "atomic_debug", since = "1.3.0"), stable(feature = "atomic_access", since = "1.15.0"), stable(feature = "atomic_from", since = "1.23.0"), + stable(feature = "atomic_nand", since = "1.27.0"), "isize", "../../../std/primitive.isize.html", + "", + atomic_min, atomic_max, isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic = "ptr")] @@ -1467,11 +1935,15 @@ atomic_int!{ stable(feature = "atomic_debug", since = "1.3.0"), stable(feature = "atomic_access", since = "1.15.0"), stable(feature = "atomic_from", since = "1.23.0"), + stable(feature = "atomic_nand", since = "1.27.0"), "usize", "../../../std/primitive.usize.html", + "", + atomic_umin, atomic_umax, usize 
AtomicUsize ATOMIC_USIZE_INIT } #[inline] +#[cfg(target_has_atomic = "cas")] fn strongest_failure_ordering(order: Ordering) -> Ordering { match order { Release => Relaxed, @@ -1479,7 +1951,6 @@ fn strongest_failure_ordering(order: Ordering) -> Ordering { SeqCst => SeqCst, Acquire => Acquire, AcqRel => Acquire, - __Nonexhaustive => __Nonexhaustive, } } @@ -1491,7 +1962,6 @@ unsafe fn atomic_store(dst: *mut T, val: T, order: Ordering) { SeqCst => intrinsics::atomic_store(dst, val), Acquire => panic!("there is no such thing as an acquire store"), AcqRel => panic!("there is no such thing as an acquire/release store"), - __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -1503,11 +1973,11 @@ unsafe fn atomic_load(dst: *const T, order: Ordering) -> T { SeqCst => intrinsics::atomic_load(dst), Release => panic!("there is no such thing as a release load"), AcqRel => panic!("there is no such thing as an acquire/release load"), - __Nonexhaustive => panic!("invalid memory ordering"), } } #[inline] +#[cfg(target_has_atomic = "cas")] unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), @@ -1515,7 +1985,6 @@ unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), SeqCst => intrinsics::atomic_xchg(dst, val), - __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -1528,7 +1997,6 @@ unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), SeqCst => intrinsics::atomic_xadd(dst, val), - __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -1541,11 +2009,11 @@ unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), SeqCst => 
intrinsics::atomic_xsub(dst, val), - __Nonexhaustive => panic!("invalid memory ordering"), } } #[inline] +#[cfg(target_has_atomic = "cas")] unsafe fn atomic_compare_exchange(dst: *mut T, old: T, new: T, @@ -1562,8 +2030,6 @@ unsafe fn atomic_compare_exchange(dst: *mut T, (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new), (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new), (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new), - (__Nonexhaustive, _) => panic!("invalid memory ordering"), - (_, __Nonexhaustive) => panic!("invalid memory ordering"), (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), (_, Release) => panic!("there is no such thing as a release failure ordering"), _ => panic!("a failure ordering can't be stronger than a success ordering"), @@ -1588,8 +2054,6 @@ unsafe fn atomic_compare_exchange_weak(dst: *mut T, (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new), (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new), (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new), - (__Nonexhaustive, _) => panic!("invalid memory ordering"), - (_, __Nonexhaustive) => panic!("invalid memory ordering"), (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), (_, Release) => panic!("there is no such thing as a release failure ordering"), _ => panic!("a failure ordering can't be stronger than a success ordering"), @@ -1605,7 +2069,17 @@ unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { AcqRel => intrinsics::atomic_and_acqrel(dst, val), Relaxed => intrinsics::atomic_and_relaxed(dst, val), SeqCst => intrinsics::atomic_and(dst, val), - __Nonexhaustive => panic!("invalid memory ordering"), + } +} + +#[inline] +unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_nand_acq(dst, val), + 
Release => intrinsics::atomic_nand_rel(dst, val), + AcqRel => intrinsics::atomic_nand_acqrel(dst, val), + Relaxed => intrinsics::atomic_nand_relaxed(dst, val), + SeqCst => intrinsics::atomic_nand(dst, val), } } @@ -1617,7 +2091,6 @@ unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { AcqRel => intrinsics::atomic_or_acqrel(dst, val), Relaxed => intrinsics::atomic_or_relaxed(dst, val), SeqCst => intrinsics::atomic_or(dst, val), - __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -1629,7 +2102,54 @@ unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { AcqRel => intrinsics::atomic_xor_acqrel(dst, val), Relaxed => intrinsics::atomic_xor_relaxed(dst, val), SeqCst => intrinsics::atomic_xor(dst, val), - __Nonexhaustive => panic!("invalid memory ordering"), + } +} + +/// returns the max value (signed comparison) +#[inline] +unsafe fn atomic_max(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_max_acq(dst, val), + Release => intrinsics::atomic_max_rel(dst, val), + AcqRel => intrinsics::atomic_max_acqrel(dst, val), + Relaxed => intrinsics::atomic_max_relaxed(dst, val), + SeqCst => intrinsics::atomic_max(dst, val), + } +} + +/// returns the min value (signed comparison) +#[inline] +unsafe fn atomic_min(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_min_acq(dst, val), + Release => intrinsics::atomic_min_rel(dst, val), + AcqRel => intrinsics::atomic_min_acqrel(dst, val), + Relaxed => intrinsics::atomic_min_relaxed(dst, val), + SeqCst => intrinsics::atomic_min(dst, val), + } +} + +/// returns the max value (unsigned comparison) +#[inline] +unsafe fn atomic_umax(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_umax_acq(dst, val), + Release => intrinsics::atomic_umax_rel(dst, val), + AcqRel => intrinsics::atomic_umax_acqrel(dst, val), + Relaxed => intrinsics::atomic_umax_relaxed(dst, val), + SeqCst => 
intrinsics::atomic_umax(dst, val), + } +} + +/// returns the min value (unsigned comparison) +#[inline] +unsafe fn atomic_umin(dst: *mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_umin_acq(dst, val), + Release => intrinsics::atomic_umin_rel(dst, val), + AcqRel => intrinsics::atomic_umin_acqrel(dst, val), + Relaxed => intrinsics::atomic_umin_relaxed(dst, val), + SeqCst => intrinsics::atomic_umin(dst, val), } } @@ -1719,7 +2239,6 @@ pub fn fence(order: Ordering) { AcqRel => intrinsics::atomic_fence_acqrel(), SeqCst => intrinsics::atomic_fence(), Relaxed => panic!("there is no such thing as a relaxed fence"), - __Nonexhaustive => panic!("invalid memory ordering"), } } } @@ -1809,7 +2328,6 @@ pub fn compiler_fence(order: Ordering) { AcqRel => intrinsics::atomic_singlethreadfence_acqrel(), SeqCst => intrinsics::atomic_singlethreadfence(), Relaxed => panic!("there is no such thing as a relaxed compiler fence"), - __Nonexhaustive => panic!("invalid memory ordering"), } } } @@ -1819,7 +2337,7 @@ pub fn compiler_fence(order: Ordering) { #[stable(feature = "atomic_debug", since = "1.3.0")] impl fmt::Debug for AtomicBool { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish() + fmt::Debug::fmt(&self.load(Ordering::SeqCst), f) } } @@ -1827,7 +2345,7 @@ impl fmt::Debug for AtomicBool { #[stable(feature = "atomic_debug", since = "1.3.0")] impl fmt::Debug for AtomicPtr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("AtomicPtr").field(&self.load(Ordering::SeqCst)).finish() + fmt::Debug::fmt(&self.load(Ordering::SeqCst), f) } } diff --git a/src/libcore/task/context.rs b/src/libcore/task/context.rs new file mode 100644 index 000000000000..5a29c8528ef3 --- /dev/null +++ b/src/libcore/task/context.rs @@ -0,0 +1,98 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +use fmt; +use super::{Spawn, Waker, LocalWaker}; + +/// Information about the currently-running task. +/// +/// Contexts are always tied to the stack, since they are set up specifically +/// when performing a single `poll` step on a task. +pub struct Context<'a> { + local_waker: &'a LocalWaker, + spawner: &'a mut dyn Spawn, +} + +impl<'a> fmt::Debug for Context<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Context") + .finish() + } +} + +impl<'a> Context<'a> { + /// Create a new task `Context` with the provided `local_waker`, `waker`, + /// and `spawner`. + #[inline] + pub fn new( + local_waker: &'a LocalWaker, + spawner: &'a mut dyn Spawn, + ) -> Context<'a> { + Context { local_waker, spawner } + } + + /// Get the `LocalWaker` associated with the current task. + #[inline] + pub fn local_waker(&self) -> &'a LocalWaker { + self.local_waker + } + + /// Get the `Waker` associated with the current task. + #[inline] + pub fn waker(&self) -> &'a Waker { + unsafe { &*(self.local_waker as *const LocalWaker as *const Waker) } + } + + /// Get the spawner associated with this task. + /// + /// This method is useful primarily if you want to explicitly handle + /// spawn failures. + #[inline] + pub fn spawner(&mut self) -> &mut dyn Spawn { + self.spawner + } + + /// Produce a context like the current one, but using the given waker + /// instead. + /// + /// This advanced method is primarily used when building "internal + /// schedulers" within a task, where you want to provide some customized + /// wakeup logic. 
+ #[inline] + pub fn with_waker<'b>( + &'b mut self, + local_waker: &'b LocalWaker, + ) -> Context<'b> { + Context { + local_waker, + spawner: self.spawner, + } + } + + /// Produce a context like the current one, but using the given spawner + /// instead. + /// + /// This advanced method is primarily used when building "internal + /// schedulers" within a task. + #[inline] + pub fn with_spawner<'b, Sp: Spawn>( + &'b mut self, + spawner: &'b mut Sp, + ) -> Context<'b> { + Context { + local_waker: self.local_waker, + spawner, + } + } +} diff --git a/src/libcore/task/mod.rs b/src/libcore/task/mod.rs new file mode 100644 index 000000000000..f51e5f7ce0e3 --- /dev/null +++ b/src/libcore/task/mod.rs @@ -0,0 +1,27 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +//! Types and Traits for working with asynchronous tasks. + +mod context; +pub use self::context::Context; + +mod spawn; +pub use self::spawn::{Spawn, SpawnErrorKind, SpawnObjError, SpawnLocalObjError}; + +mod poll; +pub use self::poll::Poll; + +mod wake; +pub use self::wake::{Waker, LocalWaker, UnsafeWake}; diff --git a/src/libcore/task/poll.rs b/src/libcore/task/poll.rs new file mode 100644 index 000000000000..fb027efc6dca --- /dev/null +++ b/src/libcore/task/poll.rs @@ -0,0 +1,137 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +use ops::Try; +use result::Result; + +/// Indicates whether a value is available or if the current task has been +/// scheduled to receive a wakeup instead. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum Poll { + /// Represents that a value is immediately ready. + Ready(T), + + /// Represents that a value is not ready yet. + /// + /// When a function returns `Pending`, the function *must* also + /// ensure that the current task is scheduled to be awoken when + /// progress can be made. + Pending, +} + +impl Poll { + /// Change the ready value of this `Poll` with the closure provided + pub fn map(self, f: F) -> Poll + where F: FnOnce(T) -> U + { + match self { + Poll::Ready(t) => Poll::Ready(f(t)), + Poll::Pending => Poll::Pending, + } + } + + /// Returns whether this is `Poll::Ready` + #[inline] + pub fn is_ready(&self) -> bool { + match *self { + Poll::Ready(_) => true, + Poll::Pending => false, + } + } + + /// Returns whether this is `Poll::Pending` + #[inline] + pub fn is_pending(&self) -> bool { + !self.is_ready() + } +} + +impl Poll> { + /// Change the success value of this `Poll` with the closure provided + pub fn map_ok(self, f: F) -> Poll> + where F: FnOnce(T) -> U + { + match self { + Poll::Ready(Ok(t)) => Poll::Ready(Ok(f(t))), + Poll::Ready(Err(e)) => Poll::Ready(Err(e)), + Poll::Pending => Poll::Pending, + } + } + + /// Change the error value of this `Poll` with the closure provided + pub fn map_err(self, f: F) -> Poll> + where F: FnOnce(E) -> U + { + match self { + Poll::Ready(Ok(t)) => Poll::Ready(Ok(t)), + Poll::Ready(Err(e)) => Poll::Ready(Err(f(e))), + Poll::Pending => Poll::Pending, + } + } +} + +impl From for Poll { + fn from(t: T) -> Poll { + Poll::Ready(t) + } +} + +impl Try for Poll> { + type Ok = Poll; + type Error 
= E; + + #[inline] + fn into_result(self) -> Result { + match self { + Poll::Ready(Ok(x)) => Ok(Poll::Ready(x)), + Poll::Ready(Err(e)) => Err(e), + Poll::Pending => Ok(Poll::Pending), + } + } + + #[inline] + fn from_error(e: Self::Error) -> Self { + Poll::Ready(Err(e)) + } + + #[inline] + fn from_ok(x: Self::Ok) -> Self { + x.map(Ok) + } +} + +impl Try for Poll>> { + type Ok = Poll>; + type Error = E; + + #[inline] + fn into_result(self) -> Result { + match self { + Poll::Ready(Some(Ok(x))) => Ok(Poll::Ready(Some(x))), + Poll::Ready(Some(Err(e))) => Err(e), + Poll::Ready(None) => Ok(Poll::Ready(None)), + Poll::Pending => Ok(Poll::Pending), + } + } + + #[inline] + fn from_error(e: Self::Error) -> Self { + Poll::Ready(Some(Err(e))) + } + + #[inline] + fn from_ok(x: Self::Ok) -> Self { + x.map(|x| x.map(Ok)) + } +} diff --git a/src/libcore/task/spawn.rs b/src/libcore/task/spawn.rs new file mode 100644 index 000000000000..58ee85d232bc --- /dev/null +++ b/src/libcore/task/spawn.rs @@ -0,0 +1,93 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +use fmt; +use future::{FutureObj, LocalFutureObj}; + +/// Spawns tasks that poll futures to completion onto its associated task +/// executor. +/// +/// The term "task" refers to a kind of lightweight "thread". Task executors +/// are responsible for scheduling the execution of tasks on operating system +/// threads. +pub trait Spawn { + /// Spawns a new task with the given future. The future will be polled until + /// completion. 
+ /// + /// # Errors + /// + /// The executor may be unable to spawn tasks, either because it has + /// been shut down or is resource-constrained. + fn spawn_obj( + &mut self, + future: FutureObj<'static, ()>, + ) -> Result<(), SpawnObjError>; + + /// Determines whether the executor is able to spawn new tasks. + /// + /// # Returns + /// + /// An `Ok` return means the executor is *likely* (but not guaranteed) + /// to accept a subsequent spawn attempt. Likewise, an `Err` return + /// means that `spawn` is likely, but not guaranteed, to yield an error. + #[inline] + fn status(&self) -> Result<(), SpawnErrorKind> { + Ok(()) + } +} + +/// Provides the reason that an executor was unable to spawn. +pub struct SpawnErrorKind { + _hidden: (), +} + +impl fmt::Debug for SpawnErrorKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("SpawnErrorKind") + .field(&"shutdown") + .finish() + } +} + +impl SpawnErrorKind { + /// Spawning is failing because the executor has been shut down. + pub fn shutdown() -> SpawnErrorKind { + SpawnErrorKind { _hidden: () } + } + + /// Check whether this error is the `shutdown` error. + pub fn is_shutdown(&self) -> bool { + true + } +} + +/// The result of a failed spawn +#[derive(Debug)] +pub struct SpawnObjError { + /// The kind of error + pub kind: SpawnErrorKind, + + /// The future for which spawning inside a task was attempted + pub future: FutureObj<'static, ()>, +} + +/// The result of a failed spawn +#[derive(Debug)] +pub struct SpawnLocalObjError { + /// The kind of error + pub kind: SpawnErrorKind, + + /// The future for which spawning inside a task was attempted + pub future: LocalFutureObj<'static, ()>, +} diff --git a/src/libcore/task/wake.rs b/src/libcore/task/wake.rs new file mode 100644 index 000000000000..d770536ef427 --- /dev/null +++ b/src/libcore/task/wake.rs @@ -0,0 +1,279 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + +use {fmt, mem}; +use marker::Unpin; +use ptr::NonNull; + +/// A `Waker` is a handle for waking up a task by notifying its executor that it +/// is ready to be run. +/// +/// This handle contains a trait object pointing to an instance of the `UnsafeWake` +/// trait, allowing notifications to get routed through it. +#[repr(transparent)] +pub struct Waker { + inner: NonNull, +} + +impl Unpin for Waker {} +unsafe impl Send for Waker {} +unsafe impl Sync for Waker {} + +impl Waker { + /// Constructs a new `Waker` directly. + /// + /// Note that most code will not need to call this. Implementers of the + /// `UnsafeWake` trait will typically provide a wrapper that calls this + /// but you otherwise shouldn't call it directly. + /// + /// If you're working with the standard library then it's recommended to + /// use the `Waker::from` function instead which works with the safe + /// `Arc` type and the safe `Wake` trait. + #[inline] + pub unsafe fn new(inner: NonNull) -> Self { + Waker { inner } + } + + /// Wake up the task associated with this `Waker`. + #[inline] + pub fn wake(&self) { + unsafe { self.inner.as_ref().wake() } + } + + /// Returns whether or not this `Waker` and `other` awaken the same task. + /// + /// This function works on a best-effort basis, and may return false even + /// when the `Waker`s would awaken the same task. However, if this function + /// returns true, it is guaranteed that the `Waker`s will awaken the same + /// task. + /// + /// This function is primarily used for optimization purposes. 
+ #[inline] + pub fn will_wake(&self, other: &Waker) -> bool { + self.inner == other.inner + } +} + +impl Clone for Waker { + #[inline] + fn clone(&self) -> Self { + unsafe { + self.inner.as_ref().clone_raw() + } + } +} + +impl fmt::Debug for Waker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Waker") + .finish() + } +} + +impl Drop for Waker { + #[inline] + fn drop(&mut self) { + unsafe { + self.inner.as_ref().drop_raw() + } + } +} + +/// A `LocalWaker` is a handle for waking up a task by notifying its executor that it +/// is ready to be run. +/// +/// This is similar to the `Waker` type, but cannot be sent across threads. +/// Task executors can use this type to implement more optimized singlethreaded wakeup +/// behavior. +#[repr(transparent)] +pub struct LocalWaker { + inner: NonNull, +} + +impl Unpin for LocalWaker {} +impl !Send for LocalWaker {} +impl !Sync for LocalWaker {} + +impl LocalWaker { + /// Constructs a new `LocalWaker` directly. + /// + /// Note that most code will not need to call this. Implementers of the + /// `UnsafeWake` trait will typically provide a wrapper that calls this + /// but you otherwise shouldn't call it directly. + /// + /// If you're working with the standard library then it's recommended to + /// use the `local_waker_from_nonlocal` or `local_waker` to convert a `Waker` + /// into a `LocalWaker`. + /// + /// For this function to be used safely, it must be sound to call `inner.wake_local()` + /// on the current thread. + #[inline] + pub unsafe fn new(inner: NonNull) -> Self { + LocalWaker { inner } + } + + /// Wake up the task associated with this `LocalWaker`. + #[inline] + pub fn wake(&self) { + unsafe { self.inner.as_ref().wake_local() } + } + + /// Returns whether or not this `LocalWaker` and `other` `LocalWaker` awaken the same task. + /// + /// This function works on a best-effort basis, and may return false even + /// when the `LocalWaker`s would awaken the same task. 
However, if this function + /// returns true, it is guaranteed that the `LocalWaker`s will awaken the same + /// task. + /// + /// This function is primarily used for optimization purposes. + #[inline] + pub fn will_wake(&self, other: &LocalWaker) -> bool { + self.inner == other.inner + } + + /// Returns whether or not this `LocalWaker` and `other` `Waker` awaken the same task. + /// + /// This function works on a best-effort basis, and may return false even + /// when the `Waker`s would awaken the same task. However, if this function + /// returns true, it is guaranteed that the `LocalWaker`s will awaken the same + /// task. + /// + /// This function is primarily used for optimization purposes. + #[inline] + pub fn will_wake_nonlocal(&self, other: &Waker) -> bool { + self.inner == other.inner + } +} + +impl From for Waker { + #[inline] + fn from(local_waker: LocalWaker) -> Self { + let inner = local_waker.inner; + mem::forget(local_waker); + Waker { inner } + } +} + +impl Clone for LocalWaker { + #[inline] + fn clone(&self) -> Self { + let waker = unsafe { self.inner.as_ref().clone_raw() }; + let inner = waker.inner; + mem::forget(waker); + LocalWaker { inner } + } +} + +impl fmt::Debug for LocalWaker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Waker") + .finish() + } +} + +impl Drop for LocalWaker { + #[inline] + fn drop(&mut self) { + unsafe { + self.inner.as_ref().drop_raw() + } + } +} + +/// An unsafe trait for implementing custom memory management for a `Waker` or `LocalWaker`. +/// +/// A `Waker` conceptually is a cloneable trait object for `Wake`, and is +/// most often essentially just `Arc`. However, in some contexts +/// (particularly `no_std`), it's desirable to avoid `Arc` in favor of some +/// custom memory management strategy. This trait is designed to allow for such +/// customization. +/// +/// When using `std`, a default implementation of the `UnsafeWake` trait is provided for +/// `Arc` where `T: Wake`. 
+pub unsafe trait UnsafeWake: Send + Sync { + /// Creates a clone of this `UnsafeWake` and stores it behind a `Waker`. + /// + /// This function will create a new uniquely owned handle that under the + /// hood references the same notification instance. In other words calls + /// to `wake` on the returned handle should be equivalent to calls to + /// `wake` on this handle. + /// + /// # Unsafety + /// + /// This function is unsafe to call because it's asserting the `UnsafeWake` + /// value is in a consistent state, i.e. hasn't been dropped. + unsafe fn clone_raw(&self) -> Waker; + + /// Drops this instance of `UnsafeWake`, deallocating resources + /// associated with it. + /// + /// FIXME(cramertj) + /// This method is intended to have a signature such as: + /// + /// ```ignore (not-a-doctest) + /// fn drop_raw(self: *mut Self); + /// ``` + /// + /// Unfortunately in Rust today that signature is not object safe. + /// Nevertheless it's recommended to implement this function *as if* that + /// were its signature. As such it is not safe to call on an invalid + /// pointer, nor is the validity of the pointer guaranteed after this + /// function returns. + /// + /// # Unsafety + /// + /// This function is unsafe to call because it's asserting the `UnsafeWake` + /// value is in a consistent state, i.e. hasn't been dropped. + unsafe fn drop_raw(&self); + + /// Indicates that the associated task is ready to make progress and should + /// be `poll`ed. + /// + /// Executors generally maintain a queue of "ready" tasks; `wake` should place + /// the associated task onto this queue. + /// + /// # Panics + /// + /// Implementations should avoid panicking, but clients should also be prepared + /// for panics. + /// + /// # Unsafety + /// + /// This function is unsafe to call because it's asserting the `UnsafeWake` + /// value is in a consistent state, i.e. hasn't been dropped. 
+ unsafe fn wake(&self); + + /// Indicates that the associated task is ready to make progress and should + /// be `poll`ed. This function is the same as `wake`, but can only be called + /// from the thread that this `UnsafeWake` is "local" to. This allows for + /// implementors to provide specialized wakeup behavior specific to the current + /// thread. This function is called by `LocalWaker::wake`. + /// + /// Executors generally maintain a queue of "ready" tasks; `wake_local` should place + /// the associated task onto this queue. + /// + /// # Panics + /// + /// Implementations should avoid panicking, but clients should also be prepared + /// for panics. + /// + /// # Unsafety + /// + /// This function is unsafe to call because it's asserting the `UnsafeWake` + /// value is in a consistent state, i.e. hasn't been dropped, and that the + /// `UnsafeWake` hasn't moved from the thread on which it was created. + unsafe fn wake_local(&self) { + self.wake() + } +} diff --git a/src/libcore/tests/any.rs b/src/libcore/tests/any.rs index 2d3e81aa131e..a80bf9395303 100644 --- a/src/libcore/tests/any.rs +++ b/src/libcore/tests/any.rs @@ -17,7 +17,7 @@ static TEST: &'static str = "Test"; #[test] fn any_referenced() { - let (a, b, c) = (&5 as &Any, &TEST as &Any, &Test as &Any); + let (a, b, c) = (&5 as &dyn Any, &TEST as &dyn Any, &Test as &dyn Any); assert!(a.is::()); assert!(!b.is::()); @@ -34,7 +34,11 @@ fn any_referenced() { #[test] fn any_owning() { - let (a, b, c) = (box 5_usize as Box, box TEST as Box, box Test as Box); + let (a, b, c) = ( + box 5_usize as Box, + box TEST as Box, + box Test as Box, + ); assert!(a.is::()); assert!(!b.is::()); @@ -51,7 +55,7 @@ fn any_owning() { #[test] fn any_downcast_ref() { - let a = &5_usize as &Any; + let a = &5_usize as &dyn Any; match a.downcast_ref::() { Some(&5) => {} @@ -69,9 +73,9 @@ fn any_downcast_mut() { let mut a = 5_usize; let mut b: Box<_> = box 7_usize; - let a_r = &mut a as &mut Any; + let a_r = &mut a as &mut dyn 
Any; let tmp: &mut usize = &mut *b; - let b_r = tmp as &mut Any; + let b_r = tmp as &mut dyn Any; match a_r.downcast_mut::() { Some(x) => { @@ -113,7 +117,7 @@ fn any_downcast_mut() { #[test] fn any_fixed_vec() { let test = [0_usize; 8]; - let test = &test as &Any; + let test = &test as &dyn Any; assert!(test.is::<[usize; 8]>()); assert!(!test.is::<[usize; 10]>()); } diff --git a/src/libcore/tests/ascii.rs b/src/libcore/tests/ascii.rs new file mode 100644 index 000000000000..950222dbcfa3 --- /dev/null +++ b/src/libcore/tests/ascii.rs @@ -0,0 +1,357 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use core::char::from_u32; + +#[test] +fn test_is_ascii() { + assert!(b"".is_ascii()); + assert!(b"banana\0\x7F".is_ascii()); + assert!(b"banana\0\x7F".iter().all(|b| b.is_ascii())); + assert!(!b"Vi\xe1\xbb\x87t Nam".is_ascii()); + assert!(!b"Vi\xe1\xbb\x87t Nam".iter().all(|b| b.is_ascii())); + assert!(!b"\xe1\xbb\x87".iter().any(|b| b.is_ascii())); + + assert!("".is_ascii()); + assert!("banana\0\u{7F}".is_ascii()); + assert!("banana\0\u{7F}".chars().all(|c| c.is_ascii())); + assert!(!"ประเทศไทย中华Việt Nam".chars().all(|c| c.is_ascii())); + assert!(!"ประเทศไทย中华ệ ".chars().any(|c| c.is_ascii())); +} + +#[test] +fn test_to_ascii_uppercase() { + assert_eq!("url()URL()uRl()ürl".to_ascii_uppercase(), "URL()URL()URL()üRL"); + assert_eq!("hıKß".to_ascii_uppercase(), "HıKß"); + + for i in 0..501 { + let upper = if 'a' as u32 <= i && i <= 'z' as u32 { i + 'A' as u32 - 'a' as u32 } + else { i }; + assert_eq!((from_u32(i).unwrap()).to_string().to_ascii_uppercase(), + (from_u32(upper).unwrap()).to_string()); + } +} + +#[test] +fn test_to_ascii_lowercase() { + 
assert_eq!("url()URL()uRl()Ürl".to_ascii_lowercase(), "url()url()url()Ürl"); + // Dotted capital I, Kelvin sign, Sharp S. + assert_eq!("HİKß".to_ascii_lowercase(), "hİKß"); + + for i in 0..501 { + let lower = if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 } + else { i }; + assert_eq!((from_u32(i).unwrap()).to_string().to_ascii_lowercase(), + (from_u32(lower).unwrap()).to_string()); + } +} + +#[test] +fn test_make_ascii_lower_case() { + macro_rules! test { + ($from: expr, $to: expr) => { + { + let mut x = $from; + x.make_ascii_lowercase(); + assert_eq!(x, $to); + } + } + } + test!(b'A', b'a'); + test!(b'a', b'a'); + test!(b'!', b'!'); + test!('A', 'a'); + test!('À', 'À'); + test!('a', 'a'); + test!('!', '!'); + test!(b"H\xc3\x89".to_vec(), b"h\xc3\x89"); + test!("HİKß".to_string(), "hİKß"); +} + + +#[test] +fn test_make_ascii_upper_case() { + macro_rules! test { + ($from: expr, $to: expr) => { + { + let mut x = $from; + x.make_ascii_uppercase(); + assert_eq!(x, $to); + } + } + } + test!(b'a', b'A'); + test!(b'A', b'A'); + test!(b'!', b'!'); + test!('a', 'A'); + test!('à', 'à'); + test!('A', 'A'); + test!('!', '!'); + test!(b"h\xc3\xa9".to_vec(), b"H\xc3\xa9"); + test!("hıKß".to_string(), "HıKß"); + + let mut x = "Hello".to_string(); + x[..3].make_ascii_uppercase(); // Test IndexMut on String. + assert_eq!(x, "HELlo") +} + +#[test] +fn test_eq_ignore_ascii_case() { + assert!("url()URL()uRl()Ürl".eq_ignore_ascii_case("url()url()url()Ürl")); + assert!(!"Ürl".eq_ignore_ascii_case("ürl")); + // Dotted capital I, Kelvin sign, Sharp S. 
+ assert!("HİKß".eq_ignore_ascii_case("hİKß")); + assert!(!"İ".eq_ignore_ascii_case("i")); + assert!(!"K".eq_ignore_ascii_case("k")); + assert!(!"ß".eq_ignore_ascii_case("s")); + + for i in 0..501 { + let lower = if 'A' as u32 <= i && i <= 'Z' as u32 { i + 'a' as u32 - 'A' as u32 } + else { i }; + assert!((from_u32(i).unwrap()).to_string().eq_ignore_ascii_case( + &from_u32(lower).unwrap().to_string())); + } +} + +#[test] +fn inference_works() { + let x = "a".to_string(); + x.eq_ignore_ascii_case("A"); +} + +// Shorthands used by the is_ascii_* tests. +macro_rules! assert_all { + ($what:ident, $($str:tt),+) => {{ + $( + for b in $str.chars() { + if !b.$what() { + panic!("expected {}({}) but it isn't", + stringify!($what), b); + } + } + for b in $str.as_bytes().iter() { + if !b.$what() { + panic!("expected {}(0x{:02x})) but it isn't", + stringify!($what), b); + } + } + )+ + }}; + ($what:ident, $($str:tt),+,) => (assert_all!($what,$($str),+)) +} +macro_rules! assert_none { + ($what:ident, $($str:tt),+) => {{ + $( + for b in $str.chars() { + if b.$what() { + panic!("expected not-{}({}) but it is", + stringify!($what), b); + } + } + for b in $str.as_bytes().iter() { + if b.$what() { + panic!("expected not-{}(0x{:02x})) but it is", + stringify!($what), b); + } + } + )* + }}; + ($what:ident, $($str:tt),+,) => (assert_none!($what,$($str),+)) +} + +#[test] +fn test_is_ascii_alphabetic() { + assert_all!(is_ascii_alphabetic, + "", + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + ); + assert_none!(is_ascii_alphabetic, + "0123456789", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_uppercase() { + assert_all!(is_ascii_uppercase, + "", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + ); + assert_none!(is_ascii_uppercase, + "abcdefghijklmnopqrstuvwxyz", + "0123456789", + 
"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_lowercase() { + assert_all!(is_ascii_lowercase, + "abcdefghijklmnopqrstuvwxyz", + ); + assert_none!(is_ascii_lowercase, + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "0123456789", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_alphanumeric() { + assert_all!(is_ascii_alphanumeric, + "", + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "0123456789", + ); + assert_none!(is_ascii_alphanumeric, + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_digit() { + assert_all!(is_ascii_digit, + "", + "0123456789", + ); + assert_none!(is_ascii_digit, + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_hexdigit() { + assert_all!(is_ascii_hexdigit, + "", + "0123456789", + "abcdefABCDEF", + ); + assert_none!(is_ascii_hexdigit, + "ghijklmnopqrstuvwxyz", + "GHIJKLMNOQPRSTUVWXYZ", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_punctuation() { + 
assert_all!(is_ascii_punctuation, + "", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + ); + assert_none!(is_ascii_punctuation, + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "0123456789", + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_graphic() { + assert_all!(is_ascii_graphic, + "", + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "0123456789", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + ); + assert_none!(is_ascii_graphic, + " \t\n\x0c\r", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_whitespace() { + assert_all!(is_ascii_whitespace, + "", + " \t\n\x0c\r", + ); + assert_none!(is_ascii_whitespace, + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "0123456789", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x0b\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); +} + +#[test] +fn test_is_ascii_control() { + assert_all!(is_ascii_control, + "", + "\x00\x01\x02\x03\x04\x05\x06\x07", + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + "\x10\x11\x12\x13\x14\x15\x16\x17", + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", + "\x7f", + ); + assert_none!(is_ascii_control, + "abcdefghijklmnopqrstuvwxyz", + "ABCDEFGHIJKLMNOQPRSTUVWXYZ", + "0123456789", + "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + " ", + ); +} diff --git a/src/libcore/tests/atomic.rs b/src/libcore/tests/atomic.rs index 9babe24a9856..a3667b3f3fee 100644 --- a/src/libcore/tests/atomic.rs +++ b/src/libcore/tests/atomic.rs @@ -48,6 +48,13 @@ fn uint_and() { assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); } +#[test] +fn uint_nand() { + let x = AtomicUsize::new(0xf731); + 
assert_eq!(x.fetch_nand(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), !(0xf731 & 0x137f)); +} + #[test] fn uint_or() { let x = AtomicUsize::new(0xf731); @@ -69,6 +76,13 @@ fn int_and() { assert_eq!(x.load(SeqCst), 0xf731 & 0x137f); } +#[test] +fn int_nand() { + let x = AtomicIsize::new(0xf731); + assert_eq!(x.fetch_nand(0x137f, SeqCst), 0xf731); + assert_eq!(x.load(SeqCst), !(0xf731 & 0x137f)); +} + #[test] fn int_or() { let x = AtomicIsize::new(0xf731); @@ -90,8 +104,10 @@ static S_UINT: AtomicUsize = AtomicUsize::new(0); #[test] fn static_init() { - assert!(!S_FALSE.load(SeqCst)); - assert!(S_TRUE.load(SeqCst)); - assert!(S_INT.load(SeqCst) == 0); - assert!(S_UINT.load(SeqCst) == 0); + // Note that we're not really testing the mutability here but it's important + // on Android at the moment (#49775) + assert!(!S_FALSE.swap(true, SeqCst)); + assert!(S_TRUE.swap(false, SeqCst)); + assert!(S_INT.fetch_add(1, SeqCst) == 0); + assert!(S_UINT.fetch_add(1, SeqCst) == 0); } diff --git a/src/libcore/tests/cell.rs b/src/libcore/tests/cell.rs index cc0ef6a6f17e..4b7243b9cfc7 100644 --- a/src/libcore/tests/cell.rs +++ b/src/libcore/tests/cell.rs @@ -26,6 +26,17 @@ fn smoketest_cell() { assert!(y.get() == (30, 40)); } +#[test] +fn cell_update() { + let x = Cell::new(10); + + assert_eq!(x.update(|x| x + 5), 15); + assert_eq!(x.get(), 15); + + assert_eq!(x.update(|x| x / 3), 5); + assert_eq!(x.get(), 5); +} + #[test] fn cell_has_sensible_show() { let x = Cell::new("foo bar"); @@ -154,6 +165,64 @@ fn ref_map_does_not_update_flag() { assert!(x.try_borrow_mut().is_ok()); } +#[test] +fn ref_map_split_updates_flag() { + let x = RefCell::new([1, 2]); + { + let b1 = x.borrow(); + assert!(x.try_borrow().is_ok()); + assert!(x.try_borrow_mut().is_err()); + { + let (_b2, _b3) = Ref::map_split(b1, |slc| slc.split_at(1)); + assert!(x.try_borrow().is_ok()); + assert!(x.try_borrow_mut().is_err()); + } + assert!(x.try_borrow().is_ok()); + assert!(x.try_borrow_mut().is_ok()); + } + 
assert!(x.try_borrow().is_ok()); + assert!(x.try_borrow_mut().is_ok()); + + { + let b1 = x.borrow_mut(); + assert!(x.try_borrow().is_err()); + assert!(x.try_borrow_mut().is_err()); + { + let (_b2, _b3) = RefMut::map_split(b1, |slc| slc.split_at_mut(1)); + assert!(x.try_borrow().is_err()); + assert!(x.try_borrow_mut().is_err()); + drop(_b2); + assert!(x.try_borrow().is_err()); + assert!(x.try_borrow_mut().is_err()); + } + assert!(x.try_borrow().is_ok()); + assert!(x.try_borrow_mut().is_ok()); + } + assert!(x.try_borrow().is_ok()); + assert!(x.try_borrow_mut().is_ok()); +} + +#[test] +fn ref_map_split() { + let x = RefCell::new([1, 2]); + let (b1, b2) = Ref::map_split(x.borrow(), |slc| slc.split_at(1)); + assert_eq!(*b1, [1]); + assert_eq!(*b2, [2]); +} + +#[test] +fn ref_mut_map_split() { + let x = RefCell::new([1, 2]); + { + let (mut b1, mut b2) = RefMut::map_split(x.borrow_mut(), |slc| slc.split_at_mut(1)); + assert_eq!(*b1, [1]); + assert_eq!(*b2, [2]); + b1[0] = 2; + b2[0] = 1; + } + assert_eq!(*x.borrow(), [2, 1]); +} + #[test] fn ref_map_accessor() { struct X(RefCell<(u32, char)>); diff --git a/src/libcore/tests/char.rs b/src/libcore/tests/char.rs index 4e10ceac878b..3d99c8ea9e22 100644 --- a/src/libcore/tests/char.rs +++ b/src/libcore/tests/char.rs @@ -148,9 +148,10 @@ fn test_is_control() { } #[test] -fn test_is_digit() { +fn test_is_numeric() { assert!('2'.is_numeric()); assert!('7'.is_numeric()); + assert!('¾'.is_numeric()); assert!(!'c'.is_numeric()); assert!(!'i'.is_numeric()); assert!(!'z'.is_numeric()); @@ -181,6 +182,7 @@ fn test_escape_debug() { assert_eq!(string('\u{ff}'), "\u{ff}"); assert_eq!(string('\u{11b}'), "\u{11b}"); assert_eq!(string('\u{1d4b6}'), "\u{1d4b6}"); + assert_eq!(string('\u{301}'), "\\u{301}"); // combining character assert_eq!(string('\u{200b}'),"\\u{200b}"); // zero width space assert_eq!(string('\u{e000}'), "\\u{e000}"); // private use 1 assert_eq!(string('\u{100000}'), "\\u{100000}"); // private use 2 @@ -362,53 +364,3 @@ fn 
eu_iterator_specializations() { check('\u{12340}'); check('\u{10FFFF}'); } - -#[test] -fn test_decode_utf8() { - macro_rules! assert_decode_utf8 { - ($input_bytes: expr, $expected_str: expr) => { - let input_bytes: &[u8] = &$input_bytes; - let s = char::decode_utf8(input_bytes.iter().cloned()) - .map(|r_b| r_b.unwrap_or('\u{FFFD}')) - .collect::(); - assert_eq!(s, $expected_str, - "input bytes: {:?}, expected str: {:?}, result: {:?}", - input_bytes, $expected_str, s); - assert_eq!(String::from_utf8_lossy(&$input_bytes), $expected_str); - } - } - - assert_decode_utf8!([], ""); - assert_decode_utf8!([0x41], "A"); - assert_decode_utf8!([0xC1, 0x81], "��"); - assert_decode_utf8!([0xE2, 0x99, 0xA5], "♥"); - assert_decode_utf8!([0xE2, 0x99, 0xA5, 0x41], "♥A"); - assert_decode_utf8!([0xE2, 0x99], "�"); - assert_decode_utf8!([0xE2, 0x99, 0x41], "�A"); - assert_decode_utf8!([0xC0], "�"); - assert_decode_utf8!([0xC0, 0x41], "�A"); - assert_decode_utf8!([0x80], "�"); - assert_decode_utf8!([0x80, 0x41], "�A"); - assert_decode_utf8!([0xFE], "�"); - assert_decode_utf8!([0xFE, 0x41], "�A"); - assert_decode_utf8!([0xFF], "�"); - assert_decode_utf8!([0xFF, 0x41], "�A"); - assert_decode_utf8!([0xC0, 0x80], "��"); - - // Surrogates - assert_decode_utf8!([0xED, 0x9F, 0xBF], "\u{D7FF}"); - assert_decode_utf8!([0xED, 0xA0, 0x80], "���"); - assert_decode_utf8!([0xED, 0xBF, 0x80], "���"); - assert_decode_utf8!([0xEE, 0x80, 0x80], "\u{E000}"); - - // char::MAX - assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0xBF], "\u{10FFFF}"); - assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0x41], "�A"); - assert_decode_utf8!([0xF4, 0x90, 0x80, 0x80], "����"); - - // 5 and 6 bytes sequence - // Part of the original design of UTF-8, - // but invalid now that UTF-8 is artificially restricted to match the range of UTF-16. 
- assert_decode_utf8!([0xF8, 0x80, 0x80, 0x80, 0x80], "�����"); - assert_decode_utf8!([0xFC, 0x80, 0x80, 0x80, 0x80, 0x80], "������"); -} diff --git a/src/libcore/tests/fmt/num.rs b/src/libcore/tests/fmt/num.rs index 4ddedd910048..bc205ec0582e 100644 --- a/src/libcore/tests/fmt/num.rs +++ b/src/libcore/tests/fmt/num.rs @@ -150,3 +150,9 @@ fn test_format_int_twos_complement() { assert!(format!("{}", i32::MIN) == "-2147483648"); assert!(format!("{}", i64::MIN) == "-9223372036854775808"); } + +#[test] +fn test_format_debug_hex() { + assert!(format!("{:02x?}", b"Foo\0") == "[46, 6f, 6f, 00]"); + assert!(format!("{:02X?}", b"Foo\0") == "[46, 6F, 6F, 00]"); +} diff --git a/src/libcore/tests/hash/mod.rs b/src/libcore/tests/hash/mod.rs index 8716421b424d..85c9d41b65b5 100644 --- a/src/libcore/tests/hash/mod.rs +++ b/src/libcore/tests/hash/mod.rs @@ -128,7 +128,7 @@ fn test_custom_state() { fn test_indirect_hasher() { let mut hasher = MyHasher { hash: 0 }; { - let mut indirect_hasher: &mut Hasher = &mut hasher; + let mut indirect_hasher: &mut dyn Hasher = &mut hasher; 5u32.hash(&mut indirect_hasher); } assert_eq!(hasher.hash, 5); diff --git a/src/libcore/tests/hash/sip.rs b/src/libcore/tests/hash/sip.rs index c6dd41798f2a..bad858011e96 100644 --- a/src/libcore/tests/hash/sip.rs +++ b/src/libcore/tests/hash/sip.rs @@ -11,7 +11,7 @@ #![allow(deprecated)] use core::hash::{Hash, Hasher}; -use core::hash::{SipHasher, SipHasher13, SipHasher24}; +use core::hash::{SipHasher, SipHasher13}; use core::{slice, mem}; // Hash just the bytes of the slice, without length prefix @@ -224,14 +224,14 @@ fn test_siphash_2_4() { let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08; let mut buf = Vec::new(); let mut t = 0; - let mut state_inc = SipHasher24::new_with_keys(k0, k1); + let mut state_inc = SipHasher::new_with_keys(k0, k1); while t < 64 { let vec = u8to64_le!(vecs[t], 0); - let out = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf)); + let out = hash_with(SipHasher::new_with_keys(k0, k1), 
&Bytes(&buf)); assert_eq!(vec, out); - let full = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf)); + let full = hash_with(SipHasher::new_with_keys(k0, k1), &Bytes(&buf)); let i = state_inc.finish(); assert_eq!(full, i); @@ -322,13 +322,13 @@ fn test_hash_no_concat_alias() { #[test] fn test_write_short_works() { let test_usize = 0xd0c0b0a0usize; - let mut h1 = SipHasher24::new(); + let mut h1 = SipHasher::new(); h1.write_usize(test_usize); h1.write(b"bytes"); h1.write(b"string"); h1.write_u8(0xFFu8); h1.write_u8(0x01u8); - let mut h2 = SipHasher24::new(); + let mut h2 = SipHasher::new(); h2.write(unsafe { slice::from_raw_parts(&test_usize as *const _ as *const u8, mem::size_of::()) diff --git a/src/libcore/tests/intrinsics.rs b/src/libcore/tests/intrinsics.rs index 2b380abf63c5..9f3cba26a62d 100644 --- a/src/libcore/tests/intrinsics.rs +++ b/src/libcore/tests/intrinsics.rs @@ -22,7 +22,7 @@ fn test_typeid_sized_types() { #[test] fn test_typeid_unsized_types() { trait Z {} - struct X(str); struct Y(Z + 'static); + struct X(str); struct Y(dyn Z + 'static); assert_eq!(TypeId::of::(), TypeId::of::()); assert_eq!(TypeId::of::(), TypeId::of::()); diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index 8997cf9c6bff..72b115f8b5f7 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -144,6 +144,43 @@ fn test_iterator_chain_find() { assert_eq!(iter.next(), None); } +#[test] +fn test_zip_nth() { + let xs = [0, 1, 2, 4, 5]; + let ys = [10, 11, 12]; + + let mut it = xs.iter().zip(&ys); + assert_eq!(it.nth(0), Some((&0, &10))); + assert_eq!(it.nth(1), Some((&2, &12))); + assert_eq!(it.nth(0), None); + + let mut it = xs.iter().zip(&ys); + assert_eq!(it.nth(3), None); + + let mut it = ys.iter().zip(&xs); + assert_eq!(it.nth(3), None); +} + +#[test] +fn test_zip_nth_side_effects() { + let mut a = Vec::new(); + let mut b = Vec::new(); + let value = [1, 2, 3, 4, 5, 6].iter().cloned() + .map(|n| { + a.push(n); + n * 10 + }) + .zip([2, 
3, 4, 5, 6, 7, 8].iter().cloned().map(|n| { + b.push(n * 100); + n * 1000 + })) + .skip(1) + .nth(3); + assert_eq!(value, Some((50, 6000))); + assert_eq!(a, vec![1, 2, 3, 4, 5]); + assert_eq!(b, vec![200, 300, 400, 500, 600]); +} + #[test] fn test_iterator_step_by() { // Identity @@ -161,6 +198,68 @@ fn test_iterator_step_by() { assert_eq!(it.next(), None); } +#[test] +fn test_iterator_step_by_nth() { + let mut it = (0..16).step_by(5); + assert_eq!(it.nth(0), Some(0)); + assert_eq!(it.nth(0), Some(5)); + assert_eq!(it.nth(0), Some(10)); + assert_eq!(it.nth(0), Some(15)); + assert_eq!(it.nth(0), None); + + let it = (0..18).step_by(5); + assert_eq!(it.clone().nth(0), Some(0)); + assert_eq!(it.clone().nth(1), Some(5)); + assert_eq!(it.clone().nth(2), Some(10)); + assert_eq!(it.clone().nth(3), Some(15)); + assert_eq!(it.clone().nth(4), None); + assert_eq!(it.clone().nth(42), None); +} + +#[test] +fn test_iterator_step_by_nth_overflow() { + #[cfg(target_pointer_width = "8")] + type Bigger = u16; + #[cfg(target_pointer_width = "16")] + type Bigger = u32; + #[cfg(target_pointer_width = "32")] + type Bigger = u64; + #[cfg(target_pointer_width = "64")] + type Bigger = u128; + + #[derive(Clone)] + struct Test(Bigger); + impl<'a> Iterator for &'a mut Test { + type Item = i32; + fn next(&mut self) -> Option { Some(21) } + fn nth(&mut self, n: usize) -> Option { + self.0 += n as Bigger + 1; + Some(42) + } + } + + let mut it = Test(0); + let root = usize::MAX >> (::std::mem::size_of::() * 8 / 2); + let n = root + 20; + (&mut it).step_by(n).nth(n); + assert_eq!(it.0, n as Bigger * n as Bigger); + + // large step + let mut it = Test(0); + (&mut it).step_by(usize::MAX).nth(5); + assert_eq!(it.0, (usize::MAX as Bigger) * 5); + + // n + 1 overflows + let mut it = Test(0); + (&mut it).step_by(2).nth(usize::MAX); + assert_eq!(it.0, (usize::MAX as Bigger) * 2); + + // n + 1 overflows + let mut it = Test(0); + (&mut it).step_by(1).nth(usize::MAX); + assert_eq!(it.0, (usize::MAX as 
Bigger) * 1); +} + #[test] #[should_panic] fn test_iterator_step_by_zero() { @@ -812,6 +911,44 @@ fn test_iterator_flat_map_fold() { assert_eq!(i, 0); } +#[test] +fn test_iterator_flatten() { + let xs = [0, 3, 6]; + let ys = [0, 1, 2, 3, 4, 5, 6, 7, 8]; + let it = xs.iter().map(|&x| (x..).step_by(1).take(3)).flatten(); + let mut i = 0; + for x in it { + assert_eq!(x, ys[i]); + i += 1; + } + assert_eq!(i, ys.len()); +} + +/// Test `Flatten::fold` with items already picked off the front and back, +/// to make sure all parts of the `Flatten` are folded correctly. +#[test] +fn test_iterator_flatten_fold() { + let xs = [0, 3, 6]; + let ys = [1, 2, 3, 4, 5, 6, 7]; + let mut it = xs.iter().map(|&x| x..x+3).flatten(); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.next_back(), Some(8)); + let i = it.fold(0, |i, x| { + assert_eq!(x, ys[i]); + i + 1 + }); + assert_eq!(i, ys.len()); + + let mut it = xs.iter().map(|&x| x..x+3).flatten(); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.next_back(), Some(8)); + let i = it.rfold(ys.len(), |i, x| { + assert_eq!(x, ys[i - 1]); + i - 1 + }); + assert_eq!(i, 0); +} + #[test] fn test_inspect() { let xs = [1, 2, 3, 4]; @@ -1009,6 +1146,33 @@ fn test_find() { assert!(v.iter().find(|&&x| x % 12 == 0).is_none()); } +#[test] +fn test_find_map() { + let xs: &[isize] = &[]; + assert_eq!(xs.iter().find_map(half_if_even), None); + let xs: &[isize] = &[3, 5]; + assert_eq!(xs.iter().find_map(half_if_even), None); + let xs: &[isize] = &[4, 5]; + assert_eq!(xs.iter().find_map(half_if_even), Some(2)); + let xs: &[isize] = &[3, 6]; + assert_eq!(xs.iter().find_map(half_if_even), Some(3)); + + let xs: &[isize] = &[1, 2, 3, 4, 5, 6, 7]; + let mut iter = xs.iter(); + assert_eq!(iter.find_map(half_if_even), Some(1)); + assert_eq!(iter.find_map(half_if_even), Some(2)); + assert_eq!(iter.find_map(half_if_even), Some(3)); + assert_eq!(iter.next(), Some(&7)); + + fn half_if_even(x: &isize) -> Option { + if x % 2 == 0 { + Some(x / 2) + } else { + None + 
} + } +} + #[test] fn test_position() { let v = &[1, 3, 9, 27, 103, 14, 11]; @@ -1225,6 +1389,23 @@ fn test_double_ended_flat_map() { assert_eq!(it.next_back(), None); } +#[test] +fn test_double_ended_flatten() { + let u = [0,1]; + let v = [5,6,7,8]; + let mut it = u.iter().map(|x| &v[*x..v.len()]).flatten(); + assert_eq!(it.next_back().unwrap(), &8); + assert_eq!(it.next().unwrap(), &5); + assert_eq!(it.next_back().unwrap(), &7); + assert_eq!(it.next_back().unwrap(), &6); + assert_eq!(it.next_back().unwrap(), &8); + assert_eq!(it.next().unwrap(), &6); + assert_eq!(it.next_back().unwrap(), &7); + assert_eq!(it.next_back(), None); + assert_eq!(it.next(), None); + assert_eq!(it.next_back(), None); +} + #[test] fn test_double_ended_range() { assert_eq!((11..14).rev().collect::>(), [13, 12, 11]); @@ -1260,24 +1441,85 @@ fn test_range() { (isize::MAX as usize + 2, Some(isize::MAX as usize + 2))); } +#[test] +fn test_range_exhaustion() { + let mut r = 10..10; + assert!(r.is_empty()); + assert_eq!(r.next(), None); + assert_eq!(r.next_back(), None); + assert_eq!(r, 10..10); + + let mut r = 10..12; + assert_eq!(r.next(), Some(10)); + assert_eq!(r.next(), Some(11)); + assert!(r.is_empty()); + assert_eq!(r, 12..12); + assert_eq!(r.next(), None); + + let mut r = 10..12; + assert_eq!(r.next_back(), Some(11)); + assert_eq!(r.next_back(), Some(10)); + assert!(r.is_empty()); + assert_eq!(r, 10..10); + assert_eq!(r.next_back(), None); + + let mut r = 100..10; + assert!(r.is_empty()); + assert_eq!(r.next(), None); + assert_eq!(r.next_back(), None); + assert_eq!(r, 100..10); +} + #[test] fn test_range_inclusive_exhaustion() { let mut r = 10..=10; assert_eq!(r.next(), Some(10)); - assert_eq!(r, 1..=0); + assert!(r.is_empty()); + assert_eq!(r.next(), None); + assert_eq!(r.next(), None); let mut r = 10..=10; assert_eq!(r.next_back(), Some(10)); - assert_eq!(r, 1..=0); + assert!(r.is_empty()); + assert_eq!(r.next_back(), None); + + let mut r = 10..=12; + assert_eq!(r.next(), Some(10)); + 
assert_eq!(r.next(), Some(11)); + assert_eq!(r.next(), Some(12)); + assert!(r.is_empty()); + assert_eq!(r.next(), None); + + let mut r = 10..=12; + assert_eq!(r.next_back(), Some(12)); + assert_eq!(r.next_back(), Some(11)); + assert_eq!(r.next_back(), Some(10)); + assert!(r.is_empty()); + assert_eq!(r.next_back(), None); let mut r = 10..=12; assert_eq!(r.nth(2), Some(12)); - assert_eq!(r, 1..=0); + assert!(r.is_empty()); + assert_eq!(r.next(), None); let mut r = 10..=12; assert_eq!(r.nth(5), None); - assert_eq!(r, 1..=0); + assert!(r.is_empty()); + assert_eq!(r.next(), None); + let mut r = 100..=10; + assert_eq!(r.next(), None); + assert!(r.is_empty()); + assert_eq!(r.next(), None); + assert_eq!(r.next(), None); + assert_eq!(r, 100..=10); + + let mut r = 100..=10; + assert_eq!(r.next_back(), None); + assert!(r.is_empty()); + assert_eq!(r.next_back(), None); + assert_eq!(r.next_back(), None); + assert_eq!(r, 100..=10); } #[test] @@ -1309,6 +1551,29 @@ fn test_range_from_nth() { assert_eq!(r, 16..); assert_eq!(r.nth(10), Some(26)); assert_eq!(r, 27..); + + assert_eq!((0..).size_hint(), (usize::MAX, None)); +} + +fn is_trusted_len(_: I) {} + +#[test] +fn test_range_from_take() { + let mut it = (0..).take(3); + assert_eq!(it.next(), Some(0)); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(2)); + assert_eq!(it.next(), None); + is_trusted_len((0..).take(3)); + assert_eq!((0..).take(3).size_hint(), (3, Some(3))); + assert_eq!((0..).take(0).size_hint(), (0, Some(0))); + assert_eq!((0..).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX))); +} + +#[test] +fn test_range_from_take_collect() { + let v: Vec<_> = (0..).take(3).collect(); + assert_eq!(v, vec![0, 1, 2]); } #[test] @@ -1324,9 +1589,10 @@ fn test_range_inclusive_nth() { assert_eq!(r.nth(2), Some(15)); assert_eq!(r, 16..=20); assert_eq!(r.is_empty(), false); + assert_eq!(ExactSizeIterator::is_empty(&r), false); assert_eq!(r.nth(10), None); assert_eq!(r.is_empty(), true); - assert_eq!(r, 
1..=0); // We may not want to document/promise this detail + assert_eq!(ExactSizeIterator::is_empty(&r), true); } #[test] @@ -1352,6 +1618,14 @@ fn test_range_step() { assert_eq!((isize::MIN..isize::MAX).step_by(1).size_hint(), (usize::MAX, Some(usize::MAX))); } +#[test] +fn test_range_inclusive_step() { + assert_eq!((0..=50).step_by(10).collect::>(), [0, 10, 20, 30, 40, 50]); + assert_eq!((0..=5).step_by(1).collect::>(), [0, 1, 2, 3, 4, 5]); + assert_eq!((200..=255u8).step_by(10).collect::>(), [200, 210, 220, 230, 240, 250]); + assert_eq!((250..=255u8).step_by(1).collect::>(), [250, 251, 252, 253, 254, 255]); +} + #[test] fn test_range_last_max() { assert_eq!((0..20).last(), Some(19)); @@ -1397,12 +1671,85 @@ fn test_range_inclusive_min() { assert_eq!(r.min(), None); } +#[test] +fn test_range_inclusive_folds() { + assert_eq!((1..=10).sum::(), 55); + assert_eq!((1..=10).rev().sum::(), 55); + + let mut it = 40..=50; + assert_eq!(it.try_fold(0, i8::checked_add), None); + assert_eq!(it, 44..=50); + assert_eq!(it.try_rfold(0, i8::checked_add), None); + assert_eq!(it, 44..=47); + + let mut it = 10..=20; + assert_eq!(it.try_fold(0, |a,b| Some(a+b)), Some(165)); + assert!(it.is_empty()); + + let mut it = 10..=20; + assert_eq!(it.try_rfold(0, |a,b| Some(a+b)), Some(165)); + assert!(it.is_empty()); +} + #[test] fn test_repeat() { let mut it = repeat(42); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); + assert_eq!(repeat(42).size_hint(), (usize::MAX, None)); +} + +#[test] +fn test_repeat_take() { + let mut it = repeat(42).take(3); + assert_eq!(it.next(), Some(42)); + assert_eq!(it.next(), Some(42)); + assert_eq!(it.next(), Some(42)); + assert_eq!(it.next(), None); + is_trusted_len(repeat(42).take(3)); + assert_eq!(repeat(42).take(3).size_hint(), (3, Some(3))); + assert_eq!(repeat(42).take(0).size_hint(), (0, Some(0))); + assert_eq!(repeat(42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX))); +} + +#[test] +fn 
test_repeat_take_collect() { + let v: Vec<_> = repeat(42).take(3).collect(); + assert_eq!(v, vec![42, 42, 42]); +} + +#[test] +fn test_repeat_with() { + #[derive(PartialEq, Debug)] + struct NotClone(usize); + let mut it = repeat_with(|| NotClone(42)); + assert_eq!(it.next(), Some(NotClone(42))); + assert_eq!(it.next(), Some(NotClone(42))); + assert_eq!(it.next(), Some(NotClone(42))); + assert_eq!(repeat_with(|| NotClone(42)).size_hint(), (usize::MAX, None)); +} + +#[test] +fn test_repeat_with_take() { + let mut it = repeat_with(|| 42).take(3); + assert_eq!(it.next(), Some(42)); + assert_eq!(it.next(), Some(42)); + assert_eq!(it.next(), Some(42)); + assert_eq!(it.next(), None); + is_trusted_len(repeat_with(|| 42).take(3)); + assert_eq!(repeat_with(|| 42).take(3).size_hint(), (3, Some(3))); + assert_eq!(repeat_with(|| 42).take(0).size_hint(), (0, Some(0))); + assert_eq!(repeat_with(|| 42).take(usize::MAX).size_hint(), + (usize::MAX, Some(usize::MAX))); +} + +#[test] +fn test_repeat_with_take_collect() { + let mut curr = 1; + let v: Vec<_> = repeat_with(|| { let tmp = curr; curr *= 2; tmp }) + .take(5).collect(); + assert_eq!(v, vec![1, 2, 4, 8, 16]); } #[test] @@ -1746,3 +2093,54 @@ fn test_flat_map_try_folds() { assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(35)); } + +#[test] +fn test_flatten_try_folds() { + let f = &|acc, x| i32::checked_add(acc*2/3, x); + let mr = &|x| (5*x)..(5*x + 5); + assert_eq!((0..10).map(mr).flatten().try_fold(7, f), (0..50).try_fold(7, f)); + assert_eq!((0..10).map(mr).flatten().try_rfold(7, f), (0..50).try_rfold(7, f)); + let mut iter = (0..10).map(mr).flatten(); + iter.next(); iter.next_back(); // have front and back iters in progress + assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f)); + + let mut iter = (0..10).map(|x| (4*x)..(4*x + 4)).flatten(); + assert_eq!(iter.try_fold(0, i8::checked_add), None); + assert_eq!(iter.next(), Some(17)); + assert_eq!(iter.try_rfold(0, i8::checked_add), 
None); + assert_eq!(iter.next_back(), Some(35)); +} + +#[test] +fn test_functor_laws() { + // identity: + fn identity(x: T) -> T { x } + assert_eq!((0..10).map(identity).sum::(), (0..10).sum()); + + // composition: + fn f(x: usize) -> usize { x + 3 } + fn g(x: usize) -> usize { x * 2 } + fn h(x: usize) -> usize { g(f(x)) } + assert_eq!((0..10).map(f).map(g).sum::(), (0..10).map(h).sum()); +} + +#[test] +fn test_monad_laws_left_identity() { + fn f(x: usize) -> impl Iterator { + (0..10).map(move |y| x * y) + } + assert_eq!(once(42).flat_map(f.clone()).sum::(), f(42).sum()); +} + +#[test] +fn test_monad_laws_right_identity() { + assert_eq!((0..10).flat_map(|x| once(x)).sum::(), (0..10).sum()); +} + +#[test] +fn test_monad_laws_associativity() { + fn f(x: usize) -> impl Iterator { 0..x } + fn g(x: usize) -> impl Iterator { (0..x).rev() } + assert_eq!((0..10).flat_map(f).flat_map(g).sum::(), + (0..10).flat_map(|x| f(x).flat_map(g)).sum::()); +} diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index 2c0009569d75..fc92a5a22251 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -8,47 +8,47 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![deny(warnings)] - #![feature(box_syntax)] -#![feature(core_float)] +#![feature(cell_update)] #![feature(core_private_bignum)] #![feature(core_private_diy_float)] #![feature(dec2flt)] -#![feature(decode_utf8)] +#![feature(euclidean_division)] #![feature(exact_size_is_empty)] #![feature(fixed_size_array)] #![feature(flt2dec)] #![feature(fmt_internals)] -#![feature(iterator_step_by)] -#![feature(i128_type)] -#![feature(inclusive_range)] -#![feature(inclusive_range_syntax)] -#![feature(iterator_try_fold)] -#![feature(iter_rfind)] -#![feature(iter_rfold)] -#![feature(nonzero)] +#![feature(hashmap_internals)] #![feature(pattern)] +#![feature(range_is_empty)] #![feature(raw)] +#![feature(refcell_map_split)] #![feature(refcell_replace_swap)] -#![feature(sip_hash_13)] #![feature(slice_patterns)] -#![feature(slice_rotate)] #![feature(sort_internals)] #![feature(specialization)] #![feature(step_trait)] +#![feature(str_internals)] #![feature(test)] #![feature(trusted_len)] #![feature(try_from)] #![feature(try_trait)] -#![feature(unique)] #![feature(exact_chunks)] +#![feature(slice_align_to)] +#![feature(align_offset)] +#![feature(reverse_bits)] +#![feature(iterator_find_map)] +#![feature(inner_deref)] +#![feature(slice_internals)] +#![feature(option_replace)] extern crate core; extern crate test; +extern crate rand; mod any; mod array; +mod ascii; mod atomic; mod cell; mod char; @@ -58,6 +58,7 @@ mod fmt; mod hash; mod intrinsics; mod iter; +mod manually_drop; mod mem; mod nonzero; mod num; @@ -68,4 +69,6 @@ mod ptr; mod result; mod slice; mod str; +mod str_lossy; +mod time; mod tuple; diff --git a/src/libcore/tests/manually_drop.rs b/src/libcore/tests/manually_drop.rs new file mode 100644 index 000000000000..82dfb8d4c0b2 --- /dev/null +++ b/src/libcore/tests/manually_drop.rs @@ -0,0 +1,29 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use core::mem::ManuallyDrop; + +#[test] +fn smoke() { + struct TypeWithDrop; + impl Drop for TypeWithDrop { + fn drop(&mut self) { + unreachable!("Should not get dropped"); + } + } + + let x = ManuallyDrop::new(TypeWithDrop); + drop(x); + + // also test unsizing + let x : Box> = + Box::new(ManuallyDrop::new([TypeWithDrop, TypeWithDrop])); + drop(x); +} diff --git a/src/libcore/tests/mem.rs b/src/libcore/tests/mem.rs index f55a1c81463f..714f2babbdff 100644 --- a/src/libcore/tests/mem.rs +++ b/src/libcore/tests/mem.rs @@ -109,11 +109,11 @@ fn test_transmute() { trait Foo { fn dummy(&self) { } } impl Foo for isize {} - let a = box 100isize as Box; + let a = box 100isize as Box; unsafe { let x: ::core::raw::TraitObject = transmute(a); assert!(*(x.data as *const isize) == 100); - let _x: Box = transmute(x); + let _x: Box = transmute(x); } unsafe { diff --git a/src/libcore/tests/nonzero.rs b/src/libcore/tests/nonzero.rs index a795dd575043..8d39298bac3d 100644 --- a/src/libcore/tests/nonzero.rs +++ b/src/libcore/tests/nonzero.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use core::nonzero::NonZero; +use core::num::NonZeroU32; use core::option::Option; use core::option::Option::{Some, None}; use std::mem::size_of; @@ -16,28 +16,28 @@ use std::mem::size_of; #[test] fn test_create_nonzero_instance() { let _a = unsafe { - NonZero::new_unchecked(21) + NonZeroU32::new_unchecked(21) }; } #[test] fn test_size_nonzero_in_option() { - assert_eq!(size_of::>(), size_of::>>()); + assert_eq!(size_of::(), size_of::>()); } #[test] fn test_match_on_nonzero_option() { let a = Some(unsafe { - NonZero::new_unchecked(42) + NonZeroU32::new_unchecked(42) }); match a { Some(val) => assert_eq!(val.get(), 42), - None => panic!("unexpected None while matching on Some(NonZero(_))") + None => panic!("unexpected None while matching on Some(NonZeroU32(_))") } - match unsafe { Some(NonZero::new_unchecked(43)) } { + match unsafe { Some(NonZeroU32::new_unchecked(43)) } { Some(val) => assert_eq!(val.get(), 43), - None => panic!("unexpected None while matching on Some(NonZero(_))") + None => panic!("unexpected None while matching on Some(NonZeroU32(_))") } } @@ -98,3 +98,26 @@ fn test_match_option_string() { None => panic!("unexpected None while matching on Some(String { ... })") } } + +mod atom { + use core::num::NonZeroU32; + + #[derive(PartialEq, Eq)] + pub struct Atom { + index: NonZeroU32, // private + } + pub const FOO_ATOM: Atom = Atom { index: unsafe { NonZeroU32::new_unchecked(7) } }; +} + +macro_rules! 
atom { + ("foo") => { atom::FOO_ATOM } +} + +#[test] +fn test_match_nonzero_const_pattern() { + match atom!("foo") { + // Using as a pattern is supported by the compiler: + atom!("foo") => {} + _ => panic!("Expected the const item as a pattern to match.") + } +} diff --git a/src/libcore/tests/num/dec2flt/mod.rs b/src/libcore/tests/num/dec2flt/mod.rs index 9934e1dab966..17b2f59cd4df 100644 --- a/src/libcore/tests/num/dec2flt/mod.rs +++ b/src/libcore/tests/num/dec2flt/mod.rs @@ -101,6 +101,12 @@ fn lonely_dot() { assert!(".".parse::().is_err()); } +#[test] +fn exponentiated_dot() { + assert!(".e0".parse::().is_err()); + assert!(".e0".parse::().is_err()); +} + #[test] fn lonely_sign() { assert!("+".parse::().is_err()); diff --git a/src/libcore/tests/num/dec2flt/parse.rs b/src/libcore/tests/num/dec2flt/parse.rs index 09acf2bc517b..3ad694e38adb 100644 --- a/src/libcore/tests/num/dec2flt/parse.rs +++ b/src/libcore/tests/num/dec2flt/parse.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::iter; use core::num::dec2flt::parse::{Decimal, parse_decimal}; use core::num::dec2flt::parse::ParseResult::{Valid, Invalid}; @@ -46,7 +45,7 @@ fn valid() { assert_eq!(parse_decimal("1.e300"), Valid(Decimal::new(b"1", b"", 300))); assert_eq!(parse_decimal(".1e300"), Valid(Decimal::new(b"", b"1", 300))); assert_eq!(parse_decimal("101e-33"), Valid(Decimal::new(b"101", b"", -33))); - let zeros: String = iter::repeat('0').take(25).collect(); + let zeros = "0".repeat(25); let s = format!("1.5e{}", zeros); assert_eq!(parse_decimal(&s), Valid(Decimal::new(b"1", b"5", 0))); } diff --git a/src/libcore/tests/num/flt2dec/mod.rs b/src/libcore/tests/num/flt2dec/mod.rs index ef0178815f98..04567e25e25b 100644 --- a/src/libcore/tests/num/flt2dec/mod.rs +++ b/src/libcore/tests/num/flt2dec/mod.rs @@ -23,6 +23,7 @@ mod strategy { mod dragon; mod grisu; } +mod random; pub fn decode_finite(v: T) -> Decoded { match decode(v).1 { diff --git a/src/libcore/tests/num/flt2dec/random.rs b/src/libcore/tests/num/flt2dec/random.rs new file mode 100644 index 000000000000..a1928657dabc --- /dev/null +++ b/src/libcore/tests/num/flt2dec/random.rs @@ -0,0 +1,158 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![cfg(not(target_arch = "wasm32"))] + +use std::i16; +use std::str; + +use core::num::flt2dec::MAX_SIG_DIGITS; +use core::num::flt2dec::strategy::grisu::format_exact_opt; +use core::num::flt2dec::strategy::grisu::format_shortest_opt; +use core::num::flt2dec::{decode, DecodableFloat, FullDecoded, Decoded}; + +use rand::{self, Rand, XorShiftRng}; +use rand::distributions::{IndependentSample, Range}; + +pub fn decode_finite(v: T) -> Decoded { + match decode(v).1 { + FullDecoded::Finite(decoded) => decoded, + full_decoded => panic!("expected finite, got {:?} instead", full_decoded) + } +} + + +fn iterate(func: &str, k: usize, n: usize, mut f: F, mut g: G, mut v: V) -> (usize, usize) + where F: FnMut(&Decoded, &mut [u8]) -> Option<(usize, i16)>, + G: FnMut(&Decoded, &mut [u8]) -> (usize, i16), + V: FnMut(usize) -> Decoded { + assert!(k <= 1024); + + let mut npassed = 0; // f(x) = Some(g(x)) + let mut nignored = 0; // f(x) = None + + for i in 0..n { + if (i & 0xfffff) == 0 { + println!("in progress, {:x}/{:x} (ignored={} passed={} failed={})", + i, n, nignored, npassed, i - nignored - npassed); + } + + let decoded = v(i); + let mut buf1 = [0; 1024]; + if let Some((len1, e1)) = f(&decoded, &mut buf1[..k]) { + let mut buf2 = [0; 1024]; + let (len2, e2) = g(&decoded, &mut buf2[..k]); + if e1 == e2 && &buf1[..len1] == &buf2[..len2] { + npassed += 1; + } else { + println!("equivalence test failed, {:x}/{:x}: {:?} f(i)={}e{} g(i)={}e{}", + i, n, decoded, str::from_utf8(&buf1[..len1]).unwrap(), e1, + str::from_utf8(&buf2[..len2]).unwrap(), e2); + } + } else { + nignored += 1; + } + } + println!("{}({}): done, ignored={} passed={} failed={}", + func, k, nignored, npassed, n - nignored - npassed); + assert!(nignored + npassed == n, + "{}({}): {} out of {} values returns an incorrect value!", + func, k, n - nignored - npassed, n); + (npassed, nignored) +} + +pub fn f32_random_equivalence_test(f: F, g: G, k: usize, n: usize) + where F: FnMut(&Decoded, &mut [u8]) -> 
Option<(usize, i16)>, + G: FnMut(&Decoded, &mut [u8]) -> (usize, i16) { + let mut rng: XorShiftRng = Rand::rand(&mut rand::thread_rng()); + let f32_range = Range::new(0x0000_0001u32, 0x7f80_0000); + iterate("f32_random_equivalence_test", k, n, f, g, |_| { + let x = f32::from_bits(f32_range.ind_sample(&mut rng)); + decode_finite(x) + }); +} + +pub fn f64_random_equivalence_test(f: F, g: G, k: usize, n: usize) + where F: FnMut(&Decoded, &mut [u8]) -> Option<(usize, i16)>, + G: FnMut(&Decoded, &mut [u8]) -> (usize, i16) { + let mut rng: XorShiftRng = Rand::rand(&mut rand::thread_rng()); + let f64_range = Range::new(0x0000_0000_0000_0001u64, 0x7ff0_0000_0000_0000); + iterate("f64_random_equivalence_test", k, n, f, g, |_| { + let x = f64::from_bits(f64_range.ind_sample(&mut rng)); + decode_finite(x) + }); +} + +pub fn f32_exhaustive_equivalence_test(f: F, g: G, k: usize) + where F: FnMut(&Decoded, &mut [u8]) -> Option<(usize, i16)>, + G: FnMut(&Decoded, &mut [u8]) -> (usize, i16) { + // we have only 2^23 * (2^8 - 1) - 1 = 2,139,095,039 positive finite f32 values, + // so why not simply testing all of them? + // + // this is of course very stressful (and thus should be behind an `#[ignore]` attribute), + // but with `-C opt-level=3 -C lto` this only takes about an hour or so. + + // iterate from 0x0000_0001 to 0x7f7f_ffff, i.e. 
all finite ranges + let (npassed, nignored) = iterate("f32_exhaustive_equivalence_test", + k, 0x7f7f_ffff, f, g, |i: usize| { + + let x = f32::from_bits(i as u32 + 1); + decode_finite(x) + }); + assert_eq!((npassed, nignored), (2121451881, 17643158)); +} + +#[test] +fn shortest_random_equivalence_test() { + use core::num::flt2dec::strategy::dragon::format_shortest as fallback; + f64_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, 10_000); + f32_random_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS, 10_000); +} + +#[test] #[ignore] // it is too expensive +fn shortest_f32_exhaustive_equivalence_test() { + // it is hard to directly test the optimality of the output, but we can at least test if + // two different algorithms agree to each other. + // + // this reports the progress and the number of f32 values returned `None`. + // with `--nocapture` (and plenty of time and appropriate rustc flags), this should print: + // `done, ignored=17643158 passed=2121451881 failed=0`. + + use core::num::flt2dec::strategy::dragon::format_shortest as fallback; + f32_exhaustive_equivalence_test(format_shortest_opt, fallback, MAX_SIG_DIGITS); +} + +#[test] #[ignore] // it is too expensive +fn shortest_f64_hard_random_equivalence_test() { + // this again probably has to use appropriate rustc flags. 
+ + use core::num::flt2dec::strategy::dragon::format_shortest as fallback; + f64_random_equivalence_test(format_shortest_opt, fallback, + MAX_SIG_DIGITS, 100_000_000); +} + +#[test] +fn exact_f32_random_equivalence_test() { + use core::num::flt2dec::strategy::dragon::format_exact as fallback; + for k in 1..21 { + f32_random_equivalence_test(|d, buf| format_exact_opt(d, buf, i16::MIN), + |d, buf| fallback(d, buf, i16::MIN), k, 1_000); + } +} + +#[test] +fn exact_f64_random_equivalence_test() { + use core::num::flt2dec::strategy::dragon::format_exact as fallback; + for k in 1..21 { + f64_random_equivalence_test(|d, buf| format_exact_opt(d, buf, i16::MIN), + |d, buf| fallback(d, buf, i16::MIN), k, 1_000); + } +} + diff --git a/src/libcore/tests/num/int_macros.rs b/src/libcore/tests/num/int_macros.rs index 8d791283ab87..71d2e7945389 100644 --- a/src/libcore/tests/num/int_macros.rs +++ b/src/libcore/tests/num/int_macros.rs @@ -30,6 +30,11 @@ mod tests { num::test_num(10 as $T, 2 as $T); } + #[test] + fn test_mod_euc() { + assert!((-1 as $T).mod_euc(MIN) == MAX); + } + #[test] pub fn test_abs() { assert!((1 as $T).abs() == 1 as $T); diff --git a/src/libcore/tests/num/mod.rs b/src/libcore/tests/num/mod.rs index 587dcbe6d678..ab96d3126bb2 100644 --- a/src/libcore/tests/num/mod.rs +++ b/src/libcore/tests/num/mod.rs @@ -143,6 +143,15 @@ fn test_infallible_try_from_int_error() { } macro_rules! test_impl_from { + ($fn_name:ident, bool, $target: ty) => { + #[test] + fn $fn_name() { + let one: $target = 1; + let zero: $target = 0; + assert_eq!(one, <$target>::from(true)); + assert_eq!(zero, <$target>::from(false)); + } + }; ($fn_name: ident, $Small: ty, $Large: ty) => { #[test] fn $fn_name() { @@ -182,6 +191,18 @@ test_impl_from! { test_u16i32, u16, i32 } test_impl_from! { test_u16i64, u16, i64 } test_impl_from! { test_u32i64, u32, i64 } +// Bool -> Integer +test_impl_from! { test_boolu8, bool, u8 } +test_impl_from! { test_boolu16, bool, u16 } +test_impl_from! 
{ test_boolu32, bool, u32 } +test_impl_from! { test_boolu64, bool, u64 } +test_impl_from! { test_boolu128, bool, u128 } +test_impl_from! { test_booli8, bool, i8 } +test_impl_from! { test_booli16, bool, i16 } +test_impl_from! { test_booli32, bool, i32 } +test_impl_from! { test_booli64, bool, i64 } +test_impl_from! { test_booli128, bool, i128 } + // Signed -> Float test_impl_from! { test_i8f32, i8, f32 } test_impl_from! { test_i8f64, i8, f64 } @@ -635,51 +656,69 @@ assume_usize_width! { macro_rules! test_float { ($modname: ident, $fty: ty, $inf: expr, $neginf: expr, $nan: expr) => { mod $modname { - use core::num::Float; // FIXME(nagisa): these tests should test for sign of -0.0 #[test] fn min() { - assert_eq!(0.0.min(0.0), 0.0); - assert_eq!((-0.0).min(-0.0), -0.0); - assert_eq!(9.0.min(9.0), 9.0); - assert_eq!((-9.0).min(0.0), -9.0); - assert_eq!(0.0.min(9.0), 0.0); - assert_eq!((-0.0).min(-9.0), -9.0); - assert_eq!($inf.min(9.0), 9.0); - assert_eq!(9.0.min($inf), 9.0); - assert_eq!($inf.min(-9.0), -9.0); - assert_eq!((-9.0).min($inf), -9.0); - assert_eq!($neginf.min(9.0), $neginf); - assert_eq!(9.0.min($neginf), $neginf); - assert_eq!($neginf.min(-9.0), $neginf); - assert_eq!((-9.0).min($neginf), $neginf); - assert_eq!($nan.min(9.0), 9.0); - assert_eq!($nan.min(-9.0), -9.0); - assert_eq!(9.0.min($nan), 9.0); - assert_eq!((-9.0).min($nan), -9.0); - assert!($nan.min($nan).is_nan()); + assert_eq!((0.0 as $fty).min(0.0), 0.0); + assert_eq!((-0.0 as $fty).min(-0.0), -0.0); + assert_eq!((9.0 as $fty).min(9.0), 9.0); + assert_eq!((-9.0 as $fty).min(0.0), -9.0); + assert_eq!((0.0 as $fty).min(9.0), 0.0); + assert_eq!((-0.0 as $fty).min(-9.0), -9.0); + assert_eq!(($inf as $fty).min(9.0), 9.0); + assert_eq!((9.0 as $fty).min($inf), 9.0); + assert_eq!(($inf as $fty).min(-9.0), -9.0); + assert_eq!((-9.0 as $fty).min($inf), -9.0); + assert_eq!(($neginf as $fty).min(9.0), $neginf); + assert_eq!((9.0 as $fty).min($neginf), $neginf); + assert_eq!(($neginf as $fty).min(-9.0), 
$neginf); + assert_eq!((-9.0 as $fty).min($neginf), $neginf); + assert_eq!(($nan as $fty).min(9.0), 9.0); + assert_eq!(($nan as $fty).min(-9.0), -9.0); + assert_eq!((9.0 as $fty).min($nan), 9.0); + assert_eq!((-9.0 as $fty).min($nan), -9.0); + assert!(($nan as $fty).min($nan).is_nan()); } #[test] fn max() { - assert_eq!(0.0.max(0.0), 0.0); - assert_eq!((-0.0).max(-0.0), -0.0); - assert_eq!(9.0.max(9.0), 9.0); - assert_eq!((-9.0).max(0.0), 0.0); - assert_eq!(0.0.max(9.0), 9.0); - assert_eq!((-0.0).max(-9.0), -0.0); - assert_eq!($inf.max(9.0), $inf); - assert_eq!(9.0.max($inf), $inf); - assert_eq!($inf.max(-9.0), $inf); - assert_eq!((-9.0).max($inf), $inf); - assert_eq!($neginf.max(9.0), 9.0); - assert_eq!(9.0.max($neginf), 9.0); - assert_eq!($neginf.max(-9.0), -9.0); - assert_eq!((-9.0).max($neginf), -9.0); - assert_eq!($nan.max(9.0), 9.0); - assert_eq!($nan.max(-9.0), -9.0); - assert_eq!(9.0.max($nan), 9.0); - assert_eq!((-9.0).max($nan), -9.0); - assert!($nan.max($nan).is_nan()); + assert_eq!((0.0 as $fty).max(0.0), 0.0); + assert_eq!((-0.0 as $fty).max(-0.0), -0.0); + assert_eq!((9.0 as $fty).max(9.0), 9.0); + assert_eq!((-9.0 as $fty).max(0.0), 0.0); + assert_eq!((0.0 as $fty).max(9.0), 9.0); + assert_eq!((-0.0 as $fty).max(-9.0), -0.0); + assert_eq!(($inf as $fty).max(9.0), $inf); + assert_eq!((9.0 as $fty).max($inf), $inf); + assert_eq!(($inf as $fty).max(-9.0), $inf); + assert_eq!((-9.0 as $fty).max($inf), $inf); + assert_eq!(($neginf as $fty).max(9.0), 9.0); + assert_eq!((9.0 as $fty).max($neginf), 9.0); + assert_eq!(($neginf as $fty).max(-9.0), -9.0); + assert_eq!((-9.0 as $fty).max($neginf), -9.0); + assert_eq!(($nan as $fty).max(9.0), 9.0); + assert_eq!(($nan as $fty).max(-9.0), -9.0); + assert_eq!((9.0 as $fty).max($nan), 9.0); + assert_eq!((-9.0 as $fty).max($nan), -9.0); + assert!(($nan as $fty).max($nan).is_nan()); + } + #[test] + fn mod_euc() { + let a: $fty = 42.0; + assert!($inf.mod_euc(a).is_nan()); + assert_eq!(a.mod_euc($inf), a); + 
assert!(a.mod_euc($nan).is_nan()); + assert!($inf.mod_euc($inf).is_nan()); + assert!($inf.mod_euc($nan).is_nan()); + assert!($nan.mod_euc($inf).is_nan()); + } + #[test] + fn div_euc() { + let a: $fty = 42.0; + assert_eq!(a.div_euc($inf), 0.0); + assert!(a.div_euc($nan).is_nan()); + assert!($inf.div_euc($inf).is_nan()); + assert!($inf.div_euc($nan).is_nan()); + assert!($nan.div_euc($inf).is_nan()); } } } } diff --git a/src/libcore/tests/num/uint_macros.rs b/src/libcore/tests/num/uint_macros.rs index daa1cc3a7f4f..ca6906f73104 100644 --- a/src/libcore/tests/num/uint_macros.rs +++ b/src/libcore/tests/num/uint_macros.rs @@ -97,6 +97,17 @@ mod tests { assert_eq!(_1.swap_bytes(), _1); } + #[test] + fn test_reverse_bits() { + assert_eq!(A.reverse_bits().reverse_bits(), A); + assert_eq!(B.reverse_bits().reverse_bits(), B); + assert_eq!(C.reverse_bits().reverse_bits(), C); + + // Swapping these should make no difference + assert_eq!(_0.reverse_bits(), _0); + assert_eq!(_1.reverse_bits(), _1); + } + #[test] fn test_le() { assert_eq!($T::from_le(A.to_le()), A); diff --git a/src/libcore/tests/ops.rs b/src/libcore/tests/ops.rs index 9d2fa1abff65..d66193b1687c 100644 --- a/src/libcore/tests/ops.rs +++ b/src/libcore/tests/ops.rs @@ -50,21 +50,45 @@ fn test_full_range() { #[test] fn test_range_inclusive() { - let mut r = RangeInclusive { start: 1i8, end: 2 }; + let mut r = RangeInclusive::new(1i8, 2); assert_eq!(r.next(), Some(1)); assert_eq!(r.next(), Some(2)); assert_eq!(r.next(), None); - r = RangeInclusive { start: 127i8, end: 127 }; + r = RangeInclusive::new(127i8, 127); assert_eq!(r.next(), Some(127)); assert_eq!(r.next(), None); - r = RangeInclusive { start: -128i8, end: -128 }; + r = RangeInclusive::new(-128i8, -128); assert_eq!(r.next_back(), Some(-128)); assert_eq!(r.next_back(), None); // degenerate - r = RangeInclusive { start: 1, end: -1 }; + r = RangeInclusive::new(1, -1); assert_eq!(r.size_hint(), (0, Some(0))); assert_eq!(r.next(), None); } + + +#[test] +fn 
test_range_is_empty() { + use core::f32::*; + + assert!(!(0.0 .. 10.0).is_empty()); + assert!( (-0.0 .. 0.0).is_empty()); + assert!( (10.0 .. 0.0).is_empty()); + + assert!(!(NEG_INFINITY .. INFINITY).is_empty()); + assert!( (EPSILON .. NAN).is_empty()); + assert!( (NAN .. EPSILON).is_empty()); + assert!( (NAN .. NAN).is_empty()); + + assert!(!(0.0 ..= 10.0).is_empty()); + assert!(!(-0.0 ..= 0.0).is_empty()); + assert!( (10.0 ..= 0.0).is_empty()); + + assert!(!(NEG_INFINITY ..= INFINITY).is_empty()); + assert!( (EPSILON ..= NAN).is_empty()); + assert!( (NAN ..= EPSILON).is_empty()); + assert!( (NAN ..= NAN).is_empty()); +} diff --git a/src/libcore/tests/option.rs b/src/libcore/tests/option.rs index 22109e28edd9..1324ba2d9a9c 100644 --- a/src/libcore/tests/option.rs +++ b/src/libcore/tests/option.rs @@ -240,7 +240,7 @@ fn test_collect() { assert!(v == None); // test that it does not take more elements than it needs - let mut functions: [Box Option<()>>; 3] = + let mut functions: [Box Option<()>>; 3] = [box || Some(()), box || None, box || panic!()]; let v: Option> = functions.iter_mut().map(|f| (*f)()).collect(); @@ -297,3 +297,35 @@ fn test_try() { } assert_eq!(try_option_err(), Err(NoneError)); } + +#[test] +fn test_option_deref() { + // Some: &Option::Some(T) -> Option<&T::Deref::Target>::Some(&*T) + let ref_option = &Some(&42); + assert_eq!(ref_option.deref(), Some(&42)); + + let ref_option = &Some(String::from("a result")); + assert_eq!(ref_option.deref(), Some("a result")); + + let ref_option = &Some(vec![1, 2, 3, 4, 5]); + assert_eq!(ref_option.deref(), Some(&[1, 2, 3, 4, 5][..])); + + // None: &Option>::None -> None + let ref_option: &Option<&i32> = &None; + assert_eq!(ref_option.deref(), None); +} + +#[test] +fn test_replace() { + let mut x = Some(2); + let old = x.replace(5); + + assert_eq!(x, Some(5)); + assert_eq!(old, Some(2)); + + let mut x = None; + let old = x.replace(3); + + assert_eq!(x, Some(3)); + assert_eq!(old, None); +} diff --git 
a/src/libcore/tests/ptr.rs b/src/libcore/tests/ptr.rs index 98436f0e1d1c..92160910d8f7 100644 --- a/src/libcore/tests/ptr.rs +++ b/src/libcore/tests/ptr.rs @@ -84,16 +84,16 @@ fn test_is_null() { assert!(nms.is_null()); // Pointers to unsized types -- trait objects - let ci: *const ToString = &3; + let ci: *const dyn ToString = &3; assert!(!ci.is_null()); - let mi: *mut ToString = &mut 3; + let mi: *mut dyn ToString = &mut 3; assert!(!mi.is_null()); - let nci: *const ToString = null::(); + let nci: *const dyn ToString = null::(); assert!(nci.is_null()); - let nmi: *mut ToString = null_mut::(); + let nmi: *mut dyn ToString = null_mut::(); assert!(nmi.is_null()); } @@ -140,16 +140,16 @@ fn test_as_ref() { assert_eq!(nms.as_ref(), None); // Pointers to unsized types -- trait objects - let ci: *const ToString = &3; + let ci: *const dyn ToString = &3; assert!(ci.as_ref().is_some()); - let mi: *mut ToString = &mut 3; + let mi: *mut dyn ToString = &mut 3; assert!(mi.as_ref().is_some()); - let nci: *const ToString = null::(); + let nci: *const dyn ToString = null::(); assert!(nci.as_ref().is_none()); - let nmi: *mut ToString = null_mut::(); + let nmi: *mut dyn ToString = null_mut::(); assert!(nmi.as_ref().is_none()); } } @@ -182,10 +182,10 @@ fn test_as_mut() { assert_eq!(nms.as_mut(), None); // Pointers to unsized types -- trait objects - let mi: *mut ToString = &mut 3; + let mi: *mut dyn ToString = &mut 3; assert!(mi.as_mut().is_some()); - let nmi: *mut ToString = null_mut::(); + let nmi: *mut dyn ToString = null_mut::(); assert!(nmi.as_mut().is_none()); } } @@ -249,9 +249,9 @@ fn test_set_memory() { } #[test] -fn test_unsized_unique() { +fn test_unsized_nonnull() { let xs: &[i32] = &[1, 2, 3]; - let ptr = unsafe { Unique::new_unchecked(xs as *const [i32] as *mut [i32]) }; + let ptr = unsafe { NonNull::new_unchecked(xs as *const [i32] as *mut [i32]) }; let ys = unsafe { ptr.as_ref() }; let zs: &[i32] = &[1, 2, 3]; assert!(ys == zs); @@ -296,3 +296,92 @@ fn 
write_unaligned_drop() { } DROPS.with(|d| assert_eq!(*d.borrow(), [0])); } + +#[test] +fn align_offset_zst() { + // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at + // all, because no amount of elements will align the pointer. + let mut p = 1; + while p < 1024 { + assert_eq!((p as *const ()).align_offset(p), 0); + if p != 1 { + assert_eq!(((p + 1) as *const ()).align_offset(p), !0); + } + p = (p + 1).next_power_of_two(); + } +} + +#[test] +fn align_offset_stride1() { + // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to + // number of bytes. + let mut align = 1; + while align < 1024 { + for ptr in 1..2*align { + let expected = ptr % align; + let offset = if expected == 0 { 0 } else { align - expected }; + assert_eq!((ptr as *const u8).align_offset(align), offset, + "ptr = {}, align = {}, size = 1", ptr, align); + } + align = (align + 1).next_power_of_two(); + } +} + +#[test] +fn align_offset_weird_strides() { + #[repr(packed)] + struct A3(u16, u8); + struct A4(u32); + #[repr(packed)] + struct A5(u32, u8); + #[repr(packed)] + struct A6(u32, u16); + #[repr(packed)] + struct A7(u32, u16, u8); + #[repr(packed)] + struct A8(u32, u32); + #[repr(packed)] + struct A9(u32, u32, u8); + #[repr(packed)] + struct A10(u32, u32, u16); + + unsafe fn test_weird_stride(ptr: *const T, align: usize) -> bool { + let numptr = ptr as usize; + let mut expected = usize::max_value(); + // Naive but definitely correct way to find the *first* aligned element of stride::. 
+ for el in 0..align { + if (numptr + el * ::std::mem::size_of::()) % align == 0 { + expected = el; + break; + } + } + let got = ptr.align_offset(align); + if got != expected { + eprintln!("aligning {:p} (with stride of {}) to {}, expected {}, got {}", ptr, + ::std::mem::size_of::(), align, expected, got); + return true; + } + return false; + } + + // For pointers of stride != 1, we verify the algorithm against the naivest possible + // implementation + let mut align = 1; + let mut x = false; + while align < 1024 { + for ptr in 1usize..4*align { + unsafe { + x |= test_weird_stride::(ptr as *const A3, align); + x |= test_weird_stride::(ptr as *const A4, align); + x |= test_weird_stride::(ptr as *const A5, align); + x |= test_weird_stride::(ptr as *const A6, align); + x |= test_weird_stride::(ptr as *const A7, align); + x |= test_weird_stride::(ptr as *const A8, align); + x |= test_weird_stride::(ptr as *const A9, align); + x |= test_weird_stride::(ptr as *const A10, align); + } + } + align = (align + 1).next_power_of_two(); + } + assert!(!x); +} diff --git a/src/libcore/tests/result.rs b/src/libcore/tests/result.rs index ce41bde8342e..0c00992ffd84 100644 --- a/src/libcore/tests/result.rs +++ b/src/libcore/tests/result.rs @@ -81,7 +81,7 @@ fn test_collect() { assert!(v == Err(2)); // test that it does not take more elements than it needs - let mut functions: [Box Result<(), isize>>; 3] = + let mut functions: [Box Result<(), isize>>; 3] = [box || Ok(()), box || Err(1), box || panic!()]; let v: Result, isize> = functions.iter_mut().map(|f| (*f)()).collect(); @@ -220,14 +220,109 @@ fn test_try() { assert_eq!(try_result_none(), None); fn try_result_ok() -> Result { - let val = Ok(1)?; + let result: Result = Ok(1); + let val = result?; Ok(val) } assert_eq!(try_result_ok(), Ok(1)); fn try_result_err() -> Result { - let val = Err(1)?; + let result: Result = Err(1); + let val = result?; Ok(val) } assert_eq!(try_result_err(), Err(1)); } + +#[test] +fn test_result_deref() { + 
// &Result::Ok(T).deref_ok() -> + // Result<&T::Deref::Target, &E>::Ok(&*T) + let ref_ok = &Result::Ok::<&i32, u8>(&42); + let expected_result = Result::Ok::<&i32, &u8>(&42); + assert_eq!(ref_ok.deref_ok(), expected_result); + + let ref_ok = &Result::Ok::(String::from("a result")); + let expected_result = Result::Ok::<&str, &u32>("a result"); + assert_eq!(ref_ok.deref_ok(), expected_result); + + let ref_ok = &Result::Ok::, u32>(vec![1, 2, 3, 4, 5]); + let expected_result = Result::Ok::<&[i32], &u32>(&[1, 2, 3, 4, 5][..]); + assert_eq!(ref_ok.deref_ok(), expected_result); + + // &Result::Ok(T).deref() -> + // Result<&T::Deref::Target, &E::Deref::Target>::Ok(&*T) + let ref_ok = &Result::Ok::<&i32, &u8>(&42); + let expected_result = Result::Ok::<&i32, &u8>(&42); + assert_eq!(ref_ok.deref(), expected_result); + + let ref_ok = &Result::Ok::(String::from("a result")); + let expected_result = Result::Ok::<&str, &u32>("a result"); + assert_eq!(ref_ok.deref(), expected_result); + + let ref_ok = &Result::Ok::, &u32>(vec![1, 2, 3, 4, 5]); + let expected_result = Result::Ok::<&[i32], &u32>(&[1, 2, 3, 4, 5][..]); + assert_eq!(ref_ok.deref(), expected_result); + + // &Result::Err(T).deref_err() -> + // Result<&T, &E::Deref::Target>::Err(&*E) + let ref_err = &Result::Err::(&41); + let expected_result = Result::Err::<&u8, &i32>(&41); + assert_eq!(ref_err.deref_err(), expected_result); + + let ref_err = &Result::Err::(String::from("an error")); + let expected_result = Result::Err::<&u32, &str>("an error"); + assert_eq!(ref_err.deref_err(), expected_result); + + let ref_err = &Result::Err::>(vec![5, 4, 3, 2, 1]); + let expected_result = Result::Err::<&u32, &[i32]>(&[5, 4, 3, 2, 1][..]); + assert_eq!(ref_err.deref_err(), expected_result); + + // &Result::Err(T).deref_err() -> + // Result<&T, &E::Deref::Target>::Err(&*E) + let ref_err = &Result::Err::<&u8, &i32>(&41); + let expected_result = Result::Err::<&u8, &i32>(&41); + assert_eq!(ref_err.deref(), expected_result); + + let ref_err 
= &Result::Err::<&u32, String>(String::from("an error")); + let expected_result = Result::Err::<&u32, &str>("an error"); + assert_eq!(ref_err.deref(), expected_result); + + let ref_err = &Result::Err::<&u32, Vec>(vec![5, 4, 3, 2, 1]); + let expected_result = Result::Err::<&u32, &[i32]>(&[5, 4, 3, 2, 1][..]); + assert_eq!(ref_err.deref(), expected_result); + + // The following cases test calling deref_* with the wrong variant (i.e. + // `deref_ok()` with a `Result::Err()`, or `deref_err()` with a `Result::Ok()`. + // While unusual, these cases are supported to ensure that an `inner_deref` + // call can still be made even when one of the Result types does not implement + // `Deref` (for example, std::io::Error). + + // &Result::Ok(T).deref_err() -> + // Result<&T, &E::Deref::Target>::Ok(&T) + let ref_ok = &Result::Ok::(42); + let expected_result = Result::Ok::<&i32, &u8>(&42); + assert_eq!(ref_ok.deref_err(), expected_result); + + let ref_ok = &Result::Ok::<&str, &u32>("a result"); + let expected_result = Result::Ok::<&&str, &u32>(&"a result"); + assert_eq!(ref_ok.deref_err(), expected_result); + + let ref_ok = &Result::Ok::<[i32; 5], &u32>([1, 2, 3, 4, 5]); + let expected_result = Result::Ok::<&[i32; 5], &u32>(&[1, 2, 3, 4, 5]); + assert_eq!(ref_ok.deref_err(), expected_result); + + // &Result::Err(E).deref_ok() -> + // Result<&T::Deref::Target, &E>::Err(&E) + let ref_err = &Result::Err::<&u8, i32>(41); + let expected_result = Result::Err::<&u8, &i32>(&41); + assert_eq!(ref_err.deref_ok(), expected_result); + + let ref_err = &Result::Err::<&u32, &str>("an error"); + let expected_result = Result::Err::<&u32, &&str>(&"an error"); + assert_eq!(ref_err.deref_ok(), expected_result); + + let ref_err = &Result::Err::<&u32, [i32; 5]>([5, 4, 3, 2, 1]); + let expected_result = Result::Err::<&u32, &[i32; 5]>(&[5, 4, 3, 2, 1]); + assert_eq!(ref_err.deref_ok(), expected_result); +} diff --git a/src/libcore/tests/slice.rs b/src/libcore/tests/slice.rs index 
13740b958025..b087ec81f59c 100644 --- a/src/libcore/tests/slice.rs +++ b/src/libcore/tests/slice.rs @@ -10,7 +10,6 @@ use core::result::Result::{Ok, Err}; - #[test] fn test_position() { let b = [1, 2, 3, 5, 5]; @@ -61,8 +60,8 @@ fn test_binary_search() { assert_eq!(b.binary_search(&0), Err(0)); assert_eq!(b.binary_search(&1), Ok(0)); assert_eq!(b.binary_search(&2), Err(1)); - assert!(match b.binary_search(&3) { Ok(1...3) => true, _ => false }); - assert!(match b.binary_search(&3) { Ok(1...3) => true, _ => false }); + assert!(match b.binary_search(&3) { Ok(1..=3) => true, _ => false }); + assert!(match b.binary_search(&3) { Ok(1..=3) => true, _ => false }); assert_eq!(b.binary_search(&4), Err(4)); assert_eq!(b.binary_search(&5), Err(4)); assert_eq!(b.binary_search(&6), Err(4)); @@ -260,6 +259,13 @@ fn test_exact_chunks_last() { assert_eq!(c2.last().unwrap(), &[2, 3]); } +#[test] +fn test_exact_chunks_remainder() { + let v: &[i32] = &[0, 1, 2, 3, 4]; + let c = v.exact_chunks(2); + assert_eq!(c.remainder(), &[4]); +} + #[test] fn test_exact_chunks_zip() { let v1: &[i32] = &[0, 1, 2, 3, 4]; @@ -311,6 +317,13 @@ fn test_exact_chunks_mut_last() { assert_eq!(c2.last().unwrap(), &[2, 3]); } +#[test] +fn test_exact_chunks_mut_remainder() { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4]; + let c = v.exact_chunks_mut(2); + assert_eq!(c.into_remainder(), &[4]); +} + #[test] fn test_exact_chunks_mut_zip() { let v1: &mut [i32] = &mut [0, 1, 2, 3, 4]; @@ -378,47 +391,349 @@ fn test_windows_zip() { } #[test] -fn get_range() { - let v: &[i32] = &[0, 1, 2, 3, 4, 5]; - assert_eq!(v.get(..), Some(&[0, 1, 2, 3, 4, 5][..])); - assert_eq!(v.get(..2), Some(&[0, 1][..])); - assert_eq!(v.get(2..), Some(&[2, 3, 4, 5][..])); - assert_eq!(v.get(1..4), Some(&[1, 2, 3][..])); - assert_eq!(v.get(7..), None); - assert_eq!(v.get(7..10), None); -} +#[allow(const_err)] +fn test_iter_ref_consistency() { + use std::fmt::Debug; -#[test] -fn get_mut_range() { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; - 
assert_eq!(v.get_mut(..), Some(&mut [0, 1, 2, 3, 4, 5][..])); - assert_eq!(v.get_mut(..2), Some(&mut [0, 1][..])); - assert_eq!(v.get_mut(2..), Some(&mut [2, 3, 4, 5][..])); - assert_eq!(v.get_mut(1..4), Some(&mut [1, 2, 3][..])); - assert_eq!(v.get_mut(7..), None); - assert_eq!(v.get_mut(7..10), None); -} + fn test(x : T) { + let v : &[T] = &[x, x, x]; + let v_ptrs : [*const T; 3] = match v { + [ref v1, ref v2, ref v3] => [v1 as *const _, v2 as *const _, v3 as *const _], + _ => unreachable!() + }; + let len = v.len(); -#[test] -fn get_unchecked_range() { - unsafe { - let v: &[i32] = &[0, 1, 2, 3, 4, 5]; - assert_eq!(v.get_unchecked(..), &[0, 1, 2, 3, 4, 5][..]); - assert_eq!(v.get_unchecked(..2), &[0, 1][..]); - assert_eq!(v.get_unchecked(2..), &[2, 3, 4, 5][..]); - assert_eq!(v.get_unchecked(1..4), &[1, 2, 3][..]); + // nth(i) + for i in 0..len { + assert_eq!(&v[i] as *const _, v_ptrs[i]); // check the v_ptrs array, just to be sure + let nth = v.iter().nth(i).unwrap(); + assert_eq!(nth as *const _, v_ptrs[i]); + } + assert_eq!(v.iter().nth(len), None, "nth(len) should return None"); + + // stepping through with nth(0) + { + let mut it = v.iter(); + for i in 0..len { + let next = it.nth(0).unwrap(); + assert_eq!(next as *const _, v_ptrs[i]); + } + assert_eq!(it.nth(0), None); + } + + // next() + { + let mut it = v.iter(); + for i in 0..len { + let remaining = len - i; + assert_eq!(it.size_hint(), (remaining, Some(remaining))); + + let next = it.next().unwrap(); + assert_eq!(next as *const _, v_ptrs[i]); + } + assert_eq!(it.size_hint(), (0, Some(0))); + assert_eq!(it.next(), None, "The final call to next() should return None"); + } + + // next_back() + { + let mut it = v.iter(); + for i in 0..len { + let remaining = len - i; + assert_eq!(it.size_hint(), (remaining, Some(remaining))); + + let prev = it.next_back().unwrap(); + assert_eq!(prev as *const _, v_ptrs[remaining-1]); + } + assert_eq!(it.size_hint(), (0, Some(0))); + assert_eq!(it.next_back(), None, "The 
final call to next_back() should return None"); + } } + + fn test_mut(x : T) { + let v : &mut [T] = &mut [x, x, x]; + let v_ptrs : [*mut T; 3] = match v { + [ref v1, ref v2, ref v3] => + [v1 as *const _ as *mut _, v2 as *const _ as *mut _, v3 as *const _ as *mut _], + _ => unreachable!() + }; + let len = v.len(); + + // nth(i) + for i in 0..len { + assert_eq!(&mut v[i] as *mut _, v_ptrs[i]); // check the v_ptrs array, just to be sure + let nth = v.iter_mut().nth(i).unwrap(); + assert_eq!(nth as *mut _, v_ptrs[i]); + } + assert_eq!(v.iter().nth(len), None, "nth(len) should return None"); + + // stepping through with nth(0) + { + let mut it = v.iter(); + for i in 0..len { + let next = it.nth(0).unwrap(); + assert_eq!(next as *const _, v_ptrs[i]); + } + assert_eq!(it.nth(0), None); + } + + // next() + { + let mut it = v.iter_mut(); + for i in 0..len { + let remaining = len - i; + assert_eq!(it.size_hint(), (remaining, Some(remaining))); + + let next = it.next().unwrap(); + assert_eq!(next as *mut _, v_ptrs[i]); + } + assert_eq!(it.size_hint(), (0, Some(0))); + assert_eq!(it.next(), None, "The final call to next() should return None"); + } + + // next_back() + { + let mut it = v.iter_mut(); + for i in 0..len { + let remaining = len - i; + assert_eq!(it.size_hint(), (remaining, Some(remaining))); + + let prev = it.next_back().unwrap(); + assert_eq!(prev as *mut _, v_ptrs[remaining-1]); + } + assert_eq!(it.size_hint(), (0, Some(0))); + assert_eq!(it.next_back(), None, "The final call to next_back() should return None"); + } + } + + // Make sure iterators and slice patterns yield consistent addresses for various types, + // including ZSTs. 
+ test(0u32); + test(()); + test([0u32; 0]); // ZST with alignment > 0 + test_mut(0u32); + test_mut(()); + test_mut([0u32; 0]); // ZST with alignment > 0 } -#[test] -fn get_unchecked_mut_range() { - unsafe { - let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; - assert_eq!(v.get_unchecked_mut(..), &mut [0, 1, 2, 3, 4, 5][..]); - assert_eq!(v.get_unchecked_mut(..2), &mut [0, 1][..]); - assert_eq!(v.get_unchecked_mut(2..), &mut[2, 3, 4, 5][..]); - assert_eq!(v.get_unchecked_mut(1..4), &mut [1, 2, 3][..]); +// The current implementation of SliceIndex fails to handle methods +// orthogonally from range types; therefore, it is worth testing +// all of the indexing operations on each input. +mod slice_index { + // This checks all six indexing methods, given an input range that + // should succeed. (it is NOT suitable for testing invalid inputs) + macro_rules! assert_range_eq { + ($arr:expr, $range:expr, $expected:expr) + => { + let mut arr = $arr; + let mut expected = $expected; + { + let s: &[_] = &arr; + let expected: &[_] = &expected; + + assert_eq!(&s[$range], expected, "(in assertion for: index)"); + assert_eq!(s.get($range), Some(expected), "(in assertion for: get)"); + unsafe { + assert_eq!( + s.get_unchecked($range), expected, + "(in assertion for: get_unchecked)", + ); + } + } + { + let s: &mut [_] = &mut arr; + let expected: &mut [_] = &mut expected; + + assert_eq!( + &mut s[$range], expected, + "(in assertion for: index_mut)", + ); + assert_eq!( + s.get_mut($range), Some(&mut expected[..]), + "(in assertion for: get_mut)", + ); + unsafe { + assert_eq!( + s.get_unchecked_mut($range), expected, + "(in assertion for: get_unchecked_mut)", + ); + } + } + } } + + // Make sure the macro can actually detect bugs, + // because if it can't, then what are we even doing here? 
+ // + // (Be aware this only demonstrates the ability to detect bugs + // in the FIRST method that panics, as the macro is not designed + // to be used in `should_panic`) + #[test] + #[should_panic(expected = "out of range")] + fn assert_range_eq_can_fail_by_panic() { + assert_range_eq!([0, 1, 2], 0..5, [0, 1, 2]); + } + + // (Be aware this only demonstrates the ability to detect bugs + // in the FIRST method it calls, as the macro is not designed + // to be used in `should_panic`) + #[test] + #[should_panic(expected = "==")] + fn assert_range_eq_can_fail_by_inequality() { + assert_range_eq!([0, 1, 2], 0..2, [0, 1, 2]); + } + + // Test cases for bad index operations. + // + // This generates `should_panic` test cases for Index/IndexMut + // and `None` test cases for get/get_mut. + macro_rules! panic_cases { + ($( + // each test case needs a unique name to namespace the tests + in mod $case_name:ident { + data: $data:expr; + + // optional: + // + // one or more similar inputs for which data[input] succeeds, + // and the corresponding output as an array. This helps validate + // "critical points" where an input range straddles the boundary + // between valid and invalid. 
+ // (such as the input `len..len`, which is just barely valid) + $( + good: data[$good:expr] == $output:expr; + )* + + bad: data[$bad:expr]; + message: $expect_msg:expr; + } + )*) => {$( + mod $case_name { + #[test] + fn pass() { + let mut v = $data; + + $( assert_range_eq!($data, $good, $output); )* + + { + let v: &[_] = &v; + assert_eq!(v.get($bad), None, "(in None assertion for get)"); + } + + { + let v: &mut [_] = &mut v; + assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)"); + } + } + + #[test] + #[should_panic(expected = $expect_msg)] + fn index_fail() { + let v = $data; + let v: &[_] = &v; + let _v = &v[$bad]; + } + + #[test] + #[should_panic(expected = $expect_msg)] + fn index_mut_fail() { + let mut v = $data; + let v: &mut [_] = &mut v; + let _v = &mut v[$bad]; + } + } + )*}; + } + + #[test] + fn simple() { + let v = [0, 1, 2, 3, 4, 5]; + + assert_range_eq!(v, .., [0, 1, 2, 3, 4, 5]); + assert_range_eq!(v, ..2, [0, 1]); + assert_range_eq!(v, ..=1, [0, 1]); + assert_range_eq!(v, 2.., [2, 3, 4, 5]); + assert_range_eq!(v, 1..4, [1, 2, 3]); + assert_range_eq!(v, 1..=3, [1, 2, 3]); + } + + panic_cases! { + in mod rangefrom_len { + data: [0, 1, 2, 3, 4, 5]; + + good: data[6..] == []; + bad: data[7..]; + message: "but ends at"; // perhaps not ideal + } + + in mod rangeto_len { + data: [0, 1, 2, 3, 4, 5]; + + good: data[..6] == [0, 1, 2, 3, 4, 5]; + bad: data[..7]; + message: "out of range"; + } + + in mod rangetoinclusive_len { + data: [0, 1, 2, 3, 4, 5]; + + good: data[..=5] == [0, 1, 2, 3, 4, 5]; + bad: data[..=6]; + message: "out of range"; + } + + in mod range_len_len { + data: [0, 1, 2, 3, 4, 5]; + + good: data[6..6] == []; + bad: data[7..7]; + message: "out of range"; + } + + in mod rangeinclusive_len_len { + data: [0, 1, 2, 3, 4, 5]; + + good: data[6..=5] == []; + bad: data[7..=6]; + message: "out of range"; + } + } + + panic_cases! 
{ + in mod range_neg_width { + data: [0, 1, 2, 3, 4, 5]; + + good: data[4..4] == []; + bad: data[4..3]; + message: "but ends at"; + } + + in mod rangeinclusive_neg_width { + data: [0, 1, 2, 3, 4, 5]; + + good: data[4..=3] == []; + bad: data[4..=2]; + message: "but ends at"; + } + } + + panic_cases! { + in mod rangeinclusive_overflow { + data: [0, 1]; + + // note: using 0 specifically ensures that the result of overflowing is 0..0, + // so that `get` doesn't simply return None for the wrong reason. + bad: data[0 ..= ::std::usize::MAX]; + message: "maximum usize"; + } + + in mod rangetoinclusive_overflow { + data: [0, 1]; + + bad: data[..= ::std::usize::MAX]; + message: "maximum usize"; + } + } // panic_cases! } #[test] @@ -481,3 +796,207 @@ fn test_rotate_right() { assert_eq!(a[(i + 42) % N], i); } } + +#[test] +#[cfg(not(target_arch = "wasm32"))] +fn sort_unstable() { + use core::cmp::Ordering::{Equal, Greater, Less}; + use core::slice::heapsort; + use rand::{Rng, XorShiftRng}; + + let mut v = [0; 600]; + let mut tmp = [0; 600]; + let mut rng = XorShiftRng::new_unseeded(); + + for len in (2..25).chain(500..510) { + let v = &mut v[0..len]; + let tmp = &mut tmp[0..len]; + + for &modulus in &[5, 10, 100, 1000] { + for _ in 0..100 { + for i in 0..len { + v[i] = rng.gen::() % modulus; + } + + // Sort in default order. + tmp.copy_from_slice(v); + tmp.sort_unstable(); + assert!(tmp.windows(2).all(|w| w[0] <= w[1])); + + // Sort in ascending order. + tmp.copy_from_slice(v); + tmp.sort_unstable_by(|a, b| a.cmp(b)); + assert!(tmp.windows(2).all(|w| w[0] <= w[1])); + + // Sort in descending order. + tmp.copy_from_slice(v); + tmp.sort_unstable_by(|a, b| b.cmp(a)); + assert!(tmp.windows(2).all(|w| w[0] >= w[1])); + + // Test heapsort using `<` operator. + tmp.copy_from_slice(v); + heapsort(tmp, |a, b| a < b); + assert!(tmp.windows(2).all(|w| w[0] <= w[1])); + + // Test heapsort using `>` operator. 
+ tmp.copy_from_slice(v); + heapsort(tmp, |a, b| a > b); + assert!(tmp.windows(2).all(|w| w[0] >= w[1])); + } + } + } + + // Sort using a completely random comparison function. + // This will reorder the elements *somehow*, but won't panic. + for i in 0..v.len() { + v[i] = i as i32; + } + v.sort_unstable_by(|_, _| *rng.choose(&[Less, Equal, Greater]).unwrap()); + v.sort_unstable(); + for i in 0..v.len() { + assert_eq!(v[i], i as i32); + } + + // Should not panic. + [0i32; 0].sort_unstable(); + [(); 10].sort_unstable(); + [(); 100].sort_unstable(); + + let mut v = [0xDEADBEEFu64]; + v.sort_unstable(); + assert!(v == [0xDEADBEEF]); +} + +pub mod memchr { + use core::slice::memchr::{memchr, memrchr}; + + // test fallback implementations on all platforms + #[test] + fn matches_one() { + assert_eq!(Some(0), memchr(b'a', b"a")); + } + + #[test] + fn matches_begin() { + assert_eq!(Some(0), memchr(b'a', b"aaaa")); + } + + #[test] + fn matches_end() { + assert_eq!(Some(4), memchr(b'z', b"aaaaz")); + } + + #[test] + fn matches_nul() { + assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00")); + } + + #[test] + fn matches_past_nul() { + assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z")); + } + + #[test] + fn no_match_empty() { + assert_eq!(None, memchr(b'a', b"")); + } + + #[test] + fn no_match() { + assert_eq!(None, memchr(b'a', b"xyz")); + } + + #[test] + fn matches_one_reversed() { + assert_eq!(Some(0), memrchr(b'a', b"a")); + } + + #[test] + fn matches_begin_reversed() { + assert_eq!(Some(3), memrchr(b'a', b"aaaa")); + } + + #[test] + fn matches_end_reversed() { + assert_eq!(Some(0), memrchr(b'z', b"zaaaa")); + } + + #[test] + fn matches_nul_reversed() { + assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00")); + } + + #[test] + fn matches_past_nul_reversed() { + assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa")); + } + + #[test] + fn no_match_empty_reversed() { + assert_eq!(None, memrchr(b'a', b"")); + } + + #[test] + fn no_match_reversed() { + assert_eq!(None, memrchr(b'a', b"xyz")); 
+ } + + #[test] + fn each_alignment_reversed() { + let mut data = [1u8; 64]; + let needle = 2; + let pos = 40; + data[pos] = needle; + for start in 0..16 { + assert_eq!(Some(pos - start), memrchr(needle, &data[start..])); + } + } +} + +#[test] +fn test_align_to_simple() { + let bytes = [1u8, 2, 3, 4, 5, 6, 7]; + let (prefix, aligned, suffix) = unsafe { bytes.align_to::() }; + assert_eq!(aligned.len(), 3); + assert!(prefix == [1] || suffix == [7]); + let expect1 = [1 << 8 | 2, 3 << 8 | 4, 5 << 8 | 6]; + let expect2 = [1 | 2 << 8, 3 | 4 << 8, 5 | 6 << 8]; + let expect3 = [2 << 8 | 3, 4 << 8 | 5, 6 << 8 | 7]; + let expect4 = [2 | 3 << 8, 4 | 5 << 8, 6 | 7 << 8]; + assert!(aligned == expect1 || aligned == expect2 || aligned == expect3 || aligned == expect4, + "aligned={:?} expected={:?} || {:?} || {:?} || {:?}", + aligned, expect1, expect2, expect3, expect4); +} + +#[test] +fn test_align_to_zst() { + let bytes = [1, 2, 3, 4, 5, 6, 7]; + let (prefix, aligned, suffix) = unsafe { bytes.align_to::<()>() }; + assert_eq!(aligned.len(), 0); + assert!(prefix == [1, 2, 3, 4, 5, 6, 7] || suffix == [1, 2, 3, 4, 5, 6, 7]); +} + +#[test] +fn test_align_to_non_trivial() { + #[repr(align(8))] struct U64(u64, u64); + #[repr(align(8))] struct U64U64U32(u64, u64, u32); + let data = [U64(1, 2), U64(3, 4), U64(5, 6), U64(7, 8), U64(9, 10), U64(11, 12), U64(13, 14), + U64(15, 16)]; + let (prefix, aligned, suffix) = unsafe { data.align_to::() }; + assert_eq!(aligned.len(), 4); + assert_eq!(prefix.len() + suffix.len(), 2); +} + +#[test] +fn test_align_to_empty_mid() { + use core::mem; + + // Make sure that we do not create empty unaligned slices for the mid part, even when the + // overall slice is too short to contain an aligned address. 
+ let bytes = [1, 2, 3, 4, 5, 6, 7]; + type Chunk = u32; + for offset in 0..4 { + let (_, mid, _) = unsafe { bytes[offset..offset+1].align_to::() }; + assert_eq!(mid.as_ptr() as usize % mem::align_of::(), 0); + } +} diff --git a/src/libcore/tests/str.rs b/src/libcore/tests/str.rs index 08daafccc540..343c9596c538 100644 --- a/src/libcore/tests/str.rs +++ b/src/libcore/tests/str.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// All `str` tests live in collectionstests::str +// All `str` tests live in liballoc/tests diff --git a/src/libcore/tests/str_lossy.rs b/src/libcore/tests/str_lossy.rs new file mode 100644 index 000000000000..56ef3f070c1f --- /dev/null +++ b/src/libcore/tests/str_lossy.rs @@ -0,0 +1,91 @@ +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use core::str::lossy::*; + +#[test] +fn chunks() { + let mut iter = Utf8Lossy::from_bytes(b"hello").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "hello", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes("ศไทย中华Việt Nam".as_bytes()).chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "ศไทย中华Việt Nam", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes(b"Hello\xC2 There\xFF Goodbye").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC2", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xFF", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "Hello", broken: b"\xC0", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: " There", broken: b"\xE6\x83", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: " Goodbye", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes(b"\xF5foo\xF5\x80bar").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF5", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF5", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes(b"\xF1foo\xF1\x80bar\xF1\x80\x80baz").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF1", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF1\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { 
valid: "bar", broken: b"\xF1\x80\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes(b"\xF4foo\xF4\x80bar\xF4\xBFbaz").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF4", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xF4\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"\xF4", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "baz", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + let mut iter = Utf8Lossy::from_bytes(b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xF0", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "foo\u{10000}bar", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); + + // surrogates + let mut iter = Utf8Lossy::from_bytes(b"\xED\xA0\x80foo\xED\xBF\xBFbar").chunks(); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xED", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xA0", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\x80", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "foo", broken: b"\xED", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "", broken: b"\xBF", }), iter.next()); + assert_eq!(Some(Utf8LossyChunk { valid: "bar", broken: b"", }), iter.next()); + assert_eq!(None, iter.next()); +} + +#[test] +fn display() { + assert_eq!( + 
"Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye", + &Utf8Lossy::from_bytes(b"Hello\xC0\x80 There\xE6\x83 Goodbye").to_string()); +} + +#[test] +fn debug() { + assert_eq!( + "\"Hello\\xc0\\x80 There\\xe6\\x83 Goodbye\\u{10d4ea}\"", + &format!("{:?}", Utf8Lossy::from_bytes( + b"Hello\xC0\x80 There\xE6\x83 Goodbye\xf4\x8d\x93\xaa"))); +} diff --git a/src/libcore/tests/time.rs b/src/libcore/tests/time.rs new file mode 100644 index 000000000000..466f28f0ef0a --- /dev/null +++ b/src/libcore/tests/time.rs @@ -0,0 +1,304 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use core::time::Duration; + +#[test] +fn creation() { + assert!(Duration::from_secs(1) != Duration::from_secs(0)); + assert_eq!(Duration::from_secs(1) + Duration::from_secs(2), + Duration::from_secs(3)); + assert_eq!(Duration::from_millis(10) + Duration::from_secs(4), + Duration::new(4, 10 * 1_000_000)); + assert_eq!(Duration::from_millis(4000), Duration::new(4, 0)); +} + +#[test] +fn secs() { + assert_eq!(Duration::new(0, 0).as_secs(), 0); + assert_eq!(Duration::new(0, 500_000_005).as_secs(), 0); + assert_eq!(Duration::new(0, 1_050_000_001).as_secs(), 1); + assert_eq!(Duration::from_secs(1).as_secs(), 1); + assert_eq!(Duration::from_millis(999).as_secs(), 0); + assert_eq!(Duration::from_millis(1001).as_secs(), 1); + assert_eq!(Duration::from_micros(999_999).as_secs(), 0); + assert_eq!(Duration::from_micros(1_000_001).as_secs(), 1); + assert_eq!(Duration::from_nanos(999_999_999).as_secs(), 0); + assert_eq!(Duration::from_nanos(1_000_000_001).as_secs(), 1); +} + +#[test] +fn millis() { + assert_eq!(Duration::new(0, 0).subsec_millis(), 0); + assert_eq!(Duration::new(0, 500_000_005).subsec_millis(), 500); 
+ assert_eq!(Duration::new(0, 1_050_000_001).subsec_millis(), 50); + assert_eq!(Duration::from_secs(1).subsec_millis(), 0); + assert_eq!(Duration::from_millis(999).subsec_millis(), 999); + assert_eq!(Duration::from_millis(1001).subsec_millis(), 1); + assert_eq!(Duration::from_micros(999_999).subsec_millis(), 999); + assert_eq!(Duration::from_micros(1_001_000).subsec_millis(), 1); + assert_eq!(Duration::from_nanos(999_999_999).subsec_millis(), 999); + assert_eq!(Duration::from_nanos(1_001_000_000).subsec_millis(), 1); +} + +#[test] +fn micros() { + assert_eq!(Duration::new(0, 0).subsec_micros(), 0); + assert_eq!(Duration::new(0, 500_000_005).subsec_micros(), 500_000); + assert_eq!(Duration::new(0, 1_050_000_001).subsec_micros(), 50_000); + assert_eq!(Duration::from_secs(1).subsec_micros(), 0); + assert_eq!(Duration::from_millis(999).subsec_micros(), 999_000); + assert_eq!(Duration::from_millis(1001).subsec_micros(), 1_000); + assert_eq!(Duration::from_micros(999_999).subsec_micros(), 999_999); + assert_eq!(Duration::from_micros(1_000_001).subsec_micros(), 1); + assert_eq!(Duration::from_nanos(999_999_999).subsec_micros(), 999_999); + assert_eq!(Duration::from_nanos(1_000_001_000).subsec_micros(), 1); +} + +#[test] +fn nanos() { + assert_eq!(Duration::new(0, 0).subsec_nanos(), 0); + assert_eq!(Duration::new(0, 5).subsec_nanos(), 5); + assert_eq!(Duration::new(0, 1_000_000_001).subsec_nanos(), 1); + assert_eq!(Duration::from_secs(1).subsec_nanos(), 0); + assert_eq!(Duration::from_millis(999).subsec_nanos(), 999_000_000); + assert_eq!(Duration::from_millis(1001).subsec_nanos(), 1_000_000); + assert_eq!(Duration::from_micros(999_999).subsec_nanos(), 999_999_000); + assert_eq!(Duration::from_micros(1_000_001).subsec_nanos(), 1000); + assert_eq!(Duration::from_nanos(999_999_999).subsec_nanos(), 999_999_999); + assert_eq!(Duration::from_nanos(1_000_000_001).subsec_nanos(), 1); +} + +#[test] +fn add() { + assert_eq!(Duration::new(0, 0) + Duration::new(0, 1), + 
Duration::new(0, 1)); + assert_eq!(Duration::new(0, 500_000_000) + Duration::new(0, 500_000_001), + Duration::new(1, 1)); +} + +#[test] +fn checked_add() { + assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), + Some(Duration::new(0, 1))); + assert_eq!(Duration::new(0, 500_000_000).checked_add(Duration::new(0, 500_000_001)), + Some(Duration::new(1, 1))); + assert_eq!(Duration::new(1, 0).checked_add(Duration::new(::core::u64::MAX, 0)), None); +} + +#[test] +fn sub() { + assert_eq!(Duration::new(0, 1) - Duration::new(0, 0), + Duration::new(0, 1)); + assert_eq!(Duration::new(0, 500_000_001) - Duration::new(0, 500_000_000), + Duration::new(0, 1)); + assert_eq!(Duration::new(1, 0) - Duration::new(0, 1), + Duration::new(0, 999_999_999)); +} + +#[test] +fn checked_sub() { + let zero = Duration::new(0, 0); + let one_nano = Duration::new(0, 1); + let one_sec = Duration::new(1, 0); + assert_eq!(one_nano.checked_sub(zero), Some(Duration::new(0, 1))); + assert_eq!(one_sec.checked_sub(one_nano), + Some(Duration::new(0, 999_999_999))); + assert_eq!(zero.checked_sub(one_nano), None); + assert_eq!(zero.checked_sub(one_sec), None); +} + +#[test] +#[should_panic] +fn sub_bad1() { + let _ = Duration::new(0, 0) - Duration::new(0, 1); +} + +#[test] +#[should_panic] +fn sub_bad2() { + let _ = Duration::new(0, 0) - Duration::new(1, 0); +} + +#[test] +fn mul() { + assert_eq!(Duration::new(0, 1) * 2, Duration::new(0, 2)); + assert_eq!(Duration::new(1, 1) * 3, Duration::new(3, 3)); + assert_eq!(Duration::new(0, 500_000_001) * 4, Duration::new(2, 4)); + assert_eq!(Duration::new(0, 500_000_001) * 4000, + Duration::new(2000, 4000)); +} + +#[test] +fn checked_mul() { + assert_eq!(Duration::new(0, 1).checked_mul(2), Some(Duration::new(0, 2))); + assert_eq!(Duration::new(1, 1).checked_mul(3), Some(Duration::new(3, 3))); + assert_eq!(Duration::new(0, 500_000_001).checked_mul(4), Some(Duration::new(2, 4))); + assert_eq!(Duration::new(0, 500_000_001).checked_mul(4000), + 
Some(Duration::new(2000, 4000))); + assert_eq!(Duration::new(::core::u64::MAX - 1, 0).checked_mul(2), None); +} + +#[test] +fn div() { + assert_eq!(Duration::new(0, 1) / 2, Duration::new(0, 0)); + assert_eq!(Duration::new(1, 1) / 3, Duration::new(0, 333_333_333)); + assert_eq!(Duration::new(99, 999_999_000) / 100, + Duration::new(0, 999_999_990)); +} + +#[test] +fn checked_div() { + assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0))); + assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000))); + assert_eq!(Duration::new(2, 0).checked_div(0), None); +} + +#[test] +fn correct_sum() { + let durations = [ + Duration::new(1, 999_999_999), + Duration::new(2, 999_999_999), + Duration::new(0, 999_999_999), + Duration::new(0, 999_999_999), + Duration::new(0, 999_999_999), + Duration::new(5, 0), + ]; + let sum = durations.iter().sum::(); + assert_eq!(sum, Duration::new(1+2+5+4, 1_000_000_000 - 5)); +} + +#[test] +fn debug_formatting_extreme_values() { + assert_eq!( + format!("{:?}", Duration::new(18_446_744_073_709_551_615, 123_456_789)), + "18446744073709551615.123456789s" + ); +} + +#[test] +fn debug_formatting_secs() { + assert_eq!(format!("{:?}", Duration::new(7, 000_000_000)), "7s"); + assert_eq!(format!("{:?}", Duration::new(7, 100_000_000)), "7.1s"); + assert_eq!(format!("{:?}", Duration::new(7, 000_010_000)), "7.00001s"); + assert_eq!(format!("{:?}", Duration::new(7, 000_000_001)), "7.000000001s"); + assert_eq!(format!("{:?}", Duration::new(7, 123_456_789)), "7.123456789s"); + + assert_eq!(format!("{:?}", Duration::new(88, 000_000_000)), "88s"); + assert_eq!(format!("{:?}", Duration::new(88, 100_000_000)), "88.1s"); + assert_eq!(format!("{:?}", Duration::new(88, 000_010_000)), "88.00001s"); + assert_eq!(format!("{:?}", Duration::new(88, 000_000_001)), "88.000000001s"); + assert_eq!(format!("{:?}", Duration::new(88, 123_456_789)), "88.123456789s"); + + assert_eq!(format!("{:?}", Duration::new(999, 000_000_000)), 
"999s"); + assert_eq!(format!("{:?}", Duration::new(999, 100_000_000)), "999.1s"); + assert_eq!(format!("{:?}", Duration::new(999, 000_010_000)), "999.00001s"); + assert_eq!(format!("{:?}", Duration::new(999, 000_000_001)), "999.000000001s"); + assert_eq!(format!("{:?}", Duration::new(999, 123_456_789)), "999.123456789s"); +} + +#[test] +fn debug_formatting_millis() { + assert_eq!(format!("{:?}", Duration::new(0, 7_000_000)), "7ms"); + assert_eq!(format!("{:?}", Duration::new(0, 7_100_000)), "7.1ms"); + assert_eq!(format!("{:?}", Duration::new(0, 7_000_001)), "7.000001ms"); + assert_eq!(format!("{:?}", Duration::new(0, 7_123_456)), "7.123456ms"); + + assert_eq!(format!("{:?}", Duration::new(0, 88_000_000)), "88ms"); + assert_eq!(format!("{:?}", Duration::new(0, 88_100_000)), "88.1ms"); + assert_eq!(format!("{:?}", Duration::new(0, 88_000_001)), "88.000001ms"); + assert_eq!(format!("{:?}", Duration::new(0, 88_123_456)), "88.123456ms"); + + assert_eq!(format!("{:?}", Duration::new(0, 999_000_000)), "999ms"); + assert_eq!(format!("{:?}", Duration::new(0, 999_100_000)), "999.1ms"); + assert_eq!(format!("{:?}", Duration::new(0, 999_000_001)), "999.000001ms"); + assert_eq!(format!("{:?}", Duration::new(0, 999_123_456)), "999.123456ms"); +} + +#[test] +fn debug_formatting_micros() { + assert_eq!(format!("{:?}", Duration::new(0, 7_000)), "7µs"); + assert_eq!(format!("{:?}", Duration::new(0, 7_100)), "7.1µs"); + assert_eq!(format!("{:?}", Duration::new(0, 7_001)), "7.001µs"); + assert_eq!(format!("{:?}", Duration::new(0, 7_123)), "7.123µs"); + + assert_eq!(format!("{:?}", Duration::new(0, 88_000)), "88µs"); + assert_eq!(format!("{:?}", Duration::new(0, 88_100)), "88.1µs"); + assert_eq!(format!("{:?}", Duration::new(0, 88_001)), "88.001µs"); + assert_eq!(format!("{:?}", Duration::new(0, 88_123)), "88.123µs"); + + assert_eq!(format!("{:?}", Duration::new(0, 999_000)), "999µs"); + assert_eq!(format!("{:?}", Duration::new(0, 999_100)), "999.1µs"); + assert_eq!(format!("{:?}", 
Duration::new(0, 999_001)), "999.001µs"); + assert_eq!(format!("{:?}", Duration::new(0, 999_123)), "999.123µs"); +} + +#[test] +fn debug_formatting_nanos() { + assert_eq!(format!("{:?}", Duration::new(0, 0)), "0ns"); + assert_eq!(format!("{:?}", Duration::new(0, 1)), "1ns"); + assert_eq!(format!("{:?}", Duration::new(0, 88)), "88ns"); + assert_eq!(format!("{:?}", Duration::new(0, 999)), "999ns"); +} + +#[test] +fn debug_formatting_precision_zero() { + assert_eq!(format!("{:.0?}", Duration::new(0, 0)), "0ns"); + assert_eq!(format!("{:.0?}", Duration::new(0, 123)), "123ns"); + + assert_eq!(format!("{:.0?}", Duration::new(0, 1_001)), "1µs"); + assert_eq!(format!("{:.0?}", Duration::new(0, 1_499)), "1µs"); + assert_eq!(format!("{:.0?}", Duration::new(0, 1_500)), "2µs"); + assert_eq!(format!("{:.0?}", Duration::new(0, 1_999)), "2µs"); + + assert_eq!(format!("{:.0?}", Duration::new(0, 1_000_001)), "1ms"); + assert_eq!(format!("{:.0?}", Duration::new(0, 1_499_999)), "1ms"); + assert_eq!(format!("{:.0?}", Duration::new(0, 1_500_000)), "2ms"); + assert_eq!(format!("{:.0?}", Duration::new(0, 1_999_999)), "2ms"); + + assert_eq!(format!("{:.0?}", Duration::new(1, 000_000_001)), "1s"); + assert_eq!(format!("{:.0?}", Duration::new(1, 499_999_999)), "1s"); + assert_eq!(format!("{:.0?}", Duration::new(1, 500_000_000)), "2s"); + assert_eq!(format!("{:.0?}", Duration::new(1, 999_999_999)), "2s"); +} + +#[test] +fn debug_formatting_precision_two() { + assert_eq!(format!("{:.2?}", Duration::new(0, 0)), "0.00ns"); + assert_eq!(format!("{:.2?}", Duration::new(0, 123)), "123.00ns"); + + assert_eq!(format!("{:.2?}", Duration::new(0, 1_000)), "1.00µs"); + assert_eq!(format!("{:.2?}", Duration::new(0, 7_001)), "7.00µs"); + assert_eq!(format!("{:.2?}", Duration::new(0, 7_100)), "7.10µs"); + assert_eq!(format!("{:.2?}", Duration::new(0, 7_109)), "7.11µs"); + assert_eq!(format!("{:.2?}", Duration::new(0, 7_199)), "7.20µs"); + assert_eq!(format!("{:.2?}", Duration::new(0, 1_999)), "2.00µs"); + 
+ assert_eq!(format!("{:.2?}", Duration::new(0, 1_000_000)), "1.00ms"); + assert_eq!(format!("{:.2?}", Duration::new(0, 3_001_000)), "3.00ms"); + assert_eq!(format!("{:.2?}", Duration::new(0, 3_100_000)), "3.10ms"); + assert_eq!(format!("{:.2?}", Duration::new(0, 1_999_999)), "2.00ms"); + + assert_eq!(format!("{:.2?}", Duration::new(1, 000_000_000)), "1.00s"); + assert_eq!(format!("{:.2?}", Duration::new(4, 001_000_000)), "4.00s"); + assert_eq!(format!("{:.2?}", Duration::new(2, 100_000_000)), "2.10s"); + assert_eq!(format!("{:.2?}", Duration::new(2, 104_990_000)), "2.10s"); + assert_eq!(format!("{:.2?}", Duration::new(2, 105_000_000)), "2.11s"); + assert_eq!(format!("{:.2?}", Duration::new(8, 999_999_999)), "9.00s"); +} + +#[test] +fn debug_formatting_precision_high() { + assert_eq!(format!("{:.5?}", Duration::new(0, 23_678)), "23.67800µs"); + + assert_eq!(format!("{:.9?}", Duration::new(1, 000_000_000)), "1.000000000s"); + assert_eq!(format!("{:.10?}", Duration::new(4, 001_000_000)), "4.0010000000s"); + assert_eq!(format!("{:.20?}", Duration::new(4, 001_000_000)), "4.00100000000000000000s"); +} diff --git a/src/libcore/time.rs b/src/libcore/time.rs new file mode 100644 index 000000000000..b58920224eb7 --- /dev/null +++ b/src/libcore/time.rs @@ -0,0 +1,685 @@ +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![stable(feature = "duration_core", since = "1.25.0")] + +//! Temporal quantification. +//! +//! Example: +//! +//! ``` +//! use std::time::Duration; +//! +//! let five_seconds = Duration::new(5, 0); +//! // both declarations are equivalent +//! assert_eq!(Duration::new(5, 0), Duration::from_secs(5)); +//! 
``` + +use fmt; +use iter::Sum; +use ops::{Add, Sub, Mul, Div, AddAssign, SubAssign, MulAssign, DivAssign}; + +const NANOS_PER_SEC: u32 = 1_000_000_000; +const NANOS_PER_MILLI: u32 = 1_000_000; +const NANOS_PER_MICRO: u32 = 1_000; +const MILLIS_PER_SEC: u64 = 1_000; +const MICROS_PER_SEC: u64 = 1_000_000; + +/// A `Duration` type to represent a span of time, typically used for system +/// timeouts. +/// +/// Each `Duration` is composed of a whole number of seconds and a fractional part +/// represented in nanoseconds. If the underlying system does not support +/// nanosecond-level precision, APIs binding a system timeout will typically round up +/// the number of nanoseconds. +/// +/// `Duration`s implement many common traits, including [`Add`], [`Sub`], and other +/// [`ops`] traits. +/// +/// [`Add`]: ../../std/ops/trait.Add.html +/// [`Sub`]: ../../std/ops/trait.Sub.html +/// [`ops`]: ../../std/ops/index.html +/// +/// # Examples +/// +/// ``` +/// use std::time::Duration; +/// +/// let five_seconds = Duration::new(5, 0); +/// let five_seconds_and_five_nanos = five_seconds + Duration::new(0, 5); +/// +/// assert_eq!(five_seconds_and_five_nanos.as_secs(), 5); +/// assert_eq!(five_seconds_and_five_nanos.subsec_nanos(), 5); +/// +/// let ten_millis = Duration::from_millis(10); +/// ``` +#[stable(feature = "duration", since = "1.3.0")] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] +pub struct Duration { + secs: u64, + nanos: u32, // Always 0 <= nanos < NANOS_PER_SEC +} + +impl Duration { + /// Creates a new `Duration` from the specified number of whole seconds and + /// additional nanoseconds. + /// + /// If the number of nanoseconds is greater than 1 billion (the number of + /// nanoseconds in a second), then it will carry over into the seconds provided. + /// + /// # Panics + /// + /// This constructor will panic if the carry from the nanoseconds overflows + /// the seconds counter. 
+ /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let five_seconds = Duration::new(5, 0); + /// ``` + #[stable(feature = "duration", since = "1.3.0")] + #[inline] + pub fn new(secs: u64, nanos: u32) -> Duration { + let secs = secs.checked_add((nanos / NANOS_PER_SEC) as u64) + .expect("overflow in Duration::new"); + let nanos = nanos % NANOS_PER_SEC; + Duration { secs, nanos } + } + + /// Creates a new `Duration` from the specified number of whole seconds. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_secs(5); + /// + /// assert_eq!(5, duration.as_secs()); + /// assert_eq!(0, duration.subsec_nanos()); + /// ``` + #[stable(feature = "duration", since = "1.3.0")] + #[inline] + pub const fn from_secs(secs: u64) -> Duration { + Duration { secs, nanos: 0 } + } + + /// Creates a new `Duration` from the specified number of milliseconds. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_millis(2569); + /// + /// assert_eq!(2, duration.as_secs()); + /// assert_eq!(569_000_000, duration.subsec_nanos()); + /// ``` + #[stable(feature = "duration", since = "1.3.0")] + #[inline] + pub const fn from_millis(millis: u64) -> Duration { + Duration { + secs: millis / MILLIS_PER_SEC, + nanos: ((millis % MILLIS_PER_SEC) as u32) * NANOS_PER_MILLI, + } + } + + /// Creates a new `Duration` from the specified number of microseconds. 
+ /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_micros(1_000_002); + /// + /// assert_eq!(1, duration.as_secs()); + /// assert_eq!(2000, duration.subsec_nanos()); + /// ``` + #[stable(feature = "duration_from_micros", since = "1.27.0")] + #[inline] + pub const fn from_micros(micros: u64) -> Duration { + Duration { + secs: micros / MICROS_PER_SEC, + nanos: ((micros % MICROS_PER_SEC) as u32) * NANOS_PER_MICRO, + } + } + + /// Creates a new `Duration` from the specified number of nanoseconds. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_nanos(1_000_000_123); + /// + /// assert_eq!(1, duration.as_secs()); + /// assert_eq!(123, duration.subsec_nanos()); + /// ``` + #[stable(feature = "duration_extras", since = "1.27.0")] + #[inline] + pub const fn from_nanos(nanos: u64) -> Duration { + Duration { + secs: nanos / (NANOS_PER_SEC as u64), + nanos: (nanos % (NANOS_PER_SEC as u64)) as u32, + } + } + + /// Returns the number of _whole_ seconds contained by this `Duration`. + /// + /// The returned value does not include the fractional (nanosecond) part of the + /// duration, which can be obtained using [`subsec_nanos`]. 
+ /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::new(5, 730023852); + /// assert_eq!(duration.as_secs(), 5); + /// ``` + /// + /// To determine the total number of seconds represented by the `Duration`, + /// use `as_secs` in combination with [`subsec_nanos`]: + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::new(5, 730023852); + /// + /// assert_eq!(5.730023852, + /// duration.as_secs() as f64 + /// + duration.subsec_nanos() as f64 * 1e-9); + /// ``` + /// + /// [`subsec_nanos`]: #method.subsec_nanos + #[stable(feature = "duration", since = "1.3.0")] + #[rustc_const_unstable(feature="duration_getters")] + #[inline] + pub const fn as_secs(&self) -> u64 { self.secs } + + /// Returns the fractional part of this `Duration`, in whole milliseconds. + /// + /// This method does **not** return the length of the duration when + /// represented by milliseconds. The returned number always represents a + /// fractional portion of a second (i.e. it is less than one thousand). + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_millis(5432); + /// assert_eq!(duration.as_secs(), 5); + /// assert_eq!(duration.subsec_millis(), 432); + /// ``` + #[stable(feature = "duration_extras", since = "1.27.0")] + #[rustc_const_unstable(feature="duration_getters")] + #[inline] + pub const fn subsec_millis(&self) -> u32 { self.nanos / NANOS_PER_MILLI } + + /// Returns the fractional part of this `Duration`, in whole microseconds. + /// + /// This method does **not** return the length of the duration when + /// represented by microseconds. The returned number always represents a + /// fractional portion of a second (i.e. it is less than one million). 
+ /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_micros(1_234_567); + /// assert_eq!(duration.as_secs(), 1); + /// assert_eq!(duration.subsec_micros(), 234_567); + /// ``` + #[stable(feature = "duration_extras", since = "1.27.0")] + #[rustc_const_unstable(feature="duration_getters")] + #[inline] + pub const fn subsec_micros(&self) -> u32 { self.nanos / NANOS_PER_MICRO } + + /// Returns the fractional part of this `Duration`, in nanoseconds. + /// + /// This method does **not** return the length of the duration when + /// represented by nanoseconds. The returned number always represents a + /// fractional portion of a second (i.e. it is less than one billion). + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// let duration = Duration::from_millis(5010); + /// assert_eq!(duration.as_secs(), 5); + /// assert_eq!(duration.subsec_nanos(), 10_000_000); + /// ``` + #[stable(feature = "duration", since = "1.3.0")] + #[rustc_const_unstable(feature="duration_getters")] + #[inline] + pub const fn subsec_nanos(&self) -> u32 { self.nanos } + + /// Returns the total number of whole milliseconds contained by this `Duration`. + /// + /// # Examples + /// + /// ``` + /// # #![feature(duration_as_u128)] + /// use std::time::Duration; + /// + /// let duration = Duration::new(5, 730023852); + /// assert_eq!(duration.as_millis(), 5730); + /// ``` + #[unstable(feature = "duration_as_u128", issue = "50202")] + #[inline] + pub fn as_millis(&self) -> u128 { + self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128 + } + + /// Returns the total number of whole microseconds contained by this `Duration`. 
+ /// + /// # Examples + /// + /// ``` + /// # #![feature(duration_as_u128)] + /// use std::time::Duration; + /// + /// let duration = Duration::new(5, 730023852); + /// assert_eq!(duration.as_micros(), 5730023); + /// ``` + #[unstable(feature = "duration_as_u128", issue = "50202")] + #[inline] + pub fn as_micros(&self) -> u128 { + self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128 + } + + /// Returns the total number of nanoseconds contained by this `Duration`. + /// + /// # Examples + /// + /// ``` + /// # #![feature(duration_as_u128)] + /// use std::time::Duration; + /// + /// let duration = Duration::new(5, 730023852); + /// assert_eq!(duration.as_nanos(), 5730023852); + /// ``` + #[unstable(feature = "duration_as_u128", issue = "50202")] + #[inline] + pub fn as_nanos(&self) -> u128 { + self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128 + } + + /// Checked `Duration` addition. Computes `self + other`, returning [`None`] + /// if overflow occurred. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::time::Duration; + /// + /// assert_eq!(Duration::new(0, 0).checked_add(Duration::new(0, 1)), Some(Duration::new(0, 1))); + /// assert_eq!(Duration::new(1, 0).checked_add(Duration::new(std::u64::MAX, 0)), None); + /// ``` + #[stable(feature = "duration_checked_ops", since = "1.16.0")] + #[inline] + pub fn checked_add(self, rhs: Duration) -> Option { + if let Some(mut secs) = self.secs.checked_add(rhs.secs) { + let mut nanos = self.nanos + rhs.nanos; + if nanos >= NANOS_PER_SEC { + nanos -= NANOS_PER_SEC; + if let Some(new_secs) = secs.checked_add(1) { + secs = new_secs; + } else { + return None; + } + } + debug_assert!(nanos < NANOS_PER_SEC); + Some(Duration { + secs, + nanos, + }) + } else { + None + } + } + + /// Checked `Duration` subtraction. 
Computes `self - other`, returning [`None`] + /// if the result would be negative or if overflow occurred. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::time::Duration; + /// + /// assert_eq!(Duration::new(0, 1).checked_sub(Duration::new(0, 0)), Some(Duration::new(0, 1))); + /// assert_eq!(Duration::new(0, 0).checked_sub(Duration::new(0, 1)), None); + /// ``` + #[stable(feature = "duration_checked_ops", since = "1.16.0")] + #[inline] + pub fn checked_sub(self, rhs: Duration) -> Option { + if let Some(mut secs) = self.secs.checked_sub(rhs.secs) { + let nanos = if self.nanos >= rhs.nanos { + self.nanos - rhs.nanos + } else { + if let Some(sub_secs) = secs.checked_sub(1) { + secs = sub_secs; + self.nanos + NANOS_PER_SEC - rhs.nanos + } else { + return None; + } + }; + debug_assert!(nanos < NANOS_PER_SEC); + Some(Duration { secs, nanos }) + } else { + None + } + } + + /// Checked `Duration` multiplication. Computes `self * other`, returning + /// [`None`] if overflow occurred. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::time::Duration; + /// + /// assert_eq!(Duration::new(0, 500_000_001).checked_mul(2), Some(Duration::new(1, 2))); + /// assert_eq!(Duration::new(std::u64::MAX - 1, 0).checked_mul(2), None); + /// ``` + #[stable(feature = "duration_checked_ops", since = "1.16.0")] + #[inline] + pub fn checked_mul(self, rhs: u32) -> Option { + // Multiply nanoseconds as u64, because it cannot overflow that way. 
+ let total_nanos = self.nanos as u64 * rhs as u64; + let extra_secs = total_nanos / (NANOS_PER_SEC as u64); + let nanos = (total_nanos % (NANOS_PER_SEC as u64)) as u32; + if let Some(secs) = self.secs + .checked_mul(rhs as u64) + .and_then(|s| s.checked_add(extra_secs)) { + debug_assert!(nanos < NANOS_PER_SEC); + Some(Duration { + secs, + nanos, + }) + } else { + None + } + } + + /// Checked `Duration` division. Computes `self / other`, returning [`None`] + /// if `other == 0`. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::time::Duration; + /// + /// assert_eq!(Duration::new(2, 0).checked_div(2), Some(Duration::new(1, 0))); + /// assert_eq!(Duration::new(1, 0).checked_div(2), Some(Duration::new(0, 500_000_000))); + /// assert_eq!(Duration::new(2, 0).checked_div(0), None); + /// ``` + #[stable(feature = "duration_checked_ops", since = "1.16.0")] + #[inline] + pub fn checked_div(self, rhs: u32) -> Option { + if rhs != 0 { + let secs = self.secs / (rhs as u64); + let carry = self.secs - secs * (rhs as u64); + let extra_nanos = carry * (NANOS_PER_SEC as u64) / (rhs as u64); + let nanos = self.nanos / rhs + (extra_nanos as u32); + debug_assert!(nanos < NANOS_PER_SEC); + Some(Duration { secs, nanos }) + } else { + None + } + } +} + +#[stable(feature = "duration", since = "1.3.0")] +impl Add for Duration { + type Output = Duration; + + fn add(self, rhs: Duration) -> Duration { + self.checked_add(rhs).expect("overflow when adding durations") + } +} + +#[stable(feature = "time_augmented_assignment", since = "1.9.0")] +impl AddAssign for Duration { + fn add_assign(&mut self, rhs: Duration) { + *self = *self + rhs; + } +} + +#[stable(feature = "duration", since = "1.3.0")] +impl Sub for Duration { + type Output = Duration; + + fn sub(self, rhs: Duration) -> Duration { + self.checked_sub(rhs).expect("overflow when subtracting durations") + } +} + +#[stable(feature = 
"time_augmented_assignment", since = "1.9.0")] +impl SubAssign for Duration { + fn sub_assign(&mut self, rhs: Duration) { + *self = *self - rhs; + } +} + +#[stable(feature = "duration", since = "1.3.0")] +impl Mul for Duration { + type Output = Duration; + + fn mul(self, rhs: u32) -> Duration { + self.checked_mul(rhs).expect("overflow when multiplying duration by scalar") + } +} + +#[stable(feature = "time_augmented_assignment", since = "1.9.0")] +impl MulAssign for Duration { + fn mul_assign(&mut self, rhs: u32) { + *self = *self * rhs; + } +} + +#[stable(feature = "duration", since = "1.3.0")] +impl Div for Duration { + type Output = Duration; + + fn div(self, rhs: u32) -> Duration { + self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar") + } +} + +#[stable(feature = "time_augmented_assignment", since = "1.9.0")] +impl DivAssign for Duration { + fn div_assign(&mut self, rhs: u32) { + *self = *self / rhs; + } +} + +macro_rules! sum_durations { + ($iter:expr) => {{ + let mut total_secs: u64 = 0; + let mut total_nanos: u64 = 0; + + for entry in $iter { + total_secs = total_secs + .checked_add(entry.secs) + .expect("overflow in iter::sum over durations"); + total_nanos = match total_nanos.checked_add(entry.nanos as u64) { + Some(n) => n, + None => { + total_secs = total_secs + .checked_add(total_nanos / NANOS_PER_SEC as u64) + .expect("overflow in iter::sum over durations"); + (total_nanos % NANOS_PER_SEC as u64) + entry.nanos as u64 + } + }; + } + total_secs = total_secs + .checked_add(total_nanos / NANOS_PER_SEC as u64) + .expect("overflow in iter::sum over durations"); + total_nanos = total_nanos % NANOS_PER_SEC as u64; + Duration { + secs: total_secs, + nanos: total_nanos as u32, + } + }}; +} + +#[stable(feature = "duration_sum", since = "1.16.0")] +impl Sum for Duration { + fn sum>(iter: I) -> Duration { + sum_durations!(iter) + } +} + +#[stable(feature = "duration_sum", since = "1.16.0")] +impl<'a> Sum<&'a Duration> for Duration 
{ + fn sum>(iter: I) -> Duration { + sum_durations!(iter) + } +} + +#[stable(feature = "duration_debug_impl", since = "1.27.0")] +impl fmt::Debug for Duration { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + /// Formats a floating point number in decimal notation. + /// + /// The number is given as the `integer_part` and a fractional part. + /// The value of the fractional part is `fractional_part / divisor`. So + /// `integer_part` = 3, `fractional_part` = 12 and `divisor` = 100 + /// represents the number `3.012`. Trailing zeros are omitted. + /// + /// `divisor` must not be above 100_000_000. It also should be a power + /// of 10, everything else doesn't make sense. `fractional_part` has + /// to be less than `10 * divisor`! + fn fmt_decimal( + f: &mut fmt::Formatter, + mut integer_part: u64, + mut fractional_part: u32, + mut divisor: u32, + ) -> fmt::Result { + // Encode the fractional part into a temporary buffer. The buffer + // only need to hold 9 elements, because `fractional_part` has to + // be smaller than 10^9. The buffer is prefilled with '0' digits + // to simplify the code below. + let mut buf = [b'0'; 9]; + + // The next digit is written at this position + let mut pos = 0; + + // We keep writing digits into the buffer while there are non-zero + // digits left and we haven't written enough digits yet. + while fractional_part > 0 && pos < f.precision().unwrap_or(9) { + // Write new digit into the buffer + buf[pos] = b'0' + (fractional_part / divisor) as u8; + + fractional_part %= divisor; + divisor /= 10; + pos += 1; + } + + // If a precision < 9 was specified, there may be some non-zero + // digits left that weren't written into the buffer. In that case we + // need to perform rounding to match the semantics of printing + // normal floating point numbers. However, we only need to do work + // when rounding up. This happens if the first digit of the + // remaining ones is >= 5. 
+ if fractional_part > 0 && fractional_part >= divisor * 5 { + // Round up the number contained in the buffer. We go through + // the buffer backwards and keep track of the carry. + let mut rev_pos = pos; + let mut carry = true; + while carry && rev_pos > 0 { + rev_pos -= 1; + + // If the digit in the buffer is not '9', we just need to + // increment it and can stop then (since we don't have a + // carry anymore). Otherwise, we set it to '0' (overflow) + // and continue. + if buf[rev_pos] < b'9' { + buf[rev_pos] += 1; + carry = false; + } else { + buf[rev_pos] = b'0'; + } + } + + // If we still have the carry bit set, that means that we set + // the whole buffer to '0's and need to increment the integer + // part. + if carry { + integer_part += 1; + } + } + + // Determine the end of the buffer: if precision is set, we just + // use as many digits from the buffer (capped to 9). If it isn't + // set, we only use all digits up to the last non-zero one. + let end = f.precision().map(|p| ::cmp::min(p, 9)).unwrap_or(pos); + + // If we haven't emitted a single fractional digit and the precision + // wasn't set to a non-zero value, we don't print the decimal point. + if end == 0 { + write!(f, "{}", integer_part) + } else { + // We are only writing ASCII digits into the buffer and it was + // initialized with '0's, so it contains valid UTF8. + let s = unsafe { + ::str::from_utf8_unchecked(&buf[..end]) + }; + + // If the user request a precision > 9, we pad '0's at the end. 
+ let w = f.precision().unwrap_or(pos); + write!(f, "{}.{:0 0 { + fmt_decimal(f, self.secs, self.nanos, 100_000_000)?; + f.write_str("s") + } else if self.nanos >= 1_000_000 { + fmt_decimal(f, self.nanos as u64 / 1_000_000, self.nanos % 1_000_000, 100_000)?; + f.write_str("ms") + } else if self.nanos >= 1_000 { + fmt_decimal(f, self.nanos as u64 / 1_000, self.nanos % 1_000, 100)?; + f.write_str("µs") + } else { + fmt_decimal(f, self.nanos as u64, 0, 1)?; + f.write_str("ns") + } + } +} diff --git a/src/libstd_unicode/bool_trie.rs b/src/libcore/unicode/bool_trie.rs similarity index 88% rename from src/libstd_unicode/bool_trie.rs rename to src/libcore/unicode/bool_trie.rs index 3e45b08f399d..0e6437fded59 100644 --- a/src/libstd_unicode/bool_trie.rs +++ b/src/libcore/unicode/bool_trie.rs @@ -42,15 +42,15 @@ pub struct BoolTrie { } impl BoolTrie { pub fn lookup(&self, c: char) -> bool { - let c = c as usize; + let c = c as u32; if c < 0x800 { - trie_range_leaf(c, self.r1[c >> 6]) + trie_range_leaf(c, self.r1[(c >> 6) as usize]) } else if c < 0x10000 { - let child = self.r2[(c >> 6) - 0x20]; + let child = self.r2[(c >> 6) as usize - 0x20]; trie_range_leaf(c, self.r3[child as usize]) } else { - let child = self.r4[(c >> 12) - 0x10]; - let leaf = self.r5[((child as usize) << 6) + ((c >> 6) & 0x3f)]; + let child = self.r4[(c >> 12) as usize - 0x10]; + let leaf = self.r5[((child as usize) << 6) + ((c >> 6) as usize & 0x3f)]; trie_range_leaf(c, self.r6[leaf as usize]) } } @@ -63,14 +63,14 @@ pub struct SmallBoolTrie { impl SmallBoolTrie { pub fn lookup(&self, c: char) -> bool { - let c = c as usize; - match self.r1.get(c >> 6) { + let c = c as u32; + match self.r1.get((c >> 6) as usize) { Some(&child) => trie_range_leaf(c, self.r2[child as usize]), None => false, } } } -fn trie_range_leaf(c: usize, bitmap_chunk: u64) -> bool { +fn trie_range_leaf(c: u32, bitmap_chunk: u64) -> bool { ((bitmap_chunk >> (c & 63)) & 1) != 0 } diff --git a/src/libcore/unicode/mod.rs 
b/src/libcore/unicode/mod.rs new file mode 100644 index 000000000000..e5cda880f880 --- /dev/null +++ b/src/libcore/unicode/mod.rs @@ -0,0 +1,30 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![unstable(feature = "unicode_internals", issue = "0")] +#![allow(missing_docs)] + +mod bool_trie; +pub(crate) mod printable; +pub(crate) mod tables; +pub(crate) mod version; + +// For use in liballoc, not re-exported in libstd. +pub mod derived_property { + pub use unicode::tables::derived_property::{Case_Ignorable, Cased}; +} +pub mod conversions { + pub use unicode::tables::conversions::{to_lower, to_upper}; +} + +// For use in libsyntax +pub mod property { + pub use unicode::tables::property::Pattern_White_Space; +} diff --git a/src/libcore/unicode/printable.py b/src/libcore/unicode/printable.py new file mode 100644 index 000000000000..9410dafbbc36 --- /dev/null +++ b/src/libcore/unicode/printable.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python +# +# Copyright 2011-2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +# This script uses the following Unicode tables: +# - UnicodeData.txt + + +from collections import namedtuple +import csv +import os +import subprocess + +NUM_CODEPOINTS=0x110000 + +def to_ranges(iter): + current = None + for i in iter: + if current is None or i != current[1] or i in (0x10000, 0x20000): + if current is not None: + yield tuple(current) + current = [i, i + 1] + else: + current[1] += 1 + if current is not None: + yield tuple(current) + +def get_escaped(codepoints): + for c in codepoints: + if (c.class_ or "Cn") in "Cc Cf Cs Co Cn Zl Zp Zs".split() and c.value != ord(' '): + yield c.value + +def get_file(f): + try: + return open(os.path.basename(f)) + except FileNotFoundError: + subprocess.run(["curl", "-O", f], check=True) + return open(os.path.basename(f)) + +Codepoint = namedtuple('Codepoint', 'value class_') + +def get_codepoints(f): + r = csv.reader(f, delimiter=";") + prev_codepoint = 0 + class_first = None + for row in r: + codepoint = int(row[0], 16) + name = row[1] + class_ = row[2] + + if class_first is not None: + if not name.endswith("Last>"): + raise ValueError("Missing Last after First") + + for c in range(prev_codepoint + 1, codepoint): + yield Codepoint(c, class_first) + + class_first = None + if name.endswith("First>"): + class_first = class_ + + yield Codepoint(codepoint, class_) + prev_codepoint = codepoint + + if class_first != None: + raise ValueError("Missing Last after First") + + for c in range(prev_codepoint + 1, NUM_CODEPOINTS): + yield Codepoint(c, None) + +def compress_singletons(singletons): + uppers = [] # (upper, # items in lowers) + lowers = [] + + for i in singletons: + upper = i >> 8 + lower = i & 0xff + if len(uppers) == 0 or uppers[-1][0] != upper: + uppers.append((upper, 1)) + else: + upper, count = uppers[-1] + uppers[-1] = upper, count + 1 + lowers.append(lower) + + return uppers, lowers + +def compress_normal(normal): + # lengths 0x00..0x7f are encoded as 00, 01, ..., 7e, 7f + # lengths 0x80..0x7fff are 
encoded as 80 80, 80 81, ..., ff fe, ff ff + compressed = [] # [truelen, (truelenaux), falselen, (falselenaux)] + + prev_start = 0 + for start, count in normal: + truelen = start - prev_start + falselen = count + prev_start = start + count + + assert truelen < 0x8000 and falselen < 0x8000 + entry = [] + if truelen > 0x7f: + entry.append(0x80 | (truelen >> 8)) + entry.append(truelen & 0xff) + else: + entry.append(truelen & 0x7f) + if falselen > 0x7f: + entry.append(0x80 | (falselen >> 8)) + entry.append(falselen & 0xff) + else: + entry.append(falselen & 0x7f) + + compressed.append(entry) + + return compressed + +def print_singletons(uppers, lowers, uppersname, lowersname): + print("const {}: &'static [(u8, u8)] = &[".format(uppersname)) + for u, c in uppers: + print(" ({:#04x}, {}),".format(u, c)) + print("];") + print("const {}: &'static [u8] = &[".format(lowersname)) + for i in range(0, len(lowers), 8): + print(" {}".format(" ".join("{:#04x},".format(l) for l in lowers[i:i+8]))) + print("];") + +def print_normal(normal, normalname): + print("const {}: &'static [u8] = &[".format(normalname)) + for v in normal: + print(" {}".format(" ".join("{:#04x},".format(i) for i in v))) + print("];") + +def main(): + file = get_file("http://www.unicode.org/Public/UNIDATA/UnicodeData.txt") + + codepoints = get_codepoints(file) + + CUTOFF=0x10000 + singletons0 = [] + singletons1 = [] + normal0 = [] + normal1 = [] + extra = [] + + for a, b in to_ranges(get_escaped(codepoints)): + if a > 2 * CUTOFF: + extra.append((a, b - a)) + elif a == b - 1: + if a & CUTOFF: + singletons1.append(a & ~CUTOFF) + else: + singletons0.append(a) + elif a == b - 2: + if a & CUTOFF: + singletons1.append(a & ~CUTOFF) + singletons1.append((a + 1) & ~CUTOFF) + else: + singletons0.append(a) + singletons0.append(a + 1) + else: + if a >= 2 * CUTOFF: + extra.append((a, b - a)) + elif a & CUTOFF: + normal1.append((a & ~CUTOFF, b - a)) + else: + normal0.append((a, b - a)) + + singletons0u, singletons0l = 
compress_singletons(singletons0) + singletons1u, singletons1l = compress_singletons(singletons1) + normal0 = compress_normal(normal0) + normal1 = compress_normal(normal1) + + print("""\ +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// NOTE: The following code was generated by "src/libcore/unicode/printable.py", +// do not edit directly! + +fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], + normal: &[u8]) -> bool { + let xupper = (x >> 8) as u8; + let mut lowerstart = 0; + for &(upper, lowercount) in singletonuppers { + let lowerend = lowerstart + lowercount as usize; + if xupper == upper { + for &lower in &singletonlowers[lowerstart..lowerend] { + if lower == x as u8 { + return false; + } + } + } else if xupper < upper { + break; + } + lowerstart = lowerend; + } + + let mut x = x as i32; + let mut normal = normal.iter().cloned(); + let mut current = true; + while let Some(v) = normal.next() { + let len = if v & 0x80 != 0 { + ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32 + } else { + v as i32 + }; + x -= len; + if x < 0 { + break; + } + current = !current; + } + current +} + +pub(crate) fn is_printable(x: char) -> bool { + let x = x as u32; + let lower = x as u16; + if x < 0x10000 { + check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0) + } else if x < 0x20000 { + check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1) + } else {\ +""") + for a, b in extra: + print(" if 0x{:x} <= x && x < 0x{:x} {{".format(a, a + b)) + print(" return false;") + print(" }") + print("""\ + true + } +}\ +""") + print() + print_singletons(singletons0u, singletons0l, 'SINGLETONS0U', 'SINGLETONS0L') + print_singletons(singletons1u, singletons1l, 
'SINGLETONS1U', 'SINGLETONS1L') + print_normal(normal0, 'NORMAL0') + print_normal(normal1, 'NORMAL1') + +if __name__ == '__main__': + main() diff --git a/src/libcore/unicode/printable.rs b/src/libcore/unicode/printable.rs new file mode 100644 index 000000000000..519dd17bb9b3 --- /dev/null +++ b/src/libcore/unicode/printable.rs @@ -0,0 +1,535 @@ +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// NOTE: The following code was generated by "src/libcore/unicode/printable.py", +// do not edit directly! + +fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], + normal: &[u8]) -> bool { + let xupper = (x >> 8) as u8; + let mut lowerstart = 0; + for &(upper, lowercount) in singletonuppers { + let lowerend = lowerstart + lowercount as usize; + if xupper == upper { + for &lower in &singletonlowers[lowerstart..lowerend] { + if lower == x as u8 { + return false; + } + } + } else if xupper < upper { + break; + } + lowerstart = lowerend; + } + + let mut x = x as i32; + let mut normal = normal.iter().cloned(); + let mut current = true; + while let Some(v) = normal.next() { + let len = if v & 0x80 != 0 { + ((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32 + } else { + v as i32 + }; + x -= len; + if x < 0 { + break; + } + current = !current; + } + current +} + +pub(crate) fn is_printable(x: char) -> bool { + let x = x as u32; + let lower = x as u16; + if x < 0x10000 { + check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0) + } else if x < 0x20000 { + check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1) + } else { + if 0x2a6d7 <= x && x < 0x2a700 { + return false; + } + if 0x2b735 <= x && x < 0x2b740 { + return false; + } + if 0x2b81e <= x && x < 0x2b820 { 
+ return false; + } + if 0x2cea2 <= x && x < 0x2ceb0 { + return false; + } + if 0x2ebe1 <= x && x < 0x2f800 { + return false; + } + if 0x2fa1e <= x && x < 0xe0100 { + return false; + } + if 0xe01f0 <= x && x < 0x110000 { + return false; + } + true + } +} + +const SINGLETONS0U: &'static [(u8, u8)] = &[ + (0x00, 1), + (0x03, 5), + (0x05, 6), + (0x06, 3), + (0x07, 6), + (0x08, 8), + (0x09, 17), + (0x0a, 28), + (0x0b, 25), + (0x0c, 20), + (0x0d, 18), + (0x0e, 22), + (0x0f, 4), + (0x10, 3), + (0x12, 18), + (0x13, 9), + (0x16, 1), + (0x17, 5), + (0x18, 2), + (0x19, 3), + (0x1a, 7), + (0x1c, 2), + (0x1d, 1), + (0x1f, 22), + (0x20, 3), + (0x2b, 6), + (0x2c, 2), + (0x2d, 11), + (0x2e, 1), + (0x30, 3), + (0x31, 2), + (0x32, 2), + (0xa9, 2), + (0xaa, 4), + (0xab, 8), + (0xfa, 2), + (0xfb, 5), + (0xfd, 4), + (0xfe, 3), + (0xff, 9), +]; +const SINGLETONS0L: &'static [u8] = &[ + 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57, + 0x58, 0x8b, 0x8c, 0x90, 0x1c, 0x1d, 0xdd, 0x0e, + 0x0f, 0x4b, 0x4c, 0xfb, 0xfc, 0x2e, 0x2f, 0x3f, + 0x5c, 0x5d, 0x5f, 0xb5, 0xe2, 0x84, 0x8d, 0x8e, + 0x91, 0x92, 0xa9, 0xb1, 0xba, 0xbb, 0xc5, 0xc6, + 0xc9, 0xca, 0xde, 0xe4, 0xe5, 0xff, 0x00, 0x04, + 0x11, 0x12, 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b, + 0x3d, 0x49, 0x4a, 0x5d, 0x84, 0x8e, 0x92, 0xa9, + 0xb1, 0xb4, 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf, + 0xe4, 0xe5, 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12, + 0x29, 0x31, 0x34, 0x3a, 0x3b, 0x45, 0x46, 0x49, + 0x4a, 0x5e, 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d, + 0xc9, 0xce, 0xcf, 0x0d, 0x11, 0x29, 0x45, 0x49, + 0x57, 0x64, 0x65, 0x8d, 0x91, 0xa9, 0xb4, 0xba, + 0xbb, 0xc5, 0xc9, 0xdf, 0xe4, 0xe5, 0xf0, 0x04, + 0x0d, 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x81, + 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5, 0xd7, 0xf0, + 0xf1, 0x83, 0x85, 0x86, 0x89, 0x8b, 0x8c, 0x98, + 0xa0, 0xa4, 0xa6, 0xa8, 0xa9, 0xac, 0xba, 0xbe, + 0xbf, 0xc5, 0xc7, 0xce, 0xcf, 0xda, 0xdb, 0x48, + 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, 0x4e, + 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f, + 0xb1, 0xb6, 0xb7, 
0xbf, 0xc1, 0xc6, 0xc7, 0xd7, + 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, 0xfe, + 0xff, 0x80, 0x0d, 0x6d, 0x71, 0xde, 0xdf, 0x0e, + 0x0f, 0x1f, 0x6e, 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, + 0x7e, 0xae, 0xaf, 0xbb, 0xbc, 0xfa, 0x16, 0x17, + 0x1e, 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, + 0x5c, 0x5e, 0x7e, 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, + 0xdc, 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, 0x74, + 0x75, 0x96, 0x97, 0xc9, 0xff, 0x2f, 0x5f, 0x26, + 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, 0xc7, 0xcf, + 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, 0x30, 0x8f, + 0x1f, 0xff, 0xce, 0xff, 0x4e, 0x4f, 0x5a, 0x5b, + 0x07, 0x08, 0x0f, 0x10, 0x27, 0x2f, 0xee, 0xef, + 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42, 0x45, 0x90, + 0x91, 0xfe, 0xff, 0x53, 0x67, 0x75, 0xc8, 0xc9, + 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, 0xfe, 0xff, +]; +const SINGLETONS1U: &'static [(u8, u8)] = &[ + (0x00, 6), + (0x01, 1), + (0x03, 1), + (0x04, 2), + (0x08, 8), + (0x09, 2), + (0x0a, 5), + (0x0b, 2), + (0x10, 1), + (0x11, 4), + (0x12, 5), + (0x13, 17), + (0x14, 2), + (0x15, 2), + (0x17, 2), + (0x1a, 2), + (0x1c, 5), + (0x1d, 8), + (0x24, 1), + (0x6a, 3), + (0x6b, 2), + (0xbc, 2), + (0xd1, 2), + (0xd4, 12), + (0xd5, 9), + (0xd6, 2), + (0xd7, 2), + (0xda, 1), + (0xe0, 5), + (0xe8, 2), + (0xee, 32), + (0xf0, 4), + (0xf9, 4), +]; +const SINGLETONS1L: &'static [u8] = &[ + 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e, + 0x9e, 0x9f, 0x06, 0x07, 0x09, 0x36, 0x3d, 0x3e, + 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18, 0x36, + 0x37, 0x56, 0x57, 0xbd, 0x35, 0xce, 0xcf, 0xe0, + 0x12, 0x87, 0x89, 0x8e, 0x9e, 0x04, 0x0d, 0x0e, + 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a, 0x45, 0x46, + 0x49, 0x4a, 0x4e, 0x4f, 0x64, 0x65, 0x5a, 0x5c, + 0xb6, 0xb7, 0x1b, 0x1c, 0x84, 0x85, 0x09, 0x37, + 0x90, 0x91, 0xa8, 0x07, 0x0a, 0x3b, 0x3e, 0x66, + 0x69, 0x8f, 0x92, 0x6f, 0x5f, 0xee, 0xef, 0x5a, + 0x62, 0x9a, 0x9b, 0x27, 0x28, 0x55, 0x9d, 0xa0, + 0xa1, 0xa3, 0xa4, 0xa7, 0xa8, 0xad, 0xba, 0xbc, + 0xc4, 0x06, 0x0b, 0x0c, 0x15, 0x1d, 0x3a, 0x3f, + 0x45, 0x51, 0xa6, 0xa7, 0xcc, 0xcd, 0xa0, 0x07, 
+ 0x19, 0x1a, 0x22, 0x25, 0xc5, 0xc6, 0x04, 0x20, + 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a, 0x48, + 0x4a, 0x4c, 0x50, 0x53, 0x55, 0x56, 0x58, 0x5a, + 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, 0x6b, 0x73, + 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf, 0xb0, + 0xc0, 0xd0, 0x3f, 0x71, 0x72, 0x7b, +]; +const NORMAL0: &'static [u8] = &[ + 0x00, 0x20, + 0x5f, 0x22, + 0x82, 0xdf, 0x04, + 0x82, 0x44, 0x08, + 0x1b, 0x04, + 0x06, 0x11, + 0x81, 0xac, 0x0e, + 0x80, 0xab, 0x35, + 0x1e, 0x15, + 0x80, 0xe0, 0x03, + 0x19, 0x08, + 0x01, 0x04, + 0x2f, 0x04, + 0x34, 0x04, + 0x07, 0x03, + 0x01, 0x07, + 0x06, 0x07, + 0x11, 0x0a, + 0x50, 0x0f, + 0x12, 0x07, + 0x55, 0x08, + 0x02, 0x04, + 0x1c, 0x0a, + 0x09, 0x03, + 0x08, 0x03, + 0x07, 0x03, + 0x02, 0x03, + 0x03, 0x03, + 0x0c, 0x04, + 0x05, 0x03, + 0x0b, 0x06, + 0x01, 0x0e, + 0x15, 0x05, + 0x3a, 0x03, + 0x11, 0x07, + 0x06, 0x05, + 0x10, 0x08, + 0x56, 0x07, + 0x02, 0x07, + 0x15, 0x0d, + 0x50, 0x04, + 0x43, 0x03, + 0x2d, 0x03, + 0x01, 0x04, + 0x11, 0x06, + 0x0f, 0x0c, + 0x3a, 0x04, + 0x1d, 0x25, + 0x0d, 0x06, + 0x4c, 0x20, + 0x6d, 0x04, + 0x6a, 0x25, + 0x80, 0xc8, 0x05, + 0x82, 0xb0, 0x03, + 0x1a, 0x06, + 0x82, 0xfd, 0x03, + 0x59, 0x07, + 0x15, 0x0b, + 0x17, 0x09, + 0x14, 0x0c, + 0x14, 0x0c, + 0x6a, 0x06, + 0x0a, 0x06, + 0x1a, 0x06, + 0x59, 0x07, + 0x2b, 0x05, + 0x46, 0x0a, + 0x2c, 0x04, + 0x0c, 0x04, + 0x01, 0x03, + 0x31, 0x0b, + 0x2c, 0x04, + 0x1a, 0x06, + 0x0b, 0x03, + 0x80, 0xac, 0x06, + 0x0a, 0x06, + 0x1f, 0x41, + 0x4c, 0x04, + 0x2d, 0x03, + 0x74, 0x08, + 0x3c, 0x03, + 0x0f, 0x03, + 0x3c, 0x07, + 0x38, 0x08, + 0x2a, 0x06, + 0x82, 0xff, 0x11, + 0x18, 0x08, + 0x2f, 0x11, + 0x2d, 0x03, + 0x20, 0x10, + 0x21, 0x0f, + 0x80, 0x8c, 0x04, + 0x82, 0x97, 0x19, + 0x0b, 0x15, + 0x88, 0x94, 0x05, + 0x2f, 0x05, + 0x3b, 0x07, + 0x02, 0x0e, + 0x18, 0x09, + 0x80, 0xaf, 0x31, + 0x74, 0x0c, + 0x80, 0xd6, 0x1a, + 0x0c, 0x05, + 0x80, 0xff, 0x05, + 0x80, 0xb6, 0x05, + 0x24, 0x0c, + 0x9b, 0xc6, 0x0a, + 0xd2, 0x30, 0x10, + 0x84, 0x8d, 0x03, + 0x37, 0x09, + 0x81, 
0x5c, 0x14, + 0x80, 0xb8, 0x08, + 0x80, 0xba, 0x3d, + 0x35, 0x04, + 0x0a, 0x06, + 0x38, 0x08, + 0x46, 0x08, + 0x0c, 0x06, + 0x74, 0x0b, + 0x1e, 0x03, + 0x5a, 0x04, + 0x59, 0x09, + 0x80, 0x83, 0x18, + 0x1c, 0x0a, + 0x16, 0x09, + 0x46, 0x0a, + 0x80, 0x8a, 0x06, + 0xab, 0xa4, 0x0c, + 0x17, 0x04, + 0x31, 0xa1, 0x04, + 0x81, 0xda, 0x26, + 0x07, 0x0c, + 0x05, 0x05, + 0x80, 0xa5, 0x11, + 0x81, 0x6d, 0x10, + 0x78, 0x28, + 0x2a, 0x06, + 0x4c, 0x04, + 0x80, 0x8d, 0x04, + 0x80, 0xbe, 0x03, + 0x1b, 0x03, + 0x0f, 0x0d, +]; +const NORMAL1: &'static [u8] = &[ + 0x5e, 0x22, + 0x7b, 0x05, + 0x03, 0x04, + 0x2d, 0x03, + 0x65, 0x04, + 0x01, 0x2f, + 0x2e, 0x80, 0x82, + 0x1d, 0x03, + 0x31, 0x0f, + 0x1c, 0x04, + 0x24, 0x09, + 0x1e, 0x05, + 0x2b, 0x05, + 0x44, 0x04, + 0x0e, 0x2a, + 0x80, 0xaa, 0x06, + 0x24, 0x04, + 0x24, 0x04, + 0x28, 0x08, + 0x34, 0x0b, + 0x01, 0x80, 0x90, + 0x81, 0x37, 0x09, + 0x16, 0x0a, + 0x08, 0x80, 0x98, + 0x39, 0x03, + 0x63, 0x08, + 0x09, 0x30, + 0x16, 0x05, + 0x21, 0x03, + 0x1b, 0x05, + 0x01, 0x40, + 0x38, 0x04, + 0x4b, 0x05, + 0x2f, 0x04, + 0x0a, 0x07, + 0x09, 0x07, + 0x40, 0x20, + 0x27, 0x04, + 0x0c, 0x09, + 0x36, 0x03, + 0x3a, 0x05, + 0x1a, 0x07, + 0x04, 0x0c, + 0x07, 0x50, + 0x49, 0x37, + 0x33, 0x0d, + 0x33, 0x07, + 0x2e, 0x08, + 0x0a, 0x81, 0x26, + 0x1f, 0x80, 0x81, + 0x28, 0x08, + 0x2a, 0x80, 0xa6, + 0x4e, 0x04, + 0x1e, 0x0f, + 0x43, 0x0e, + 0x19, 0x07, + 0x0a, 0x06, + 0x47, 0x09, + 0x27, 0x09, + 0x75, 0x0b, + 0x3f, 0x41, + 0x2a, 0x06, + 0x3b, 0x05, + 0x0a, 0x06, + 0x51, 0x06, + 0x01, 0x05, + 0x10, 0x03, + 0x05, 0x80, 0x8b, + 0x5f, 0x21, + 0x48, 0x08, + 0x0a, 0x80, 0xa6, + 0x5e, 0x22, + 0x45, 0x0b, + 0x0a, 0x06, + 0x0d, 0x13, + 0x38, 0x08, + 0x0a, 0x36, + 0x2c, 0x04, + 0x10, 0x80, 0xc0, + 0x3c, 0x64, + 0x53, 0x0c, + 0x01, 0x81, 0x00, + 0x48, 0x08, + 0x53, 0x1d, + 0x39, 0x81, 0x07, + 0x46, 0x0a, + 0x1d, 0x03, + 0x47, 0x49, + 0x37, 0x03, + 0x0e, 0x08, + 0x0a, 0x06, + 0x39, 0x07, + 0x0a, 0x81, 0x36, + 0x19, 0x81, 0x07, + 0x83, 0x9a, 0x66, + 0x75, 0x0b, + 0x80, 
0xc4, 0x8a, 0xbc, + 0x84, 0x2f, 0x8f, 0xd1, + 0x82, 0x47, 0xa1, 0xb9, + 0x82, 0x39, 0x07, + 0x2a, 0x04, + 0x02, 0x60, + 0x26, 0x0a, + 0x46, 0x0a, + 0x28, 0x05, + 0x13, 0x82, 0xb0, + 0x5b, 0x65, + 0x45, 0x0b, + 0x2f, 0x10, + 0x11, 0x40, + 0x02, 0x1e, + 0x97, 0xf2, 0x0e, + 0x82, 0xf3, 0xa5, 0x0d, + 0x81, 0x1f, 0x51, + 0x81, 0x8c, 0x89, 0x04, + 0x6b, 0x05, + 0x0d, 0x03, + 0x09, 0x07, + 0x10, 0x93, 0x60, + 0x80, 0xf6, 0x0a, + 0x73, 0x08, + 0x6e, 0x17, + 0x46, 0x80, 0x9a, + 0x14, 0x0c, + 0x57, 0x09, + 0x19, 0x80, 0x87, + 0x81, 0x47, 0x03, + 0x85, 0x42, 0x0f, + 0x15, 0x85, 0x50, + 0x2b, 0x87, 0xd5, + 0x80, 0xd7, 0x29, + 0x4b, 0x05, + 0x0a, 0x04, + 0x02, 0x83, 0x11, + 0x44, 0x81, 0x4b, + 0x3c, 0x06, + 0x01, 0x04, + 0x55, 0x05, + 0x1b, 0x34, + 0x02, 0x81, 0x0e, + 0x2c, 0x04, + 0x64, 0x0c, + 0x56, 0x0a, + 0x0d, 0x03, + 0x5c, 0x04, + 0x3d, 0x39, + 0x1d, 0x0d, + 0x2c, 0x04, + 0x09, 0x07, + 0x02, 0x0e, + 0x06, 0x80, 0x9a, + 0x83, 0xd5, 0x0b, + 0x0d, 0x03, + 0x0a, 0x06, + 0x74, 0x0c, + 0x59, 0x27, + 0x0c, 0x04, + 0x38, 0x08, + 0x0a, 0x06, + 0x28, 0x08, + 0x1e, 0x52, + 0x0c, 0x04, + 0x67, 0x03, + 0x29, 0x0d, + 0x0a, 0x06, + 0x03, 0x0d, + 0x30, 0x60, + 0x0e, 0x85, 0x92, +]; diff --git a/src/libcore/unicode/tables.rs b/src/libcore/unicode/tables.rs new file mode 100644 index 000000000000..3de855ac9431 --- /dev/null +++ b/src/libcore/unicode/tables.rs @@ -0,0 +1,2601 @@ +// Copyright 2012-2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// NOTE: The following code was generated by "./unicode.py", do not edit directly + +#![allow(missing_docs, non_upper_case_globals, non_snake_case)] + +use unicode::version::UnicodeVersion; +use unicode::bool_trie::{BoolTrie, SmallBoolTrie}; + +/// The version of [Unicode](http://www.unicode.org/) that the Unicode parts of +/// `char` and `str` methods are based on. +#[unstable(feature = "unicode_version", issue = "49726")] +pub const UNICODE_VERSION: UnicodeVersion = UnicodeVersion { + major: 11, + minor: 0, + micro: 0, + _priv: (), +}; +pub mod general_category { + pub const Cc_table: &super::SmallBoolTrie = &super::SmallBoolTrie { + r1: &[ + 0, 1, 0 + ], + r2: &[ + 0x00000000ffffffff, 0x8000000000000000 + ], + }; + + pub fn Cc(c: char) -> bool { + Cc_table.lookup(c) + } + + pub const N_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x03ff000000000000, 0x0000000000000000, 0x720c000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x000003ff00000000, 0x0000000000000000, 0x03ff000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x00000000000003ff + ], + r2: [ + 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0, 1, 0, 6, 0, 1, 0, 7, 0, 7, 8, + 0, 0, 0, 0, 9, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 0, 0, 0, 12, 7, 0, 0, 0, 0, 13, 0, 14, 0, 0, 15, 0, 0, 7, 16, 0, 0, 15, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 9, 0, 0, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 20, 21, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 25, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 0, 0, 0, 0, 0, 27, 0, 28, + 29, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 1, 0, 0, 0, 0, 31, 0, 0, 7, 9, 0, 0, 32, 0, 7, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0x0000ffc000000000, 0x03f0ffc000000000, 0x00fcffc000000000, + 0x0007ffc000000000, 0x7f00ffc000000000, 0x01ffffc07f000000, 0x0000000003ff0000, + 0x000fffff00000000, 0x00000000000003ff, 0x1ffffe0000000000, 0x0001c00000000000, + 0x03ff03ff00000000, 0x000000000000ffc0, 0x0000000007ff0000, 0x0000000003ff03ff, + 0x03ff000000000000, 0x03f1000000000000, 0xffffffffffff0000, 0x00000000000003e7, + 0xffffffff00000000, 0x000000000fffffff, 0xfffffc0000000000, 0xffc0000000000000, + 0x00000000000fffff, 0x2000000000000000, 0x070003fe00000080, 0x00000000003c0000, + 0x000003ff00000000, 0x00000000fffeff00, 0xfffe0000000003ff, 0x003f000000000000, + 0x03ff000003ff0000 + ], + r4: [ + 0, 1, 2, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 5, 6, 7, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 + ], + r5: &[ + 0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 7, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 9, 10, 11, 12, 0, 13, 14, 0, 15, 16, 17, 0, 18, 19, 0, 0, 0, 0, 20, 21, 0, + 0, 0, 0, 22, 0, 0, 23, 24, 0, 0, 0, 25, 0, 21, 26, 0, 0, 27, 0, 0, 0, 21, 0, 0, 0, 0, 0, + 28, 0, 28, 0, 0, 0, 0, 0, 28, 0, 29, 30, 0, 0, 0, 0, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 32, 0, 0, 0, 28, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 33, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 38, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 0, 28, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 41, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0x000fffffffffff80, 0x01ffffffffffffff, 0x0000000000000c00, + 0x0ffffffe00000000, 0x0000000f00000000, 0x0000000000000402, 0x00000000003e0000, + 0x000003ff00000000, 0xfe000000ff000000, 0x0000ff8000000000, 0xf800000000000000, + 0x000000000fc00000, 
0x3000000000000000, 0xfffffffffffcffff, 0x60000000000001ff, + 0x00000000e0000000, 0x0000f80000000000, 0xff000000ff000000, 0x0000fe0000000000, + 0xfc00000000000000, 0x03ff000000000000, 0x7fffffff00000000, 0x0000007fe0000000, + 0x00000000001e0000, 0x0000fffffffc0000, 0xffc0000000000000, 0x001ffffe03ff0000, + 0x0000000003ff0000, 0x00000000000003ff, 0x0fff000000000000, 0x0007ffff00000000, + 0x00001fffffff0000, 0xffffffffffffffff, 0x00007fffffffffff, 0x00000003fbff0000, + 0x00000000007fffff, 0x000fffff00000000, 0x01ffffff00000000, 0xffffffffffffc000, + 0x000000000000ff80, 0xfffe000000000000, 0x001eefffffffffff, 0x0000000000001fff + ], + }; + + pub fn N(c: char) -> bool { + N_table.lookup(c) + } + +} + +pub mod derived_property { + pub const Alphabetic_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe07fffffe, 0x0420040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0000501f0003ffc3, + 0x0000000000000000, 0xbcdf000000000020, 0xfffffffbffffd740, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffc03, 0xffffffffffffffff, + 0xfffeffffffffffff, 0xffffffff027fffff, 0xbfff0000000001ff, 0x000787ffffff00b6, + 0xffffffff07ff0000, 0xffffc000feffffff, 0xffffffffffffffff, 0x9c00e1fe1fefffff, + 0xffffffffffff0000, 0xffffffffffffe000, 0x0003ffffffffffff, 0x043007fffffffc00 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, 36, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 36, 36, 36, 36, 36, 36, 36, 36, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 31, 63, 64, 65, 66, 55, 67, 68, 69, 36, 36, 36, 70, 36, 36, + 36, 36, 71, 72, 73, 74, 31, 75, 76, 31, 77, 78, 79, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 80, 81, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 
31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 82, 83, 36, 84, 85, 86, 87, 88, 89, 31, 31, 31, + 31, 31, 31, 31, 90, 44, 91, 92, 93, 36, 94, 95, 31, 31, 31, 31, 31, 31, 31, 31, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 55, 31, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 96, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 
36, 36, 36, 36, 36, 36, 36, 36, 97, 98, 36, 36, 36, 36, 99, 100, 36, 96, 101, 36, 102, + 103, 104, 105, 36, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 36, 117, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 118, 119, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 36, 36, 36, 36, 36, 120, 36, 121, 122, 123, 124, 125, 36, 36, 36, 36, 126, 127, 128, + 129, 31, 130, 36, 131, 132, 133, 113, 134 + ], + r3: &[ + 0x00001ffffcffffff, 0x000007ff01ffffff, 0x3fdfffff00000000, 0xffff03f8fff00000, + 0xefffffffffffffff, 0xfffe000fffe1dfff, 0xe3c5fdfffff99fef, 0x1003000fb080599f, + 0xc36dfdfffff987ee, 0x003f00005e021987, 0xe3edfdfffffbbfee, 0x1e00000f00011bbf, + 0xe3edfdfffff99fee, 0x0002000fb0c0199f, 0xc3ffc718d63dc7ec, 0x0000000000811dc7, + 0xe3fffdfffffddfef, 0x0000000f07601ddf, 0xe3effdfffffddfef, 0x0006000f40601ddf, + 
0xe7fffffffffddfef, 0xfc00000f80f05ddf, 0x2ffbfffffc7fffec, 0x000c0000ff5f807f, + 0x07fffffffffffffe, 0x000000000000207f, 0x3bffecaefef02596, 0x00000000f000205f, + 0x0000000000000001, 0xfffe1ffffffffeff, 0x1ffffffffeffff03, 0x0000000000000000, + 0xf97fffffffffffff, 0xffffc1e7ffff0000, 0xffffffff3000407f, 0xf7ffffffffff20bf, + 0xffffffffffffffff, 0xffffffff3d7f3dff, 0x7f3dffffffff3dff, 0xffffffffff7fff3d, + 0xffffffffff3dffff, 0x0000000087ffffff, 0xffffffff0000ffff, 0x3f3fffffffffffff, + 0xfffffffffffffffe, 0xffff9fffffffffff, 0xffffffff07fffffe, 0x01ffc7ffffffffff, + 0x000fffff000fdfff, 0x000ddfff000fffff, 0xffcfffffffffffff, 0x00000000108001ff, + 0xffffffff00000000, 0x01ffffffffffffff, 0xffff07ffffffffff, 0x003fffffffffffff, + 0x01ff0fff7fffffff, 0x001f3fffffff0000, 0xffff0fffffffffff, 0x00000000000003ff, + 0xffffffff0fffffff, 0x001ffffe7fffffff, 0x0000008000000000, 0xffefffffffffffff, + 0x0000000000000fef, 0xfc00f3ffffffffff, 0x0003ffbfffffffff, 0x3ffffffffc00e000, + 0xe7ffffffffff01ff, 0x006fde0000000000, 0x001fff8000000000, 0xffffffff3f3fffff, + 0x3fffffffaaff3f3f, 0x5fdfffffffffffff, 0x1fdc1fff0fcf1fdc, 0x8002000000000000, + 0x000000001fff0000, 0xf3ffbd503e2ffc84, 0xffffffff000043e0, 0x00000000000001ff, + 0xffc0000000000000, 0x000003ffffffffff, 0xffff7fffffffffff, 0xffffffff7fffffff, + 0x000c781fffffffff, 0xffff20bfffffffff, 0x000080ffffffffff, 0x7f7f7f7f007fffff, + 0xffffffff7f7f7f7f, 0x0000800000000000, 0x1f3e03fe000000e0, 0xfffffffee07fffff, + 0xf7ffffffffffffff, 0xfffeffffffffffe0, 0x07ffffff00007fff, 0xffff000000000000, + 0x0000ffffffffffff, 0x0000000000001fff, 0x3fffffffffff0000, 0x00000c00ffff1fff, + 0x8ff07fffffffffff, 0xfffffffcff800000, 0x03fffffffffff9ff, 0xff80000000000000, + 0x000000fffffff7bb, 0x000fffffffffffff, 0x68fc00000000002f, 0xffff07fffffffc00, + 0x1fffffff0007ffff, 0xfff7ffffffffffff, 0x7c00ffdf00008000, 0x007fffffffffffff, + 0xc47fffff00003fff, 0x7fffffffffffffff, 0x003cffff38000005, 0xffff7f7f007e7e7e, + 0xffff003ff7ffffff, 
0x000007ffffffffff, 0xffff000fffffffff, 0x0ffffffffffff87f, + 0xffff3fffffffffff, 0x0000000003ffffff, 0x5f7ffdffe0f8007f, 0xffffffffffffffdb, + 0x0003ffffffffffff, 0xfffffffffff80000, 0x3fffffffffffffff, 0xffffffffffff0000, + 0xfffffffffffcffff, 0x0fff0000000000ff, 0xffdf000000000000, 0x1fffffffffffffff, + 0x07fffffe00000000, 0xffffffc007fffffe, 0x000000001cfcfcfc + ], + r4: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 9, 5, 10, 11, 12, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 13, 14, + 15, 7, 16, 17, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + ], + r5: &[ + 0, 1, 2, 3, 4, 5, 4, 4, 4, 4, 6, 7, 8, 9, 10, 11, 2, 2, 12, 13, 14, 15, 4, 4, 2, 2, 2, + 2, 16, 17, 4, 4, 18, 19, 20, 21, 22, 4, 23, 4, 24, 25, 26, 27, 28, 29, 30, 4, 2, 31, 32, + 32, 33, 4, 4, 4, 4, 4, 4, 4, 34, 35, 4, 4, 2, 35, 36, 37, 32, 38, 2, 39, 40, 4, 41, 42, + 43, 44, 4, 4, 2, 45, 2, 46, 4, 4, 47, 48, 49, 50, 28, 4, 51, 4, 4, 4, 52, 4, 53, 54, 4, + 4, 4, 4, 55, 56, 57, 52, 4, 4, 4, 4, 58, 59, 60, 4, 61, 62, 63, 4, 4, 4, 4, 64, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 65, 4, 2, 66, 2, 2, 2, 67, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 66, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 68, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 2, 2, 2, 2, 2, 2, 2, 2, 52, 20, 4, 69, 16, 70, 71, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 4, + 4, 2, 72, 73, 74, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 75, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 32, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 20, 76, 2, 2, 2, 2, 2, + 77, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 78, 79, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 80, 81, 82, 83, 84, 2, 2, 2, 2, 85, 86, 87, 88, 89, + 90, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 91, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 92, 2, 93, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 94, 95, 96, 4, 4, 4, 4, 4, 4, 4, 4, 4, 76, 97, 98, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 10, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 100, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 101, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 102, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 + ], + r6: &[ + 0xb7ffff7fffffefff, 0x000000003fff3fff, 0xffffffffffffffff, 0x07ffffffffffffff, + 0x0000000000000000, 0x001fffffffffffff, 0xffffffff1fffffff, 0x000000000001ffff, + 0xffffe000ffffffff, 0x07ffffffffff07ff, 0xffffffff3fffffff, 0x00000000003eff0f, + 0xffff00003fffffff, 0x0fffffffff0fffff, 0xffff00ffffffffff, 0x0000000fffffffff, + 0x007fffffffffffff, 0x000000ff003fffff, 0x91bffffffffffd3f, 0x007fffff003fffff, + 0x000000007fffffff, 0x0037ffff00000000, 0x03ffffff003fffff, 0xc0ffffffffffffff, + 0x003ffffffeeff06f, 0x1fffffff00000000, 0x000000001fffffff, 0x0000001ffffffeff, + 0x003fffffffffffff, 0x0007ffff003fffff, 0x000000000003ffff, 0x00000000000001ff, + 0x0007ffffffffffff, 0x000000ffffffffff, 0xffff00801fffffff, 0x000000000000003f, + 0x01fffffffffffffc, 0x000001ffffff0000, 0x0047ffffffff0070, 0x000000001400001e, + 0x409ffffffffbffff, 0xffff01ffbfffbd7f, 0x000001ffffffffff, 0xe3edfdfffff99fef, + 0x0000000fe081199f, 0x00000000000007bb, 0x00000000000000b3, 0x7f3fffffffffffff, + 0x000000003f000000, 0x7fffffffffffffff, 0x0000000000000011, 0x000007ffe7ffffff, + 0x01ffffffffffffff, 0xffffffff00000000, 0x80000000ffffffff, 0x7fe7ffffffffffff, + 
0xffffffffffff0000, 0x0000000020ffffcf, 0x7f7ffffffffffdff, 0xfffc000000000001, + 0x007ffefffffcffff, 0xb47ffffffffffb7f, 0xfffffdbf000000cb, 0x00000000017b7fff, + 0x007fffff00000000, 0x0000000003ffffff, 0x00007fffffffffff, 0x000000000000000f, + 0x000000000000007f, 0x00003fffffff0000, 0xe0fffff80000000f, 0x000000000000ffff, + 0x7fffffffffff001f, 0x00000000fff80000, 0x0000000300000000, 0x0003ffffffffffff, + 0xffff000000000000, 0x0fffffffffffffff, 0x1fff07ffffffffff, 0x0000000043ff01ff, + 0xffffffffffdfffff, 0xebffde64dfffffff, 0xffffffffffffffef, 0x7bffffffdfdfe7bf, + 0xfffffffffffdfc5f, 0xffffff3fffffffff, 0xf7fffffff7fffffd, 0xffdfffffffdfffff, + 0xffff7fffffff7fff, 0xfffffdfffffffdff, 0x0000000000000ff7, 0x000007dbf9ffff7f, + 0x000000000000001f, 0x000000000000008f, 0x0af7fe96ffffffef, 0x5ef7f796aa96ea84, + 0x0ffffbee0ffffbff, 0xffff03ffffff03ff, 0x00000000000003ff, 0x00000000007fffff, + 0xffff0003ffffffff, 0x00000001ffffffff, 0x000000003fffffff + ], + }; + + pub fn Alphabetic(c: char) -> bool { + Alphabetic_table.lookup(c) + } + + pub const Case_Ignorable_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0400408000000000, 0x0000000140000000, 0x0190a10000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0xffff000000000000, 0xffffffffffffffff, + 0xffffffffffffffff, 0x0430ffffffffffff, 0x00000000000000b0, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x00000000000003f8, 0x0000000000000000, + 0x0000000000000000, 0x0000000002000000, 0xbffffffffffe0000, 0x00100000000000b6, + 0x0000000017ff003f, 0x00010000fffff801, 0x0000000000000000, 0x00003dffbfc00000, + 0xffff000000028000, 0x00000000000007ff, 0x0001ffc000000000, 0x243ff80000000000 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 10, 11, 12, 13, 14, 15, 16, 11, 17, 18, 19, 2, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 2, 2, 2, 2, 2, 2, 2, 2, 2, 33, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 34, 35, 36, 37, 38, 39, 40, 2, 41, 2, 2, 2, 42, 43, 44, 2, + 45, 46, 47, 48, 49, 50, 2, 51, 52, 53, 54, 55, 2, 2, 2, 2, 2, 2, 56, 57, 58, 59, 60, 61, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 62, 2, 63, 2, 64, 2, 65, 66, 2, 2, 2, 2, + 2, 2, 2, 67, 2, 68, 69, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 70, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 50, 2, 2, 2, 2, 71, 72, 73, 74, 75, 76, 77, 78, 79, 2, 2, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 2, 89, 2, 90, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 91, 2, 92, 93, 2, 2, 2, 2, 2, 2, 2, 2, 94, 95, 2, 96, + 97, 98, 99, 100 + ], + r3: &[ + 0x00003fffffc00000, 0x000000000e000000, 0x0000000000000000, 0xfffffffffff80000, + 0x1400000000000007, 0x0002000c00fe21fe, 0x1000000000000002, 0x4000000c0000201e, + 0x1000000000000006, 0x0023000000023986, 0xfc00000c000021be, 0x9000000000000002, + 0x0000000c0040201e, 0x0000000000000004, 0x0000000000002001, 0xc000000000000011, + 0x0000000c00603dc1, 0x0000000c00003040, 0x1800000000000003, 0x0000000c0000201e, + 0x00000000005c0400, 0x07f2000000000000, 0x0000000000007fc0, 0x1bf2000000000000, + 0x0000000000003f40, 0x02a0000003000000, 0x7ffe000000000000, 0x1ffffffffeffe0df, + 0x0000000000000040, 0x66fde00000000000, 0x001e0001c3000000, 0x0000000020002064, + 0x1000000000000000, 0x00000000e0000000, 0x001c0000001c0000, 0x000c0000000c0000, + 0x3fb0000000000000, 0x00000000208ffe40, 0x0000000000007800, 0x0000000000000008, + 0x0000020000000060, 0x0e04018700000000, 0x0000000009800000, 0x9ff81fe57f400000, + 0x7fff008000000000, 0x17d000000000000f, 0x000ff80000000004, 0x00003b3c00000003, + 0x0003a34000000000, 0x00cff00000000000, 0x3f00000000000000, 
0x031021fdfff70000, + 0xfffff00000000000, 0x010007ffffffffff, 0xfffffffff8000000, 0xfbffffffffffffff, + 0xa000000000000000, 0x6000e000e000e003, 0x00007c900300f800, 0x8002ffdf00000000, + 0x000000001fff0000, 0x0001ffffffff0000, 0x3000000000000000, 0x0003800000000000, + 0x8000800000000000, 0xffffffff00000000, 0x0000800000000000, 0x083e3c0000000020, + 0x000000007e000000, 0x7000000000000000, 0x0000000000200000, 0x0000000000001000, + 0xbff7800000000000, 0x00000000f0000000, 0x0003000000000000, 0x00000003ffffffff, + 0x0001000000000000, 0x0000000000000700, 0x0300000000000000, 0x0000006000000844, + 0x8003ffff00000030, 0x00003fc000000000, 0x000000000003ff80, 0x13c8000000000007, + 0x0000006000008000, 0x00667e0000000000, 0x1001000000001008, 0xc19d000000000000, + 0x0058300020000002, 0x00000000f8000000, 0x0000212000000000, 0x0000000040000000, + 0xfffc000000000000, 0x0000000000000003, 0x0000ffff0008ffff, 0x0000000000240000, + 0x8000000000000000, 0x4000000004004080, 0x0001000000000001, 0x00000000c0000000, + 0x0e00000800000000 + ], + r4: [ + 0, 1, 2, 2, 2, 2, 3, 2, 2, 2, 2, 4, 2, 5, 6, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 
0, 0, 0, 0, 0, + 0, 0, 0, 7, 0, 0, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 17, 18, 19, 0, 0, 20, 21, 22, + 23, 0, 0, 24, 25, 26, 27, 28, 0, 29, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 0, 31, 32, 33, 0, 0, + 0, 0, 0, 34, 0, 35, 0, 36, 37, 38, 0, 0, 0, 0, 39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 41, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 43, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 47, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, 50, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 53, 0, 54, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 56, 57, 0, 0, 57, 57, 57, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0x2000000000000000, 0x0000000100000000, 0x07c0000000000000, + 0x870000000000f06e, 0x0000006000000000, 0x000000f000000000, 0x000000000001ffc0, + 0xff00000000000002, 0x800000000000007f, 0x2678000000000003, 0x0000000000002000, + 0x001fef8000000007, 0x0008000000000000, 0x7fc0000000000003, 0x0000000000001e00, 
+ 0x40d3800000000000, 0x000007f880000000, 0x1800000000000003, 0x001f1fc000000001, + 0xff00000000000000, 0x000000004000005c, 0x85f8000000000000, 0x000000000000000d, + 0xb03c000000000000, 0x0000000030000001, 0xa7f8000000000000, 0x0000000000000001, + 0x00bf280000000000, 0x00000fbce0000000, 0x06ff800000000000, 0x79f80000000007fe, + 0x000000000e7e0080, 0x00000000037ffc00, 0xbf7f000000000000, 0x006dfcfffffc0000, + 0xb47e000000000000, 0x00000000000000bf, 0x0000000000a30000, 0x0018000000000000, + 0x001f000000000000, 0x007f000000000000, 0x000000000000000f, 0x00000000ffff8000, + 0x0000000300000000, 0x0000000f60000000, 0xfff8038000000000, 0x00003c0000000fe7, + 0x000000000000001c, 0xf87fffffffffffff, 0x00201fffffffffff, 0x0000fffef8000010, + 0x000007dbf9ffff7f, 0x00000000007f0000, 0x00000000000007f0, 0xf800000000000000, + 0xffffffff00000002, 0xffffffffffffffff, 0x0000ffffffffffff + ], + }; + + pub fn Case_Ignorable(c: char) -> bool { + Case_Ignorable_table.lookup(c) + } + + pub const Cased_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe07fffffe, 0x0420040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xf7ffffffffffffff, 0xfffffffffffffff0, + 0xffffffffffffffff, 0xffffffffffffffff, 0x01ffffffffefffff, 0x0000001f00000003, + 0x0000000000000000, 0xbccf000000000020, 0xfffffffbffffd740, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffc03, 0xffffffffffffffff, + 0xfffeffffffffffff, 0xffffffff007fffff, 0x00000000000001ff, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 5, 5, 5, + 0, 5, 5, 5, 5, 
6, 7, 8, 9, 0, 10, 11, 0, 12, 13, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 17, 18, 5, 19, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 22, + 0, 23, 5, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 27, 5, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 29, 30, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0xffffffff00000000, 0xe7ffffffffff20bf, 0x3f3fffffffffffff, + 0xe7ffffffffff01ff, 0xffffffffffffffff, 0xffffffff3f3fffff, 0x3fffffffaaff3f3f, + 0x5fdfffffffffffff, 0x1fdc1fff0fcf1fdc, 0x8002000000000000, 0x000000001fff0000, + 0xf21fbd503e2ffc84, 0xffffffff000043e0, 0x0000000000000018, 0xffc0000000000000, + 0x000003ffffffffff, 0xffff7fffffffffff, 0xffffffff7fffffff, 0x000c781fffffffff, + 0x000020bfffffffff, 0x00003fffffffffff, 0x000000003fffffff, 0xfffffffc00000000, + 0x03ffffffffff78ff, 0x0700000000000000, 0xffff000000000000, 0xffff003ff7ffffff, + 0x0000000000f8007f, 0x07fffffe00000000, 0x0000000007fffffe + ], + r4: [ + 0, 1, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 4, 5, 6, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 8, 9, 10, 11, 12, 1, 1, 1, 1, 13, 14, 15, 16, 17, + 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 20, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0xffffffffffffffff, 0x000000000000ffff, 0xffff000000000000, + 0x0fffffffff0fffff, 0x0007ffffffffffff, 0xffffffff00000000, 0x00000000ffffffff, + 0xffffffffffdfffff, 0xebffde64dfffffff, 0xffffffffffffffef, 0x7bffffffdfdfe7bf, + 0xfffffffffffdfc5f, 0xffffff3fffffffff, 0xf7fffffff7fffffd, 0xffdfffffffdfffff, + 0xffff7fffffff7fff, 
0xfffffdfffffffdff, 0x0000000000000ff7, 0x000000000000000f, + 0xffff03ffffff03ff, 0x00000000000003ff + ], + }; + + pub fn Cased(c: char) -> bool { + Cased_table.lookup(c) + } + + pub const Grapheme_Extend_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0xffffffffffffffff, 0x0000ffffffffffff, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x00000000000003f8, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0xbffffffffffe0000, 0x00000000000000b6, + 0x0000000007ff0000, 0x00010000fffff800, 0x0000000000000000, 0x00003d9f9fc00000, + 0xffff000000020000, 0x00000000000007ff, 0x0001ffc000000000, 0x200ff80000000000 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 2, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 33, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 34, 35, 36, 37, 38, 2, 39, 2, 40, 2, 2, 2, 41, 42, 43, 2, 44, + 45, 46, 47, 48, 2, 2, 49, 2, 2, 2, 50, 2, 2, 2, 2, 2, 2, 2, 2, 51, 2, 2, 52, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 53, 2, 54, 2, 55, 2, 2, 2, 2, 2, 2, 2, 2, 56, + 2, 57, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 58, 59, 60, 2, 2, 2, 2, 61, 2, 2, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 2, 2, 2, 71, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 72, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 73, 2, 2, 2, 2, 2, 59, 2 + ], + r3: &[ + 0x00003eeffbc00000, 0x000000000e000000, 0x0000000000000000, 0xfffffffbfff80000, + 0x1400000000000007, 0x0000000c00fe21fe, 0x5000000000000002, 0x4000000c0080201e, + 0x1000000000000006, 0x0023000000023986, 0xfc00000c000021be, 0xd000000000000002, + 0x0000000c00c0201e, 0x4000000000000004, 0x0000000000802001, 0xc000000000000011, + 0x0000000c00603dc1, 0x9000000000000002, 0x0000000c00603044, 0x5800000000000003, + 0x0000000c0080201e, 0x00000000805c8400, 0x07f2000000000000, 0x0000000000007f80, + 0x1bf2000000000000, 0x0000000000003f00, 0x02a0000003000000, 0x7ffe000000000000, + 0x1ffffffffeffe0df, 0x0000000000000040, 0x66fde00000000000, 0x001e0001c3000000, + 0x0000000020002064, 0x00000000e0000000, 0x001c0000001c0000, 0x000c0000000c0000, + 0x3fb0000000000000, 0x00000000200ffe40, 0x0000000000003800, 0x0000020000000060, + 0x0e04018700000000, 0x0000000009800000, 0x9ff81fe57f400000, 0x7fff000000000000, + 0x17d000000000000f, 0x000ff80000000004, 0x00003b3c00000003, 0x0003a34000000000, + 0x00cff00000000000, 0x031021fdfff70000, 0xfbffffffffffffff, 0x0000000000001000, + 0x0001ffffffff0000, 0x0003800000000000, 0x8000000000000000, 0xffffffff00000000, + 0x0000fc0000000000, 0x0000000006000000, 0x3ff7800000000000, 0x00000000c0000000, + 0x0003000000000000, 0x0000006000000844, 0x8003ffff00000030, 0x00003fc000000000, + 0x000000000003ff80, 0x13c8000000000007, 0x0000002000000000, 0x00667e0000000000, + 0x1000000000001008, 0xc19d000000000000, 0x0040300000000002, 0x0000212000000000, + 0x0000000040000000, 0x0000ffff0000ffff + ], + r4: [ + 0, 1, 2, 2, 2, 2, 3, 2, 2, 2, 2, 4, 2, 5, 6, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, + 0, 0, 0, 7, 0, 0, 8, 9, 10, 0, 11, 12, 13, 14, 15, 0, 0, 16, 17, 18, 0, 0, 19, 20, 21, + 22, 0, 0, 23, 24, 25, 26, 27, 0, 28, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 0, 30, 31, 32, 0, 0, + 0, 0, 0, 33, 0, 34, 0, 35, 36, 37, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 39, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43, 44, 0, 0, 45, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 47, 48, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 52, 53, 0, 0, + 53, 53, 53, 54, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0 + ], + r6: &[ + 0x0000000000000000, 0x2000000000000000, 0x0000000100000000, 0x07c0000000000000, + 0x870000000000f06e, 0x0000006000000000, 0x000000f000000000, 0x000000000001ffc0, + 0xff00000000000002, 0x800000000000007f, 0x0678000000000003, 0x001fef8000000007, + 0x0008000000000000, 0x7fc0000000000003, 0x0000000000001e00, 0x40d3800000000000, + 0x000007f880000000, 0x5800000000000003, 0x001f1fc000800001, 0xff00000000000000, + 0x000000004000005c, 0xa5f9000000000000, 0x000000000000000d, 0xb03c800000000000, + 0x0000000030000001, 0xa7f8000000000000, 0x0000000000000001, 0x00bf280000000000, + 0x00000fbce0000000, 0x06ff800000000000, 0x79f80000000007fe, 0x000000000e7e0080, + 0x00000000037ffc00, 0xbf7f000000000000, 0x006dfcfffffc0000, 0xb47e000000000000, + 0x00000000000000bf, 0x0000000000a30000, 0x0018000000000000, 0x001f000000000000, + 0x007f000000000000, 0x0000000000078000, 0x0000000060000000, 0xf807c3a000000000, + 0x00003c0000000fe7, 0x000000000000001c, 0xf87fffffffffffff, 0x00201fffffffffff, + 0x0000fffef8000010, 0x000007dbf9ffff7f, 0x00000000007f0000, 0x00000000000007f0, + 0xffffffff00000000, 0xffffffffffffffff, 0x0000ffffffffffff + ], + }; + + pub fn Grapheme_Extend(c: char) -> bool { + Grapheme_Extend_table.lookup(c) + } + + pub const Lowercase_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe00000000, 0x0420040000000000, 0xff7fffff80000000, + 0x55aaaaaaaaaaaaaa, 0xd4aaaaaaaaaaab55, 0xe6512d2a4e243129, 0xaa29aaaab5555240, + 0x93faaaaaaaaaaaaa, 0xffffffffffffaa85, 0x01ffffffffefffff, 0x0000001f00000003, + 0x0000000000000000, 0x3c8a000000000020, 0xfffff00000010000, 0x192faaaaaae37fff, + 0xffff000000000000, 0xaaaaaaaaffffffff, 0xaaaaaaaaaaaaa802, 0xaaaaaaaaaaaad554, + 0x0000aaaaaaaaaaaa, 0xffffffff00000000, 0x00000000000001ff, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 4, 4, 4, + 0, 5, 5, 6, 5, 7, 8, 9, 10, 0, 11, 12, 0, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 17, 18, 5, 19, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 22, + 0, 23, 24, 25, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 17, 27, 4, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0xe7ffffffffff0000, 0x3f00000000000000, 0x00000000000001ff, + 0xffffffffffffffff, 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaabfeaaaaa, 0x00ff00ff003f00ff, + 0x3fff00ff00ff003f, 0x40df00ff00ff00ff, 0x00dc00ff00cf00dc, 0x8002000000000000, + 0x000000001fff0000, 0x321080000008c400, 0xffff0000000043c0, 0x0000000000000010, + 0x000003ffffff0000, 0xffff000000000000, 0x3fda15627fffffff, 0x0008501aaaaaaaaa, + 0x000020bfffffffff, 0x00002aaaaaaaaaaa, 0x000000003aaaaaaa, 0xaaabaaa800000000, + 0x95ffaaaaaaaaaaaa, 0x02a082aaaaba50aa, 0x0700000000000000, 0xffff003ff7ffffff, + 0x0000000000f8007f, 0x0000000007fffffe + ], + r4: [ + 0, 1, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 4, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0xffffff0000000000, 0x000000000000ffff, 0x0fffffffff000000, + 0x0007ffffffffffff, 0x00000000ffffffff, 0xffffffff00000000, 0x000ffffffc000000, + 0x000000ffffdfc000, 0xebc000000ffffffc, 0xfffffc000000ffef, 0x00ffffffc000000f, + 0x00000ffffffc0000, 0xfc000000ffffffc0, 
0xffffc000000fffff, 0x0ffffffc000000ff, + 0x0000ffffffc00000, 0x0000003ffffffc00, 0xf0000003f7fffffc, 0xffc000000fdfffff, + 0xffff0000003f7fff, 0xfffffc000000fdff, 0x0000000000000bf7, 0xfffffffc00000000, + 0x000000000000000f + ], + }; + + pub fn Lowercase(c: char) -> bool { + Lowercase_table.lookup(c) + } + + pub const Uppercase_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x0000000007fffffe, 0x0000000000000000, 0x000000007f7fffff, + 0xaa55555555555555, 0x2b555555555554aa, 0x11aed2d5b1dbced6, 0x55d255554aaaa490, + 0x6c05555555555555, 0x000000000000557a, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x8045000000000000, 0x00000ffbfffed740, 0xe6905555551c8000, + 0x0000ffffffffffff, 0x5555555500000000, 0x5555555555555401, 0x5555555555552aab, + 0xfffe555555555555, 0x00000000007fffff, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, + 0, 5, 5, 6, 5, 7, 8, 9, 10, 0, 0, 0, 0, 11, 12, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, + 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 16, 17, 5, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 20, 0, + 21, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 24, 0, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0xffffffff00000000, 0x00000000000020bf, 0x003fffffffffffff, + 0xe7ffffffffff0000, 0x5555555555555555, 0x5555555540155555, 0xff00ff003f00ff00, + 0x0000ff00aa003f00, 0x0f00000000000000, 0x0f001f000f000f00, 0xc00f3d503e273884, + 0x0000ffff00000020, 0x0000000000000008, 0xffc0000000000000, 0x000000000000ffff, + 0x00007fffffffffff, 0xc025ea9d00000000, 0x0004280555555555, 0x0000155555555555, + 0x0000000005555555, 0x5554555400000000, 0x6a00555555555555, 0x015f7d5555452855, + 0x07fffffe00000000 + ], + r4: [ + 0, 1, 2, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 4, 5, 6, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0x000000ffffffffff, 0xffff000000000000, 0x00000000000fffff, + 0x0007ffffffffffff, 0xffffffff00000000, 0x00000000ffffffff, 0xfff0000003ffffff, + 0xffffff0000003fff, 0x003fde64d0000003, 0x000003ffffff0000, 0x7b0000001fdfe7b0, + 0xfffff0000001fc5f, 0x03ffffff0000003f, 0x00003ffffff00000, 0xf0000003ffffff00, + 0xffff0000003fffff, 0xffffff00000003ff, 0x07fffffc00000001, 0x001ffffff0000000, + 0x00007fffffc00000, 0x000001ffffff0000, 0x0000000000000400, 0x00000003ffffffff, + 0xffff03ffffff03ff, 0x00000000000003ff + ], + }; + + pub fn Uppercase(c: char) -> bool { + Uppercase_table.lookup(c) + } + + pub const XID_Continue_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x03ff000000000000, 0x07fffffe87fffffe, 0x04a0040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0000501f0003ffc3, + 0xffffffffffffffff, 0xb8dfffffffffffff, 0xfffffffbffffd7c0, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffcfb, 0xffffffffffffffff, + 0xfffeffffffffffff, 
0xffffffff027fffff, 0xbffffffffffe01ff, 0x000787ffffff00b6, + 0xffffffff07ff0000, 0xffffc3ffffffffff, 0xffffffffffffffff, 0x9ffffdff9fefffff, + 0xffffffffffff0000, 0xffffffffffffe7ff, 0x0003ffffffffffff, 0x243fffffffffffff + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 4, 32, 33, 34, 4, 4, 4, 4, 4, 35, 36, 37, 38, 39, 40, + 41, 42, 4, 4, 4, 4, 4, 4, 4, 4, 43, 44, 45, 46, 47, 4, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 4, 61, 4, 62, 63, 64, 65, 66, 4, 4, 4, 67, 4, 4, 4, 4, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 79, 80, 4, 81, 82, 83, 84, 85, 60, 60, 60, 60, 60, 60, 60, 60, 86, + 42, 87, 88, 89, 4, 90, 91, 60, 60, 60, 60, 60, 60, 60, 60, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 52, 60, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 92, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 93, 94, 4, 4, 4, 4, 95, 96, 4, 97, 98, 4, 99, 100, 101, 62, 4, 102, 103, + 104, 4, 105, 106, 107, 4, 108, 109, 110, 4, 111, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 112, 113, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 4, 4, 4, 4, 4, 103, 4, 114, + 115, 116, 97, 117, 4, 118, 4, 4, 119, 120, 121, 122, 123, 124, 4, 125, 126, 127, 128, + 129 + ], + r3: &[ + 0x00003fffffffffff, 0x000007ff0fffffff, 0x3fdfffff00000000, 0xfffffffbfff80000, + 0xffffffffffffffff, 0xfffeffcfffffffff, 0xf3c5fdfffff99fef, 0x5003ffcfb080799f, + 0xd36dfdfffff987ee, 0x003fffc05e023987, 0xf3edfdfffffbbfee, 0xfe00ffcf00013bbf, + 0xf3edfdfffff99fee, 0x0002ffcfb0c0399f, 0xc3ffc718d63dc7ec, 0x0000ffc000813dc7, + 0xe3fffdfffffddfff, 
0x0000ffcf07603ddf, 0xf3effdfffffddfef, 0x0006ffcf40603ddf, + 0xfffffffffffddfef, 0xfc00ffcf80f07ddf, 0x2ffbfffffc7fffec, 0x000cffc0ff5f847f, + 0x07fffffffffffffe, 0x0000000003ff7fff, 0x3bffecaefef02596, 0x00000000f3ff3f5f, + 0xc2a003ff03000001, 0xfffe1ffffffffeff, 0x1ffffffffeffffdf, 0x0000000000000040, + 0xffffffffffff03ff, 0xffffffff3fffffff, 0xf7ffffffffff20bf, 0xffffffff3d7f3dff, + 0x7f3dffffffff3dff, 0xffffffffff7fff3d, 0xffffffffff3dffff, 0x0003fe00e7ffffff, + 0xffffffff0000ffff, 0x3f3fffffffffffff, 0xfffffffffffffffe, 0xffff9fffffffffff, + 0xffffffff07fffffe, 0x01ffc7ffffffffff, 0x001fffff001fdfff, 0x000ddfff000fffff, + 0x000003ff308fffff, 0xffffffff03ff3800, 0x01ffffffffffffff, 0xffff07ffffffffff, + 0x003fffffffffffff, 0x0fff0fff7fffffff, 0x001f3fffffffffc0, 0xffff0fffffffffff, + 0x0000000007ff03ff, 0xffffffff0fffffff, 0x9fffffff7fffffff, 0x3fff008003ff03ff, + 0x0000000000000000, 0x000ff80003ff0fff, 0x000fffffffffffff, 0x00ffffffffffffff, + 0x3fffffffffffe3ff, 0xe7ffffffffff01ff, 0x03fffffffff70000, 0xfbffffffffffffff, + 0xffffffff3f3fffff, 0x3fffffffaaff3f3f, 0x5fdfffffffffffff, 0x1fdc1fff0fcf1fdc, + 0x8000000000000000, 0x8002000000100001, 0x000000001fff0000, 0x0001ffe21fff0000, + 0xf3fffd503f2ffc84, 0xffffffff000043e0, 0x00000000000001ff, 0xffff7fffffffffff, + 0xffffffff7fffffff, 0x000ff81fffffffff, 0xffff20bfffffffff, 0x800080ffffffffff, + 0x7f7f7f7f007fffff, 0xffffffff7f7f7f7f, 0x1f3efffe000000e0, 0xfffffffee67fffff, + 0xf7ffffffffffffff, 0xfffeffffffffffe0, 0x07ffffff00007fff, 0xffff000000000000, + 0x0000ffffffffffff, 0x0000000000001fff, 0x3fffffffffff0000, 0x00000fffffff1fff, + 0xbff0ffffffffffff, 0x0003ffffffffffff, 0xfffffffcff800000, 0x03fffffffffff9ff, + 0xff80000000000000, 0x000000ffffffffff, 0xe8ffffff03ff003f, 0xffff3fffffffffff, + 0x1fffffff000fffff, 0x7fffffff03ff8001, 0x007fffffffffffff, 0xfc7fffff03ff3fff, + 0x007cffff38000007, 0xffff7f7f007e7e7e, 0xffff003ff7ffffff, 0x03ff37ffffffffff, + 0xffff000fffffffff, 0x0ffffffffffff87f, 
0x0000000003ffffff, 0x5f7ffdffe0f8007f, + 0xffffffffffffffdb, 0xfffffffffff80000, 0xfffffff03fffffff, 0x3fffffffffffffff, + 0xffffffffffff0000, 0xfffffffffffcffff, 0x03ff0000000000ff, 0x0018ffff0000ffff, + 0xaa8a00000000e000, 0x1fffffffffffffff, 0x87fffffe03ff0000, 0xffffffc007fffffe, + 0x7fffffffffffffff, 0x000000001cfcfcfc + ], + r4: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 9, 5, 10, 11, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 13, + 14, 7, 15, 16, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 17, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + ], + r5: &[ + 0, 1, 2, 3, 4, 5, 4, 6, 4, 4, 7, 8, 9, 10, 11, 12, 2, 2, 13, 14, 15, 16, 4, 4, 2, 2, 2, + 2, 17, 18, 4, 4, 19, 20, 21, 22, 23, 4, 24, 4, 25, 26, 27, 28, 29, 30, 31, 4, 2, 32, 33, + 33, 34, 4, 4, 4, 4, 4, 4, 4, 35, 36, 4, 4, 2, 37, 3, 38, 39, 40, 2, 41, 42, 4, 43, 44, + 45, 46, 4, 4, 2, 47, 2, 48, 4, 4, 49, 50, 2, 51, 52, 53, 54, 4, 4, 4, 3, 4, 55, 56, 4, + 4, 4, 4, 57, 58, 59, 60, 4, 4, 4, 4, 61, 62, 63, 4, 64, 65, 66, 4, 4, 4, 4, 67, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 68, 4, 2, 69, 2, 2, 2, 70, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 69, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 71, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 2, 2, 2, 2, 2, 2, 2, 2, 60, 72, 4, 73, 17, 74, 75, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 4, + 4, 2, 76, 77, 78, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 79, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 33, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 21, 80, 2, 2, 2, 2, 2, + 81, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 82, 83, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 84, 85, 4, 4, 86, 4, 4, 4, 4, 4, 4, 2, 87, 88, 89, 90, 91, 2, 2, 2, 2, 92, 93, 94, 95, + 96, 97, 4, 4, 4, 4, 4, 4, 4, 4, 98, 99, 100, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 101, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 102, 2, 103, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 104, 105, 106, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 107, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 11, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 108, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 109, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 110, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 111, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 + ], + r6: &[ + 0xb7ffff7fffffefff, 0x000000003fff3fff, 0xffffffffffffffff, 0x07ffffffffffffff, + 0x0000000000000000, 0x001fffffffffffff, 0x2000000000000000, 0xffffffff1fffffff, + 0x000000010001ffff, 0xffffe000ffffffff, 0x07ffffffffff07ff, 0xffffffff3fffffff, + 0x00000000003eff0f, 0xffff03ff3fffffff, 0x0fffffffff0fffff, 0xffff00ffffffffff, + 0x0000000fffffffff, 0x007fffffffffffff, 0x000000ff003fffff, 0x91bffffffffffd3f, + 0x007fffff003fffff, 0x000000007fffffff, 0x0037ffff00000000, 0x03ffffff003fffff, + 0xc0ffffffffffffff, 0x873ffffffeeff06f, 0x1fffffff00000000, 0x000000001fffffff, + 0x0000007ffffffeff, 0x003fffffffffffff, 0x0007ffff003fffff, 0x000000000003ffff, + 0x00000000000001ff, 0x0007ffffffffffff, 0x03ff00ffffffffff, 0xffff00801fffffff, + 0x000000000001ffff, 0x8000ffc00000007f, 0x03ff01ffffff0000, 0xffdfffffffffffff, + 0x004fffffffff0070, 0x0000000017ff1e1f, 0x40fffffffffbffff, 0xffff01ffbfffbd7f, + 0x03ff07ffffffffff, 0xfbedfdfffff99fef, 0x001f1fcfe081399f, 0x0000000043ff07ff, + 0x0000000003ff00bf, 0xff3fffffffffffff, 0x000000003f000001, 0x0000000003ff0011, + 0x00ffffffffffffff, 0x00000000000003ff, 0x03ff0fffe7ffffff, 0xffffffff00000000, + 0x800003ffffffffff, 
0x7fffffffffffffff, 0xffffffffffff0080, 0x0000000023ffffcf, + 0x01ffffffffffffff, 0xff7ffffffffffdff, 0xfffc000003ff0001, 0x007ffefffffcffff, + 0xb47ffffffffffb7f, 0xfffffdbf03ff00ff, 0x000003ff01fb7fff, 0x007fffff00000000, + 0x0000000003ffffff, 0x00007fffffffffff, 0x000000000000000f, 0x000000000000007f, + 0x000003ff7fffffff, 0x001f3fffffff0000, 0xe0fffff803ff000f, 0x000000000000ffff, + 0x7fffffffffff001f, 0x00000000ffff8000, 0x0000000300000000, 0x0003ffffffffffff, + 0xffff000000000000, 0x0fffffffffffffff, 0x1fff07ffffffffff, 0x0000000063ff01ff, + 0xf807e3e000000000, 0x00003c0000000fe7, 0x000000000000001c, 0xffffffffffdfffff, + 0xebffde64dfffffff, 0xffffffffffffffef, 0x7bffffffdfdfe7bf, 0xfffffffffffdfc5f, + 0xffffff3fffffffff, 0xf7fffffff7fffffd, 0xffdfffffffdfffff, 0xffff7fffffff7fff, + 0xfffffdfffffffdff, 0xffffffffffffcff7, 0xf87fffffffffffff, 0x00201fffffffffff, + 0x0000fffef8000010, 0x000007dbf9ffff7f, 0x00000000007f001f, 0x0000000003ff07ff, + 0x0af7fe96ffffffef, 0x5ef7f796aa96ea84, 0x0ffffbee0ffffbff, 0x00000000007fffff, + 0xffff0003ffffffff, 0x00000001ffffffff, 0x000000003fffffff, 0x0000ffffffffffff + ], + }; + + pub fn XID_Continue(c: char) -> bool { + XID_Continue_table.lookup(c) + } + + pub const XID_Start_table: &super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe07fffffe, 0x0420040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0000501f0003ffc3, + 0x0000000000000000, 0xb8df000000000000, 0xfffffffbffffd740, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffc03, 0xffffffffffffffff, + 0xfffeffffffffffff, 0xffffffff027fffff, 0x00000000000001ff, 0x000787ffffff0000, + 0xffffffff00000000, 0xfffec000000007ff, 0xffffffffffffffff, 0x9c00c060002fffff, + 0x0000fffffffd0000, 0xffffffffffffe000, 0x0002003fffffffff, 0x043007fffffffc00 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 23, 25, 26, 27, 28, 29, 3, 30, 31, 32, 33, 34, 34, 34, 34, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 34, 34, 34, 34, 34, 34, 34, 34, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 3, 61, 62, 63, 64, 65, 66, 67, 68, 34, 34, 34, 3, 34, 34, + 34, 34, 69, 70, 71, 72, 3, 73, 74, 3, 75, 76, 77, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 78, + 79, 34, 80, 81, 82, 83, 84, 3, 3, 3, 3, 3, 3, 3, 3, 85, 42, 86, 87, 88, 34, 89, 90, 3, + 3, 3, 3, 3, 3, 3, 3, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 53, 3, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 
34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 91, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 92, 93, 34, 34, 34, 34, 94, + 95, 96, 91, 97, 34, 98, 99, 100, 48, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 34, 113, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 114, 115, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 34, 34, 34, 34, 34, + 116, 34, 117, 118, 119, 120, 121, 34, 122, 34, 34, 123, 124, 125, 126, 3, 127, 34, 128, + 129, 130, 131, 132 + ], + r3: &[ + 0x00000110043fffff, 0x000007ff01ffffff, 0x3fdfffff00000000, 0x0000000000000000, + 0x23fffffffffffff0, 0xfffe0003ff010000, 
0x23c5fdfffff99fe1, 0x10030003b0004000, + 0x036dfdfffff987e0, 0x001c00005e000000, 0x23edfdfffffbbfe0, 0x0200000300010000, + 0x23edfdfffff99fe0, 0x00020003b0000000, 0x03ffc718d63dc7e8, 0x0000000000010000, + 0x23fffdfffffddfe0, 0x0000000307000000, 0x23effdfffffddfe1, 0x0006000340000000, + 0x27fffffffffddfe0, 0xfc00000380704000, 0x2ffbfffffc7fffe0, 0x000000000000007f, + 0x0005fffffffffffe, 0x2005ecaefef02596, 0x00000000f000005f, 0x0000000000000001, + 0x00001ffffffffeff, 0x0000000000001f00, 0x800007ffffffffff, 0xffe1c0623c3f0000, + 0xffffffff00004003, 0xf7ffffffffff20bf, 0xffffffffffffffff, 0xffffffff3d7f3dff, + 0x7f3dffffffff3dff, 0xffffffffff7fff3d, 0xffffffffff3dffff, 0x0000000007ffffff, + 0xffffffff0000ffff, 0x3f3fffffffffffff, 0xfffffffffffffffe, 0xffff9fffffffffff, + 0xffffffff07fffffe, 0x01ffc7ffffffffff, 0x0003ffff0003dfff, 0x0001dfff0003ffff, + 0x000fffffffffffff, 0x0000000010800000, 0xffffffff00000000, 0x01ffffffffffffff, + 0xffff05ffffffffff, 0x003fffffffffffff, 0x000000007fffffff, 0x001f3fffffff0000, + 0xffff0fffffffffff, 0x00000000000003ff, 0xffffffff007fffff, 0x00000000001fffff, + 0x0000008000000000, 0x000fffffffffffe0, 0x0000000000000fe0, 0xfc00c001fffffff8, + 0x0000003fffffffff, 0x0000000fffffffff, 0x3ffffffffc00e000, 0xe7ffffffffff01ff, + 0x0063de0000000000, 0xffffffff3f3fffff, 0x3fffffffaaff3f3f, 0x5fdfffffffffffff, + 0x1fdc1fff0fcf1fdc, 0x8002000000000000, 0x000000001fff0000, 0xf3fffd503f2ffc84, + 0xffffffff000043e0, 0x00000000000001ff, 0xffff7fffffffffff, 0xffffffff7fffffff, + 0x000c781fffffffff, 0xffff20bfffffffff, 0x000080ffffffffff, 0x7f7f7f7f007fffff, + 0x000000007f7f7f7f, 0x1f3e03fe000000e0, 0xfffffffee07fffff, 0xf7ffffffffffffff, + 0xfffeffffffffffe0, 0x07ffffff00007fff, 0xffff000000000000, 0x0000ffffffffffff, + 0x0000000000001fff, 0x3fffffffffff0000, 0x00000c00ffff1fff, 0x80007fffffffffff, + 0xffffffff3fffffff, 0xfffffffcff800000, 0x03fffffffffff9ff, 0xff80000000000000, + 0x00000007fffff7bb, 0x000ffffffffffffc, 0x68fc000000000000, 
0xffff003ffffffc00, + 0x1fffffff0000007f, 0x0007fffffffffff0, 0x7c00ffdf00008000, 0x000001ffffffffff, + 0xc47fffff00000ff7, 0x3e62ffffffffffff, 0x001c07ff38000005, 0xffff7f7f007e7e7e, + 0xffff003ff7ffffff, 0x00000007ffffffff, 0xffff000fffffffff, 0x0ffffffffffff87f, + 0xffff3fffffffffff, 0x0000000003ffffff, 0x5f7ffdffa0f8007f, 0xffffffffffffffdb, + 0x0003ffffffffffff, 0xfffffffffff80000, 0xfffffff03fffffff, 0x3fffffffffffffff, + 0xffffffffffff0000, 0xfffffffffffcffff, 0x03ff0000000000ff, 0xaa8a000000000000, + 0x1fffffffffffffff, 0x07fffffe00000000, 0xffffffc007fffffe, 0x7fffffff3fffffff, + 0x000000001cfcfcfc + ], + r4: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 9, 5, 10, 11, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 13, + 14, 7, 15, 16, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + ], + r5: &[ + 0, 1, 2, 3, 4, 5, 4, 4, 4, 4, 6, 7, 8, 9, 10, 11, 2, 2, 12, 13, 14, 15, 4, 4, 2, 2, 2, + 2, 16, 17, 4, 4, 18, 19, 20, 21, 22, 4, 23, 4, 24, 25, 26, 27, 28, 29, 30, 4, 2, 31, 32, + 32, 15, 4, 4, 4, 4, 4, 4, 4, 33, 34, 4, 4, 35, 4, 36, 37, 38, 39, 40, 41, 42, 4, 43, 20, + 44, 45, 4, 4, 5, 46, 47, 48, 4, 4, 49, 50, 47, 51, 52, 4, 53, 4, 4, 4, 54, 4, 55, 56, 4, + 4, 4, 4, 57, 58, 59, 60, 4, 4, 4, 4, 61, 62, 63, 4, 64, 65, 66, 4, 4, 4, 4, 67, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 68, 4, 2, 49, 2, 2, 2, 69, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 49, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 70, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 2, 2, 2, 2, 2, 2, 2, 2, 60, 20, 4, 71, 47, 72, 63, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 4, + 4, 2, 73, 74, 75, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 76, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 32, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 20, 77, 2, 2, 2, 2, 2, + 78, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 79, 80, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 81, 82, 83, 84, 85, 2, 2, 2, 2, 86, 87, 88, 89, 90, + 91, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 92, 2, 69, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 93, 94, 95, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 96, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 10, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 97, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 98, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 99, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 + ], + r6: &[ + 0xb7ffff7fffffefff, 0x000000003fff3fff, 0xffffffffffffffff, 0x07ffffffffffffff, + 0x0000000000000000, 0x001fffffffffffff, 0xffffffff1fffffff, 0x000000000001ffff, + 0xffffe000ffffffff, 0x003fffffffff07ff, 0xffffffff3fffffff, 0x00000000003eff0f, + 0xffff00003fffffff, 0x0fffffffff0fffff, 0xffff00ffffffffff, 0x0000000fffffffff, + 0x007fffffffffffff, 0x000000ff003fffff, 0x91bffffffffffd3f, 0x007fffff003fffff, + 0x000000007fffffff, 0x0037ffff00000000, 0x03ffffff003fffff, 0xc0ffffffffffffff, + 0x003ffffffeef0001, 0x1fffffff00000000, 0x000000001fffffff, 0x0000001ffffffeff, + 0x003fffffffffffff, 0x0007ffff003fffff, 0x000000000003ffff, 0x00000000000001ff, + 0x0007ffffffffffff, 0xffff00801fffffff, 0x000000000000003f, 0x00fffffffffffff8, + 0x0000fffffffffff8, 0x000001ffffff0000, 0x0000007ffffffff8, 0x0047ffffffff0010, + 0x0007fffffffffff8, 0x000000001400001e, 0x00000ffffffbffff, 0xffff01ffbfffbd7f, + 0x23edfdfffff99fe0, 0x00000003e0010000, 0x0000000000000780, 0x0000ffffffffffff, + 0x00000000000000b0, 0x00007fffffffffff, 0x000000000f000000, 0x0000000000000010, + 0x000007ffffffffff, 
0x0000000007ffffff, 0x00000fffffffffff, 0xffffffff00000000, + 0x80000000ffffffff, 0x0407fffffffff801, 0xfffffffff0010000, 0x00000000200003cf, + 0x01ffffffffffffff, 0x00007ffffffffdff, 0xfffc000000000001, 0x000000000000ffff, + 0x0001fffffffffb7f, 0xfffffdbf00000040, 0x00000000010003ff, 0x0007ffff00000000, + 0x0000000003ffffff, 0x000000000000000f, 0x000000000000007f, 0x00003fffffff0000, + 0xe0fffff80000000f, 0x000000000001001f, 0x00000000fff80000, 0x0000000300000000, + 0x0003ffffffffffff, 0xffff000000000000, 0x0fffffffffffffff, 0x1fff07ffffffffff, + 0x0000000003ff01ff, 0xffffffffffdfffff, 0xebffde64dfffffff, 0xffffffffffffffef, + 0x7bffffffdfdfe7bf, 0xfffffffffffdfc5f, 0xffffff3fffffffff, 0xf7fffffff7fffffd, + 0xffdfffffffdfffff, 0xffff7fffffff7fff, 0xfffffdfffffffdff, 0x0000000000000ff7, + 0x000000000000001f, 0x0af7fe96ffffffef, 0x5ef7f796aa96ea84, 0x0ffffbee0ffffbff, + 0x00000000007fffff, 0xffff0003ffffffff, 0x00000001ffffffff, 0x000000003fffffff + ], + }; + + pub fn XID_Start(c: char) -> bool { + XID_Start_table.lookup(c) + } + +} + +pub mod property { + pub const Pattern_White_Space_table: &super::SmallBoolTrie = &super::SmallBoolTrie { + r1: &[ + 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3 + ], + r2: &[ + 0x0000000100003e00, 0x0000000000000000, 0x0000000000000020, 0x000003000000c000 + ], + }; + + pub fn Pattern_White_Space(c: char) -> bool { + Pattern_White_Space_table.lookup(c) + } + + pub const White_Space_table: &super::SmallBoolTrie = &super::SmallBoolTrie { + r1: &[ + 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3 + ], + r2: &[ + 0x0000000100003e00, 0x0000000000000000, 0x0000000100000020, 0x0000000000000001, + 0x00008300000007ff, 0x0000000080000000 + ], + }; + + pub fn White_Space(c: char) -> bool { + White_Space_table.lookup(c) + } + +} + +pub mod conversions { + pub fn to_lower(c: char) -> [char; 3] { + match bsearch_case_table(c, to_lowercase_table) { + None => [c, '\0', '\0'], + Some(index) => to_lowercase_table[index].1, + } + } + + pub fn to_upper(c: char) -> [char; 3] { + match bsearch_case_table(c, to_uppercase_table) { + None => [c, '\0', '\0'], + Some(index) => to_uppercase_table[index].1, + } + } + + fn bsearch_case_table(c: char, table: &[(char, [char; 3])]) -> Option { + table.binary_search_by(|&(key, _)| key.cmp(&c)).ok() + } + + const to_lowercase_table: &[(char, [char; 3])] = &[ + ('\u{41}', ['\u{61}', '\0', '\0']), ('\u{42}', ['\u{62}', '\0', '\0']), ('\u{43}', + ['\u{63}', '\0', '\0']), ('\u{44}', ['\u{64}', '\0', '\0']), ('\u{45}', ['\u{65}', '\0', + '\0']), ('\u{46}', ['\u{66}', '\0', '\0']), ('\u{47}', ['\u{67}', '\0', '\0']), ('\u{48}', + ['\u{68}', '\0', '\0']), ('\u{49}', ['\u{69}', '\0', '\0']), ('\u{4a}', ['\u{6a}', '\0', + '\0']), ('\u{4b}', ['\u{6b}', '\0', '\0']), ('\u{4c}', ['\u{6c}', '\0', '\0']), ('\u{4d}', + ['\u{6d}', '\0', '\0']), ('\u{4e}', ['\u{6e}', '\0', '\0']), ('\u{4f}', ['\u{6f}', '\0', + '\0']), ('\u{50}', ['\u{70}', '\0', '\0']), ('\u{51}', ['\u{71}', '\0', '\0']), ('\u{52}', + ['\u{72}', '\0', '\0']), ('\u{53}', ['\u{73}', '\0', '\0']), ('\u{54}', ['\u{74}', '\0', + '\0']), ('\u{55}', ['\u{75}', 
'\0', '\0']), ('\u{56}', ['\u{76}', '\0', '\0']), ('\u{57}', + ['\u{77}', '\0', '\0']), ('\u{58}', ['\u{78}', '\0', '\0']), ('\u{59}', ['\u{79}', '\0', + '\0']), ('\u{5a}', ['\u{7a}', '\0', '\0']), ('\u{c0}', ['\u{e0}', '\0', '\0']), ('\u{c1}', + ['\u{e1}', '\0', '\0']), ('\u{c2}', ['\u{e2}', '\0', '\0']), ('\u{c3}', ['\u{e3}', '\0', + '\0']), ('\u{c4}', ['\u{e4}', '\0', '\0']), ('\u{c5}', ['\u{e5}', '\0', '\0']), ('\u{c6}', + ['\u{e6}', '\0', '\0']), ('\u{c7}', ['\u{e7}', '\0', '\0']), ('\u{c8}', ['\u{e8}', '\0', + '\0']), ('\u{c9}', ['\u{e9}', '\0', '\0']), ('\u{ca}', ['\u{ea}', '\0', '\0']), ('\u{cb}', + ['\u{eb}', '\0', '\0']), ('\u{cc}', ['\u{ec}', '\0', '\0']), ('\u{cd}', ['\u{ed}', '\0', + '\0']), ('\u{ce}', ['\u{ee}', '\0', '\0']), ('\u{cf}', ['\u{ef}', '\0', '\0']), ('\u{d0}', + ['\u{f0}', '\0', '\0']), ('\u{d1}', ['\u{f1}', '\0', '\0']), ('\u{d2}', ['\u{f2}', '\0', + '\0']), ('\u{d3}', ['\u{f3}', '\0', '\0']), ('\u{d4}', ['\u{f4}', '\0', '\0']), ('\u{d5}', + ['\u{f5}', '\0', '\0']), ('\u{d6}', ['\u{f6}', '\0', '\0']), ('\u{d8}', ['\u{f8}', '\0', + '\0']), ('\u{d9}', ['\u{f9}', '\0', '\0']), ('\u{da}', ['\u{fa}', '\0', '\0']), ('\u{db}', + ['\u{fb}', '\0', '\0']), ('\u{dc}', ['\u{fc}', '\0', '\0']), ('\u{dd}', ['\u{fd}', '\0', + '\0']), ('\u{de}', ['\u{fe}', '\0', '\0']), ('\u{100}', ['\u{101}', '\0', '\0']), + ('\u{102}', ['\u{103}', '\0', '\0']), ('\u{104}', ['\u{105}', '\0', '\0']), ('\u{106}', + ['\u{107}', '\0', '\0']), ('\u{108}', ['\u{109}', '\0', '\0']), ('\u{10a}', ['\u{10b}', + '\0', '\0']), ('\u{10c}', ['\u{10d}', '\0', '\0']), ('\u{10e}', ['\u{10f}', '\0', '\0']), + ('\u{110}', ['\u{111}', '\0', '\0']), ('\u{112}', ['\u{113}', '\0', '\0']), ('\u{114}', + ['\u{115}', '\0', '\0']), ('\u{116}', ['\u{117}', '\0', '\0']), ('\u{118}', ['\u{119}', + '\0', '\0']), ('\u{11a}', ['\u{11b}', '\0', '\0']), ('\u{11c}', ['\u{11d}', '\0', '\0']), + ('\u{11e}', ['\u{11f}', '\0', '\0']), ('\u{120}', ['\u{121}', '\0', '\0']), ('\u{122}', + ['\u{123}', '\0', 
'\0']), ('\u{124}', ['\u{125}', '\0', '\0']), ('\u{126}', ['\u{127}', + '\0', '\0']), ('\u{128}', ['\u{129}', '\0', '\0']), ('\u{12a}', ['\u{12b}', '\0', '\0']), + ('\u{12c}', ['\u{12d}', '\0', '\0']), ('\u{12e}', ['\u{12f}', '\0', '\0']), ('\u{130}', + ['\u{69}', '\u{307}', '\0']), ('\u{132}', ['\u{133}', '\0', '\0']), ('\u{134}', ['\u{135}', + '\0', '\0']), ('\u{136}', ['\u{137}', '\0', '\0']), ('\u{139}', ['\u{13a}', '\0', '\0']), + ('\u{13b}', ['\u{13c}', '\0', '\0']), ('\u{13d}', ['\u{13e}', '\0', '\0']), ('\u{13f}', + ['\u{140}', '\0', '\0']), ('\u{141}', ['\u{142}', '\0', '\0']), ('\u{143}', ['\u{144}', + '\0', '\0']), ('\u{145}', ['\u{146}', '\0', '\0']), ('\u{147}', ['\u{148}', '\0', '\0']), + ('\u{14a}', ['\u{14b}', '\0', '\0']), ('\u{14c}', ['\u{14d}', '\0', '\0']), ('\u{14e}', + ['\u{14f}', '\0', '\0']), ('\u{150}', ['\u{151}', '\0', '\0']), ('\u{152}', ['\u{153}', + '\0', '\0']), ('\u{154}', ['\u{155}', '\0', '\0']), ('\u{156}', ['\u{157}', '\0', '\0']), + ('\u{158}', ['\u{159}', '\0', '\0']), ('\u{15a}', ['\u{15b}', '\0', '\0']), ('\u{15c}', + ['\u{15d}', '\0', '\0']), ('\u{15e}', ['\u{15f}', '\0', '\0']), ('\u{160}', ['\u{161}', + '\0', '\0']), ('\u{162}', ['\u{163}', '\0', '\0']), ('\u{164}', ['\u{165}', '\0', '\0']), + ('\u{166}', ['\u{167}', '\0', '\0']), ('\u{168}', ['\u{169}', '\0', '\0']), ('\u{16a}', + ['\u{16b}', '\0', '\0']), ('\u{16c}', ['\u{16d}', '\0', '\0']), ('\u{16e}', ['\u{16f}', + '\0', '\0']), ('\u{170}', ['\u{171}', '\0', '\0']), ('\u{172}', ['\u{173}', '\0', '\0']), + ('\u{174}', ['\u{175}', '\0', '\0']), ('\u{176}', ['\u{177}', '\0', '\0']), ('\u{178}', + ['\u{ff}', '\0', '\0']), ('\u{179}', ['\u{17a}', '\0', '\0']), ('\u{17b}', ['\u{17c}', '\0', + '\0']), ('\u{17d}', ['\u{17e}', '\0', '\0']), ('\u{181}', ['\u{253}', '\0', '\0']), + ('\u{182}', ['\u{183}', '\0', '\0']), ('\u{184}', ['\u{185}', '\0', '\0']), ('\u{186}', + ['\u{254}', '\0', '\0']), ('\u{187}', ['\u{188}', '\0', '\0']), ('\u{189}', ['\u{256}', + '\0', '\0']), 
('\u{18a}', ['\u{257}', '\0', '\0']), ('\u{18b}', ['\u{18c}', '\0', '\0']), + ('\u{18e}', ['\u{1dd}', '\0', '\0']), ('\u{18f}', ['\u{259}', '\0', '\0']), ('\u{190}', + ['\u{25b}', '\0', '\0']), ('\u{191}', ['\u{192}', '\0', '\0']), ('\u{193}', ['\u{260}', + '\0', '\0']), ('\u{194}', ['\u{263}', '\0', '\0']), ('\u{196}', ['\u{269}', '\0', '\0']), + ('\u{197}', ['\u{268}', '\0', '\0']), ('\u{198}', ['\u{199}', '\0', '\0']), ('\u{19c}', + ['\u{26f}', '\0', '\0']), ('\u{19d}', ['\u{272}', '\0', '\0']), ('\u{19f}', ['\u{275}', + '\0', '\0']), ('\u{1a0}', ['\u{1a1}', '\0', '\0']), ('\u{1a2}', ['\u{1a3}', '\0', '\0']), + ('\u{1a4}', ['\u{1a5}', '\0', '\0']), ('\u{1a6}', ['\u{280}', '\0', '\0']), ('\u{1a7}', + ['\u{1a8}', '\0', '\0']), ('\u{1a9}', ['\u{283}', '\0', '\0']), ('\u{1ac}', ['\u{1ad}', + '\0', '\0']), ('\u{1ae}', ['\u{288}', '\0', '\0']), ('\u{1af}', ['\u{1b0}', '\0', '\0']), + ('\u{1b1}', ['\u{28a}', '\0', '\0']), ('\u{1b2}', ['\u{28b}', '\0', '\0']), ('\u{1b3}', + ['\u{1b4}', '\0', '\0']), ('\u{1b5}', ['\u{1b6}', '\0', '\0']), ('\u{1b7}', ['\u{292}', + '\0', '\0']), ('\u{1b8}', ['\u{1b9}', '\0', '\0']), ('\u{1bc}', ['\u{1bd}', '\0', '\0']), + ('\u{1c4}', ['\u{1c6}', '\0', '\0']), ('\u{1c5}', ['\u{1c6}', '\0', '\0']), ('\u{1c7}', + ['\u{1c9}', '\0', '\0']), ('\u{1c8}', ['\u{1c9}', '\0', '\0']), ('\u{1ca}', ['\u{1cc}', + '\0', '\0']), ('\u{1cb}', ['\u{1cc}', '\0', '\0']), ('\u{1cd}', ['\u{1ce}', '\0', '\0']), + ('\u{1cf}', ['\u{1d0}', '\0', '\0']), ('\u{1d1}', ['\u{1d2}', '\0', '\0']), ('\u{1d3}', + ['\u{1d4}', '\0', '\0']), ('\u{1d5}', ['\u{1d6}', '\0', '\0']), ('\u{1d7}', ['\u{1d8}', + '\0', '\0']), ('\u{1d9}', ['\u{1da}', '\0', '\0']), ('\u{1db}', ['\u{1dc}', '\0', '\0']), + ('\u{1de}', ['\u{1df}', '\0', '\0']), ('\u{1e0}', ['\u{1e1}', '\0', '\0']), ('\u{1e2}', + ['\u{1e3}', '\0', '\0']), ('\u{1e4}', ['\u{1e5}', '\0', '\0']), ('\u{1e6}', ['\u{1e7}', + '\0', '\0']), ('\u{1e8}', ['\u{1e9}', '\0', '\0']), ('\u{1ea}', ['\u{1eb}', '\0', '\0']), + ('\u{1ec}', 
['\u{1ed}', '\0', '\0']), ('\u{1ee}', ['\u{1ef}', '\0', '\0']), ('\u{1f1}', + ['\u{1f3}', '\0', '\0']), ('\u{1f2}', ['\u{1f3}', '\0', '\0']), ('\u{1f4}', ['\u{1f5}', + '\0', '\0']), ('\u{1f6}', ['\u{195}', '\0', '\0']), ('\u{1f7}', ['\u{1bf}', '\0', '\0']), + ('\u{1f8}', ['\u{1f9}', '\0', '\0']), ('\u{1fa}', ['\u{1fb}', '\0', '\0']), ('\u{1fc}', + ['\u{1fd}', '\0', '\0']), ('\u{1fe}', ['\u{1ff}', '\0', '\0']), ('\u{200}', ['\u{201}', + '\0', '\0']), ('\u{202}', ['\u{203}', '\0', '\0']), ('\u{204}', ['\u{205}', '\0', '\0']), + ('\u{206}', ['\u{207}', '\0', '\0']), ('\u{208}', ['\u{209}', '\0', '\0']), ('\u{20a}', + ['\u{20b}', '\0', '\0']), ('\u{20c}', ['\u{20d}', '\0', '\0']), ('\u{20e}', ['\u{20f}', + '\0', '\0']), ('\u{210}', ['\u{211}', '\0', '\0']), ('\u{212}', ['\u{213}', '\0', '\0']), + ('\u{214}', ['\u{215}', '\0', '\0']), ('\u{216}', ['\u{217}', '\0', '\0']), ('\u{218}', + ['\u{219}', '\0', '\0']), ('\u{21a}', ['\u{21b}', '\0', '\0']), ('\u{21c}', ['\u{21d}', + '\0', '\0']), ('\u{21e}', ['\u{21f}', '\0', '\0']), ('\u{220}', ['\u{19e}', '\0', '\0']), + ('\u{222}', ['\u{223}', '\0', '\0']), ('\u{224}', ['\u{225}', '\0', '\0']), ('\u{226}', + ['\u{227}', '\0', '\0']), ('\u{228}', ['\u{229}', '\0', '\0']), ('\u{22a}', ['\u{22b}', + '\0', '\0']), ('\u{22c}', ['\u{22d}', '\0', '\0']), ('\u{22e}', ['\u{22f}', '\0', '\0']), + ('\u{230}', ['\u{231}', '\0', '\0']), ('\u{232}', ['\u{233}', '\0', '\0']), ('\u{23a}', + ['\u{2c65}', '\0', '\0']), ('\u{23b}', ['\u{23c}', '\0', '\0']), ('\u{23d}', ['\u{19a}', + '\0', '\0']), ('\u{23e}', ['\u{2c66}', '\0', '\0']), ('\u{241}', ['\u{242}', '\0', '\0']), + ('\u{243}', ['\u{180}', '\0', '\0']), ('\u{244}', ['\u{289}', '\0', '\0']), ('\u{245}', + ['\u{28c}', '\0', '\0']), ('\u{246}', ['\u{247}', '\0', '\0']), ('\u{248}', ['\u{249}', + '\0', '\0']), ('\u{24a}', ['\u{24b}', '\0', '\0']), ('\u{24c}', ['\u{24d}', '\0', '\0']), + ('\u{24e}', ['\u{24f}', '\0', '\0']), ('\u{370}', ['\u{371}', '\0', '\0']), ('\u{372}', + ['\u{373}', 
'\0', '\0']), ('\u{376}', ['\u{377}', '\0', '\0']), ('\u{37f}', ['\u{3f3}', + '\0', '\0']), ('\u{386}', ['\u{3ac}', '\0', '\0']), ('\u{388}', ['\u{3ad}', '\0', '\0']), + ('\u{389}', ['\u{3ae}', '\0', '\0']), ('\u{38a}', ['\u{3af}', '\0', '\0']), ('\u{38c}', + ['\u{3cc}', '\0', '\0']), ('\u{38e}', ['\u{3cd}', '\0', '\0']), ('\u{38f}', ['\u{3ce}', + '\0', '\0']), ('\u{391}', ['\u{3b1}', '\0', '\0']), ('\u{392}', ['\u{3b2}', '\0', '\0']), + ('\u{393}', ['\u{3b3}', '\0', '\0']), ('\u{394}', ['\u{3b4}', '\0', '\0']), ('\u{395}', + ['\u{3b5}', '\0', '\0']), ('\u{396}', ['\u{3b6}', '\0', '\0']), ('\u{397}', ['\u{3b7}', + '\0', '\0']), ('\u{398}', ['\u{3b8}', '\0', '\0']), ('\u{399}', ['\u{3b9}', '\0', '\0']), + ('\u{39a}', ['\u{3ba}', '\0', '\0']), ('\u{39b}', ['\u{3bb}', '\0', '\0']), ('\u{39c}', + ['\u{3bc}', '\0', '\0']), ('\u{39d}', ['\u{3bd}', '\0', '\0']), ('\u{39e}', ['\u{3be}', + '\0', '\0']), ('\u{39f}', ['\u{3bf}', '\0', '\0']), ('\u{3a0}', ['\u{3c0}', '\0', '\0']), + ('\u{3a1}', ['\u{3c1}', '\0', '\0']), ('\u{3a3}', ['\u{3c3}', '\0', '\0']), ('\u{3a4}', + ['\u{3c4}', '\0', '\0']), ('\u{3a5}', ['\u{3c5}', '\0', '\0']), ('\u{3a6}', ['\u{3c6}', + '\0', '\0']), ('\u{3a7}', ['\u{3c7}', '\0', '\0']), ('\u{3a8}', ['\u{3c8}', '\0', '\0']), + ('\u{3a9}', ['\u{3c9}', '\0', '\0']), ('\u{3aa}', ['\u{3ca}', '\0', '\0']), ('\u{3ab}', + ['\u{3cb}', '\0', '\0']), ('\u{3cf}', ['\u{3d7}', '\0', '\0']), ('\u{3d8}', ['\u{3d9}', + '\0', '\0']), ('\u{3da}', ['\u{3db}', '\0', '\0']), ('\u{3dc}', ['\u{3dd}', '\0', '\0']), + ('\u{3de}', ['\u{3df}', '\0', '\0']), ('\u{3e0}', ['\u{3e1}', '\0', '\0']), ('\u{3e2}', + ['\u{3e3}', '\0', '\0']), ('\u{3e4}', ['\u{3e5}', '\0', '\0']), ('\u{3e6}', ['\u{3e7}', + '\0', '\0']), ('\u{3e8}', ['\u{3e9}', '\0', '\0']), ('\u{3ea}', ['\u{3eb}', '\0', '\0']), + ('\u{3ec}', ['\u{3ed}', '\0', '\0']), ('\u{3ee}', ['\u{3ef}', '\0', '\0']), ('\u{3f4}', + ['\u{3b8}', '\0', '\0']), ('\u{3f7}', ['\u{3f8}', '\0', '\0']), ('\u{3f9}', ['\u{3f2}', + '\0', '\0']), 
('\u{3fa}', ['\u{3fb}', '\0', '\0']), ('\u{3fd}', ['\u{37b}', '\0', '\0']), + ('\u{3fe}', ['\u{37c}', '\0', '\0']), ('\u{3ff}', ['\u{37d}', '\0', '\0']), ('\u{400}', + ['\u{450}', '\0', '\0']), ('\u{401}', ['\u{451}', '\0', '\0']), ('\u{402}', ['\u{452}', + '\0', '\0']), ('\u{403}', ['\u{453}', '\0', '\0']), ('\u{404}', ['\u{454}', '\0', '\0']), + ('\u{405}', ['\u{455}', '\0', '\0']), ('\u{406}', ['\u{456}', '\0', '\0']), ('\u{407}', + ['\u{457}', '\0', '\0']), ('\u{408}', ['\u{458}', '\0', '\0']), ('\u{409}', ['\u{459}', + '\0', '\0']), ('\u{40a}', ['\u{45a}', '\0', '\0']), ('\u{40b}', ['\u{45b}', '\0', '\0']), + ('\u{40c}', ['\u{45c}', '\0', '\0']), ('\u{40d}', ['\u{45d}', '\0', '\0']), ('\u{40e}', + ['\u{45e}', '\0', '\0']), ('\u{40f}', ['\u{45f}', '\0', '\0']), ('\u{410}', ['\u{430}', + '\0', '\0']), ('\u{411}', ['\u{431}', '\0', '\0']), ('\u{412}', ['\u{432}', '\0', '\0']), + ('\u{413}', ['\u{433}', '\0', '\0']), ('\u{414}', ['\u{434}', '\0', '\0']), ('\u{415}', + ['\u{435}', '\0', '\0']), ('\u{416}', ['\u{436}', '\0', '\0']), ('\u{417}', ['\u{437}', + '\0', '\0']), ('\u{418}', ['\u{438}', '\0', '\0']), ('\u{419}', ['\u{439}', '\0', '\0']), + ('\u{41a}', ['\u{43a}', '\0', '\0']), ('\u{41b}', ['\u{43b}', '\0', '\0']), ('\u{41c}', + ['\u{43c}', '\0', '\0']), ('\u{41d}', ['\u{43d}', '\0', '\0']), ('\u{41e}', ['\u{43e}', + '\0', '\0']), ('\u{41f}', ['\u{43f}', '\0', '\0']), ('\u{420}', ['\u{440}', '\0', '\0']), + ('\u{421}', ['\u{441}', '\0', '\0']), ('\u{422}', ['\u{442}', '\0', '\0']), ('\u{423}', + ['\u{443}', '\0', '\0']), ('\u{424}', ['\u{444}', '\0', '\0']), ('\u{425}', ['\u{445}', + '\0', '\0']), ('\u{426}', ['\u{446}', '\0', '\0']), ('\u{427}', ['\u{447}', '\0', '\0']), + ('\u{428}', ['\u{448}', '\0', '\0']), ('\u{429}', ['\u{449}', '\0', '\0']), ('\u{42a}', + ['\u{44a}', '\0', '\0']), ('\u{42b}', ['\u{44b}', '\0', '\0']), ('\u{42c}', ['\u{44c}', + '\0', '\0']), ('\u{42d}', ['\u{44d}', '\0', '\0']), ('\u{42e}', ['\u{44e}', '\0', '\0']), + ('\u{42f}', 
['\u{44f}', '\0', '\0']), ('\u{460}', ['\u{461}', '\0', '\0']), ('\u{462}', + ['\u{463}', '\0', '\0']), ('\u{464}', ['\u{465}', '\0', '\0']), ('\u{466}', ['\u{467}', + '\0', '\0']), ('\u{468}', ['\u{469}', '\0', '\0']), ('\u{46a}', ['\u{46b}', '\0', '\0']), + ('\u{46c}', ['\u{46d}', '\0', '\0']), ('\u{46e}', ['\u{46f}', '\0', '\0']), ('\u{470}', + ['\u{471}', '\0', '\0']), ('\u{472}', ['\u{473}', '\0', '\0']), ('\u{474}', ['\u{475}', + '\0', '\0']), ('\u{476}', ['\u{477}', '\0', '\0']), ('\u{478}', ['\u{479}', '\0', '\0']), + ('\u{47a}', ['\u{47b}', '\0', '\0']), ('\u{47c}', ['\u{47d}', '\0', '\0']), ('\u{47e}', + ['\u{47f}', '\0', '\0']), ('\u{480}', ['\u{481}', '\0', '\0']), ('\u{48a}', ['\u{48b}', + '\0', '\0']), ('\u{48c}', ['\u{48d}', '\0', '\0']), ('\u{48e}', ['\u{48f}', '\0', '\0']), + ('\u{490}', ['\u{491}', '\0', '\0']), ('\u{492}', ['\u{493}', '\0', '\0']), ('\u{494}', + ['\u{495}', '\0', '\0']), ('\u{496}', ['\u{497}', '\0', '\0']), ('\u{498}', ['\u{499}', + '\0', '\0']), ('\u{49a}', ['\u{49b}', '\0', '\0']), ('\u{49c}', ['\u{49d}', '\0', '\0']), + ('\u{49e}', ['\u{49f}', '\0', '\0']), ('\u{4a0}', ['\u{4a1}', '\0', '\0']), ('\u{4a2}', + ['\u{4a3}', '\0', '\0']), ('\u{4a4}', ['\u{4a5}', '\0', '\0']), ('\u{4a6}', ['\u{4a7}', + '\0', '\0']), ('\u{4a8}', ['\u{4a9}', '\0', '\0']), ('\u{4aa}', ['\u{4ab}', '\0', '\0']), + ('\u{4ac}', ['\u{4ad}', '\0', '\0']), ('\u{4ae}', ['\u{4af}', '\0', '\0']), ('\u{4b0}', + ['\u{4b1}', '\0', '\0']), ('\u{4b2}', ['\u{4b3}', '\0', '\0']), ('\u{4b4}', ['\u{4b5}', + '\0', '\0']), ('\u{4b6}', ['\u{4b7}', '\0', '\0']), ('\u{4b8}', ['\u{4b9}', '\0', '\0']), + ('\u{4ba}', ['\u{4bb}', '\0', '\0']), ('\u{4bc}', ['\u{4bd}', '\0', '\0']), ('\u{4be}', + ['\u{4bf}', '\0', '\0']), ('\u{4c0}', ['\u{4cf}', '\0', '\0']), ('\u{4c1}', ['\u{4c2}', + '\0', '\0']), ('\u{4c3}', ['\u{4c4}', '\0', '\0']), ('\u{4c5}', ['\u{4c6}', '\0', '\0']), + ('\u{4c7}', ['\u{4c8}', '\0', '\0']), ('\u{4c9}', ['\u{4ca}', '\0', '\0']), ('\u{4cb}', + ['\u{4cc}', '\0', 
'\0']), ('\u{4cd}', ['\u{4ce}', '\0', '\0']), ('\u{4d0}', ['\u{4d1}', + '\0', '\0']), ('\u{4d2}', ['\u{4d3}', '\0', '\0']), ('\u{4d4}', ['\u{4d5}', '\0', '\0']), + ('\u{4d6}', ['\u{4d7}', '\0', '\0']), ('\u{4d8}', ['\u{4d9}', '\0', '\0']), ('\u{4da}', + ['\u{4db}', '\0', '\0']), ('\u{4dc}', ['\u{4dd}', '\0', '\0']), ('\u{4de}', ['\u{4df}', + '\0', '\0']), ('\u{4e0}', ['\u{4e1}', '\0', '\0']), ('\u{4e2}', ['\u{4e3}', '\0', '\0']), + ('\u{4e4}', ['\u{4e5}', '\0', '\0']), ('\u{4e6}', ['\u{4e7}', '\0', '\0']), ('\u{4e8}', + ['\u{4e9}', '\0', '\0']), ('\u{4ea}', ['\u{4eb}', '\0', '\0']), ('\u{4ec}', ['\u{4ed}', + '\0', '\0']), ('\u{4ee}', ['\u{4ef}', '\0', '\0']), ('\u{4f0}', ['\u{4f1}', '\0', '\0']), + ('\u{4f2}', ['\u{4f3}', '\0', '\0']), ('\u{4f4}', ['\u{4f5}', '\0', '\0']), ('\u{4f6}', + ['\u{4f7}', '\0', '\0']), ('\u{4f8}', ['\u{4f9}', '\0', '\0']), ('\u{4fa}', ['\u{4fb}', + '\0', '\0']), ('\u{4fc}', ['\u{4fd}', '\0', '\0']), ('\u{4fe}', ['\u{4ff}', '\0', '\0']), + ('\u{500}', ['\u{501}', '\0', '\0']), ('\u{502}', ['\u{503}', '\0', '\0']), ('\u{504}', + ['\u{505}', '\0', '\0']), ('\u{506}', ['\u{507}', '\0', '\0']), ('\u{508}', ['\u{509}', + '\0', '\0']), ('\u{50a}', ['\u{50b}', '\0', '\0']), ('\u{50c}', ['\u{50d}', '\0', '\0']), + ('\u{50e}', ['\u{50f}', '\0', '\0']), ('\u{510}', ['\u{511}', '\0', '\0']), ('\u{512}', + ['\u{513}', '\0', '\0']), ('\u{514}', ['\u{515}', '\0', '\0']), ('\u{516}', ['\u{517}', + '\0', '\0']), ('\u{518}', ['\u{519}', '\0', '\0']), ('\u{51a}', ['\u{51b}', '\0', '\0']), + ('\u{51c}', ['\u{51d}', '\0', '\0']), ('\u{51e}', ['\u{51f}', '\0', '\0']), ('\u{520}', + ['\u{521}', '\0', '\0']), ('\u{522}', ['\u{523}', '\0', '\0']), ('\u{524}', ['\u{525}', + '\0', '\0']), ('\u{526}', ['\u{527}', '\0', '\0']), ('\u{528}', ['\u{529}', '\0', '\0']), + ('\u{52a}', ['\u{52b}', '\0', '\0']), ('\u{52c}', ['\u{52d}', '\0', '\0']), ('\u{52e}', + ['\u{52f}', '\0', '\0']), ('\u{531}', ['\u{561}', '\0', '\0']), ('\u{532}', ['\u{562}', + '\0', '\0']), 
('\u{533}', ['\u{563}', '\0', '\0']), ('\u{534}', ['\u{564}', '\0', '\0']), + ('\u{535}', ['\u{565}', '\0', '\0']), ('\u{536}', ['\u{566}', '\0', '\0']), ('\u{537}', + ['\u{567}', '\0', '\0']), ('\u{538}', ['\u{568}', '\0', '\0']), ('\u{539}', ['\u{569}', + '\0', '\0']), ('\u{53a}', ['\u{56a}', '\0', '\0']), ('\u{53b}', ['\u{56b}', '\0', '\0']), + ('\u{53c}', ['\u{56c}', '\0', '\0']), ('\u{53d}', ['\u{56d}', '\0', '\0']), ('\u{53e}', + ['\u{56e}', '\0', '\0']), ('\u{53f}', ['\u{56f}', '\0', '\0']), ('\u{540}', ['\u{570}', + '\0', '\0']), ('\u{541}', ['\u{571}', '\0', '\0']), ('\u{542}', ['\u{572}', '\0', '\0']), + ('\u{543}', ['\u{573}', '\0', '\0']), ('\u{544}', ['\u{574}', '\0', '\0']), ('\u{545}', + ['\u{575}', '\0', '\0']), ('\u{546}', ['\u{576}', '\0', '\0']), ('\u{547}', ['\u{577}', + '\0', '\0']), ('\u{548}', ['\u{578}', '\0', '\0']), ('\u{549}', ['\u{579}', '\0', '\0']), + ('\u{54a}', ['\u{57a}', '\0', '\0']), ('\u{54b}', ['\u{57b}', '\0', '\0']), ('\u{54c}', + ['\u{57c}', '\0', '\0']), ('\u{54d}', ['\u{57d}', '\0', '\0']), ('\u{54e}', ['\u{57e}', + '\0', '\0']), ('\u{54f}', ['\u{57f}', '\0', '\0']), ('\u{550}', ['\u{580}', '\0', '\0']), + ('\u{551}', ['\u{581}', '\0', '\0']), ('\u{552}', ['\u{582}', '\0', '\0']), ('\u{553}', + ['\u{583}', '\0', '\0']), ('\u{554}', ['\u{584}', '\0', '\0']), ('\u{555}', ['\u{585}', + '\0', '\0']), ('\u{556}', ['\u{586}', '\0', '\0']), ('\u{10a0}', ['\u{2d00}', '\0', '\0']), + ('\u{10a1}', ['\u{2d01}', '\0', '\0']), ('\u{10a2}', ['\u{2d02}', '\0', '\0']), ('\u{10a3}', + ['\u{2d03}', '\0', '\0']), ('\u{10a4}', ['\u{2d04}', '\0', '\0']), ('\u{10a5}', ['\u{2d05}', + '\0', '\0']), ('\u{10a6}', ['\u{2d06}', '\0', '\0']), ('\u{10a7}', ['\u{2d07}', '\0', + '\0']), ('\u{10a8}', ['\u{2d08}', '\0', '\0']), ('\u{10a9}', ['\u{2d09}', '\0', '\0']), + ('\u{10aa}', ['\u{2d0a}', '\0', '\0']), ('\u{10ab}', ['\u{2d0b}', '\0', '\0']), ('\u{10ac}', + ['\u{2d0c}', '\0', '\0']), ('\u{10ad}', ['\u{2d0d}', '\0', '\0']), ('\u{10ae}', ['\u{2d0e}', + 
'\0', '\0']), ('\u{10af}', ['\u{2d0f}', '\0', '\0']), ('\u{10b0}', ['\u{2d10}', '\0', + '\0']), ('\u{10b1}', ['\u{2d11}', '\0', '\0']), ('\u{10b2}', ['\u{2d12}', '\0', '\0']), + ('\u{10b3}', ['\u{2d13}', '\0', '\0']), ('\u{10b4}', ['\u{2d14}', '\0', '\0']), ('\u{10b5}', + ['\u{2d15}', '\0', '\0']), ('\u{10b6}', ['\u{2d16}', '\0', '\0']), ('\u{10b7}', ['\u{2d17}', + '\0', '\0']), ('\u{10b8}', ['\u{2d18}', '\0', '\0']), ('\u{10b9}', ['\u{2d19}', '\0', + '\0']), ('\u{10ba}', ['\u{2d1a}', '\0', '\0']), ('\u{10bb}', ['\u{2d1b}', '\0', '\0']), + ('\u{10bc}', ['\u{2d1c}', '\0', '\0']), ('\u{10bd}', ['\u{2d1d}', '\0', '\0']), ('\u{10be}', + ['\u{2d1e}', '\0', '\0']), ('\u{10bf}', ['\u{2d1f}', '\0', '\0']), ('\u{10c0}', ['\u{2d20}', + '\0', '\0']), ('\u{10c1}', ['\u{2d21}', '\0', '\0']), ('\u{10c2}', ['\u{2d22}', '\0', + '\0']), ('\u{10c3}', ['\u{2d23}', '\0', '\0']), ('\u{10c4}', ['\u{2d24}', '\0', '\0']), + ('\u{10c5}', ['\u{2d25}', '\0', '\0']), ('\u{10c7}', ['\u{2d27}', '\0', '\0']), ('\u{10cd}', + ['\u{2d2d}', '\0', '\0']), ('\u{13a0}', ['\u{ab70}', '\0', '\0']), ('\u{13a1}', ['\u{ab71}', + '\0', '\0']), ('\u{13a2}', ['\u{ab72}', '\0', '\0']), ('\u{13a3}', ['\u{ab73}', '\0', + '\0']), ('\u{13a4}', ['\u{ab74}', '\0', '\0']), ('\u{13a5}', ['\u{ab75}', '\0', '\0']), + ('\u{13a6}', ['\u{ab76}', '\0', '\0']), ('\u{13a7}', ['\u{ab77}', '\0', '\0']), ('\u{13a8}', + ['\u{ab78}', '\0', '\0']), ('\u{13a9}', ['\u{ab79}', '\0', '\0']), ('\u{13aa}', ['\u{ab7a}', + '\0', '\0']), ('\u{13ab}', ['\u{ab7b}', '\0', '\0']), ('\u{13ac}', ['\u{ab7c}', '\0', + '\0']), ('\u{13ad}', ['\u{ab7d}', '\0', '\0']), ('\u{13ae}', ['\u{ab7e}', '\0', '\0']), + ('\u{13af}', ['\u{ab7f}', '\0', '\0']), ('\u{13b0}', ['\u{ab80}', '\0', '\0']), ('\u{13b1}', + ['\u{ab81}', '\0', '\0']), ('\u{13b2}', ['\u{ab82}', '\0', '\0']), ('\u{13b3}', ['\u{ab83}', + '\0', '\0']), ('\u{13b4}', ['\u{ab84}', '\0', '\0']), ('\u{13b5}', ['\u{ab85}', '\0', + '\0']), ('\u{13b6}', ['\u{ab86}', '\0', '\0']), ('\u{13b7}', 
['\u{ab87}', '\0', '\0']), + ('\u{13b8}', ['\u{ab88}', '\0', '\0']), ('\u{13b9}', ['\u{ab89}', '\0', '\0']), ('\u{13ba}', + ['\u{ab8a}', '\0', '\0']), ('\u{13bb}', ['\u{ab8b}', '\0', '\0']), ('\u{13bc}', ['\u{ab8c}', + '\0', '\0']), ('\u{13bd}', ['\u{ab8d}', '\0', '\0']), ('\u{13be}', ['\u{ab8e}', '\0', + '\0']), ('\u{13bf}', ['\u{ab8f}', '\0', '\0']), ('\u{13c0}', ['\u{ab90}', '\0', '\0']), + ('\u{13c1}', ['\u{ab91}', '\0', '\0']), ('\u{13c2}', ['\u{ab92}', '\0', '\0']), ('\u{13c3}', + ['\u{ab93}', '\0', '\0']), ('\u{13c4}', ['\u{ab94}', '\0', '\0']), ('\u{13c5}', ['\u{ab95}', + '\0', '\0']), ('\u{13c6}', ['\u{ab96}', '\0', '\0']), ('\u{13c7}', ['\u{ab97}', '\0', + '\0']), ('\u{13c8}', ['\u{ab98}', '\0', '\0']), ('\u{13c9}', ['\u{ab99}', '\0', '\0']), + ('\u{13ca}', ['\u{ab9a}', '\0', '\0']), ('\u{13cb}', ['\u{ab9b}', '\0', '\0']), ('\u{13cc}', + ['\u{ab9c}', '\0', '\0']), ('\u{13cd}', ['\u{ab9d}', '\0', '\0']), ('\u{13ce}', ['\u{ab9e}', + '\0', '\0']), ('\u{13cf}', ['\u{ab9f}', '\0', '\0']), ('\u{13d0}', ['\u{aba0}', '\0', + '\0']), ('\u{13d1}', ['\u{aba1}', '\0', '\0']), ('\u{13d2}', ['\u{aba2}', '\0', '\0']), + ('\u{13d3}', ['\u{aba3}', '\0', '\0']), ('\u{13d4}', ['\u{aba4}', '\0', '\0']), ('\u{13d5}', + ['\u{aba5}', '\0', '\0']), ('\u{13d6}', ['\u{aba6}', '\0', '\0']), ('\u{13d7}', ['\u{aba7}', + '\0', '\0']), ('\u{13d8}', ['\u{aba8}', '\0', '\0']), ('\u{13d9}', ['\u{aba9}', '\0', + '\0']), ('\u{13da}', ['\u{abaa}', '\0', '\0']), ('\u{13db}', ['\u{abab}', '\0', '\0']), + ('\u{13dc}', ['\u{abac}', '\0', '\0']), ('\u{13dd}', ['\u{abad}', '\0', '\0']), ('\u{13de}', + ['\u{abae}', '\0', '\0']), ('\u{13df}', ['\u{abaf}', '\0', '\0']), ('\u{13e0}', ['\u{abb0}', + '\0', '\0']), ('\u{13e1}', ['\u{abb1}', '\0', '\0']), ('\u{13e2}', ['\u{abb2}', '\0', + '\0']), ('\u{13e3}', ['\u{abb3}', '\0', '\0']), ('\u{13e4}', ['\u{abb4}', '\0', '\0']), + ('\u{13e5}', ['\u{abb5}', '\0', '\0']), ('\u{13e6}', ['\u{abb6}', '\0', '\0']), ('\u{13e7}', + ['\u{abb7}', '\0', '\0']), 
('\u{13e8}', ['\u{abb8}', '\0', '\0']), ('\u{13e9}', ['\u{abb9}', + '\0', '\0']), ('\u{13ea}', ['\u{abba}', '\0', '\0']), ('\u{13eb}', ['\u{abbb}', '\0', + '\0']), ('\u{13ec}', ['\u{abbc}', '\0', '\0']), ('\u{13ed}', ['\u{abbd}', '\0', '\0']), + ('\u{13ee}', ['\u{abbe}', '\0', '\0']), ('\u{13ef}', ['\u{abbf}', '\0', '\0']), ('\u{13f0}', + ['\u{13f8}', '\0', '\0']), ('\u{13f1}', ['\u{13f9}', '\0', '\0']), ('\u{13f2}', ['\u{13fa}', + '\0', '\0']), ('\u{13f3}', ['\u{13fb}', '\0', '\0']), ('\u{13f4}', ['\u{13fc}', '\0', + '\0']), ('\u{13f5}', ['\u{13fd}', '\0', '\0']), ('\u{1c90}', ['\u{10d0}', '\0', '\0']), + ('\u{1c91}', ['\u{10d1}', '\0', '\0']), ('\u{1c92}', ['\u{10d2}', '\0', '\0']), ('\u{1c93}', + ['\u{10d3}', '\0', '\0']), ('\u{1c94}', ['\u{10d4}', '\0', '\0']), ('\u{1c95}', ['\u{10d5}', + '\0', '\0']), ('\u{1c96}', ['\u{10d6}', '\0', '\0']), ('\u{1c97}', ['\u{10d7}', '\0', + '\0']), ('\u{1c98}', ['\u{10d8}', '\0', '\0']), ('\u{1c99}', ['\u{10d9}', '\0', '\0']), + ('\u{1c9a}', ['\u{10da}', '\0', '\0']), ('\u{1c9b}', ['\u{10db}', '\0', '\0']), ('\u{1c9c}', + ['\u{10dc}', '\0', '\0']), ('\u{1c9d}', ['\u{10dd}', '\0', '\0']), ('\u{1c9e}', ['\u{10de}', + '\0', '\0']), ('\u{1c9f}', ['\u{10df}', '\0', '\0']), ('\u{1ca0}', ['\u{10e0}', '\0', + '\0']), ('\u{1ca1}', ['\u{10e1}', '\0', '\0']), ('\u{1ca2}', ['\u{10e2}', '\0', '\0']), + ('\u{1ca3}', ['\u{10e3}', '\0', '\0']), ('\u{1ca4}', ['\u{10e4}', '\0', '\0']), ('\u{1ca5}', + ['\u{10e5}', '\0', '\0']), ('\u{1ca6}', ['\u{10e6}', '\0', '\0']), ('\u{1ca7}', ['\u{10e7}', + '\0', '\0']), ('\u{1ca8}', ['\u{10e8}', '\0', '\0']), ('\u{1ca9}', ['\u{10e9}', '\0', + '\0']), ('\u{1caa}', ['\u{10ea}', '\0', '\0']), ('\u{1cab}', ['\u{10eb}', '\0', '\0']), + ('\u{1cac}', ['\u{10ec}', '\0', '\0']), ('\u{1cad}', ['\u{10ed}', '\0', '\0']), ('\u{1cae}', + ['\u{10ee}', '\0', '\0']), ('\u{1caf}', ['\u{10ef}', '\0', '\0']), ('\u{1cb0}', ['\u{10f0}', + '\0', '\0']), ('\u{1cb1}', ['\u{10f1}', '\0', '\0']), ('\u{1cb2}', ['\u{10f2}', '\0', + 
'\0']), ('\u{1cb3}', ['\u{10f3}', '\0', '\0']), ('\u{1cb4}', ['\u{10f4}', '\0', '\0']), + ('\u{1cb5}', ['\u{10f5}', '\0', '\0']), ('\u{1cb6}', ['\u{10f6}', '\0', '\0']), ('\u{1cb7}', + ['\u{10f7}', '\0', '\0']), ('\u{1cb8}', ['\u{10f8}', '\0', '\0']), ('\u{1cb9}', ['\u{10f9}', + '\0', '\0']), ('\u{1cba}', ['\u{10fa}', '\0', '\0']), ('\u{1cbd}', ['\u{10fd}', '\0', + '\0']), ('\u{1cbe}', ['\u{10fe}', '\0', '\0']), ('\u{1cbf}', ['\u{10ff}', '\0', '\0']), + ('\u{1e00}', ['\u{1e01}', '\0', '\0']), ('\u{1e02}', ['\u{1e03}', '\0', '\0']), ('\u{1e04}', + ['\u{1e05}', '\0', '\0']), ('\u{1e06}', ['\u{1e07}', '\0', '\0']), ('\u{1e08}', ['\u{1e09}', + '\0', '\0']), ('\u{1e0a}', ['\u{1e0b}', '\0', '\0']), ('\u{1e0c}', ['\u{1e0d}', '\0', + '\0']), ('\u{1e0e}', ['\u{1e0f}', '\0', '\0']), ('\u{1e10}', ['\u{1e11}', '\0', '\0']), + ('\u{1e12}', ['\u{1e13}', '\0', '\0']), ('\u{1e14}', ['\u{1e15}', '\0', '\0']), ('\u{1e16}', + ['\u{1e17}', '\0', '\0']), ('\u{1e18}', ['\u{1e19}', '\0', '\0']), ('\u{1e1a}', ['\u{1e1b}', + '\0', '\0']), ('\u{1e1c}', ['\u{1e1d}', '\0', '\0']), ('\u{1e1e}', ['\u{1e1f}', '\0', + '\0']), ('\u{1e20}', ['\u{1e21}', '\0', '\0']), ('\u{1e22}', ['\u{1e23}', '\0', '\0']), + ('\u{1e24}', ['\u{1e25}', '\0', '\0']), ('\u{1e26}', ['\u{1e27}', '\0', '\0']), ('\u{1e28}', + ['\u{1e29}', '\0', '\0']), ('\u{1e2a}', ['\u{1e2b}', '\0', '\0']), ('\u{1e2c}', ['\u{1e2d}', + '\0', '\0']), ('\u{1e2e}', ['\u{1e2f}', '\0', '\0']), ('\u{1e30}', ['\u{1e31}', '\0', + '\0']), ('\u{1e32}', ['\u{1e33}', '\0', '\0']), ('\u{1e34}', ['\u{1e35}', '\0', '\0']), + ('\u{1e36}', ['\u{1e37}', '\0', '\0']), ('\u{1e38}', ['\u{1e39}', '\0', '\0']), ('\u{1e3a}', + ['\u{1e3b}', '\0', '\0']), ('\u{1e3c}', ['\u{1e3d}', '\0', '\0']), ('\u{1e3e}', ['\u{1e3f}', + '\0', '\0']), ('\u{1e40}', ['\u{1e41}', '\0', '\0']), ('\u{1e42}', ['\u{1e43}', '\0', + '\0']), ('\u{1e44}', ['\u{1e45}', '\0', '\0']), ('\u{1e46}', ['\u{1e47}', '\0', '\0']), + ('\u{1e48}', ['\u{1e49}', '\0', '\0']), ('\u{1e4a}', ['\u{1e4b}', 
'\0', '\0']), ('\u{1e4c}', + ['\u{1e4d}', '\0', '\0']), ('\u{1e4e}', ['\u{1e4f}', '\0', '\0']), ('\u{1e50}', ['\u{1e51}', + '\0', '\0']), ('\u{1e52}', ['\u{1e53}', '\0', '\0']), ('\u{1e54}', ['\u{1e55}', '\0', + '\0']), ('\u{1e56}', ['\u{1e57}', '\0', '\0']), ('\u{1e58}', ['\u{1e59}', '\0', '\0']), + ('\u{1e5a}', ['\u{1e5b}', '\0', '\0']), ('\u{1e5c}', ['\u{1e5d}', '\0', '\0']), ('\u{1e5e}', + ['\u{1e5f}', '\0', '\0']), ('\u{1e60}', ['\u{1e61}', '\0', '\0']), ('\u{1e62}', ['\u{1e63}', + '\0', '\0']), ('\u{1e64}', ['\u{1e65}', '\0', '\0']), ('\u{1e66}', ['\u{1e67}', '\0', + '\0']), ('\u{1e68}', ['\u{1e69}', '\0', '\0']), ('\u{1e6a}', ['\u{1e6b}', '\0', '\0']), + ('\u{1e6c}', ['\u{1e6d}', '\0', '\0']), ('\u{1e6e}', ['\u{1e6f}', '\0', '\0']), ('\u{1e70}', + ['\u{1e71}', '\0', '\0']), ('\u{1e72}', ['\u{1e73}', '\0', '\0']), ('\u{1e74}', ['\u{1e75}', + '\0', '\0']), ('\u{1e76}', ['\u{1e77}', '\0', '\0']), ('\u{1e78}', ['\u{1e79}', '\0', + '\0']), ('\u{1e7a}', ['\u{1e7b}', '\0', '\0']), ('\u{1e7c}', ['\u{1e7d}', '\0', '\0']), + ('\u{1e7e}', ['\u{1e7f}', '\0', '\0']), ('\u{1e80}', ['\u{1e81}', '\0', '\0']), ('\u{1e82}', + ['\u{1e83}', '\0', '\0']), ('\u{1e84}', ['\u{1e85}', '\0', '\0']), ('\u{1e86}', ['\u{1e87}', + '\0', '\0']), ('\u{1e88}', ['\u{1e89}', '\0', '\0']), ('\u{1e8a}', ['\u{1e8b}', '\0', + '\0']), ('\u{1e8c}', ['\u{1e8d}', '\0', '\0']), ('\u{1e8e}', ['\u{1e8f}', '\0', '\0']), + ('\u{1e90}', ['\u{1e91}', '\0', '\0']), ('\u{1e92}', ['\u{1e93}', '\0', '\0']), ('\u{1e94}', + ['\u{1e95}', '\0', '\0']), ('\u{1e9e}', ['\u{df}', '\0', '\0']), ('\u{1ea0}', ['\u{1ea1}', + '\0', '\0']), ('\u{1ea2}', ['\u{1ea3}', '\0', '\0']), ('\u{1ea4}', ['\u{1ea5}', '\0', + '\0']), ('\u{1ea6}', ['\u{1ea7}', '\0', '\0']), ('\u{1ea8}', ['\u{1ea9}', '\0', '\0']), + ('\u{1eaa}', ['\u{1eab}', '\0', '\0']), ('\u{1eac}', ['\u{1ead}', '\0', '\0']), ('\u{1eae}', + ['\u{1eaf}', '\0', '\0']), ('\u{1eb0}', ['\u{1eb1}', '\0', '\0']), ('\u{1eb2}', ['\u{1eb3}', + '\0', '\0']), ('\u{1eb4}', 
['\u{1eb5}', '\0', '\0']), ('\u{1eb6}', ['\u{1eb7}', '\0', + '\0']), ('\u{1eb8}', ['\u{1eb9}', '\0', '\0']), ('\u{1eba}', ['\u{1ebb}', '\0', '\0']), + ('\u{1ebc}', ['\u{1ebd}', '\0', '\0']), ('\u{1ebe}', ['\u{1ebf}', '\0', '\0']), ('\u{1ec0}', + ['\u{1ec1}', '\0', '\0']), ('\u{1ec2}', ['\u{1ec3}', '\0', '\0']), ('\u{1ec4}', ['\u{1ec5}', + '\0', '\0']), ('\u{1ec6}', ['\u{1ec7}', '\0', '\0']), ('\u{1ec8}', ['\u{1ec9}', '\0', + '\0']), ('\u{1eca}', ['\u{1ecb}', '\0', '\0']), ('\u{1ecc}', ['\u{1ecd}', '\0', '\0']), + ('\u{1ece}', ['\u{1ecf}', '\0', '\0']), ('\u{1ed0}', ['\u{1ed1}', '\0', '\0']), ('\u{1ed2}', + ['\u{1ed3}', '\0', '\0']), ('\u{1ed4}', ['\u{1ed5}', '\0', '\0']), ('\u{1ed6}', ['\u{1ed7}', + '\0', '\0']), ('\u{1ed8}', ['\u{1ed9}', '\0', '\0']), ('\u{1eda}', ['\u{1edb}', '\0', + '\0']), ('\u{1edc}', ['\u{1edd}', '\0', '\0']), ('\u{1ede}', ['\u{1edf}', '\0', '\0']), + ('\u{1ee0}', ['\u{1ee1}', '\0', '\0']), ('\u{1ee2}', ['\u{1ee3}', '\0', '\0']), ('\u{1ee4}', + ['\u{1ee5}', '\0', '\0']), ('\u{1ee6}', ['\u{1ee7}', '\0', '\0']), ('\u{1ee8}', ['\u{1ee9}', + '\0', '\0']), ('\u{1eea}', ['\u{1eeb}', '\0', '\0']), ('\u{1eec}', ['\u{1eed}', '\0', + '\0']), ('\u{1eee}', ['\u{1eef}', '\0', '\0']), ('\u{1ef0}', ['\u{1ef1}', '\0', '\0']), + ('\u{1ef2}', ['\u{1ef3}', '\0', '\0']), ('\u{1ef4}', ['\u{1ef5}', '\0', '\0']), ('\u{1ef6}', + ['\u{1ef7}', '\0', '\0']), ('\u{1ef8}', ['\u{1ef9}', '\0', '\0']), ('\u{1efa}', ['\u{1efb}', + '\0', '\0']), ('\u{1efc}', ['\u{1efd}', '\0', '\0']), ('\u{1efe}', ['\u{1eff}', '\0', + '\0']), ('\u{1f08}', ['\u{1f00}', '\0', '\0']), ('\u{1f09}', ['\u{1f01}', '\0', '\0']), + ('\u{1f0a}', ['\u{1f02}', '\0', '\0']), ('\u{1f0b}', ['\u{1f03}', '\0', '\0']), ('\u{1f0c}', + ['\u{1f04}', '\0', '\0']), ('\u{1f0d}', ['\u{1f05}', '\0', '\0']), ('\u{1f0e}', ['\u{1f06}', + '\0', '\0']), ('\u{1f0f}', ['\u{1f07}', '\0', '\0']), ('\u{1f18}', ['\u{1f10}', '\0', + '\0']), ('\u{1f19}', ['\u{1f11}', '\0', '\0']), ('\u{1f1a}', ['\u{1f12}', '\0', '\0']), + 
('\u{1f1b}', ['\u{1f13}', '\0', '\0']), ('\u{1f1c}', ['\u{1f14}', '\0', '\0']), ('\u{1f1d}', + ['\u{1f15}', '\0', '\0']), ('\u{1f28}', ['\u{1f20}', '\0', '\0']), ('\u{1f29}', ['\u{1f21}', + '\0', '\0']), ('\u{1f2a}', ['\u{1f22}', '\0', '\0']), ('\u{1f2b}', ['\u{1f23}', '\0', + '\0']), ('\u{1f2c}', ['\u{1f24}', '\0', '\0']), ('\u{1f2d}', ['\u{1f25}', '\0', '\0']), + ('\u{1f2e}', ['\u{1f26}', '\0', '\0']), ('\u{1f2f}', ['\u{1f27}', '\0', '\0']), ('\u{1f38}', + ['\u{1f30}', '\0', '\0']), ('\u{1f39}', ['\u{1f31}', '\0', '\0']), ('\u{1f3a}', ['\u{1f32}', + '\0', '\0']), ('\u{1f3b}', ['\u{1f33}', '\0', '\0']), ('\u{1f3c}', ['\u{1f34}', '\0', + '\0']), ('\u{1f3d}', ['\u{1f35}', '\0', '\0']), ('\u{1f3e}', ['\u{1f36}', '\0', '\0']), + ('\u{1f3f}', ['\u{1f37}', '\0', '\0']), ('\u{1f48}', ['\u{1f40}', '\0', '\0']), ('\u{1f49}', + ['\u{1f41}', '\0', '\0']), ('\u{1f4a}', ['\u{1f42}', '\0', '\0']), ('\u{1f4b}', ['\u{1f43}', + '\0', '\0']), ('\u{1f4c}', ['\u{1f44}', '\0', '\0']), ('\u{1f4d}', ['\u{1f45}', '\0', + '\0']), ('\u{1f59}', ['\u{1f51}', '\0', '\0']), ('\u{1f5b}', ['\u{1f53}', '\0', '\0']), + ('\u{1f5d}', ['\u{1f55}', '\0', '\0']), ('\u{1f5f}', ['\u{1f57}', '\0', '\0']), ('\u{1f68}', + ['\u{1f60}', '\0', '\0']), ('\u{1f69}', ['\u{1f61}', '\0', '\0']), ('\u{1f6a}', ['\u{1f62}', + '\0', '\0']), ('\u{1f6b}', ['\u{1f63}', '\0', '\0']), ('\u{1f6c}', ['\u{1f64}', '\0', + '\0']), ('\u{1f6d}', ['\u{1f65}', '\0', '\0']), ('\u{1f6e}', ['\u{1f66}', '\0', '\0']), + ('\u{1f6f}', ['\u{1f67}', '\0', '\0']), ('\u{1f88}', ['\u{1f80}', '\0', '\0']), ('\u{1f89}', + ['\u{1f81}', '\0', '\0']), ('\u{1f8a}', ['\u{1f82}', '\0', '\0']), ('\u{1f8b}', ['\u{1f83}', + '\0', '\0']), ('\u{1f8c}', ['\u{1f84}', '\0', '\0']), ('\u{1f8d}', ['\u{1f85}', '\0', + '\0']), ('\u{1f8e}', ['\u{1f86}', '\0', '\0']), ('\u{1f8f}', ['\u{1f87}', '\0', '\0']), + ('\u{1f98}', ['\u{1f90}', '\0', '\0']), ('\u{1f99}', ['\u{1f91}', '\0', '\0']), ('\u{1f9a}', + ['\u{1f92}', '\0', '\0']), ('\u{1f9b}', ['\u{1f93}', '\0', 
'\0']), ('\u{1f9c}', ['\u{1f94}', + '\0', '\0']), ('\u{1f9d}', ['\u{1f95}', '\0', '\0']), ('\u{1f9e}', ['\u{1f96}', '\0', + '\0']), ('\u{1f9f}', ['\u{1f97}', '\0', '\0']), ('\u{1fa8}', ['\u{1fa0}', '\0', '\0']), + ('\u{1fa9}', ['\u{1fa1}', '\0', '\0']), ('\u{1faa}', ['\u{1fa2}', '\0', '\0']), ('\u{1fab}', + ['\u{1fa3}', '\0', '\0']), ('\u{1fac}', ['\u{1fa4}', '\0', '\0']), ('\u{1fad}', ['\u{1fa5}', + '\0', '\0']), ('\u{1fae}', ['\u{1fa6}', '\0', '\0']), ('\u{1faf}', ['\u{1fa7}', '\0', + '\0']), ('\u{1fb8}', ['\u{1fb0}', '\0', '\0']), ('\u{1fb9}', ['\u{1fb1}', '\0', '\0']), + ('\u{1fba}', ['\u{1f70}', '\0', '\0']), ('\u{1fbb}', ['\u{1f71}', '\0', '\0']), ('\u{1fbc}', + ['\u{1fb3}', '\0', '\0']), ('\u{1fc8}', ['\u{1f72}', '\0', '\0']), ('\u{1fc9}', ['\u{1f73}', + '\0', '\0']), ('\u{1fca}', ['\u{1f74}', '\0', '\0']), ('\u{1fcb}', ['\u{1f75}', '\0', + '\0']), ('\u{1fcc}', ['\u{1fc3}', '\0', '\0']), ('\u{1fd8}', ['\u{1fd0}', '\0', '\0']), + ('\u{1fd9}', ['\u{1fd1}', '\0', '\0']), ('\u{1fda}', ['\u{1f76}', '\0', '\0']), ('\u{1fdb}', + ['\u{1f77}', '\0', '\0']), ('\u{1fe8}', ['\u{1fe0}', '\0', '\0']), ('\u{1fe9}', ['\u{1fe1}', + '\0', '\0']), ('\u{1fea}', ['\u{1f7a}', '\0', '\0']), ('\u{1feb}', ['\u{1f7b}', '\0', + '\0']), ('\u{1fec}', ['\u{1fe5}', '\0', '\0']), ('\u{1ff8}', ['\u{1f78}', '\0', '\0']), + ('\u{1ff9}', ['\u{1f79}', '\0', '\0']), ('\u{1ffa}', ['\u{1f7c}', '\0', '\0']), ('\u{1ffb}', + ['\u{1f7d}', '\0', '\0']), ('\u{1ffc}', ['\u{1ff3}', '\0', '\0']), ('\u{2126}', ['\u{3c9}', + '\0', '\0']), ('\u{212a}', ['\u{6b}', '\0', '\0']), ('\u{212b}', ['\u{e5}', '\0', '\0']), + ('\u{2132}', ['\u{214e}', '\0', '\0']), ('\u{2160}', ['\u{2170}', '\0', '\0']), ('\u{2161}', + ['\u{2171}', '\0', '\0']), ('\u{2162}', ['\u{2172}', '\0', '\0']), ('\u{2163}', ['\u{2173}', + '\0', '\0']), ('\u{2164}', ['\u{2174}', '\0', '\0']), ('\u{2165}', ['\u{2175}', '\0', + '\0']), ('\u{2166}', ['\u{2176}', '\0', '\0']), ('\u{2167}', ['\u{2177}', '\0', '\0']), + ('\u{2168}', ['\u{2178}', '\0', 
'\0']), ('\u{2169}', ['\u{2179}', '\0', '\0']), ('\u{216a}', + ['\u{217a}', '\0', '\0']), ('\u{216b}', ['\u{217b}', '\0', '\0']), ('\u{216c}', ['\u{217c}', + '\0', '\0']), ('\u{216d}', ['\u{217d}', '\0', '\0']), ('\u{216e}', ['\u{217e}', '\0', + '\0']), ('\u{216f}', ['\u{217f}', '\0', '\0']), ('\u{2183}', ['\u{2184}', '\0', '\0']), + ('\u{24b6}', ['\u{24d0}', '\0', '\0']), ('\u{24b7}', ['\u{24d1}', '\0', '\0']), ('\u{24b8}', + ['\u{24d2}', '\0', '\0']), ('\u{24b9}', ['\u{24d3}', '\0', '\0']), ('\u{24ba}', ['\u{24d4}', + '\0', '\0']), ('\u{24bb}', ['\u{24d5}', '\0', '\0']), ('\u{24bc}', ['\u{24d6}', '\0', + '\0']), ('\u{24bd}', ['\u{24d7}', '\0', '\0']), ('\u{24be}', ['\u{24d8}', '\0', '\0']), + ('\u{24bf}', ['\u{24d9}', '\0', '\0']), ('\u{24c0}', ['\u{24da}', '\0', '\0']), ('\u{24c1}', + ['\u{24db}', '\0', '\0']), ('\u{24c2}', ['\u{24dc}', '\0', '\0']), ('\u{24c3}', ['\u{24dd}', + '\0', '\0']), ('\u{24c4}', ['\u{24de}', '\0', '\0']), ('\u{24c5}', ['\u{24df}', '\0', + '\0']), ('\u{24c6}', ['\u{24e0}', '\0', '\0']), ('\u{24c7}', ['\u{24e1}', '\0', '\0']), + ('\u{24c8}', ['\u{24e2}', '\0', '\0']), ('\u{24c9}', ['\u{24e3}', '\0', '\0']), ('\u{24ca}', + ['\u{24e4}', '\0', '\0']), ('\u{24cb}', ['\u{24e5}', '\0', '\0']), ('\u{24cc}', ['\u{24e6}', + '\0', '\0']), ('\u{24cd}', ['\u{24e7}', '\0', '\0']), ('\u{24ce}', ['\u{24e8}', '\0', + '\0']), ('\u{24cf}', ['\u{24e9}', '\0', '\0']), ('\u{2c00}', ['\u{2c30}', '\0', '\0']), + ('\u{2c01}', ['\u{2c31}', '\0', '\0']), ('\u{2c02}', ['\u{2c32}', '\0', '\0']), ('\u{2c03}', + ['\u{2c33}', '\0', '\0']), ('\u{2c04}', ['\u{2c34}', '\0', '\0']), ('\u{2c05}', ['\u{2c35}', + '\0', '\0']), ('\u{2c06}', ['\u{2c36}', '\0', '\0']), ('\u{2c07}', ['\u{2c37}', '\0', + '\0']), ('\u{2c08}', ['\u{2c38}', '\0', '\0']), ('\u{2c09}', ['\u{2c39}', '\0', '\0']), + ('\u{2c0a}', ['\u{2c3a}', '\0', '\0']), ('\u{2c0b}', ['\u{2c3b}', '\0', '\0']), ('\u{2c0c}', + ['\u{2c3c}', '\0', '\0']), ('\u{2c0d}', ['\u{2c3d}', '\0', '\0']), ('\u{2c0e}', ['\u{2c3e}', + 
'\0', '\0']), ('\u{2c0f}', ['\u{2c3f}', '\0', '\0']), ('\u{2c10}', ['\u{2c40}', '\0', + '\0']), ('\u{2c11}', ['\u{2c41}', '\0', '\0']), ('\u{2c12}', ['\u{2c42}', '\0', '\0']), + ('\u{2c13}', ['\u{2c43}', '\0', '\0']), ('\u{2c14}', ['\u{2c44}', '\0', '\0']), ('\u{2c15}', + ['\u{2c45}', '\0', '\0']), ('\u{2c16}', ['\u{2c46}', '\0', '\0']), ('\u{2c17}', ['\u{2c47}', + '\0', '\0']), ('\u{2c18}', ['\u{2c48}', '\0', '\0']), ('\u{2c19}', ['\u{2c49}', '\0', + '\0']), ('\u{2c1a}', ['\u{2c4a}', '\0', '\0']), ('\u{2c1b}', ['\u{2c4b}', '\0', '\0']), + ('\u{2c1c}', ['\u{2c4c}', '\0', '\0']), ('\u{2c1d}', ['\u{2c4d}', '\0', '\0']), ('\u{2c1e}', + ['\u{2c4e}', '\0', '\0']), ('\u{2c1f}', ['\u{2c4f}', '\0', '\0']), ('\u{2c20}', ['\u{2c50}', + '\0', '\0']), ('\u{2c21}', ['\u{2c51}', '\0', '\0']), ('\u{2c22}', ['\u{2c52}', '\0', + '\0']), ('\u{2c23}', ['\u{2c53}', '\0', '\0']), ('\u{2c24}', ['\u{2c54}', '\0', '\0']), + ('\u{2c25}', ['\u{2c55}', '\0', '\0']), ('\u{2c26}', ['\u{2c56}', '\0', '\0']), ('\u{2c27}', + ['\u{2c57}', '\0', '\0']), ('\u{2c28}', ['\u{2c58}', '\0', '\0']), ('\u{2c29}', ['\u{2c59}', + '\0', '\0']), ('\u{2c2a}', ['\u{2c5a}', '\0', '\0']), ('\u{2c2b}', ['\u{2c5b}', '\0', + '\0']), ('\u{2c2c}', ['\u{2c5c}', '\0', '\0']), ('\u{2c2d}', ['\u{2c5d}', '\0', '\0']), + ('\u{2c2e}', ['\u{2c5e}', '\0', '\0']), ('\u{2c60}', ['\u{2c61}', '\0', '\0']), ('\u{2c62}', + ['\u{26b}', '\0', '\0']), ('\u{2c63}', ['\u{1d7d}', '\0', '\0']), ('\u{2c64}', ['\u{27d}', + '\0', '\0']), ('\u{2c67}', ['\u{2c68}', '\0', '\0']), ('\u{2c69}', ['\u{2c6a}', '\0', + '\0']), ('\u{2c6b}', ['\u{2c6c}', '\0', '\0']), ('\u{2c6d}', ['\u{251}', '\0', '\0']), + ('\u{2c6e}', ['\u{271}', '\0', '\0']), ('\u{2c6f}', ['\u{250}', '\0', '\0']), ('\u{2c70}', + ['\u{252}', '\0', '\0']), ('\u{2c72}', ['\u{2c73}', '\0', '\0']), ('\u{2c75}', ['\u{2c76}', + '\0', '\0']), ('\u{2c7e}', ['\u{23f}', '\0', '\0']), ('\u{2c7f}', ['\u{240}', '\0', '\0']), + ('\u{2c80}', ['\u{2c81}', '\0', '\0']), ('\u{2c82}', ['\u{2c83}', '\0', 
'\0']), ('\u{2c84}', + ['\u{2c85}', '\0', '\0']), ('\u{2c86}', ['\u{2c87}', '\0', '\0']), ('\u{2c88}', ['\u{2c89}', + '\0', '\0']), ('\u{2c8a}', ['\u{2c8b}', '\0', '\0']), ('\u{2c8c}', ['\u{2c8d}', '\0', + '\0']), ('\u{2c8e}', ['\u{2c8f}', '\0', '\0']), ('\u{2c90}', ['\u{2c91}', '\0', '\0']), + ('\u{2c92}', ['\u{2c93}', '\0', '\0']), ('\u{2c94}', ['\u{2c95}', '\0', '\0']), ('\u{2c96}', + ['\u{2c97}', '\0', '\0']), ('\u{2c98}', ['\u{2c99}', '\0', '\0']), ('\u{2c9a}', ['\u{2c9b}', + '\0', '\0']), ('\u{2c9c}', ['\u{2c9d}', '\0', '\0']), ('\u{2c9e}', ['\u{2c9f}', '\0', + '\0']), ('\u{2ca0}', ['\u{2ca1}', '\0', '\0']), ('\u{2ca2}', ['\u{2ca3}', '\0', '\0']), + ('\u{2ca4}', ['\u{2ca5}', '\0', '\0']), ('\u{2ca6}', ['\u{2ca7}', '\0', '\0']), ('\u{2ca8}', + ['\u{2ca9}', '\0', '\0']), ('\u{2caa}', ['\u{2cab}', '\0', '\0']), ('\u{2cac}', ['\u{2cad}', + '\0', '\0']), ('\u{2cae}', ['\u{2caf}', '\0', '\0']), ('\u{2cb0}', ['\u{2cb1}', '\0', + '\0']), ('\u{2cb2}', ['\u{2cb3}', '\0', '\0']), ('\u{2cb4}', ['\u{2cb5}', '\0', '\0']), + ('\u{2cb6}', ['\u{2cb7}', '\0', '\0']), ('\u{2cb8}', ['\u{2cb9}', '\0', '\0']), ('\u{2cba}', + ['\u{2cbb}', '\0', '\0']), ('\u{2cbc}', ['\u{2cbd}', '\0', '\0']), ('\u{2cbe}', ['\u{2cbf}', + '\0', '\0']), ('\u{2cc0}', ['\u{2cc1}', '\0', '\0']), ('\u{2cc2}', ['\u{2cc3}', '\0', + '\0']), ('\u{2cc4}', ['\u{2cc5}', '\0', '\0']), ('\u{2cc6}', ['\u{2cc7}', '\0', '\0']), + ('\u{2cc8}', ['\u{2cc9}', '\0', '\0']), ('\u{2cca}', ['\u{2ccb}', '\0', '\0']), ('\u{2ccc}', + ['\u{2ccd}', '\0', '\0']), ('\u{2cce}', ['\u{2ccf}', '\0', '\0']), ('\u{2cd0}', ['\u{2cd1}', + '\0', '\0']), ('\u{2cd2}', ['\u{2cd3}', '\0', '\0']), ('\u{2cd4}', ['\u{2cd5}', '\0', + '\0']), ('\u{2cd6}', ['\u{2cd7}', '\0', '\0']), ('\u{2cd8}', ['\u{2cd9}', '\0', '\0']), + ('\u{2cda}', ['\u{2cdb}', '\0', '\0']), ('\u{2cdc}', ['\u{2cdd}', '\0', '\0']), ('\u{2cde}', + ['\u{2cdf}', '\0', '\0']), ('\u{2ce0}', ['\u{2ce1}', '\0', '\0']), ('\u{2ce2}', ['\u{2ce3}', + '\0', '\0']), ('\u{2ceb}', ['\u{2cec}', 
'\0', '\0']), ('\u{2ced}', ['\u{2cee}', '\0', + '\0']), ('\u{2cf2}', ['\u{2cf3}', '\0', '\0']), ('\u{a640}', ['\u{a641}', '\0', '\0']), + ('\u{a642}', ['\u{a643}', '\0', '\0']), ('\u{a644}', ['\u{a645}', '\0', '\0']), ('\u{a646}', + ['\u{a647}', '\0', '\0']), ('\u{a648}', ['\u{a649}', '\0', '\0']), ('\u{a64a}', ['\u{a64b}', + '\0', '\0']), ('\u{a64c}', ['\u{a64d}', '\0', '\0']), ('\u{a64e}', ['\u{a64f}', '\0', + '\0']), ('\u{a650}', ['\u{a651}', '\0', '\0']), ('\u{a652}', ['\u{a653}', '\0', '\0']), + ('\u{a654}', ['\u{a655}', '\0', '\0']), ('\u{a656}', ['\u{a657}', '\0', '\0']), ('\u{a658}', + ['\u{a659}', '\0', '\0']), ('\u{a65a}', ['\u{a65b}', '\0', '\0']), ('\u{a65c}', ['\u{a65d}', + '\0', '\0']), ('\u{a65e}', ['\u{a65f}', '\0', '\0']), ('\u{a660}', ['\u{a661}', '\0', + '\0']), ('\u{a662}', ['\u{a663}', '\0', '\0']), ('\u{a664}', ['\u{a665}', '\0', '\0']), + ('\u{a666}', ['\u{a667}', '\0', '\0']), ('\u{a668}', ['\u{a669}', '\0', '\0']), ('\u{a66a}', + ['\u{a66b}', '\0', '\0']), ('\u{a66c}', ['\u{a66d}', '\0', '\0']), ('\u{a680}', ['\u{a681}', + '\0', '\0']), ('\u{a682}', ['\u{a683}', '\0', '\0']), ('\u{a684}', ['\u{a685}', '\0', + '\0']), ('\u{a686}', ['\u{a687}', '\0', '\0']), ('\u{a688}', ['\u{a689}', '\0', '\0']), + ('\u{a68a}', ['\u{a68b}', '\0', '\0']), ('\u{a68c}', ['\u{a68d}', '\0', '\0']), ('\u{a68e}', + ['\u{a68f}', '\0', '\0']), ('\u{a690}', ['\u{a691}', '\0', '\0']), ('\u{a692}', ['\u{a693}', + '\0', '\0']), ('\u{a694}', ['\u{a695}', '\0', '\0']), ('\u{a696}', ['\u{a697}', '\0', + '\0']), ('\u{a698}', ['\u{a699}', '\0', '\0']), ('\u{a69a}', ['\u{a69b}', '\0', '\0']), + ('\u{a722}', ['\u{a723}', '\0', '\0']), ('\u{a724}', ['\u{a725}', '\0', '\0']), ('\u{a726}', + ['\u{a727}', '\0', '\0']), ('\u{a728}', ['\u{a729}', '\0', '\0']), ('\u{a72a}', ['\u{a72b}', + '\0', '\0']), ('\u{a72c}', ['\u{a72d}', '\0', '\0']), ('\u{a72e}', ['\u{a72f}', '\0', + '\0']), ('\u{a732}', ['\u{a733}', '\0', '\0']), ('\u{a734}', ['\u{a735}', '\0', '\0']), + ('\u{a736}', 
['\u{a737}', '\0', '\0']), ('\u{a738}', ['\u{a739}', '\0', '\0']), ('\u{a73a}', + ['\u{a73b}', '\0', '\0']), ('\u{a73c}', ['\u{a73d}', '\0', '\0']), ('\u{a73e}', ['\u{a73f}', + '\0', '\0']), ('\u{a740}', ['\u{a741}', '\0', '\0']), ('\u{a742}', ['\u{a743}', '\0', + '\0']), ('\u{a744}', ['\u{a745}', '\0', '\0']), ('\u{a746}', ['\u{a747}', '\0', '\0']), + ('\u{a748}', ['\u{a749}', '\0', '\0']), ('\u{a74a}', ['\u{a74b}', '\0', '\0']), ('\u{a74c}', + ['\u{a74d}', '\0', '\0']), ('\u{a74e}', ['\u{a74f}', '\0', '\0']), ('\u{a750}', ['\u{a751}', + '\0', '\0']), ('\u{a752}', ['\u{a753}', '\0', '\0']), ('\u{a754}', ['\u{a755}', '\0', + '\0']), ('\u{a756}', ['\u{a757}', '\0', '\0']), ('\u{a758}', ['\u{a759}', '\0', '\0']), + ('\u{a75a}', ['\u{a75b}', '\0', '\0']), ('\u{a75c}', ['\u{a75d}', '\0', '\0']), ('\u{a75e}', + ['\u{a75f}', '\0', '\0']), ('\u{a760}', ['\u{a761}', '\0', '\0']), ('\u{a762}', ['\u{a763}', + '\0', '\0']), ('\u{a764}', ['\u{a765}', '\0', '\0']), ('\u{a766}', ['\u{a767}', '\0', + '\0']), ('\u{a768}', ['\u{a769}', '\0', '\0']), ('\u{a76a}', ['\u{a76b}', '\0', '\0']), + ('\u{a76c}', ['\u{a76d}', '\0', '\0']), ('\u{a76e}', ['\u{a76f}', '\0', '\0']), ('\u{a779}', + ['\u{a77a}', '\0', '\0']), ('\u{a77b}', ['\u{a77c}', '\0', '\0']), ('\u{a77d}', ['\u{1d79}', + '\0', '\0']), ('\u{a77e}', ['\u{a77f}', '\0', '\0']), ('\u{a780}', ['\u{a781}', '\0', + '\0']), ('\u{a782}', ['\u{a783}', '\0', '\0']), ('\u{a784}', ['\u{a785}', '\0', '\0']), + ('\u{a786}', ['\u{a787}', '\0', '\0']), ('\u{a78b}', ['\u{a78c}', '\0', '\0']), ('\u{a78d}', + ['\u{265}', '\0', '\0']), ('\u{a790}', ['\u{a791}', '\0', '\0']), ('\u{a792}', ['\u{a793}', + '\0', '\0']), ('\u{a796}', ['\u{a797}', '\0', '\0']), ('\u{a798}', ['\u{a799}', '\0', + '\0']), ('\u{a79a}', ['\u{a79b}', '\0', '\0']), ('\u{a79c}', ['\u{a79d}', '\0', '\0']), + ('\u{a79e}', ['\u{a79f}', '\0', '\0']), ('\u{a7a0}', ['\u{a7a1}', '\0', '\0']), ('\u{a7a2}', + ['\u{a7a3}', '\0', '\0']), ('\u{a7a4}', ['\u{a7a5}', '\0', '\0']), 
('\u{a7a6}', ['\u{a7a7}', + '\0', '\0']), ('\u{a7a8}', ['\u{a7a9}', '\0', '\0']), ('\u{a7aa}', ['\u{266}', '\0', '\0']), + ('\u{a7ab}', ['\u{25c}', '\0', '\0']), ('\u{a7ac}', ['\u{261}', '\0', '\0']), ('\u{a7ad}', + ['\u{26c}', '\0', '\0']), ('\u{a7ae}', ['\u{26a}', '\0', '\0']), ('\u{a7b0}', ['\u{29e}', + '\0', '\0']), ('\u{a7b1}', ['\u{287}', '\0', '\0']), ('\u{a7b2}', ['\u{29d}', '\0', '\0']), + ('\u{a7b3}', ['\u{ab53}', '\0', '\0']), ('\u{a7b4}', ['\u{a7b5}', '\0', '\0']), ('\u{a7b6}', + ['\u{a7b7}', '\0', '\0']), ('\u{a7b8}', ['\u{a7b9}', '\0', '\0']), ('\u{ff21}', ['\u{ff41}', + '\0', '\0']), ('\u{ff22}', ['\u{ff42}', '\0', '\0']), ('\u{ff23}', ['\u{ff43}', '\0', + '\0']), ('\u{ff24}', ['\u{ff44}', '\0', '\0']), ('\u{ff25}', ['\u{ff45}', '\0', '\0']), + ('\u{ff26}', ['\u{ff46}', '\0', '\0']), ('\u{ff27}', ['\u{ff47}', '\0', '\0']), ('\u{ff28}', + ['\u{ff48}', '\0', '\0']), ('\u{ff29}', ['\u{ff49}', '\0', '\0']), ('\u{ff2a}', ['\u{ff4a}', + '\0', '\0']), ('\u{ff2b}', ['\u{ff4b}', '\0', '\0']), ('\u{ff2c}', ['\u{ff4c}', '\0', + '\0']), ('\u{ff2d}', ['\u{ff4d}', '\0', '\0']), ('\u{ff2e}', ['\u{ff4e}', '\0', '\0']), + ('\u{ff2f}', ['\u{ff4f}', '\0', '\0']), ('\u{ff30}', ['\u{ff50}', '\0', '\0']), ('\u{ff31}', + ['\u{ff51}', '\0', '\0']), ('\u{ff32}', ['\u{ff52}', '\0', '\0']), ('\u{ff33}', ['\u{ff53}', + '\0', '\0']), ('\u{ff34}', ['\u{ff54}', '\0', '\0']), ('\u{ff35}', ['\u{ff55}', '\0', + '\0']), ('\u{ff36}', ['\u{ff56}', '\0', '\0']), ('\u{ff37}', ['\u{ff57}', '\0', '\0']), + ('\u{ff38}', ['\u{ff58}', '\0', '\0']), ('\u{ff39}', ['\u{ff59}', '\0', '\0']), ('\u{ff3a}', + ['\u{ff5a}', '\0', '\0']), ('\u{10400}', ['\u{10428}', '\0', '\0']), ('\u{10401}', + ['\u{10429}', '\0', '\0']), ('\u{10402}', ['\u{1042a}', '\0', '\0']), ('\u{10403}', + ['\u{1042b}', '\0', '\0']), ('\u{10404}', ['\u{1042c}', '\0', '\0']), ('\u{10405}', + ['\u{1042d}', '\0', '\0']), ('\u{10406}', ['\u{1042e}', '\0', '\0']), ('\u{10407}', + ['\u{1042f}', '\0', '\0']), ('\u{10408}', ['\u{10430}', 
'\0', '\0']), ('\u{10409}', + ['\u{10431}', '\0', '\0']), ('\u{1040a}', ['\u{10432}', '\0', '\0']), ('\u{1040b}', + ['\u{10433}', '\0', '\0']), ('\u{1040c}', ['\u{10434}', '\0', '\0']), ('\u{1040d}', + ['\u{10435}', '\0', '\0']), ('\u{1040e}', ['\u{10436}', '\0', '\0']), ('\u{1040f}', + ['\u{10437}', '\0', '\0']), ('\u{10410}', ['\u{10438}', '\0', '\0']), ('\u{10411}', + ['\u{10439}', '\0', '\0']), ('\u{10412}', ['\u{1043a}', '\0', '\0']), ('\u{10413}', + ['\u{1043b}', '\0', '\0']), ('\u{10414}', ['\u{1043c}', '\0', '\0']), ('\u{10415}', + ['\u{1043d}', '\0', '\0']), ('\u{10416}', ['\u{1043e}', '\0', '\0']), ('\u{10417}', + ['\u{1043f}', '\0', '\0']), ('\u{10418}', ['\u{10440}', '\0', '\0']), ('\u{10419}', + ['\u{10441}', '\0', '\0']), ('\u{1041a}', ['\u{10442}', '\0', '\0']), ('\u{1041b}', + ['\u{10443}', '\0', '\0']), ('\u{1041c}', ['\u{10444}', '\0', '\0']), ('\u{1041d}', + ['\u{10445}', '\0', '\0']), ('\u{1041e}', ['\u{10446}', '\0', '\0']), ('\u{1041f}', + ['\u{10447}', '\0', '\0']), ('\u{10420}', ['\u{10448}', '\0', '\0']), ('\u{10421}', + ['\u{10449}', '\0', '\0']), ('\u{10422}', ['\u{1044a}', '\0', '\0']), ('\u{10423}', + ['\u{1044b}', '\0', '\0']), ('\u{10424}', ['\u{1044c}', '\0', '\0']), ('\u{10425}', + ['\u{1044d}', '\0', '\0']), ('\u{10426}', ['\u{1044e}', '\0', '\0']), ('\u{10427}', + ['\u{1044f}', '\0', '\0']), ('\u{104b0}', ['\u{104d8}', '\0', '\0']), ('\u{104b1}', + ['\u{104d9}', '\0', '\0']), ('\u{104b2}', ['\u{104da}', '\0', '\0']), ('\u{104b3}', + ['\u{104db}', '\0', '\0']), ('\u{104b4}', ['\u{104dc}', '\0', '\0']), ('\u{104b5}', + ['\u{104dd}', '\0', '\0']), ('\u{104b6}', ['\u{104de}', '\0', '\0']), ('\u{104b7}', + ['\u{104df}', '\0', '\0']), ('\u{104b8}', ['\u{104e0}', '\0', '\0']), ('\u{104b9}', + ['\u{104e1}', '\0', '\0']), ('\u{104ba}', ['\u{104e2}', '\0', '\0']), ('\u{104bb}', + ['\u{104e3}', '\0', '\0']), ('\u{104bc}', ['\u{104e4}', '\0', '\0']), ('\u{104bd}', + ['\u{104e5}', '\0', '\0']), ('\u{104be}', ['\u{104e6}', '\0', '\0']), 
('\u{104bf}', + ['\u{104e7}', '\0', '\0']), ('\u{104c0}', ['\u{104e8}', '\0', '\0']), ('\u{104c1}', + ['\u{104e9}', '\0', '\0']), ('\u{104c2}', ['\u{104ea}', '\0', '\0']), ('\u{104c3}', + ['\u{104eb}', '\0', '\0']), ('\u{104c4}', ['\u{104ec}', '\0', '\0']), ('\u{104c5}', + ['\u{104ed}', '\0', '\0']), ('\u{104c6}', ['\u{104ee}', '\0', '\0']), ('\u{104c7}', + ['\u{104ef}', '\0', '\0']), ('\u{104c8}', ['\u{104f0}', '\0', '\0']), ('\u{104c9}', + ['\u{104f1}', '\0', '\0']), ('\u{104ca}', ['\u{104f2}', '\0', '\0']), ('\u{104cb}', + ['\u{104f3}', '\0', '\0']), ('\u{104cc}', ['\u{104f4}', '\0', '\0']), ('\u{104cd}', + ['\u{104f5}', '\0', '\0']), ('\u{104ce}', ['\u{104f6}', '\0', '\0']), ('\u{104cf}', + ['\u{104f7}', '\0', '\0']), ('\u{104d0}', ['\u{104f8}', '\0', '\0']), ('\u{104d1}', + ['\u{104f9}', '\0', '\0']), ('\u{104d2}', ['\u{104fa}', '\0', '\0']), ('\u{104d3}', + ['\u{104fb}', '\0', '\0']), ('\u{10c80}', ['\u{10cc0}', '\0', '\0']), ('\u{10c81}', + ['\u{10cc1}', '\0', '\0']), ('\u{10c82}', ['\u{10cc2}', '\0', '\0']), ('\u{10c83}', + ['\u{10cc3}', '\0', '\0']), ('\u{10c84}', ['\u{10cc4}', '\0', '\0']), ('\u{10c85}', + ['\u{10cc5}', '\0', '\0']), ('\u{10c86}', ['\u{10cc6}', '\0', '\0']), ('\u{10c87}', + ['\u{10cc7}', '\0', '\0']), ('\u{10c88}', ['\u{10cc8}', '\0', '\0']), ('\u{10c89}', + ['\u{10cc9}', '\0', '\0']), ('\u{10c8a}', ['\u{10cca}', '\0', '\0']), ('\u{10c8b}', + ['\u{10ccb}', '\0', '\0']), ('\u{10c8c}', ['\u{10ccc}', '\0', '\0']), ('\u{10c8d}', + ['\u{10ccd}', '\0', '\0']), ('\u{10c8e}', ['\u{10cce}', '\0', '\0']), ('\u{10c8f}', + ['\u{10ccf}', '\0', '\0']), ('\u{10c90}', ['\u{10cd0}', '\0', '\0']), ('\u{10c91}', + ['\u{10cd1}', '\0', '\0']), ('\u{10c92}', ['\u{10cd2}', '\0', '\0']), ('\u{10c93}', + ['\u{10cd3}', '\0', '\0']), ('\u{10c94}', ['\u{10cd4}', '\0', '\0']), ('\u{10c95}', + ['\u{10cd5}', '\0', '\0']), ('\u{10c96}', ['\u{10cd6}', '\0', '\0']), ('\u{10c97}', + ['\u{10cd7}', '\0', '\0']), ('\u{10c98}', ['\u{10cd8}', '\0', '\0']), ('\u{10c99}', + 
['\u{10cd9}', '\0', '\0']), ('\u{10c9a}', ['\u{10cda}', '\0', '\0']), ('\u{10c9b}', + ['\u{10cdb}', '\0', '\0']), ('\u{10c9c}', ['\u{10cdc}', '\0', '\0']), ('\u{10c9d}', + ['\u{10cdd}', '\0', '\0']), ('\u{10c9e}', ['\u{10cde}', '\0', '\0']), ('\u{10c9f}', + ['\u{10cdf}', '\0', '\0']), ('\u{10ca0}', ['\u{10ce0}', '\0', '\0']), ('\u{10ca1}', + ['\u{10ce1}', '\0', '\0']), ('\u{10ca2}', ['\u{10ce2}', '\0', '\0']), ('\u{10ca3}', + ['\u{10ce3}', '\0', '\0']), ('\u{10ca4}', ['\u{10ce4}', '\0', '\0']), ('\u{10ca5}', + ['\u{10ce5}', '\0', '\0']), ('\u{10ca6}', ['\u{10ce6}', '\0', '\0']), ('\u{10ca7}', + ['\u{10ce7}', '\0', '\0']), ('\u{10ca8}', ['\u{10ce8}', '\0', '\0']), ('\u{10ca9}', + ['\u{10ce9}', '\0', '\0']), ('\u{10caa}', ['\u{10cea}', '\0', '\0']), ('\u{10cab}', + ['\u{10ceb}', '\0', '\0']), ('\u{10cac}', ['\u{10cec}', '\0', '\0']), ('\u{10cad}', + ['\u{10ced}', '\0', '\0']), ('\u{10cae}', ['\u{10cee}', '\0', '\0']), ('\u{10caf}', + ['\u{10cef}', '\0', '\0']), ('\u{10cb0}', ['\u{10cf0}', '\0', '\0']), ('\u{10cb1}', + ['\u{10cf1}', '\0', '\0']), ('\u{10cb2}', ['\u{10cf2}', '\0', '\0']), ('\u{118a0}', + ['\u{118c0}', '\0', '\0']), ('\u{118a1}', ['\u{118c1}', '\0', '\0']), ('\u{118a2}', + ['\u{118c2}', '\0', '\0']), ('\u{118a3}', ['\u{118c3}', '\0', '\0']), ('\u{118a4}', + ['\u{118c4}', '\0', '\0']), ('\u{118a5}', ['\u{118c5}', '\0', '\0']), ('\u{118a6}', + ['\u{118c6}', '\0', '\0']), ('\u{118a7}', ['\u{118c7}', '\0', '\0']), ('\u{118a8}', + ['\u{118c8}', '\0', '\0']), ('\u{118a9}', ['\u{118c9}', '\0', '\0']), ('\u{118aa}', + ['\u{118ca}', '\0', '\0']), ('\u{118ab}', ['\u{118cb}', '\0', '\0']), ('\u{118ac}', + ['\u{118cc}', '\0', '\0']), ('\u{118ad}', ['\u{118cd}', '\0', '\0']), ('\u{118ae}', + ['\u{118ce}', '\0', '\0']), ('\u{118af}', ['\u{118cf}', '\0', '\0']), ('\u{118b0}', + ['\u{118d0}', '\0', '\0']), ('\u{118b1}', ['\u{118d1}', '\0', '\0']), ('\u{118b2}', + ['\u{118d2}', '\0', '\0']), ('\u{118b3}', ['\u{118d3}', '\0', '\0']), ('\u{118b4}', + ['\u{118d4}', '\0', 
'\0']), ('\u{118b5}', ['\u{118d5}', '\0', '\0']), ('\u{118b6}', + ['\u{118d6}', '\0', '\0']), ('\u{118b7}', ['\u{118d7}', '\0', '\0']), ('\u{118b8}', + ['\u{118d8}', '\0', '\0']), ('\u{118b9}', ['\u{118d9}', '\0', '\0']), ('\u{118ba}', + ['\u{118da}', '\0', '\0']), ('\u{118bb}', ['\u{118db}', '\0', '\0']), ('\u{118bc}', + ['\u{118dc}', '\0', '\0']), ('\u{118bd}', ['\u{118dd}', '\0', '\0']), ('\u{118be}', + ['\u{118de}', '\0', '\0']), ('\u{118bf}', ['\u{118df}', '\0', '\0']), ('\u{16e40}', + ['\u{16e60}', '\0', '\0']), ('\u{16e41}', ['\u{16e61}', '\0', '\0']), ('\u{16e42}', + ['\u{16e62}', '\0', '\0']), ('\u{16e43}', ['\u{16e63}', '\0', '\0']), ('\u{16e44}', + ['\u{16e64}', '\0', '\0']), ('\u{16e45}', ['\u{16e65}', '\0', '\0']), ('\u{16e46}', + ['\u{16e66}', '\0', '\0']), ('\u{16e47}', ['\u{16e67}', '\0', '\0']), ('\u{16e48}', + ['\u{16e68}', '\0', '\0']), ('\u{16e49}', ['\u{16e69}', '\0', '\0']), ('\u{16e4a}', + ['\u{16e6a}', '\0', '\0']), ('\u{16e4b}', ['\u{16e6b}', '\0', '\0']), ('\u{16e4c}', + ['\u{16e6c}', '\0', '\0']), ('\u{16e4d}', ['\u{16e6d}', '\0', '\0']), ('\u{16e4e}', + ['\u{16e6e}', '\0', '\0']), ('\u{16e4f}', ['\u{16e6f}', '\0', '\0']), ('\u{16e50}', + ['\u{16e70}', '\0', '\0']), ('\u{16e51}', ['\u{16e71}', '\0', '\0']), ('\u{16e52}', + ['\u{16e72}', '\0', '\0']), ('\u{16e53}', ['\u{16e73}', '\0', '\0']), ('\u{16e54}', + ['\u{16e74}', '\0', '\0']), ('\u{16e55}', ['\u{16e75}', '\0', '\0']), ('\u{16e56}', + ['\u{16e76}', '\0', '\0']), ('\u{16e57}', ['\u{16e77}', '\0', '\0']), ('\u{16e58}', + ['\u{16e78}', '\0', '\0']), ('\u{16e59}', ['\u{16e79}', '\0', '\0']), ('\u{16e5a}', + ['\u{16e7a}', '\0', '\0']), ('\u{16e5b}', ['\u{16e7b}', '\0', '\0']), ('\u{16e5c}', + ['\u{16e7c}', '\0', '\0']), ('\u{16e5d}', ['\u{16e7d}', '\0', '\0']), ('\u{16e5e}', + ['\u{16e7e}', '\0', '\0']), ('\u{16e5f}', ['\u{16e7f}', '\0', '\0']), ('\u{1e900}', + ['\u{1e922}', '\0', '\0']), ('\u{1e901}', ['\u{1e923}', '\0', '\0']), ('\u{1e902}', + ['\u{1e924}', '\0', '\0']), ('\u{1e903}', 
['\u{1e925}', '\0', '\0']), ('\u{1e904}', + ['\u{1e926}', '\0', '\0']), ('\u{1e905}', ['\u{1e927}', '\0', '\0']), ('\u{1e906}', + ['\u{1e928}', '\0', '\0']), ('\u{1e907}', ['\u{1e929}', '\0', '\0']), ('\u{1e908}', + ['\u{1e92a}', '\0', '\0']), ('\u{1e909}', ['\u{1e92b}', '\0', '\0']), ('\u{1e90a}', + ['\u{1e92c}', '\0', '\0']), ('\u{1e90b}', ['\u{1e92d}', '\0', '\0']), ('\u{1e90c}', + ['\u{1e92e}', '\0', '\0']), ('\u{1e90d}', ['\u{1e92f}', '\0', '\0']), ('\u{1e90e}', + ['\u{1e930}', '\0', '\0']), ('\u{1e90f}', ['\u{1e931}', '\0', '\0']), ('\u{1e910}', + ['\u{1e932}', '\0', '\0']), ('\u{1e911}', ['\u{1e933}', '\0', '\0']), ('\u{1e912}', + ['\u{1e934}', '\0', '\0']), ('\u{1e913}', ['\u{1e935}', '\0', '\0']), ('\u{1e914}', + ['\u{1e936}', '\0', '\0']), ('\u{1e915}', ['\u{1e937}', '\0', '\0']), ('\u{1e916}', + ['\u{1e938}', '\0', '\0']), ('\u{1e917}', ['\u{1e939}', '\0', '\0']), ('\u{1e918}', + ['\u{1e93a}', '\0', '\0']), ('\u{1e919}', ['\u{1e93b}', '\0', '\0']), ('\u{1e91a}', + ['\u{1e93c}', '\0', '\0']), ('\u{1e91b}', ['\u{1e93d}', '\0', '\0']), ('\u{1e91c}', + ['\u{1e93e}', '\0', '\0']), ('\u{1e91d}', ['\u{1e93f}', '\0', '\0']), ('\u{1e91e}', + ['\u{1e940}', '\0', '\0']), ('\u{1e91f}', ['\u{1e941}', '\0', '\0']), ('\u{1e920}', + ['\u{1e942}', '\0', '\0']), ('\u{1e921}', ['\u{1e943}', '\0', '\0']) + ]; + + const to_uppercase_table: &[(char, [char; 3])] = &[ + ('\u{61}', ['\u{41}', '\0', '\0']), ('\u{62}', ['\u{42}', '\0', '\0']), ('\u{63}', + ['\u{43}', '\0', '\0']), ('\u{64}', ['\u{44}', '\0', '\0']), ('\u{65}', ['\u{45}', '\0', + '\0']), ('\u{66}', ['\u{46}', '\0', '\0']), ('\u{67}', ['\u{47}', '\0', '\0']), ('\u{68}', + ['\u{48}', '\0', '\0']), ('\u{69}', ['\u{49}', '\0', '\0']), ('\u{6a}', ['\u{4a}', '\0', + '\0']), ('\u{6b}', ['\u{4b}', '\0', '\0']), ('\u{6c}', ['\u{4c}', '\0', '\0']), ('\u{6d}', + ['\u{4d}', '\0', '\0']), ('\u{6e}', ['\u{4e}', '\0', '\0']), ('\u{6f}', ['\u{4f}', '\0', + '\0']), ('\u{70}', ['\u{50}', '\0', '\0']), ('\u{71}', ['\u{51}', '\0', 
'\0']), ('\u{72}', + ['\u{52}', '\0', '\0']), ('\u{73}', ['\u{53}', '\0', '\0']), ('\u{74}', ['\u{54}', '\0', + '\0']), ('\u{75}', ['\u{55}', '\0', '\0']), ('\u{76}', ['\u{56}', '\0', '\0']), ('\u{77}', + ['\u{57}', '\0', '\0']), ('\u{78}', ['\u{58}', '\0', '\0']), ('\u{79}', ['\u{59}', '\0', + '\0']), ('\u{7a}', ['\u{5a}', '\0', '\0']), ('\u{b5}', ['\u{39c}', '\0', '\0']), ('\u{df}', + ['\u{53}', '\u{53}', '\0']), ('\u{e0}', ['\u{c0}', '\0', '\0']), ('\u{e1}', ['\u{c1}', '\0', + '\0']), ('\u{e2}', ['\u{c2}', '\0', '\0']), ('\u{e3}', ['\u{c3}', '\0', '\0']), ('\u{e4}', + ['\u{c4}', '\0', '\0']), ('\u{e5}', ['\u{c5}', '\0', '\0']), ('\u{e6}', ['\u{c6}', '\0', + '\0']), ('\u{e7}', ['\u{c7}', '\0', '\0']), ('\u{e8}', ['\u{c8}', '\0', '\0']), ('\u{e9}', + ['\u{c9}', '\0', '\0']), ('\u{ea}', ['\u{ca}', '\0', '\0']), ('\u{eb}', ['\u{cb}', '\0', + '\0']), ('\u{ec}', ['\u{cc}', '\0', '\0']), ('\u{ed}', ['\u{cd}', '\0', '\0']), ('\u{ee}', + ['\u{ce}', '\0', '\0']), ('\u{ef}', ['\u{cf}', '\0', '\0']), ('\u{f0}', ['\u{d0}', '\0', + '\0']), ('\u{f1}', ['\u{d1}', '\0', '\0']), ('\u{f2}', ['\u{d2}', '\0', '\0']), ('\u{f3}', + ['\u{d3}', '\0', '\0']), ('\u{f4}', ['\u{d4}', '\0', '\0']), ('\u{f5}', ['\u{d5}', '\0', + '\0']), ('\u{f6}', ['\u{d6}', '\0', '\0']), ('\u{f8}', ['\u{d8}', '\0', '\0']), ('\u{f9}', + ['\u{d9}', '\0', '\0']), ('\u{fa}', ['\u{da}', '\0', '\0']), ('\u{fb}', ['\u{db}', '\0', + '\0']), ('\u{fc}', ['\u{dc}', '\0', '\0']), ('\u{fd}', ['\u{dd}', '\0', '\0']), ('\u{fe}', + ['\u{de}', '\0', '\0']), ('\u{ff}', ['\u{178}', '\0', '\0']), ('\u{101}', ['\u{100}', '\0', + '\0']), ('\u{103}', ['\u{102}', '\0', '\0']), ('\u{105}', ['\u{104}', '\0', '\0']), + ('\u{107}', ['\u{106}', '\0', '\0']), ('\u{109}', ['\u{108}', '\0', '\0']), ('\u{10b}', + ['\u{10a}', '\0', '\0']), ('\u{10d}', ['\u{10c}', '\0', '\0']), ('\u{10f}', ['\u{10e}', + '\0', '\0']), ('\u{111}', ['\u{110}', '\0', '\0']), ('\u{113}', ['\u{112}', '\0', '\0']), + ('\u{115}', ['\u{114}', '\0', '\0']), ('\u{117}', 
['\u{116}', '\0', '\0']), ('\u{119}', + ['\u{118}', '\0', '\0']), ('\u{11b}', ['\u{11a}', '\0', '\0']), ('\u{11d}', ['\u{11c}', + '\0', '\0']), ('\u{11f}', ['\u{11e}', '\0', '\0']), ('\u{121}', ['\u{120}', '\0', '\0']), + ('\u{123}', ['\u{122}', '\0', '\0']), ('\u{125}', ['\u{124}', '\0', '\0']), ('\u{127}', + ['\u{126}', '\0', '\0']), ('\u{129}', ['\u{128}', '\0', '\0']), ('\u{12b}', ['\u{12a}', + '\0', '\0']), ('\u{12d}', ['\u{12c}', '\0', '\0']), ('\u{12f}', ['\u{12e}', '\0', '\0']), + ('\u{131}', ['\u{49}', '\0', '\0']), ('\u{133}', ['\u{132}', '\0', '\0']), ('\u{135}', + ['\u{134}', '\0', '\0']), ('\u{137}', ['\u{136}', '\0', '\0']), ('\u{13a}', ['\u{139}', + '\0', '\0']), ('\u{13c}', ['\u{13b}', '\0', '\0']), ('\u{13e}', ['\u{13d}', '\0', '\0']), + ('\u{140}', ['\u{13f}', '\0', '\0']), ('\u{142}', ['\u{141}', '\0', '\0']), ('\u{144}', + ['\u{143}', '\0', '\0']), ('\u{146}', ['\u{145}', '\0', '\0']), ('\u{148}', ['\u{147}', + '\0', '\0']), ('\u{149}', ['\u{2bc}', '\u{4e}', '\0']), ('\u{14b}', ['\u{14a}', '\0', + '\0']), ('\u{14d}', ['\u{14c}', '\0', '\0']), ('\u{14f}', ['\u{14e}', '\0', '\0']), + ('\u{151}', ['\u{150}', '\0', '\0']), ('\u{153}', ['\u{152}', '\0', '\0']), ('\u{155}', + ['\u{154}', '\0', '\0']), ('\u{157}', ['\u{156}', '\0', '\0']), ('\u{159}', ['\u{158}', + '\0', '\0']), ('\u{15b}', ['\u{15a}', '\0', '\0']), ('\u{15d}', ['\u{15c}', '\0', '\0']), + ('\u{15f}', ['\u{15e}', '\0', '\0']), ('\u{161}', ['\u{160}', '\0', '\0']), ('\u{163}', + ['\u{162}', '\0', '\0']), ('\u{165}', ['\u{164}', '\0', '\0']), ('\u{167}', ['\u{166}', + '\0', '\0']), ('\u{169}', ['\u{168}', '\0', '\0']), ('\u{16b}', ['\u{16a}', '\0', '\0']), + ('\u{16d}', ['\u{16c}', '\0', '\0']), ('\u{16f}', ['\u{16e}', '\0', '\0']), ('\u{171}', + ['\u{170}', '\0', '\0']), ('\u{173}', ['\u{172}', '\0', '\0']), ('\u{175}', ['\u{174}', + '\0', '\0']), ('\u{177}', ['\u{176}', '\0', '\0']), ('\u{17a}', ['\u{179}', '\0', '\0']), + ('\u{17c}', ['\u{17b}', '\0', '\0']), ('\u{17e}', ['\u{17d}', 
'\0', '\0']), ('\u{17f}', + ['\u{53}', '\0', '\0']), ('\u{180}', ['\u{243}', '\0', '\0']), ('\u{183}', ['\u{182}', '\0', + '\0']), ('\u{185}', ['\u{184}', '\0', '\0']), ('\u{188}', ['\u{187}', '\0', '\0']), + ('\u{18c}', ['\u{18b}', '\0', '\0']), ('\u{192}', ['\u{191}', '\0', '\0']), ('\u{195}', + ['\u{1f6}', '\0', '\0']), ('\u{199}', ['\u{198}', '\0', '\0']), ('\u{19a}', ['\u{23d}', + '\0', '\0']), ('\u{19e}', ['\u{220}', '\0', '\0']), ('\u{1a1}', ['\u{1a0}', '\0', '\0']), + ('\u{1a3}', ['\u{1a2}', '\0', '\0']), ('\u{1a5}', ['\u{1a4}', '\0', '\0']), ('\u{1a8}', + ['\u{1a7}', '\0', '\0']), ('\u{1ad}', ['\u{1ac}', '\0', '\0']), ('\u{1b0}', ['\u{1af}', + '\0', '\0']), ('\u{1b4}', ['\u{1b3}', '\0', '\0']), ('\u{1b6}', ['\u{1b5}', '\0', '\0']), + ('\u{1b9}', ['\u{1b8}', '\0', '\0']), ('\u{1bd}', ['\u{1bc}', '\0', '\0']), ('\u{1bf}', + ['\u{1f7}', '\0', '\0']), ('\u{1c5}', ['\u{1c4}', '\0', '\0']), ('\u{1c6}', ['\u{1c4}', + '\0', '\0']), ('\u{1c8}', ['\u{1c7}', '\0', '\0']), ('\u{1c9}', ['\u{1c7}', '\0', '\0']), + ('\u{1cb}', ['\u{1ca}', '\0', '\0']), ('\u{1cc}', ['\u{1ca}', '\0', '\0']), ('\u{1ce}', + ['\u{1cd}', '\0', '\0']), ('\u{1d0}', ['\u{1cf}', '\0', '\0']), ('\u{1d2}', ['\u{1d1}', + '\0', '\0']), ('\u{1d4}', ['\u{1d3}', '\0', '\0']), ('\u{1d6}', ['\u{1d5}', '\0', '\0']), + ('\u{1d8}', ['\u{1d7}', '\0', '\0']), ('\u{1da}', ['\u{1d9}', '\0', '\0']), ('\u{1dc}', + ['\u{1db}', '\0', '\0']), ('\u{1dd}', ['\u{18e}', '\0', '\0']), ('\u{1df}', ['\u{1de}', + '\0', '\0']), ('\u{1e1}', ['\u{1e0}', '\0', '\0']), ('\u{1e3}', ['\u{1e2}', '\0', '\0']), + ('\u{1e5}', ['\u{1e4}', '\0', '\0']), ('\u{1e7}', ['\u{1e6}', '\0', '\0']), ('\u{1e9}', + ['\u{1e8}', '\0', '\0']), ('\u{1eb}', ['\u{1ea}', '\0', '\0']), ('\u{1ed}', ['\u{1ec}', + '\0', '\0']), ('\u{1ef}', ['\u{1ee}', '\0', '\0']), ('\u{1f0}', ['\u{4a}', '\u{30c}', + '\0']), ('\u{1f2}', ['\u{1f1}', '\0', '\0']), ('\u{1f3}', ['\u{1f1}', '\0', '\0']), + ('\u{1f5}', ['\u{1f4}', '\0', '\0']), ('\u{1f9}', ['\u{1f8}', '\0', '\0']), 
('\u{1fb}', + ['\u{1fa}', '\0', '\0']), ('\u{1fd}', ['\u{1fc}', '\0', '\0']), ('\u{1ff}', ['\u{1fe}', + '\0', '\0']), ('\u{201}', ['\u{200}', '\0', '\0']), ('\u{203}', ['\u{202}', '\0', '\0']), + ('\u{205}', ['\u{204}', '\0', '\0']), ('\u{207}', ['\u{206}', '\0', '\0']), ('\u{209}', + ['\u{208}', '\0', '\0']), ('\u{20b}', ['\u{20a}', '\0', '\0']), ('\u{20d}', ['\u{20c}', + '\0', '\0']), ('\u{20f}', ['\u{20e}', '\0', '\0']), ('\u{211}', ['\u{210}', '\0', '\0']), + ('\u{213}', ['\u{212}', '\0', '\0']), ('\u{215}', ['\u{214}', '\0', '\0']), ('\u{217}', + ['\u{216}', '\0', '\0']), ('\u{219}', ['\u{218}', '\0', '\0']), ('\u{21b}', ['\u{21a}', + '\0', '\0']), ('\u{21d}', ['\u{21c}', '\0', '\0']), ('\u{21f}', ['\u{21e}', '\0', '\0']), + ('\u{223}', ['\u{222}', '\0', '\0']), ('\u{225}', ['\u{224}', '\0', '\0']), ('\u{227}', + ['\u{226}', '\0', '\0']), ('\u{229}', ['\u{228}', '\0', '\0']), ('\u{22b}', ['\u{22a}', + '\0', '\0']), ('\u{22d}', ['\u{22c}', '\0', '\0']), ('\u{22f}', ['\u{22e}', '\0', '\0']), + ('\u{231}', ['\u{230}', '\0', '\0']), ('\u{233}', ['\u{232}', '\0', '\0']), ('\u{23c}', + ['\u{23b}', '\0', '\0']), ('\u{23f}', ['\u{2c7e}', '\0', '\0']), ('\u{240}', ['\u{2c7f}', + '\0', '\0']), ('\u{242}', ['\u{241}', '\0', '\0']), ('\u{247}', ['\u{246}', '\0', '\0']), + ('\u{249}', ['\u{248}', '\0', '\0']), ('\u{24b}', ['\u{24a}', '\0', '\0']), ('\u{24d}', + ['\u{24c}', '\0', '\0']), ('\u{24f}', ['\u{24e}', '\0', '\0']), ('\u{250}', ['\u{2c6f}', + '\0', '\0']), ('\u{251}', ['\u{2c6d}', '\0', '\0']), ('\u{252}', ['\u{2c70}', '\0', '\0']), + ('\u{253}', ['\u{181}', '\0', '\0']), ('\u{254}', ['\u{186}', '\0', '\0']), ('\u{256}', + ['\u{189}', '\0', '\0']), ('\u{257}', ['\u{18a}', '\0', '\0']), ('\u{259}', ['\u{18f}', + '\0', '\0']), ('\u{25b}', ['\u{190}', '\0', '\0']), ('\u{25c}', ['\u{a7ab}', '\0', '\0']), + ('\u{260}', ['\u{193}', '\0', '\0']), ('\u{261}', ['\u{a7ac}', '\0', '\0']), ('\u{263}', + ['\u{194}', '\0', '\0']), ('\u{265}', ['\u{a78d}', '\0', '\0']), 
('\u{266}', ['\u{a7aa}', + '\0', '\0']), ('\u{268}', ['\u{197}', '\0', '\0']), ('\u{269}', ['\u{196}', '\0', '\0']), + ('\u{26a}', ['\u{a7ae}', '\0', '\0']), ('\u{26b}', ['\u{2c62}', '\0', '\0']), ('\u{26c}', + ['\u{a7ad}', '\0', '\0']), ('\u{26f}', ['\u{19c}', '\0', '\0']), ('\u{271}', ['\u{2c6e}', + '\0', '\0']), ('\u{272}', ['\u{19d}', '\0', '\0']), ('\u{275}', ['\u{19f}', '\0', '\0']), + ('\u{27d}', ['\u{2c64}', '\0', '\0']), ('\u{280}', ['\u{1a6}', '\0', '\0']), ('\u{283}', + ['\u{1a9}', '\0', '\0']), ('\u{287}', ['\u{a7b1}', '\0', '\0']), ('\u{288}', ['\u{1ae}', + '\0', '\0']), ('\u{289}', ['\u{244}', '\0', '\0']), ('\u{28a}', ['\u{1b1}', '\0', '\0']), + ('\u{28b}', ['\u{1b2}', '\0', '\0']), ('\u{28c}', ['\u{245}', '\0', '\0']), ('\u{292}', + ['\u{1b7}', '\0', '\0']), ('\u{29d}', ['\u{a7b2}', '\0', '\0']), ('\u{29e}', ['\u{a7b0}', + '\0', '\0']), ('\u{345}', ['\u{399}', '\0', '\0']), ('\u{371}', ['\u{370}', '\0', '\0']), + ('\u{373}', ['\u{372}', '\0', '\0']), ('\u{377}', ['\u{376}', '\0', '\0']), ('\u{37b}', + ['\u{3fd}', '\0', '\0']), ('\u{37c}', ['\u{3fe}', '\0', '\0']), ('\u{37d}', ['\u{3ff}', + '\0', '\0']), ('\u{390}', ['\u{399}', '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\0', + '\0']), ('\u{3ad}', ['\u{388}', '\0', '\0']), ('\u{3ae}', ['\u{389}', '\0', '\0']), + ('\u{3af}', ['\u{38a}', '\0', '\0']), ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']), + ('\u{3b1}', ['\u{391}', '\0', '\0']), ('\u{3b2}', ['\u{392}', '\0', '\0']), ('\u{3b3}', + ['\u{393}', '\0', '\0']), ('\u{3b4}', ['\u{394}', '\0', '\0']), ('\u{3b5}', ['\u{395}', + '\0', '\0']), ('\u{3b6}', ['\u{396}', '\0', '\0']), ('\u{3b7}', ['\u{397}', '\0', '\0']), + ('\u{3b8}', ['\u{398}', '\0', '\0']), ('\u{3b9}', ['\u{399}', '\0', '\0']), ('\u{3ba}', + ['\u{39a}', '\0', '\0']), ('\u{3bb}', ['\u{39b}', '\0', '\0']), ('\u{3bc}', ['\u{39c}', + '\0', '\0']), ('\u{3bd}', ['\u{39d}', '\0', '\0']), ('\u{3be}', ['\u{39e}', '\0', '\0']), + ('\u{3bf}', ['\u{39f}', '\0', '\0']), ('\u{3c0}', ['\u{3a0}', 
'\0', '\0']), ('\u{3c1}', + ['\u{3a1}', '\0', '\0']), ('\u{3c2}', ['\u{3a3}', '\0', '\0']), ('\u{3c3}', ['\u{3a3}', + '\0', '\0']), ('\u{3c4}', ['\u{3a4}', '\0', '\0']), ('\u{3c5}', ['\u{3a5}', '\0', '\0']), + ('\u{3c6}', ['\u{3a6}', '\0', '\0']), ('\u{3c7}', ['\u{3a7}', '\0', '\0']), ('\u{3c8}', + ['\u{3a8}', '\0', '\0']), ('\u{3c9}', ['\u{3a9}', '\0', '\0']), ('\u{3ca}', ['\u{3aa}', + '\0', '\0']), ('\u{3cb}', ['\u{3ab}', '\0', '\0']), ('\u{3cc}', ['\u{38c}', '\0', '\0']), + ('\u{3cd}', ['\u{38e}', '\0', '\0']), ('\u{3ce}', ['\u{38f}', '\0', '\0']), ('\u{3d0}', + ['\u{392}', '\0', '\0']), ('\u{3d1}', ['\u{398}', '\0', '\0']), ('\u{3d5}', ['\u{3a6}', + '\0', '\0']), ('\u{3d6}', ['\u{3a0}', '\0', '\0']), ('\u{3d7}', ['\u{3cf}', '\0', '\0']), + ('\u{3d9}', ['\u{3d8}', '\0', '\0']), ('\u{3db}', ['\u{3da}', '\0', '\0']), ('\u{3dd}', + ['\u{3dc}', '\0', '\0']), ('\u{3df}', ['\u{3de}', '\0', '\0']), ('\u{3e1}', ['\u{3e0}', + '\0', '\0']), ('\u{3e3}', ['\u{3e2}', '\0', '\0']), ('\u{3e5}', ['\u{3e4}', '\0', '\0']), + ('\u{3e7}', ['\u{3e6}', '\0', '\0']), ('\u{3e9}', ['\u{3e8}', '\0', '\0']), ('\u{3eb}', + ['\u{3ea}', '\0', '\0']), ('\u{3ed}', ['\u{3ec}', '\0', '\0']), ('\u{3ef}', ['\u{3ee}', + '\0', '\0']), ('\u{3f0}', ['\u{39a}', '\0', '\0']), ('\u{3f1}', ['\u{3a1}', '\0', '\0']), + ('\u{3f2}', ['\u{3f9}', '\0', '\0']), ('\u{3f3}', ['\u{37f}', '\0', '\0']), ('\u{3f5}', + ['\u{395}', '\0', '\0']), ('\u{3f8}', ['\u{3f7}', '\0', '\0']), ('\u{3fb}', ['\u{3fa}', + '\0', '\0']), ('\u{430}', ['\u{410}', '\0', '\0']), ('\u{431}', ['\u{411}', '\0', '\0']), + ('\u{432}', ['\u{412}', '\0', '\0']), ('\u{433}', ['\u{413}', '\0', '\0']), ('\u{434}', + ['\u{414}', '\0', '\0']), ('\u{435}', ['\u{415}', '\0', '\0']), ('\u{436}', ['\u{416}', + '\0', '\0']), ('\u{437}', ['\u{417}', '\0', '\0']), ('\u{438}', ['\u{418}', '\0', '\0']), + ('\u{439}', ['\u{419}', '\0', '\0']), ('\u{43a}', ['\u{41a}', '\0', '\0']), ('\u{43b}', + ['\u{41b}', '\0', '\0']), ('\u{43c}', ['\u{41c}', '\0', '\0']), 
('\u{43d}', ['\u{41d}', + '\0', '\0']), ('\u{43e}', ['\u{41e}', '\0', '\0']), ('\u{43f}', ['\u{41f}', '\0', '\0']), + ('\u{440}', ['\u{420}', '\0', '\0']), ('\u{441}', ['\u{421}', '\0', '\0']), ('\u{442}', + ['\u{422}', '\0', '\0']), ('\u{443}', ['\u{423}', '\0', '\0']), ('\u{444}', ['\u{424}', + '\0', '\0']), ('\u{445}', ['\u{425}', '\0', '\0']), ('\u{446}', ['\u{426}', '\0', '\0']), + ('\u{447}', ['\u{427}', '\0', '\0']), ('\u{448}', ['\u{428}', '\0', '\0']), ('\u{449}', + ['\u{429}', '\0', '\0']), ('\u{44a}', ['\u{42a}', '\0', '\0']), ('\u{44b}', ['\u{42b}', + '\0', '\0']), ('\u{44c}', ['\u{42c}', '\0', '\0']), ('\u{44d}', ['\u{42d}', '\0', '\0']), + ('\u{44e}', ['\u{42e}', '\0', '\0']), ('\u{44f}', ['\u{42f}', '\0', '\0']), ('\u{450}', + ['\u{400}', '\0', '\0']), ('\u{451}', ['\u{401}', '\0', '\0']), ('\u{452}', ['\u{402}', + '\0', '\0']), ('\u{453}', ['\u{403}', '\0', '\0']), ('\u{454}', ['\u{404}', '\0', '\0']), + ('\u{455}', ['\u{405}', '\0', '\0']), ('\u{456}', ['\u{406}', '\0', '\0']), ('\u{457}', + ['\u{407}', '\0', '\0']), ('\u{458}', ['\u{408}', '\0', '\0']), ('\u{459}', ['\u{409}', + '\0', '\0']), ('\u{45a}', ['\u{40a}', '\0', '\0']), ('\u{45b}', ['\u{40b}', '\0', '\0']), + ('\u{45c}', ['\u{40c}', '\0', '\0']), ('\u{45d}', ['\u{40d}', '\0', '\0']), ('\u{45e}', + ['\u{40e}', '\0', '\0']), ('\u{45f}', ['\u{40f}', '\0', '\0']), ('\u{461}', ['\u{460}', + '\0', '\0']), ('\u{463}', ['\u{462}', '\0', '\0']), ('\u{465}', ['\u{464}', '\0', '\0']), + ('\u{467}', ['\u{466}', '\0', '\0']), ('\u{469}', ['\u{468}', '\0', '\0']), ('\u{46b}', + ['\u{46a}', '\0', '\0']), ('\u{46d}', ['\u{46c}', '\0', '\0']), ('\u{46f}', ['\u{46e}', + '\0', '\0']), ('\u{471}', ['\u{470}', '\0', '\0']), ('\u{473}', ['\u{472}', '\0', '\0']), + ('\u{475}', ['\u{474}', '\0', '\0']), ('\u{477}', ['\u{476}', '\0', '\0']), ('\u{479}', + ['\u{478}', '\0', '\0']), ('\u{47b}', ['\u{47a}', '\0', '\0']), ('\u{47d}', ['\u{47c}', + '\0', '\0']), ('\u{47f}', ['\u{47e}', '\0', '\0']), ('\u{481}', 
['\u{480}', '\0', '\0']), + ('\u{48b}', ['\u{48a}', '\0', '\0']), ('\u{48d}', ['\u{48c}', '\0', '\0']), ('\u{48f}', + ['\u{48e}', '\0', '\0']), ('\u{491}', ['\u{490}', '\0', '\0']), ('\u{493}', ['\u{492}', + '\0', '\0']), ('\u{495}', ['\u{494}', '\0', '\0']), ('\u{497}', ['\u{496}', '\0', '\0']), + ('\u{499}', ['\u{498}', '\0', '\0']), ('\u{49b}', ['\u{49a}', '\0', '\0']), ('\u{49d}', + ['\u{49c}', '\0', '\0']), ('\u{49f}', ['\u{49e}', '\0', '\0']), ('\u{4a1}', ['\u{4a0}', + '\0', '\0']), ('\u{4a3}', ['\u{4a2}', '\0', '\0']), ('\u{4a5}', ['\u{4a4}', '\0', '\0']), + ('\u{4a7}', ['\u{4a6}', '\0', '\0']), ('\u{4a9}', ['\u{4a8}', '\0', '\0']), ('\u{4ab}', + ['\u{4aa}', '\0', '\0']), ('\u{4ad}', ['\u{4ac}', '\0', '\0']), ('\u{4af}', ['\u{4ae}', + '\0', '\0']), ('\u{4b1}', ['\u{4b0}', '\0', '\0']), ('\u{4b3}', ['\u{4b2}', '\0', '\0']), + ('\u{4b5}', ['\u{4b4}', '\0', '\0']), ('\u{4b7}', ['\u{4b6}', '\0', '\0']), ('\u{4b9}', + ['\u{4b8}', '\0', '\0']), ('\u{4bb}', ['\u{4ba}', '\0', '\0']), ('\u{4bd}', ['\u{4bc}', + '\0', '\0']), ('\u{4bf}', ['\u{4be}', '\0', '\0']), ('\u{4c2}', ['\u{4c1}', '\0', '\0']), + ('\u{4c4}', ['\u{4c3}', '\0', '\0']), ('\u{4c6}', ['\u{4c5}', '\0', '\0']), ('\u{4c8}', + ['\u{4c7}', '\0', '\0']), ('\u{4ca}', ['\u{4c9}', '\0', '\0']), ('\u{4cc}', ['\u{4cb}', + '\0', '\0']), ('\u{4ce}', ['\u{4cd}', '\0', '\0']), ('\u{4cf}', ['\u{4c0}', '\0', '\0']), + ('\u{4d1}', ['\u{4d0}', '\0', '\0']), ('\u{4d3}', ['\u{4d2}', '\0', '\0']), ('\u{4d5}', + ['\u{4d4}', '\0', '\0']), ('\u{4d7}', ['\u{4d6}', '\0', '\0']), ('\u{4d9}', ['\u{4d8}', + '\0', '\0']), ('\u{4db}', ['\u{4da}', '\0', '\0']), ('\u{4dd}', ['\u{4dc}', '\0', '\0']), + ('\u{4df}', ['\u{4de}', '\0', '\0']), ('\u{4e1}', ['\u{4e0}', '\0', '\0']), ('\u{4e3}', + ['\u{4e2}', '\0', '\0']), ('\u{4e5}', ['\u{4e4}', '\0', '\0']), ('\u{4e7}', ['\u{4e6}', + '\0', '\0']), ('\u{4e9}', ['\u{4e8}', '\0', '\0']), ('\u{4eb}', ['\u{4ea}', '\0', '\0']), + ('\u{4ed}', ['\u{4ec}', '\0', '\0']), ('\u{4ef}', ['\u{4ee}', '\0', 
'\0']), ('\u{4f1}', + ['\u{4f0}', '\0', '\0']), ('\u{4f3}', ['\u{4f2}', '\0', '\0']), ('\u{4f5}', ['\u{4f4}', + '\0', '\0']), ('\u{4f7}', ['\u{4f6}', '\0', '\0']), ('\u{4f9}', ['\u{4f8}', '\0', '\0']), + ('\u{4fb}', ['\u{4fa}', '\0', '\0']), ('\u{4fd}', ['\u{4fc}', '\0', '\0']), ('\u{4ff}', + ['\u{4fe}', '\0', '\0']), ('\u{501}', ['\u{500}', '\0', '\0']), ('\u{503}', ['\u{502}', + '\0', '\0']), ('\u{505}', ['\u{504}', '\0', '\0']), ('\u{507}', ['\u{506}', '\0', '\0']), + ('\u{509}', ['\u{508}', '\0', '\0']), ('\u{50b}', ['\u{50a}', '\0', '\0']), ('\u{50d}', + ['\u{50c}', '\0', '\0']), ('\u{50f}', ['\u{50e}', '\0', '\0']), ('\u{511}', ['\u{510}', + '\0', '\0']), ('\u{513}', ['\u{512}', '\0', '\0']), ('\u{515}', ['\u{514}', '\0', '\0']), + ('\u{517}', ['\u{516}', '\0', '\0']), ('\u{519}', ['\u{518}', '\0', '\0']), ('\u{51b}', + ['\u{51a}', '\0', '\0']), ('\u{51d}', ['\u{51c}', '\0', '\0']), ('\u{51f}', ['\u{51e}', + '\0', '\0']), ('\u{521}', ['\u{520}', '\0', '\0']), ('\u{523}', ['\u{522}', '\0', '\0']), + ('\u{525}', ['\u{524}', '\0', '\0']), ('\u{527}', ['\u{526}', '\0', '\0']), ('\u{529}', + ['\u{528}', '\0', '\0']), ('\u{52b}', ['\u{52a}', '\0', '\0']), ('\u{52d}', ['\u{52c}', + '\0', '\0']), ('\u{52f}', ['\u{52e}', '\0', '\0']), ('\u{561}', ['\u{531}', '\0', '\0']), + ('\u{562}', ['\u{532}', '\0', '\0']), ('\u{563}', ['\u{533}', '\0', '\0']), ('\u{564}', + ['\u{534}', '\0', '\0']), ('\u{565}', ['\u{535}', '\0', '\0']), ('\u{566}', ['\u{536}', + '\0', '\0']), ('\u{567}', ['\u{537}', '\0', '\0']), ('\u{568}', ['\u{538}', '\0', '\0']), + ('\u{569}', ['\u{539}', '\0', '\0']), ('\u{56a}', ['\u{53a}', '\0', '\0']), ('\u{56b}', + ['\u{53b}', '\0', '\0']), ('\u{56c}', ['\u{53c}', '\0', '\0']), ('\u{56d}', ['\u{53d}', + '\0', '\0']), ('\u{56e}', ['\u{53e}', '\0', '\0']), ('\u{56f}', ['\u{53f}', '\0', '\0']), + ('\u{570}', ['\u{540}', '\0', '\0']), ('\u{571}', ['\u{541}', '\0', '\0']), ('\u{572}', + ['\u{542}', '\0', '\0']), ('\u{573}', ['\u{543}', '\0', '\0']), 
('\u{574}', ['\u{544}', + '\0', '\0']), ('\u{575}', ['\u{545}', '\0', '\0']), ('\u{576}', ['\u{546}', '\0', '\0']), + ('\u{577}', ['\u{547}', '\0', '\0']), ('\u{578}', ['\u{548}', '\0', '\0']), ('\u{579}', + ['\u{549}', '\0', '\0']), ('\u{57a}', ['\u{54a}', '\0', '\0']), ('\u{57b}', ['\u{54b}', + '\0', '\0']), ('\u{57c}', ['\u{54c}', '\0', '\0']), ('\u{57d}', ['\u{54d}', '\0', '\0']), + ('\u{57e}', ['\u{54e}', '\0', '\0']), ('\u{57f}', ['\u{54f}', '\0', '\0']), ('\u{580}', + ['\u{550}', '\0', '\0']), ('\u{581}', ['\u{551}', '\0', '\0']), ('\u{582}', ['\u{552}', + '\0', '\0']), ('\u{583}', ['\u{553}', '\0', '\0']), ('\u{584}', ['\u{554}', '\0', '\0']), + ('\u{585}', ['\u{555}', '\0', '\0']), ('\u{586}', ['\u{556}', '\0', '\0']), ('\u{587}', + ['\u{535}', '\u{552}', '\0']), ('\u{10d0}', ['\u{1c90}', '\0', '\0']), ('\u{10d1}', + ['\u{1c91}', '\0', '\0']), ('\u{10d2}', ['\u{1c92}', '\0', '\0']), ('\u{10d3}', ['\u{1c93}', + '\0', '\0']), ('\u{10d4}', ['\u{1c94}', '\0', '\0']), ('\u{10d5}', ['\u{1c95}', '\0', + '\0']), ('\u{10d6}', ['\u{1c96}', '\0', '\0']), ('\u{10d7}', ['\u{1c97}', '\0', '\0']), + ('\u{10d8}', ['\u{1c98}', '\0', '\0']), ('\u{10d9}', ['\u{1c99}', '\0', '\0']), ('\u{10da}', + ['\u{1c9a}', '\0', '\0']), ('\u{10db}', ['\u{1c9b}', '\0', '\0']), ('\u{10dc}', ['\u{1c9c}', + '\0', '\0']), ('\u{10dd}', ['\u{1c9d}', '\0', '\0']), ('\u{10de}', ['\u{1c9e}', '\0', + '\0']), ('\u{10df}', ['\u{1c9f}', '\0', '\0']), ('\u{10e0}', ['\u{1ca0}', '\0', '\0']), + ('\u{10e1}', ['\u{1ca1}', '\0', '\0']), ('\u{10e2}', ['\u{1ca2}', '\0', '\0']), ('\u{10e3}', + ['\u{1ca3}', '\0', '\0']), ('\u{10e4}', ['\u{1ca4}', '\0', '\0']), ('\u{10e5}', ['\u{1ca5}', + '\0', '\0']), ('\u{10e6}', ['\u{1ca6}', '\0', '\0']), ('\u{10e7}', ['\u{1ca7}', '\0', + '\0']), ('\u{10e8}', ['\u{1ca8}', '\0', '\0']), ('\u{10e9}', ['\u{1ca9}', '\0', '\0']), + ('\u{10ea}', ['\u{1caa}', '\0', '\0']), ('\u{10eb}', ['\u{1cab}', '\0', '\0']), ('\u{10ec}', + ['\u{1cac}', '\0', '\0']), ('\u{10ed}', ['\u{1cad}', 
'\0', '\0']), ('\u{10ee}', ['\u{1cae}', + '\0', '\0']), ('\u{10ef}', ['\u{1caf}', '\0', '\0']), ('\u{10f0}', ['\u{1cb0}', '\0', + '\0']), ('\u{10f1}', ['\u{1cb1}', '\0', '\0']), ('\u{10f2}', ['\u{1cb2}', '\0', '\0']), + ('\u{10f3}', ['\u{1cb3}', '\0', '\0']), ('\u{10f4}', ['\u{1cb4}', '\0', '\0']), ('\u{10f5}', + ['\u{1cb5}', '\0', '\0']), ('\u{10f6}', ['\u{1cb6}', '\0', '\0']), ('\u{10f7}', ['\u{1cb7}', + '\0', '\0']), ('\u{10f8}', ['\u{1cb8}', '\0', '\0']), ('\u{10f9}', ['\u{1cb9}', '\0', + '\0']), ('\u{10fa}', ['\u{1cba}', '\0', '\0']), ('\u{10fd}', ['\u{1cbd}', '\0', '\0']), + ('\u{10fe}', ['\u{1cbe}', '\0', '\0']), ('\u{10ff}', ['\u{1cbf}', '\0', '\0']), ('\u{13f8}', + ['\u{13f0}', '\0', '\0']), ('\u{13f9}', ['\u{13f1}', '\0', '\0']), ('\u{13fa}', ['\u{13f2}', + '\0', '\0']), ('\u{13fb}', ['\u{13f3}', '\0', '\0']), ('\u{13fc}', ['\u{13f4}', '\0', + '\0']), ('\u{13fd}', ['\u{13f5}', '\0', '\0']), ('\u{1c80}', ['\u{412}', '\0', '\0']), + ('\u{1c81}', ['\u{414}', '\0', '\0']), ('\u{1c82}', ['\u{41e}', '\0', '\0']), ('\u{1c83}', + ['\u{421}', '\0', '\0']), ('\u{1c84}', ['\u{422}', '\0', '\0']), ('\u{1c85}', ['\u{422}', + '\0', '\0']), ('\u{1c86}', ['\u{42a}', '\0', '\0']), ('\u{1c87}', ['\u{462}', '\0', '\0']), + ('\u{1c88}', ['\u{a64a}', '\0', '\0']), ('\u{1d79}', ['\u{a77d}', '\0', '\0']), ('\u{1d7d}', + ['\u{2c63}', '\0', '\0']), ('\u{1e01}', ['\u{1e00}', '\0', '\0']), ('\u{1e03}', ['\u{1e02}', + '\0', '\0']), ('\u{1e05}', ['\u{1e04}', '\0', '\0']), ('\u{1e07}', ['\u{1e06}', '\0', + '\0']), ('\u{1e09}', ['\u{1e08}', '\0', '\0']), ('\u{1e0b}', ['\u{1e0a}', '\0', '\0']), + ('\u{1e0d}', ['\u{1e0c}', '\0', '\0']), ('\u{1e0f}', ['\u{1e0e}', '\0', '\0']), ('\u{1e11}', + ['\u{1e10}', '\0', '\0']), ('\u{1e13}', ['\u{1e12}', '\0', '\0']), ('\u{1e15}', ['\u{1e14}', + '\0', '\0']), ('\u{1e17}', ['\u{1e16}', '\0', '\0']), ('\u{1e19}', ['\u{1e18}', '\0', + '\0']), ('\u{1e1b}', ['\u{1e1a}', '\0', '\0']), ('\u{1e1d}', ['\u{1e1c}', '\0', '\0']), + ('\u{1e1f}', ['\u{1e1e}', 
'\0', '\0']), ('\u{1e21}', ['\u{1e20}', '\0', '\0']), ('\u{1e23}', + ['\u{1e22}', '\0', '\0']), ('\u{1e25}', ['\u{1e24}', '\0', '\0']), ('\u{1e27}', ['\u{1e26}', + '\0', '\0']), ('\u{1e29}', ['\u{1e28}', '\0', '\0']), ('\u{1e2b}', ['\u{1e2a}', '\0', + '\0']), ('\u{1e2d}', ['\u{1e2c}', '\0', '\0']), ('\u{1e2f}', ['\u{1e2e}', '\0', '\0']), + ('\u{1e31}', ['\u{1e30}', '\0', '\0']), ('\u{1e33}', ['\u{1e32}', '\0', '\0']), ('\u{1e35}', + ['\u{1e34}', '\0', '\0']), ('\u{1e37}', ['\u{1e36}', '\0', '\0']), ('\u{1e39}', ['\u{1e38}', + '\0', '\0']), ('\u{1e3b}', ['\u{1e3a}', '\0', '\0']), ('\u{1e3d}', ['\u{1e3c}', '\0', + '\0']), ('\u{1e3f}', ['\u{1e3e}', '\0', '\0']), ('\u{1e41}', ['\u{1e40}', '\0', '\0']), + ('\u{1e43}', ['\u{1e42}', '\0', '\0']), ('\u{1e45}', ['\u{1e44}', '\0', '\0']), ('\u{1e47}', + ['\u{1e46}', '\0', '\0']), ('\u{1e49}', ['\u{1e48}', '\0', '\0']), ('\u{1e4b}', ['\u{1e4a}', + '\0', '\0']), ('\u{1e4d}', ['\u{1e4c}', '\0', '\0']), ('\u{1e4f}', ['\u{1e4e}', '\0', + '\0']), ('\u{1e51}', ['\u{1e50}', '\0', '\0']), ('\u{1e53}', ['\u{1e52}', '\0', '\0']), + ('\u{1e55}', ['\u{1e54}', '\0', '\0']), ('\u{1e57}', ['\u{1e56}', '\0', '\0']), ('\u{1e59}', + ['\u{1e58}', '\0', '\0']), ('\u{1e5b}', ['\u{1e5a}', '\0', '\0']), ('\u{1e5d}', ['\u{1e5c}', + '\0', '\0']), ('\u{1e5f}', ['\u{1e5e}', '\0', '\0']), ('\u{1e61}', ['\u{1e60}', '\0', + '\0']), ('\u{1e63}', ['\u{1e62}', '\0', '\0']), ('\u{1e65}', ['\u{1e64}', '\0', '\0']), + ('\u{1e67}', ['\u{1e66}', '\0', '\0']), ('\u{1e69}', ['\u{1e68}', '\0', '\0']), ('\u{1e6b}', + ['\u{1e6a}', '\0', '\0']), ('\u{1e6d}', ['\u{1e6c}', '\0', '\0']), ('\u{1e6f}', ['\u{1e6e}', + '\0', '\0']), ('\u{1e71}', ['\u{1e70}', '\0', '\0']), ('\u{1e73}', ['\u{1e72}', '\0', + '\0']), ('\u{1e75}', ['\u{1e74}', '\0', '\0']), ('\u{1e77}', ['\u{1e76}', '\0', '\0']), + ('\u{1e79}', ['\u{1e78}', '\0', '\0']), ('\u{1e7b}', ['\u{1e7a}', '\0', '\0']), ('\u{1e7d}', + ['\u{1e7c}', '\0', '\0']), ('\u{1e7f}', ['\u{1e7e}', '\0', '\0']), ('\u{1e81}', 
['\u{1e80}', + '\0', '\0']), ('\u{1e83}', ['\u{1e82}', '\0', '\0']), ('\u{1e85}', ['\u{1e84}', '\0', + '\0']), ('\u{1e87}', ['\u{1e86}', '\0', '\0']), ('\u{1e89}', ['\u{1e88}', '\0', '\0']), + ('\u{1e8b}', ['\u{1e8a}', '\0', '\0']), ('\u{1e8d}', ['\u{1e8c}', '\0', '\0']), ('\u{1e8f}', + ['\u{1e8e}', '\0', '\0']), ('\u{1e91}', ['\u{1e90}', '\0', '\0']), ('\u{1e93}', ['\u{1e92}', + '\0', '\0']), ('\u{1e95}', ['\u{1e94}', '\0', '\0']), ('\u{1e96}', ['\u{48}', '\u{331}', + '\0']), ('\u{1e97}', ['\u{54}', '\u{308}', '\0']), ('\u{1e98}', ['\u{57}', '\u{30a}', + '\0']), ('\u{1e99}', ['\u{59}', '\u{30a}', '\0']), ('\u{1e9a}', ['\u{41}', '\u{2be}', + '\0']), ('\u{1e9b}', ['\u{1e60}', '\0', '\0']), ('\u{1ea1}', ['\u{1ea0}', '\0', '\0']), + ('\u{1ea3}', ['\u{1ea2}', '\0', '\0']), ('\u{1ea5}', ['\u{1ea4}', '\0', '\0']), ('\u{1ea7}', + ['\u{1ea6}', '\0', '\0']), ('\u{1ea9}', ['\u{1ea8}', '\0', '\0']), ('\u{1eab}', ['\u{1eaa}', + '\0', '\0']), ('\u{1ead}', ['\u{1eac}', '\0', '\0']), ('\u{1eaf}', ['\u{1eae}', '\0', + '\0']), ('\u{1eb1}', ['\u{1eb0}', '\0', '\0']), ('\u{1eb3}', ['\u{1eb2}', '\0', '\0']), + ('\u{1eb5}', ['\u{1eb4}', '\0', '\0']), ('\u{1eb7}', ['\u{1eb6}', '\0', '\0']), ('\u{1eb9}', + ['\u{1eb8}', '\0', '\0']), ('\u{1ebb}', ['\u{1eba}', '\0', '\0']), ('\u{1ebd}', ['\u{1ebc}', + '\0', '\0']), ('\u{1ebf}', ['\u{1ebe}', '\0', '\0']), ('\u{1ec1}', ['\u{1ec0}', '\0', + '\0']), ('\u{1ec3}', ['\u{1ec2}', '\0', '\0']), ('\u{1ec5}', ['\u{1ec4}', '\0', '\0']), + ('\u{1ec7}', ['\u{1ec6}', '\0', '\0']), ('\u{1ec9}', ['\u{1ec8}', '\0', '\0']), ('\u{1ecb}', + ['\u{1eca}', '\0', '\0']), ('\u{1ecd}', ['\u{1ecc}', '\0', '\0']), ('\u{1ecf}', ['\u{1ece}', + '\0', '\0']), ('\u{1ed1}', ['\u{1ed0}', '\0', '\0']), ('\u{1ed3}', ['\u{1ed2}', '\0', + '\0']), ('\u{1ed5}', ['\u{1ed4}', '\0', '\0']), ('\u{1ed7}', ['\u{1ed6}', '\0', '\0']), + ('\u{1ed9}', ['\u{1ed8}', '\0', '\0']), ('\u{1edb}', ['\u{1eda}', '\0', '\0']), ('\u{1edd}', + ['\u{1edc}', '\0', '\0']), ('\u{1edf}', ['\u{1ede}', '\0', 
'\0']), ('\u{1ee1}', ['\u{1ee0}', + '\0', '\0']), ('\u{1ee3}', ['\u{1ee2}', '\0', '\0']), ('\u{1ee5}', ['\u{1ee4}', '\0', + '\0']), ('\u{1ee7}', ['\u{1ee6}', '\0', '\0']), ('\u{1ee9}', ['\u{1ee8}', '\0', '\0']), + ('\u{1eeb}', ['\u{1eea}', '\0', '\0']), ('\u{1eed}', ['\u{1eec}', '\0', '\0']), ('\u{1eef}', + ['\u{1eee}', '\0', '\0']), ('\u{1ef1}', ['\u{1ef0}', '\0', '\0']), ('\u{1ef3}', ['\u{1ef2}', + '\0', '\0']), ('\u{1ef5}', ['\u{1ef4}', '\0', '\0']), ('\u{1ef7}', ['\u{1ef6}', '\0', + '\0']), ('\u{1ef9}', ['\u{1ef8}', '\0', '\0']), ('\u{1efb}', ['\u{1efa}', '\0', '\0']), + ('\u{1efd}', ['\u{1efc}', '\0', '\0']), ('\u{1eff}', ['\u{1efe}', '\0', '\0']), ('\u{1f00}', + ['\u{1f08}', '\0', '\0']), ('\u{1f01}', ['\u{1f09}', '\0', '\0']), ('\u{1f02}', ['\u{1f0a}', + '\0', '\0']), ('\u{1f03}', ['\u{1f0b}', '\0', '\0']), ('\u{1f04}', ['\u{1f0c}', '\0', + '\0']), ('\u{1f05}', ['\u{1f0d}', '\0', '\0']), ('\u{1f06}', ['\u{1f0e}', '\0', '\0']), + ('\u{1f07}', ['\u{1f0f}', '\0', '\0']), ('\u{1f10}', ['\u{1f18}', '\0', '\0']), ('\u{1f11}', + ['\u{1f19}', '\0', '\0']), ('\u{1f12}', ['\u{1f1a}', '\0', '\0']), ('\u{1f13}', ['\u{1f1b}', + '\0', '\0']), ('\u{1f14}', ['\u{1f1c}', '\0', '\0']), ('\u{1f15}', ['\u{1f1d}', '\0', + '\0']), ('\u{1f20}', ['\u{1f28}', '\0', '\0']), ('\u{1f21}', ['\u{1f29}', '\0', '\0']), + ('\u{1f22}', ['\u{1f2a}', '\0', '\0']), ('\u{1f23}', ['\u{1f2b}', '\0', '\0']), ('\u{1f24}', + ['\u{1f2c}', '\0', '\0']), ('\u{1f25}', ['\u{1f2d}', '\0', '\0']), ('\u{1f26}', ['\u{1f2e}', + '\0', '\0']), ('\u{1f27}', ['\u{1f2f}', '\0', '\0']), ('\u{1f30}', ['\u{1f38}', '\0', + '\0']), ('\u{1f31}', ['\u{1f39}', '\0', '\0']), ('\u{1f32}', ['\u{1f3a}', '\0', '\0']), + ('\u{1f33}', ['\u{1f3b}', '\0', '\0']), ('\u{1f34}', ['\u{1f3c}', '\0', '\0']), ('\u{1f35}', + ['\u{1f3d}', '\0', '\0']), ('\u{1f36}', ['\u{1f3e}', '\0', '\0']), ('\u{1f37}', ['\u{1f3f}', + '\0', '\0']), ('\u{1f40}', ['\u{1f48}', '\0', '\0']), ('\u{1f41}', ['\u{1f49}', '\0', + '\0']), ('\u{1f42}', ['\u{1f4a}', 
'\0', '\0']), ('\u{1f43}', ['\u{1f4b}', '\0', '\0']), + ('\u{1f44}', ['\u{1f4c}', '\0', '\0']), ('\u{1f45}', ['\u{1f4d}', '\0', '\0']), ('\u{1f50}', + ['\u{3a5}', '\u{313}', '\0']), ('\u{1f51}', ['\u{1f59}', '\0', '\0']), ('\u{1f52}', + ['\u{3a5}', '\u{313}', '\u{300}']), ('\u{1f53}', ['\u{1f5b}', '\0', '\0']), ('\u{1f54}', + ['\u{3a5}', '\u{313}', '\u{301}']), ('\u{1f55}', ['\u{1f5d}', '\0', '\0']), ('\u{1f56}', + ['\u{3a5}', '\u{313}', '\u{342}']), ('\u{1f57}', ['\u{1f5f}', '\0', '\0']), ('\u{1f60}', + ['\u{1f68}', '\0', '\0']), ('\u{1f61}', ['\u{1f69}', '\0', '\0']), ('\u{1f62}', ['\u{1f6a}', + '\0', '\0']), ('\u{1f63}', ['\u{1f6b}', '\0', '\0']), ('\u{1f64}', ['\u{1f6c}', '\0', + '\0']), ('\u{1f65}', ['\u{1f6d}', '\0', '\0']), ('\u{1f66}', ['\u{1f6e}', '\0', '\0']), + ('\u{1f67}', ['\u{1f6f}', '\0', '\0']), ('\u{1f70}', ['\u{1fba}', '\0', '\0']), ('\u{1f71}', + ['\u{1fbb}', '\0', '\0']), ('\u{1f72}', ['\u{1fc8}', '\0', '\0']), ('\u{1f73}', ['\u{1fc9}', + '\0', '\0']), ('\u{1f74}', ['\u{1fca}', '\0', '\0']), ('\u{1f75}', ['\u{1fcb}', '\0', + '\0']), ('\u{1f76}', ['\u{1fda}', '\0', '\0']), ('\u{1f77}', ['\u{1fdb}', '\0', '\0']), + ('\u{1f78}', ['\u{1ff8}', '\0', '\0']), ('\u{1f79}', ['\u{1ff9}', '\0', '\0']), ('\u{1f7a}', + ['\u{1fea}', '\0', '\0']), ('\u{1f7b}', ['\u{1feb}', '\0', '\0']), ('\u{1f7c}', ['\u{1ffa}', + '\0', '\0']), ('\u{1f7d}', ['\u{1ffb}', '\0', '\0']), ('\u{1f80}', ['\u{1f08}', '\u{399}', + '\0']), ('\u{1f81}', ['\u{1f09}', '\u{399}', '\0']), ('\u{1f82}', ['\u{1f0a}', '\u{399}', + '\0']), ('\u{1f83}', ['\u{1f0b}', '\u{399}', '\0']), ('\u{1f84}', ['\u{1f0c}', '\u{399}', + '\0']), ('\u{1f85}', ['\u{1f0d}', '\u{399}', '\0']), ('\u{1f86}', ['\u{1f0e}', '\u{399}', + '\0']), ('\u{1f87}', ['\u{1f0f}', '\u{399}', '\0']), ('\u{1f88}', ['\u{1f08}', '\u{399}', + '\0']), ('\u{1f89}', ['\u{1f09}', '\u{399}', '\0']), ('\u{1f8a}', ['\u{1f0a}', '\u{399}', + '\0']), ('\u{1f8b}', ['\u{1f0b}', '\u{399}', '\0']), ('\u{1f8c}', ['\u{1f0c}', '\u{399}', + '\0']), 
('\u{1f8d}', ['\u{1f0d}', '\u{399}', '\0']), ('\u{1f8e}', ['\u{1f0e}', '\u{399}', + '\0']), ('\u{1f8f}', ['\u{1f0f}', '\u{399}', '\0']), ('\u{1f90}', ['\u{1f28}', '\u{399}', + '\0']), ('\u{1f91}', ['\u{1f29}', '\u{399}', '\0']), ('\u{1f92}', ['\u{1f2a}', '\u{399}', + '\0']), ('\u{1f93}', ['\u{1f2b}', '\u{399}', '\0']), ('\u{1f94}', ['\u{1f2c}', '\u{399}', + '\0']), ('\u{1f95}', ['\u{1f2d}', '\u{399}', '\0']), ('\u{1f96}', ['\u{1f2e}', '\u{399}', + '\0']), ('\u{1f97}', ['\u{1f2f}', '\u{399}', '\0']), ('\u{1f98}', ['\u{1f28}', '\u{399}', + '\0']), ('\u{1f99}', ['\u{1f29}', '\u{399}', '\0']), ('\u{1f9a}', ['\u{1f2a}', '\u{399}', + '\0']), ('\u{1f9b}', ['\u{1f2b}', '\u{399}', '\0']), ('\u{1f9c}', ['\u{1f2c}', '\u{399}', + '\0']), ('\u{1f9d}', ['\u{1f2d}', '\u{399}', '\0']), ('\u{1f9e}', ['\u{1f2e}', '\u{399}', + '\0']), ('\u{1f9f}', ['\u{1f2f}', '\u{399}', '\0']), ('\u{1fa0}', ['\u{1f68}', '\u{399}', + '\0']), ('\u{1fa1}', ['\u{1f69}', '\u{399}', '\0']), ('\u{1fa2}', ['\u{1f6a}', '\u{399}', + '\0']), ('\u{1fa3}', ['\u{1f6b}', '\u{399}', '\0']), ('\u{1fa4}', ['\u{1f6c}', '\u{399}', + '\0']), ('\u{1fa5}', ['\u{1f6d}', '\u{399}', '\0']), ('\u{1fa6}', ['\u{1f6e}', '\u{399}', + '\0']), ('\u{1fa7}', ['\u{1f6f}', '\u{399}', '\0']), ('\u{1fa8}', ['\u{1f68}', '\u{399}', + '\0']), ('\u{1fa9}', ['\u{1f69}', '\u{399}', '\0']), ('\u{1faa}', ['\u{1f6a}', '\u{399}', + '\0']), ('\u{1fab}', ['\u{1f6b}', '\u{399}', '\0']), ('\u{1fac}', ['\u{1f6c}', '\u{399}', + '\0']), ('\u{1fad}', ['\u{1f6d}', '\u{399}', '\0']), ('\u{1fae}', ['\u{1f6e}', '\u{399}', + '\0']), ('\u{1faf}', ['\u{1f6f}', '\u{399}', '\0']), ('\u{1fb0}', ['\u{1fb8}', '\0', '\0']), + ('\u{1fb1}', ['\u{1fb9}', '\0', '\0']), ('\u{1fb2}', ['\u{1fba}', '\u{399}', '\0']), + ('\u{1fb3}', ['\u{391}', '\u{399}', '\0']), ('\u{1fb4}', ['\u{386}', '\u{399}', '\0']), + ('\u{1fb6}', ['\u{391}', '\u{342}', '\0']), ('\u{1fb7}', ['\u{391}', '\u{342}', '\u{399}']), + ('\u{1fbc}', ['\u{391}', '\u{399}', '\0']), ('\u{1fbe}', ['\u{399}', '\0', 
'\0']), + ('\u{1fc2}', ['\u{1fca}', '\u{399}', '\0']), ('\u{1fc3}', ['\u{397}', '\u{399}', '\0']), + ('\u{1fc4}', ['\u{389}', '\u{399}', '\0']), ('\u{1fc6}', ['\u{397}', '\u{342}', '\0']), + ('\u{1fc7}', ['\u{397}', '\u{342}', '\u{399}']), ('\u{1fcc}', ['\u{397}', '\u{399}', '\0']), + ('\u{1fd0}', ['\u{1fd8}', '\0', '\0']), ('\u{1fd1}', ['\u{1fd9}', '\0', '\0']), ('\u{1fd2}', + ['\u{399}', '\u{308}', '\u{300}']), ('\u{1fd3}', ['\u{399}', '\u{308}', '\u{301}']), + ('\u{1fd6}', ['\u{399}', '\u{342}', '\0']), ('\u{1fd7}', ['\u{399}', '\u{308}', '\u{342}']), + ('\u{1fe0}', ['\u{1fe8}', '\0', '\0']), ('\u{1fe1}', ['\u{1fe9}', '\0', '\0']), ('\u{1fe2}', + ['\u{3a5}', '\u{308}', '\u{300}']), ('\u{1fe3}', ['\u{3a5}', '\u{308}', '\u{301}']), + ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\0']), ('\u{1fe5}', ['\u{1fec}', '\0', '\0']), + ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\0']), ('\u{1fe7}', ['\u{3a5}', '\u{308}', '\u{342}']), + ('\u{1ff2}', ['\u{1ffa}', '\u{399}', '\0']), ('\u{1ff3}', ['\u{3a9}', '\u{399}', '\0']), + ('\u{1ff4}', ['\u{38f}', '\u{399}', '\0']), ('\u{1ff6}', ['\u{3a9}', '\u{342}', '\0']), + ('\u{1ff7}', ['\u{3a9}', '\u{342}', '\u{399}']), ('\u{1ffc}', ['\u{3a9}', '\u{399}', '\0']), + ('\u{214e}', ['\u{2132}', '\0', '\0']), ('\u{2170}', ['\u{2160}', '\0', '\0']), ('\u{2171}', + ['\u{2161}', '\0', '\0']), ('\u{2172}', ['\u{2162}', '\0', '\0']), ('\u{2173}', ['\u{2163}', + '\0', '\0']), ('\u{2174}', ['\u{2164}', '\0', '\0']), ('\u{2175}', ['\u{2165}', '\0', + '\0']), ('\u{2176}', ['\u{2166}', '\0', '\0']), ('\u{2177}', ['\u{2167}', '\0', '\0']), + ('\u{2178}', ['\u{2168}', '\0', '\0']), ('\u{2179}', ['\u{2169}', '\0', '\0']), ('\u{217a}', + ['\u{216a}', '\0', '\0']), ('\u{217b}', ['\u{216b}', '\0', '\0']), ('\u{217c}', ['\u{216c}', + '\0', '\0']), ('\u{217d}', ['\u{216d}', '\0', '\0']), ('\u{217e}', ['\u{216e}', '\0', + '\0']), ('\u{217f}', ['\u{216f}', '\0', '\0']), ('\u{2184}', ['\u{2183}', '\0', '\0']), + ('\u{24d0}', ['\u{24b6}', '\0', '\0']), ('\u{24d1}', 
['\u{24b7}', '\0', '\0']), ('\u{24d2}', + ['\u{24b8}', '\0', '\0']), ('\u{24d3}', ['\u{24b9}', '\0', '\0']), ('\u{24d4}', ['\u{24ba}', + '\0', '\0']), ('\u{24d5}', ['\u{24bb}', '\0', '\0']), ('\u{24d6}', ['\u{24bc}', '\0', + '\0']), ('\u{24d7}', ['\u{24bd}', '\0', '\0']), ('\u{24d8}', ['\u{24be}', '\0', '\0']), + ('\u{24d9}', ['\u{24bf}', '\0', '\0']), ('\u{24da}', ['\u{24c0}', '\0', '\0']), ('\u{24db}', + ['\u{24c1}', '\0', '\0']), ('\u{24dc}', ['\u{24c2}', '\0', '\0']), ('\u{24dd}', ['\u{24c3}', + '\0', '\0']), ('\u{24de}', ['\u{24c4}', '\0', '\0']), ('\u{24df}', ['\u{24c5}', '\0', + '\0']), ('\u{24e0}', ['\u{24c6}', '\0', '\0']), ('\u{24e1}', ['\u{24c7}', '\0', '\0']), + ('\u{24e2}', ['\u{24c8}', '\0', '\0']), ('\u{24e3}', ['\u{24c9}', '\0', '\0']), ('\u{24e4}', + ['\u{24ca}', '\0', '\0']), ('\u{24e5}', ['\u{24cb}', '\0', '\0']), ('\u{24e6}', ['\u{24cc}', + '\0', '\0']), ('\u{24e7}', ['\u{24cd}', '\0', '\0']), ('\u{24e8}', ['\u{24ce}', '\0', + '\0']), ('\u{24e9}', ['\u{24cf}', '\0', '\0']), ('\u{2c30}', ['\u{2c00}', '\0', '\0']), + ('\u{2c31}', ['\u{2c01}', '\0', '\0']), ('\u{2c32}', ['\u{2c02}', '\0', '\0']), ('\u{2c33}', + ['\u{2c03}', '\0', '\0']), ('\u{2c34}', ['\u{2c04}', '\0', '\0']), ('\u{2c35}', ['\u{2c05}', + '\0', '\0']), ('\u{2c36}', ['\u{2c06}', '\0', '\0']), ('\u{2c37}', ['\u{2c07}', '\0', + '\0']), ('\u{2c38}', ['\u{2c08}', '\0', '\0']), ('\u{2c39}', ['\u{2c09}', '\0', '\0']), + ('\u{2c3a}', ['\u{2c0a}', '\0', '\0']), ('\u{2c3b}', ['\u{2c0b}', '\0', '\0']), ('\u{2c3c}', + ['\u{2c0c}', '\0', '\0']), ('\u{2c3d}', ['\u{2c0d}', '\0', '\0']), ('\u{2c3e}', ['\u{2c0e}', + '\0', '\0']), ('\u{2c3f}', ['\u{2c0f}', '\0', '\0']), ('\u{2c40}', ['\u{2c10}', '\0', + '\0']), ('\u{2c41}', ['\u{2c11}', '\0', '\0']), ('\u{2c42}', ['\u{2c12}', '\0', '\0']), + ('\u{2c43}', ['\u{2c13}', '\0', '\0']), ('\u{2c44}', ['\u{2c14}', '\0', '\0']), ('\u{2c45}', + ['\u{2c15}', '\0', '\0']), ('\u{2c46}', ['\u{2c16}', '\0', '\0']), ('\u{2c47}', ['\u{2c17}', + '\0', '\0']), 
('\u{2c48}', ['\u{2c18}', '\0', '\0']), ('\u{2c49}', ['\u{2c19}', '\0', + '\0']), ('\u{2c4a}', ['\u{2c1a}', '\0', '\0']), ('\u{2c4b}', ['\u{2c1b}', '\0', '\0']), + ('\u{2c4c}', ['\u{2c1c}', '\0', '\0']), ('\u{2c4d}', ['\u{2c1d}', '\0', '\0']), ('\u{2c4e}', + ['\u{2c1e}', '\0', '\0']), ('\u{2c4f}', ['\u{2c1f}', '\0', '\0']), ('\u{2c50}', ['\u{2c20}', + '\0', '\0']), ('\u{2c51}', ['\u{2c21}', '\0', '\0']), ('\u{2c52}', ['\u{2c22}', '\0', + '\0']), ('\u{2c53}', ['\u{2c23}', '\0', '\0']), ('\u{2c54}', ['\u{2c24}', '\0', '\0']), + ('\u{2c55}', ['\u{2c25}', '\0', '\0']), ('\u{2c56}', ['\u{2c26}', '\0', '\0']), ('\u{2c57}', + ['\u{2c27}', '\0', '\0']), ('\u{2c58}', ['\u{2c28}', '\0', '\0']), ('\u{2c59}', ['\u{2c29}', + '\0', '\0']), ('\u{2c5a}', ['\u{2c2a}', '\0', '\0']), ('\u{2c5b}', ['\u{2c2b}', '\0', + '\0']), ('\u{2c5c}', ['\u{2c2c}', '\0', '\0']), ('\u{2c5d}', ['\u{2c2d}', '\0', '\0']), + ('\u{2c5e}', ['\u{2c2e}', '\0', '\0']), ('\u{2c61}', ['\u{2c60}', '\0', '\0']), ('\u{2c65}', + ['\u{23a}', '\0', '\0']), ('\u{2c66}', ['\u{23e}', '\0', '\0']), ('\u{2c68}', ['\u{2c67}', + '\0', '\0']), ('\u{2c6a}', ['\u{2c69}', '\0', '\0']), ('\u{2c6c}', ['\u{2c6b}', '\0', + '\0']), ('\u{2c73}', ['\u{2c72}', '\0', '\0']), ('\u{2c76}', ['\u{2c75}', '\0', '\0']), + ('\u{2c81}', ['\u{2c80}', '\0', '\0']), ('\u{2c83}', ['\u{2c82}', '\0', '\0']), ('\u{2c85}', + ['\u{2c84}', '\0', '\0']), ('\u{2c87}', ['\u{2c86}', '\0', '\0']), ('\u{2c89}', ['\u{2c88}', + '\0', '\0']), ('\u{2c8b}', ['\u{2c8a}', '\0', '\0']), ('\u{2c8d}', ['\u{2c8c}', '\0', + '\0']), ('\u{2c8f}', ['\u{2c8e}', '\0', '\0']), ('\u{2c91}', ['\u{2c90}', '\0', '\0']), + ('\u{2c93}', ['\u{2c92}', '\0', '\0']), ('\u{2c95}', ['\u{2c94}', '\0', '\0']), ('\u{2c97}', + ['\u{2c96}', '\0', '\0']), ('\u{2c99}', ['\u{2c98}', '\0', '\0']), ('\u{2c9b}', ['\u{2c9a}', + '\0', '\0']), ('\u{2c9d}', ['\u{2c9c}', '\0', '\0']), ('\u{2c9f}', ['\u{2c9e}', '\0', + '\0']), ('\u{2ca1}', ['\u{2ca0}', '\0', '\0']), ('\u{2ca3}', ['\u{2ca2}', '\0', '\0']), 
+ ('\u{2ca5}', ['\u{2ca4}', '\0', '\0']), ('\u{2ca7}', ['\u{2ca6}', '\0', '\0']), ('\u{2ca9}', + ['\u{2ca8}', '\0', '\0']), ('\u{2cab}', ['\u{2caa}', '\0', '\0']), ('\u{2cad}', ['\u{2cac}', + '\0', '\0']), ('\u{2caf}', ['\u{2cae}', '\0', '\0']), ('\u{2cb1}', ['\u{2cb0}', '\0', + '\0']), ('\u{2cb3}', ['\u{2cb2}', '\0', '\0']), ('\u{2cb5}', ['\u{2cb4}', '\0', '\0']), + ('\u{2cb7}', ['\u{2cb6}', '\0', '\0']), ('\u{2cb9}', ['\u{2cb8}', '\0', '\0']), ('\u{2cbb}', + ['\u{2cba}', '\0', '\0']), ('\u{2cbd}', ['\u{2cbc}', '\0', '\0']), ('\u{2cbf}', ['\u{2cbe}', + '\0', '\0']), ('\u{2cc1}', ['\u{2cc0}', '\0', '\0']), ('\u{2cc3}', ['\u{2cc2}', '\0', + '\0']), ('\u{2cc5}', ['\u{2cc4}', '\0', '\0']), ('\u{2cc7}', ['\u{2cc6}', '\0', '\0']), + ('\u{2cc9}', ['\u{2cc8}', '\0', '\0']), ('\u{2ccb}', ['\u{2cca}', '\0', '\0']), ('\u{2ccd}', + ['\u{2ccc}', '\0', '\0']), ('\u{2ccf}', ['\u{2cce}', '\0', '\0']), ('\u{2cd1}', ['\u{2cd0}', + '\0', '\0']), ('\u{2cd3}', ['\u{2cd2}', '\0', '\0']), ('\u{2cd5}', ['\u{2cd4}', '\0', + '\0']), ('\u{2cd7}', ['\u{2cd6}', '\0', '\0']), ('\u{2cd9}', ['\u{2cd8}', '\0', '\0']), + ('\u{2cdb}', ['\u{2cda}', '\0', '\0']), ('\u{2cdd}', ['\u{2cdc}', '\0', '\0']), ('\u{2cdf}', + ['\u{2cde}', '\0', '\0']), ('\u{2ce1}', ['\u{2ce0}', '\0', '\0']), ('\u{2ce3}', ['\u{2ce2}', + '\0', '\0']), ('\u{2cec}', ['\u{2ceb}', '\0', '\0']), ('\u{2cee}', ['\u{2ced}', '\0', + '\0']), ('\u{2cf3}', ['\u{2cf2}', '\0', '\0']), ('\u{2d00}', ['\u{10a0}', '\0', '\0']), + ('\u{2d01}', ['\u{10a1}', '\0', '\0']), ('\u{2d02}', ['\u{10a2}', '\0', '\0']), ('\u{2d03}', + ['\u{10a3}', '\0', '\0']), ('\u{2d04}', ['\u{10a4}', '\0', '\0']), ('\u{2d05}', ['\u{10a5}', + '\0', '\0']), ('\u{2d06}', ['\u{10a6}', '\0', '\0']), ('\u{2d07}', ['\u{10a7}', '\0', + '\0']), ('\u{2d08}', ['\u{10a8}', '\0', '\0']), ('\u{2d09}', ['\u{10a9}', '\0', '\0']), + ('\u{2d0a}', ['\u{10aa}', '\0', '\0']), ('\u{2d0b}', ['\u{10ab}', '\0', '\0']), ('\u{2d0c}', + ['\u{10ac}', '\0', '\0']), ('\u{2d0d}', ['\u{10ad}', '\0', 
'\0']), ('\u{2d0e}', ['\u{10ae}', + '\0', '\0']), ('\u{2d0f}', ['\u{10af}', '\0', '\0']), ('\u{2d10}', ['\u{10b0}', '\0', + '\0']), ('\u{2d11}', ['\u{10b1}', '\0', '\0']), ('\u{2d12}', ['\u{10b2}', '\0', '\0']), + ('\u{2d13}', ['\u{10b3}', '\0', '\0']), ('\u{2d14}', ['\u{10b4}', '\0', '\0']), ('\u{2d15}', + ['\u{10b5}', '\0', '\0']), ('\u{2d16}', ['\u{10b6}', '\0', '\0']), ('\u{2d17}', ['\u{10b7}', + '\0', '\0']), ('\u{2d18}', ['\u{10b8}', '\0', '\0']), ('\u{2d19}', ['\u{10b9}', '\0', + '\0']), ('\u{2d1a}', ['\u{10ba}', '\0', '\0']), ('\u{2d1b}', ['\u{10bb}', '\0', '\0']), + ('\u{2d1c}', ['\u{10bc}', '\0', '\0']), ('\u{2d1d}', ['\u{10bd}', '\0', '\0']), ('\u{2d1e}', + ['\u{10be}', '\0', '\0']), ('\u{2d1f}', ['\u{10bf}', '\0', '\0']), ('\u{2d20}', ['\u{10c0}', + '\0', '\0']), ('\u{2d21}', ['\u{10c1}', '\0', '\0']), ('\u{2d22}', ['\u{10c2}', '\0', + '\0']), ('\u{2d23}', ['\u{10c3}', '\0', '\0']), ('\u{2d24}', ['\u{10c4}', '\0', '\0']), + ('\u{2d25}', ['\u{10c5}', '\0', '\0']), ('\u{2d27}', ['\u{10c7}', '\0', '\0']), ('\u{2d2d}', + ['\u{10cd}', '\0', '\0']), ('\u{a641}', ['\u{a640}', '\0', '\0']), ('\u{a643}', ['\u{a642}', + '\0', '\0']), ('\u{a645}', ['\u{a644}', '\0', '\0']), ('\u{a647}', ['\u{a646}', '\0', + '\0']), ('\u{a649}', ['\u{a648}', '\0', '\0']), ('\u{a64b}', ['\u{a64a}', '\0', '\0']), + ('\u{a64d}', ['\u{a64c}', '\0', '\0']), ('\u{a64f}', ['\u{a64e}', '\0', '\0']), ('\u{a651}', + ['\u{a650}', '\0', '\0']), ('\u{a653}', ['\u{a652}', '\0', '\0']), ('\u{a655}', ['\u{a654}', + '\0', '\0']), ('\u{a657}', ['\u{a656}', '\0', '\0']), ('\u{a659}', ['\u{a658}', '\0', + '\0']), ('\u{a65b}', ['\u{a65a}', '\0', '\0']), ('\u{a65d}', ['\u{a65c}', '\0', '\0']), + ('\u{a65f}', ['\u{a65e}', '\0', '\0']), ('\u{a661}', ['\u{a660}', '\0', '\0']), ('\u{a663}', + ['\u{a662}', '\0', '\0']), ('\u{a665}', ['\u{a664}', '\0', '\0']), ('\u{a667}', ['\u{a666}', + '\0', '\0']), ('\u{a669}', ['\u{a668}', '\0', '\0']), ('\u{a66b}', ['\u{a66a}', '\0', + '\0']), ('\u{a66d}', ['\u{a66c}', 
'\0', '\0']), ('\u{a681}', ['\u{a680}', '\0', '\0']), + ('\u{a683}', ['\u{a682}', '\0', '\0']), ('\u{a685}', ['\u{a684}', '\0', '\0']), ('\u{a687}', + ['\u{a686}', '\0', '\0']), ('\u{a689}', ['\u{a688}', '\0', '\0']), ('\u{a68b}', ['\u{a68a}', + '\0', '\0']), ('\u{a68d}', ['\u{a68c}', '\0', '\0']), ('\u{a68f}', ['\u{a68e}', '\0', + '\0']), ('\u{a691}', ['\u{a690}', '\0', '\0']), ('\u{a693}', ['\u{a692}', '\0', '\0']), + ('\u{a695}', ['\u{a694}', '\0', '\0']), ('\u{a697}', ['\u{a696}', '\0', '\0']), ('\u{a699}', + ['\u{a698}', '\0', '\0']), ('\u{a69b}', ['\u{a69a}', '\0', '\0']), ('\u{a723}', ['\u{a722}', + '\0', '\0']), ('\u{a725}', ['\u{a724}', '\0', '\0']), ('\u{a727}', ['\u{a726}', '\0', + '\0']), ('\u{a729}', ['\u{a728}', '\0', '\0']), ('\u{a72b}', ['\u{a72a}', '\0', '\0']), + ('\u{a72d}', ['\u{a72c}', '\0', '\0']), ('\u{a72f}', ['\u{a72e}', '\0', '\0']), ('\u{a733}', + ['\u{a732}', '\0', '\0']), ('\u{a735}', ['\u{a734}', '\0', '\0']), ('\u{a737}', ['\u{a736}', + '\0', '\0']), ('\u{a739}', ['\u{a738}', '\0', '\0']), ('\u{a73b}', ['\u{a73a}', '\0', + '\0']), ('\u{a73d}', ['\u{a73c}', '\0', '\0']), ('\u{a73f}', ['\u{a73e}', '\0', '\0']), + ('\u{a741}', ['\u{a740}', '\0', '\0']), ('\u{a743}', ['\u{a742}', '\0', '\0']), ('\u{a745}', + ['\u{a744}', '\0', '\0']), ('\u{a747}', ['\u{a746}', '\0', '\0']), ('\u{a749}', ['\u{a748}', + '\0', '\0']), ('\u{a74b}', ['\u{a74a}', '\0', '\0']), ('\u{a74d}', ['\u{a74c}', '\0', + '\0']), ('\u{a74f}', ['\u{a74e}', '\0', '\0']), ('\u{a751}', ['\u{a750}', '\0', '\0']), + ('\u{a753}', ['\u{a752}', '\0', '\0']), ('\u{a755}', ['\u{a754}', '\0', '\0']), ('\u{a757}', + ['\u{a756}', '\0', '\0']), ('\u{a759}', ['\u{a758}', '\0', '\0']), ('\u{a75b}', ['\u{a75a}', + '\0', '\0']), ('\u{a75d}', ['\u{a75c}', '\0', '\0']), ('\u{a75f}', ['\u{a75e}', '\0', + '\0']), ('\u{a761}', ['\u{a760}', '\0', '\0']), ('\u{a763}', ['\u{a762}', '\0', '\0']), + ('\u{a765}', ['\u{a764}', '\0', '\0']), ('\u{a767}', ['\u{a766}', '\0', '\0']), ('\u{a769}', + 
['\u{a768}', '\0', '\0']), ('\u{a76b}', ['\u{a76a}', '\0', '\0']), ('\u{a76d}', ['\u{a76c}', + '\0', '\0']), ('\u{a76f}', ['\u{a76e}', '\0', '\0']), ('\u{a77a}', ['\u{a779}', '\0', + '\0']), ('\u{a77c}', ['\u{a77b}', '\0', '\0']), ('\u{a77f}', ['\u{a77e}', '\0', '\0']), + ('\u{a781}', ['\u{a780}', '\0', '\0']), ('\u{a783}', ['\u{a782}', '\0', '\0']), ('\u{a785}', + ['\u{a784}', '\0', '\0']), ('\u{a787}', ['\u{a786}', '\0', '\0']), ('\u{a78c}', ['\u{a78b}', + '\0', '\0']), ('\u{a791}', ['\u{a790}', '\0', '\0']), ('\u{a793}', ['\u{a792}', '\0', + '\0']), ('\u{a797}', ['\u{a796}', '\0', '\0']), ('\u{a799}', ['\u{a798}', '\0', '\0']), + ('\u{a79b}', ['\u{a79a}', '\0', '\0']), ('\u{a79d}', ['\u{a79c}', '\0', '\0']), ('\u{a79f}', + ['\u{a79e}', '\0', '\0']), ('\u{a7a1}', ['\u{a7a0}', '\0', '\0']), ('\u{a7a3}', ['\u{a7a2}', + '\0', '\0']), ('\u{a7a5}', ['\u{a7a4}', '\0', '\0']), ('\u{a7a7}', ['\u{a7a6}', '\0', + '\0']), ('\u{a7a9}', ['\u{a7a8}', '\0', '\0']), ('\u{a7b5}', ['\u{a7b4}', '\0', '\0']), + ('\u{a7b7}', ['\u{a7b6}', '\0', '\0']), ('\u{a7b9}', ['\u{a7b8}', '\0', '\0']), ('\u{ab53}', + ['\u{a7b3}', '\0', '\0']), ('\u{ab70}', ['\u{13a0}', '\0', '\0']), ('\u{ab71}', ['\u{13a1}', + '\0', '\0']), ('\u{ab72}', ['\u{13a2}', '\0', '\0']), ('\u{ab73}', ['\u{13a3}', '\0', + '\0']), ('\u{ab74}', ['\u{13a4}', '\0', '\0']), ('\u{ab75}', ['\u{13a5}', '\0', '\0']), + ('\u{ab76}', ['\u{13a6}', '\0', '\0']), ('\u{ab77}', ['\u{13a7}', '\0', '\0']), ('\u{ab78}', + ['\u{13a8}', '\0', '\0']), ('\u{ab79}', ['\u{13a9}', '\0', '\0']), ('\u{ab7a}', ['\u{13aa}', + '\0', '\0']), ('\u{ab7b}', ['\u{13ab}', '\0', '\0']), ('\u{ab7c}', ['\u{13ac}', '\0', + '\0']), ('\u{ab7d}', ['\u{13ad}', '\0', '\0']), ('\u{ab7e}', ['\u{13ae}', '\0', '\0']), + ('\u{ab7f}', ['\u{13af}', '\0', '\0']), ('\u{ab80}', ['\u{13b0}', '\0', '\0']), ('\u{ab81}', + ['\u{13b1}', '\0', '\0']), ('\u{ab82}', ['\u{13b2}', '\0', '\0']), ('\u{ab83}', ['\u{13b3}', + '\0', '\0']), ('\u{ab84}', ['\u{13b4}', '\0', '\0']), 
('\u{ab85}', ['\u{13b5}', '\0', + '\0']), ('\u{ab86}', ['\u{13b6}', '\0', '\0']), ('\u{ab87}', ['\u{13b7}', '\0', '\0']), + ('\u{ab88}', ['\u{13b8}', '\0', '\0']), ('\u{ab89}', ['\u{13b9}', '\0', '\0']), ('\u{ab8a}', + ['\u{13ba}', '\0', '\0']), ('\u{ab8b}', ['\u{13bb}', '\0', '\0']), ('\u{ab8c}', ['\u{13bc}', + '\0', '\0']), ('\u{ab8d}', ['\u{13bd}', '\0', '\0']), ('\u{ab8e}', ['\u{13be}', '\0', + '\0']), ('\u{ab8f}', ['\u{13bf}', '\0', '\0']), ('\u{ab90}', ['\u{13c0}', '\0', '\0']), + ('\u{ab91}', ['\u{13c1}', '\0', '\0']), ('\u{ab92}', ['\u{13c2}', '\0', '\0']), ('\u{ab93}', + ['\u{13c3}', '\0', '\0']), ('\u{ab94}', ['\u{13c4}', '\0', '\0']), ('\u{ab95}', ['\u{13c5}', + '\0', '\0']), ('\u{ab96}', ['\u{13c6}', '\0', '\0']), ('\u{ab97}', ['\u{13c7}', '\0', + '\0']), ('\u{ab98}', ['\u{13c8}', '\0', '\0']), ('\u{ab99}', ['\u{13c9}', '\0', '\0']), + ('\u{ab9a}', ['\u{13ca}', '\0', '\0']), ('\u{ab9b}', ['\u{13cb}', '\0', '\0']), ('\u{ab9c}', + ['\u{13cc}', '\0', '\0']), ('\u{ab9d}', ['\u{13cd}', '\0', '\0']), ('\u{ab9e}', ['\u{13ce}', + '\0', '\0']), ('\u{ab9f}', ['\u{13cf}', '\0', '\0']), ('\u{aba0}', ['\u{13d0}', '\0', + '\0']), ('\u{aba1}', ['\u{13d1}', '\0', '\0']), ('\u{aba2}', ['\u{13d2}', '\0', '\0']), + ('\u{aba3}', ['\u{13d3}', '\0', '\0']), ('\u{aba4}', ['\u{13d4}', '\0', '\0']), ('\u{aba5}', + ['\u{13d5}', '\0', '\0']), ('\u{aba6}', ['\u{13d6}', '\0', '\0']), ('\u{aba7}', ['\u{13d7}', + '\0', '\0']), ('\u{aba8}', ['\u{13d8}', '\0', '\0']), ('\u{aba9}', ['\u{13d9}', '\0', + '\0']), ('\u{abaa}', ['\u{13da}', '\0', '\0']), ('\u{abab}', ['\u{13db}', '\0', '\0']), + ('\u{abac}', ['\u{13dc}', '\0', '\0']), ('\u{abad}', ['\u{13dd}', '\0', '\0']), ('\u{abae}', + ['\u{13de}', '\0', '\0']), ('\u{abaf}', ['\u{13df}', '\0', '\0']), ('\u{abb0}', ['\u{13e0}', + '\0', '\0']), ('\u{abb1}', ['\u{13e1}', '\0', '\0']), ('\u{abb2}', ['\u{13e2}', '\0', + '\0']), ('\u{abb3}', ['\u{13e3}', '\0', '\0']), ('\u{abb4}', ['\u{13e4}', '\0', '\0']), + ('\u{abb5}', ['\u{13e5}', '\0', 
'\0']), ('\u{abb6}', ['\u{13e6}', '\0', '\0']), ('\u{abb7}', + ['\u{13e7}', '\0', '\0']), ('\u{abb8}', ['\u{13e8}', '\0', '\0']), ('\u{abb9}', ['\u{13e9}', + '\0', '\0']), ('\u{abba}', ['\u{13ea}', '\0', '\0']), ('\u{abbb}', ['\u{13eb}', '\0', + '\0']), ('\u{abbc}', ['\u{13ec}', '\0', '\0']), ('\u{abbd}', ['\u{13ed}', '\0', '\0']), + ('\u{abbe}', ['\u{13ee}', '\0', '\0']), ('\u{abbf}', ['\u{13ef}', '\0', '\0']), ('\u{fb00}', + ['\u{46}', '\u{46}', '\0']), ('\u{fb01}', ['\u{46}', '\u{49}', '\0']), ('\u{fb02}', + ['\u{46}', '\u{4c}', '\0']), ('\u{fb03}', ['\u{46}', '\u{46}', '\u{49}']), ('\u{fb04}', + ['\u{46}', '\u{46}', '\u{4c}']), ('\u{fb05}', ['\u{53}', '\u{54}', '\0']), ('\u{fb06}', + ['\u{53}', '\u{54}', '\0']), ('\u{fb13}', ['\u{544}', '\u{546}', '\0']), ('\u{fb14}', + ['\u{544}', '\u{535}', '\0']), ('\u{fb15}', ['\u{544}', '\u{53b}', '\0']), ('\u{fb16}', + ['\u{54e}', '\u{546}', '\0']), ('\u{fb17}', ['\u{544}', '\u{53d}', '\0']), ('\u{ff41}', + ['\u{ff21}', '\0', '\0']), ('\u{ff42}', ['\u{ff22}', '\0', '\0']), ('\u{ff43}', ['\u{ff23}', + '\0', '\0']), ('\u{ff44}', ['\u{ff24}', '\0', '\0']), ('\u{ff45}', ['\u{ff25}', '\0', + '\0']), ('\u{ff46}', ['\u{ff26}', '\0', '\0']), ('\u{ff47}', ['\u{ff27}', '\0', '\0']), + ('\u{ff48}', ['\u{ff28}', '\0', '\0']), ('\u{ff49}', ['\u{ff29}', '\0', '\0']), ('\u{ff4a}', + ['\u{ff2a}', '\0', '\0']), ('\u{ff4b}', ['\u{ff2b}', '\0', '\0']), ('\u{ff4c}', ['\u{ff2c}', + '\0', '\0']), ('\u{ff4d}', ['\u{ff2d}', '\0', '\0']), ('\u{ff4e}', ['\u{ff2e}', '\0', + '\0']), ('\u{ff4f}', ['\u{ff2f}', '\0', '\0']), ('\u{ff50}', ['\u{ff30}', '\0', '\0']), + ('\u{ff51}', ['\u{ff31}', '\0', '\0']), ('\u{ff52}', ['\u{ff32}', '\0', '\0']), ('\u{ff53}', + ['\u{ff33}', '\0', '\0']), ('\u{ff54}', ['\u{ff34}', '\0', '\0']), ('\u{ff55}', ['\u{ff35}', + '\0', '\0']), ('\u{ff56}', ['\u{ff36}', '\0', '\0']), ('\u{ff57}', ['\u{ff37}', '\0', + '\0']), ('\u{ff58}', ['\u{ff38}', '\0', '\0']), ('\u{ff59}', ['\u{ff39}', '\0', '\0']), + ('\u{ff5a}', ['\u{ff3a}', 
'\0', '\0']), ('\u{10428}', ['\u{10400}', '\0', '\0']), + ('\u{10429}', ['\u{10401}', '\0', '\0']), ('\u{1042a}', ['\u{10402}', '\0', '\0']), + ('\u{1042b}', ['\u{10403}', '\0', '\0']), ('\u{1042c}', ['\u{10404}', '\0', '\0']), + ('\u{1042d}', ['\u{10405}', '\0', '\0']), ('\u{1042e}', ['\u{10406}', '\0', '\0']), + ('\u{1042f}', ['\u{10407}', '\0', '\0']), ('\u{10430}', ['\u{10408}', '\0', '\0']), + ('\u{10431}', ['\u{10409}', '\0', '\0']), ('\u{10432}', ['\u{1040a}', '\0', '\0']), + ('\u{10433}', ['\u{1040b}', '\0', '\0']), ('\u{10434}', ['\u{1040c}', '\0', '\0']), + ('\u{10435}', ['\u{1040d}', '\0', '\0']), ('\u{10436}', ['\u{1040e}', '\0', '\0']), + ('\u{10437}', ['\u{1040f}', '\0', '\0']), ('\u{10438}', ['\u{10410}', '\0', '\0']), + ('\u{10439}', ['\u{10411}', '\0', '\0']), ('\u{1043a}', ['\u{10412}', '\0', '\0']), + ('\u{1043b}', ['\u{10413}', '\0', '\0']), ('\u{1043c}', ['\u{10414}', '\0', '\0']), + ('\u{1043d}', ['\u{10415}', '\0', '\0']), ('\u{1043e}', ['\u{10416}', '\0', '\0']), + ('\u{1043f}', ['\u{10417}', '\0', '\0']), ('\u{10440}', ['\u{10418}', '\0', '\0']), + ('\u{10441}', ['\u{10419}', '\0', '\0']), ('\u{10442}', ['\u{1041a}', '\0', '\0']), + ('\u{10443}', ['\u{1041b}', '\0', '\0']), ('\u{10444}', ['\u{1041c}', '\0', '\0']), + ('\u{10445}', ['\u{1041d}', '\0', '\0']), ('\u{10446}', ['\u{1041e}', '\0', '\0']), + ('\u{10447}', ['\u{1041f}', '\0', '\0']), ('\u{10448}', ['\u{10420}', '\0', '\0']), + ('\u{10449}', ['\u{10421}', '\0', '\0']), ('\u{1044a}', ['\u{10422}', '\0', '\0']), + ('\u{1044b}', ['\u{10423}', '\0', '\0']), ('\u{1044c}', ['\u{10424}', '\0', '\0']), + ('\u{1044d}', ['\u{10425}', '\0', '\0']), ('\u{1044e}', ['\u{10426}', '\0', '\0']), + ('\u{1044f}', ['\u{10427}', '\0', '\0']), ('\u{104d8}', ['\u{104b0}', '\0', '\0']), + ('\u{104d9}', ['\u{104b1}', '\0', '\0']), ('\u{104da}', ['\u{104b2}', '\0', '\0']), + ('\u{104db}', ['\u{104b3}', '\0', '\0']), ('\u{104dc}', ['\u{104b4}', '\0', '\0']), + ('\u{104dd}', ['\u{104b5}', '\0', '\0']), 
('\u{104de}', ['\u{104b6}', '\0', '\0']), + ('\u{104df}', ['\u{104b7}', '\0', '\0']), ('\u{104e0}', ['\u{104b8}', '\0', '\0']), + ('\u{104e1}', ['\u{104b9}', '\0', '\0']), ('\u{104e2}', ['\u{104ba}', '\0', '\0']), + ('\u{104e3}', ['\u{104bb}', '\0', '\0']), ('\u{104e4}', ['\u{104bc}', '\0', '\0']), + ('\u{104e5}', ['\u{104bd}', '\0', '\0']), ('\u{104e6}', ['\u{104be}', '\0', '\0']), + ('\u{104e7}', ['\u{104bf}', '\0', '\0']), ('\u{104e8}', ['\u{104c0}', '\0', '\0']), + ('\u{104e9}', ['\u{104c1}', '\0', '\0']), ('\u{104ea}', ['\u{104c2}', '\0', '\0']), + ('\u{104eb}', ['\u{104c3}', '\0', '\0']), ('\u{104ec}', ['\u{104c4}', '\0', '\0']), + ('\u{104ed}', ['\u{104c5}', '\0', '\0']), ('\u{104ee}', ['\u{104c6}', '\0', '\0']), + ('\u{104ef}', ['\u{104c7}', '\0', '\0']), ('\u{104f0}', ['\u{104c8}', '\0', '\0']), + ('\u{104f1}', ['\u{104c9}', '\0', '\0']), ('\u{104f2}', ['\u{104ca}', '\0', '\0']), + ('\u{104f3}', ['\u{104cb}', '\0', '\0']), ('\u{104f4}', ['\u{104cc}', '\0', '\0']), + ('\u{104f5}', ['\u{104cd}', '\0', '\0']), ('\u{104f6}', ['\u{104ce}', '\0', '\0']), + ('\u{104f7}', ['\u{104cf}', '\0', '\0']), ('\u{104f8}', ['\u{104d0}', '\0', '\0']), + ('\u{104f9}', ['\u{104d1}', '\0', '\0']), ('\u{104fa}', ['\u{104d2}', '\0', '\0']), + ('\u{104fb}', ['\u{104d3}', '\0', '\0']), ('\u{10cc0}', ['\u{10c80}', '\0', '\0']), + ('\u{10cc1}', ['\u{10c81}', '\0', '\0']), ('\u{10cc2}', ['\u{10c82}', '\0', '\0']), + ('\u{10cc3}', ['\u{10c83}', '\0', '\0']), ('\u{10cc4}', ['\u{10c84}', '\0', '\0']), + ('\u{10cc5}', ['\u{10c85}', '\0', '\0']), ('\u{10cc6}', ['\u{10c86}', '\0', '\0']), + ('\u{10cc7}', ['\u{10c87}', '\0', '\0']), ('\u{10cc8}', ['\u{10c88}', '\0', '\0']), + ('\u{10cc9}', ['\u{10c89}', '\0', '\0']), ('\u{10cca}', ['\u{10c8a}', '\0', '\0']), + ('\u{10ccb}', ['\u{10c8b}', '\0', '\0']), ('\u{10ccc}', ['\u{10c8c}', '\0', '\0']), + ('\u{10ccd}', ['\u{10c8d}', '\0', '\0']), ('\u{10cce}', ['\u{10c8e}', '\0', '\0']), + ('\u{10ccf}', ['\u{10c8f}', '\0', '\0']), ('\u{10cd0}', 
['\u{10c90}', '\0', '\0']), + ('\u{10cd1}', ['\u{10c91}', '\0', '\0']), ('\u{10cd2}', ['\u{10c92}', '\0', '\0']), + ('\u{10cd3}', ['\u{10c93}', '\0', '\0']), ('\u{10cd4}', ['\u{10c94}', '\0', '\0']), + ('\u{10cd5}', ['\u{10c95}', '\0', '\0']), ('\u{10cd6}', ['\u{10c96}', '\0', '\0']), + ('\u{10cd7}', ['\u{10c97}', '\0', '\0']), ('\u{10cd8}', ['\u{10c98}', '\0', '\0']), + ('\u{10cd9}', ['\u{10c99}', '\0', '\0']), ('\u{10cda}', ['\u{10c9a}', '\0', '\0']), + ('\u{10cdb}', ['\u{10c9b}', '\0', '\0']), ('\u{10cdc}', ['\u{10c9c}', '\0', '\0']), + ('\u{10cdd}', ['\u{10c9d}', '\0', '\0']), ('\u{10cde}', ['\u{10c9e}', '\0', '\0']), + ('\u{10cdf}', ['\u{10c9f}', '\0', '\0']), ('\u{10ce0}', ['\u{10ca0}', '\0', '\0']), + ('\u{10ce1}', ['\u{10ca1}', '\0', '\0']), ('\u{10ce2}', ['\u{10ca2}', '\0', '\0']), + ('\u{10ce3}', ['\u{10ca3}', '\0', '\0']), ('\u{10ce4}', ['\u{10ca4}', '\0', '\0']), + ('\u{10ce5}', ['\u{10ca5}', '\0', '\0']), ('\u{10ce6}', ['\u{10ca6}', '\0', '\0']), + ('\u{10ce7}', ['\u{10ca7}', '\0', '\0']), ('\u{10ce8}', ['\u{10ca8}', '\0', '\0']), + ('\u{10ce9}', ['\u{10ca9}', '\0', '\0']), ('\u{10cea}', ['\u{10caa}', '\0', '\0']), + ('\u{10ceb}', ['\u{10cab}', '\0', '\0']), ('\u{10cec}', ['\u{10cac}', '\0', '\0']), + ('\u{10ced}', ['\u{10cad}', '\0', '\0']), ('\u{10cee}', ['\u{10cae}', '\0', '\0']), + ('\u{10cef}', ['\u{10caf}', '\0', '\0']), ('\u{10cf0}', ['\u{10cb0}', '\0', '\0']), + ('\u{10cf1}', ['\u{10cb1}', '\0', '\0']), ('\u{10cf2}', ['\u{10cb2}', '\0', '\0']), + ('\u{118c0}', ['\u{118a0}', '\0', '\0']), ('\u{118c1}', ['\u{118a1}', '\0', '\0']), + ('\u{118c2}', ['\u{118a2}', '\0', '\0']), ('\u{118c3}', ['\u{118a3}', '\0', '\0']), + ('\u{118c4}', ['\u{118a4}', '\0', '\0']), ('\u{118c5}', ['\u{118a5}', '\0', '\0']), + ('\u{118c6}', ['\u{118a6}', '\0', '\0']), ('\u{118c7}', ['\u{118a7}', '\0', '\0']), + ('\u{118c8}', ['\u{118a8}', '\0', '\0']), ('\u{118c9}', ['\u{118a9}', '\0', '\0']), + ('\u{118ca}', ['\u{118aa}', '\0', '\0']), ('\u{118cb}', ['\u{118ab}', '\0', 
'\0']), + ('\u{118cc}', ['\u{118ac}', '\0', '\0']), ('\u{118cd}', ['\u{118ad}', '\0', '\0']), + ('\u{118ce}', ['\u{118ae}', '\0', '\0']), ('\u{118cf}', ['\u{118af}', '\0', '\0']), + ('\u{118d0}', ['\u{118b0}', '\0', '\0']), ('\u{118d1}', ['\u{118b1}', '\0', '\0']), + ('\u{118d2}', ['\u{118b2}', '\0', '\0']), ('\u{118d3}', ['\u{118b3}', '\0', '\0']), + ('\u{118d4}', ['\u{118b4}', '\0', '\0']), ('\u{118d5}', ['\u{118b5}', '\0', '\0']), + ('\u{118d6}', ['\u{118b6}', '\0', '\0']), ('\u{118d7}', ['\u{118b7}', '\0', '\0']), + ('\u{118d8}', ['\u{118b8}', '\0', '\0']), ('\u{118d9}', ['\u{118b9}', '\0', '\0']), + ('\u{118da}', ['\u{118ba}', '\0', '\0']), ('\u{118db}', ['\u{118bb}', '\0', '\0']), + ('\u{118dc}', ['\u{118bc}', '\0', '\0']), ('\u{118dd}', ['\u{118bd}', '\0', '\0']), + ('\u{118de}', ['\u{118be}', '\0', '\0']), ('\u{118df}', ['\u{118bf}', '\0', '\0']), + ('\u{16e60}', ['\u{16e40}', '\0', '\0']), ('\u{16e61}', ['\u{16e41}', '\0', '\0']), + ('\u{16e62}', ['\u{16e42}', '\0', '\0']), ('\u{16e63}', ['\u{16e43}', '\0', '\0']), + ('\u{16e64}', ['\u{16e44}', '\0', '\0']), ('\u{16e65}', ['\u{16e45}', '\0', '\0']), + ('\u{16e66}', ['\u{16e46}', '\0', '\0']), ('\u{16e67}', ['\u{16e47}', '\0', '\0']), + ('\u{16e68}', ['\u{16e48}', '\0', '\0']), ('\u{16e69}', ['\u{16e49}', '\0', '\0']), + ('\u{16e6a}', ['\u{16e4a}', '\0', '\0']), ('\u{16e6b}', ['\u{16e4b}', '\0', '\0']), + ('\u{16e6c}', ['\u{16e4c}', '\0', '\0']), ('\u{16e6d}', ['\u{16e4d}', '\0', '\0']), + ('\u{16e6e}', ['\u{16e4e}', '\0', '\0']), ('\u{16e6f}', ['\u{16e4f}', '\0', '\0']), + ('\u{16e70}', ['\u{16e50}', '\0', '\0']), ('\u{16e71}', ['\u{16e51}', '\0', '\0']), + ('\u{16e72}', ['\u{16e52}', '\0', '\0']), ('\u{16e73}', ['\u{16e53}', '\0', '\0']), + ('\u{16e74}', ['\u{16e54}', '\0', '\0']), ('\u{16e75}', ['\u{16e55}', '\0', '\0']), + ('\u{16e76}', ['\u{16e56}', '\0', '\0']), ('\u{16e77}', ['\u{16e57}', '\0', '\0']), + ('\u{16e78}', ['\u{16e58}', '\0', '\0']), ('\u{16e79}', ['\u{16e59}', '\0', '\0']), + 
('\u{16e7a}', ['\u{16e5a}', '\0', '\0']), ('\u{16e7b}', ['\u{16e5b}', '\0', '\0']), + ('\u{16e7c}', ['\u{16e5c}', '\0', '\0']), ('\u{16e7d}', ['\u{16e5d}', '\0', '\0']), + ('\u{16e7e}', ['\u{16e5e}', '\0', '\0']), ('\u{16e7f}', ['\u{16e5f}', '\0', '\0']), + ('\u{1e922}', ['\u{1e900}', '\0', '\0']), ('\u{1e923}', ['\u{1e901}', '\0', '\0']), + ('\u{1e924}', ['\u{1e902}', '\0', '\0']), ('\u{1e925}', ['\u{1e903}', '\0', '\0']), + ('\u{1e926}', ['\u{1e904}', '\0', '\0']), ('\u{1e927}', ['\u{1e905}', '\0', '\0']), + ('\u{1e928}', ['\u{1e906}', '\0', '\0']), ('\u{1e929}', ['\u{1e907}', '\0', '\0']), + ('\u{1e92a}', ['\u{1e908}', '\0', '\0']), ('\u{1e92b}', ['\u{1e909}', '\0', '\0']), + ('\u{1e92c}', ['\u{1e90a}', '\0', '\0']), ('\u{1e92d}', ['\u{1e90b}', '\0', '\0']), + ('\u{1e92e}', ['\u{1e90c}', '\0', '\0']), ('\u{1e92f}', ['\u{1e90d}', '\0', '\0']), + ('\u{1e930}', ['\u{1e90e}', '\0', '\0']), ('\u{1e931}', ['\u{1e90f}', '\0', '\0']), + ('\u{1e932}', ['\u{1e910}', '\0', '\0']), ('\u{1e933}', ['\u{1e911}', '\0', '\0']), + ('\u{1e934}', ['\u{1e912}', '\0', '\0']), ('\u{1e935}', ['\u{1e913}', '\0', '\0']), + ('\u{1e936}', ['\u{1e914}', '\0', '\0']), ('\u{1e937}', ['\u{1e915}', '\0', '\0']), + ('\u{1e938}', ['\u{1e916}', '\0', '\0']), ('\u{1e939}', ['\u{1e917}', '\0', '\0']), + ('\u{1e93a}', ['\u{1e918}', '\0', '\0']), ('\u{1e93b}', ['\u{1e919}', '\0', '\0']), + ('\u{1e93c}', ['\u{1e91a}', '\0', '\0']), ('\u{1e93d}', ['\u{1e91b}', '\0', '\0']), + ('\u{1e93e}', ['\u{1e91c}', '\0', '\0']), ('\u{1e93f}', ['\u{1e91d}', '\0', '\0']), + ('\u{1e940}', ['\u{1e91e}', '\0', '\0']), ('\u{1e941}', ['\u{1e91f}', '\0', '\0']), + ('\u{1e942}', ['\u{1e920}', '\0', '\0']), ('\u{1e943}', ['\u{1e921}', '\0', '\0']) + ]; + +} + diff --git a/src/libstd_unicode/unicode.py b/src/libcore/unicode/unicode.py similarity index 93% rename from src/libstd_unicode/unicode.py rename to src/libcore/unicode/unicode.py index a86294930861..28a1e01805e4 100755 --- a/src/libstd_unicode/unicode.py +++ 
b/src/libcore/unicode/unicode.py @@ -21,11 +21,14 @@ # - UnicodeData.txt # # Since this should not require frequent updates, we just store this -# out-of-line and check the unicode.rs file into git. +# out-of-line and check the tables.rs file into git. -import fileinput, re, os, sys, operator, math +import fileinput, re, os, sys, operator, math, datetime -preamble = '''// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +# The directory in which this file resides. +fdir = os.path.dirname(os.path.realpath(__file__)) + "/" + +preamble = '''// Copyright 2012-{year} The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -39,9 +42,9 @@ preamble = '''// Copyright 2012-2016 The Rust Project Developers. See the COPYRI #![allow(missing_docs, non_upper_case_globals, non_snake_case)] -use version::UnicodeVersion; -use bool_trie::{BoolTrie, SmallBoolTrie}; -''' +use unicode::version::UnicodeVersion; +use unicode::bool_trie::{{BoolTrie, SmallBoolTrie}}; +'''.format(year = datetime.datetime.now().year) # Mapping taken from Table 12 from: # http://www.unicode.org/reports/tr44/#General_Category_Values @@ -49,7 +52,7 @@ expanded_categories = { 'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'], 'Lm': ['L'], 'Lo': ['L'], 'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'], - 'Nd': ['N'], 'Nl': ['N'], 'No': ['No'], + 'Nd': ['N'], 'Nl': ['N'], 'No': ['N'], 'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'], 'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'], 'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'], @@ -61,11 +64,11 @@ expanded_categories = { surrogate_codepoints = (0xd800, 0xdfff) def fetch(f): - if not os.path.exists(os.path.basename(f)): - os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s" - % f) + path = fdir + os.path.basename(f) + if not os.path.exists(path): + os.system("curl -o {0}{1} ftp://ftp.unicode.org/Public/UNIDATA/{1}".format(fdir, f)) - if not 
os.path.exists(os.path.basename(f)): + if not os.path.exists(path): sys.stderr.write("cannot load %s" % f) exit(1) @@ -84,7 +87,7 @@ def load_unicode_data(f): udict = {} range_start = -1 - for line in fileinput.input(f): + for line in fileinput.input(fdir + f): data = line.split(';') if len(data) != 15: continue @@ -156,7 +159,7 @@ def load_unicode_data(f): def load_special_casing(f, to_upper, to_lower, to_title): fetch(f) - for line in fileinput.input(f): + for line in fileinput.input(fdir + f): data = line.split('#')[0].split(';') if len(data) == 5: code, lower, title, upper, _comment = data @@ -243,7 +246,7 @@ def load_properties(f, interestingprops): re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)") re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)") - for line in fileinput.input(os.path.basename(f)): + for line in fileinput.input(fdir + os.path.basename(f)): prop = None d_lo = 0 d_hi = 0 @@ -408,9 +411,6 @@ def emit_property_module(f, mod, tbl, emit): def emit_conversions_module(f, to_upper, to_lower, to_title): f.write("pub mod conversions {") f.write(""" - use core::option::Option; - use core::option::Option::{Some, None}; - pub fn to_lower(c: char) -> [char; 3] { match bsearch_case_table(c, to_lowercase_table) { None => [c, '\\0', '\\0'], @@ -459,7 +459,7 @@ def emit_norm_module(f, canon, compat, combine, norm_props): canon_comp_keys = sorted(canon_comp.keys()) if __name__ == "__main__": - r = "tables.rs" + r = fdir + "tables.rs" if os.path.exists(r): os.remove(r) with open(r, "w") as rf: @@ -468,12 +468,13 @@ if __name__ == "__main__": # download and parse all the data fetch("ReadMe.txt") - with open("ReadMe.txt") as readme: + with open(fdir + "ReadMe.txt") as readme: pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode" unicode_version = re.search(pattern, readme.read()).groups() rf.write(""" /// The version of [Unicode](http://www.unicode.org/) that the Unicode parts of -/// `CharExt` and `UnicodeStrPrelude` traits are based on. 
+/// `char` and `str` methods are based on. +#[unstable(feature = "unicode_version", issue = "49726")] pub const UNICODE_VERSION: UnicodeVersion = UnicodeVersion { major: %s, minor: %s, @@ -485,7 +486,7 @@ pub const UNICODE_VERSION: UnicodeVersion = UnicodeVersion { to_upper, to_lower, to_title) = load_unicode_data("UnicodeData.txt") load_special_casing("SpecialCasing.txt", to_upper, to_lower, to_title) want_derived = ["XID_Start", "XID_Continue", "Alphabetic", "Lowercase", "Uppercase", - "Cased", "Case_Ignorable"] + "Cased", "Case_Ignorable", "Grapheme_Extend"] derived = load_properties("DerivedCoreProperties.txt", want_derived) scripts = load_properties("Scripts.txt", []) props = load_properties("PropList.txt", @@ -502,3 +503,4 @@ pub const UNICODE_VERSION: UnicodeVersion = UnicodeVersion { # normalizations and conversions module emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props) emit_conversions_module(rf, to_upper, to_lower, to_title) + print("Regenerated tables.rs.") diff --git a/src/libstd_unicode/version.rs b/src/libcore/unicode/version.rs similarity index 93% rename from src/libstd_unicode/version.rs rename to src/libcore/unicode/version.rs index d82a749d9178..59ebf5f50126 100644 --- a/src/libstd_unicode/version.rs +++ b/src/libcore/unicode/version.rs @@ -12,6 +12,7 @@ /// /// See also: #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] +#[unstable(feature = "unicode_version", issue = "49726")] pub struct UnicodeVersion { /// Major version. 
pub major: u32, diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 44cdb5e8a367..e2380f0fe2ff 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -19,7 +19,8 @@ html_root_url = "https://doc.rust-lang.org/nightly/", html_playground_url = "https://play.rust-lang.org/", test(attr(deny(warnings))))] -#![deny(warnings)] + +#![cfg_attr(not(stage0), feature(nll))] pub use self::Piece::*; pub use self::Position::*; @@ -73,7 +74,7 @@ pub struct FormatSpec<'a> { /// Enum describing where an argument for a format can be located. #[derive(Copy, Clone, PartialEq)] pub enum Position<'a> { - /// The arugment is implied to be located at an index + /// The argument is implied to be located at an index ArgumentImplicitlyIs(usize), /// The argument is located at a specific index given in the format ArgumentIs(usize), @@ -108,6 +109,10 @@ pub enum Flag { /// For numbers, this means that the number will be padded with zeroes, /// and the sign (`+` or `-`) will precede them. FlagSignAwareZeroPad, + /// For Debug / `?`, format integers in lower-case hexadecimal. + FlagDebugLowerHex, + /// For Debug / `?`, format integers in upper-case hexadecimal. + FlagDebugUpperHex, } /// A count is used for the precision and width parameters of an integer, and @@ -124,6 +129,14 @@ pub enum Count<'a> { CountImplied, } +pub struct ParseError { + pub description: string::String, + pub note: Option, + pub label: string::String, + pub start: usize, + pub end: usize, +} + /// The parser structure for interpreting the input format string. This is /// modeled as an iterator over `Piece` structures to form a stream of tokens /// being output. 
@@ -134,15 +147,22 @@ pub struct Parser<'a> { input: &'a str, cur: iter::Peekable>, /// Error messages accumulated during parsing - pub errors: Vec<(string::String, Option)>, + pub errors: Vec, /// Current position of implicit positional argument pointer curarg: usize, + /// `Some(raw count)` when the string is "raw", used to position spans correctly + style: Option, + /// How many newlines have been seen in the string so far, to adjust the error spans + seen_newlines: usize, + /// Start and end byte offset of every successfuly parsed argument + pub arg_places: Vec<(usize, usize)>, } impl<'a> Iterator for Parser<'a> { type Item = Piece<'a>; fn next(&mut self) -> Option> { + let raw = self.style.map(|raw| raw + self.seen_newlines).unwrap_or(0); if let Some(&(pos, c)) = self.cur.peek() { match c { '{' => { @@ -150,9 +170,13 @@ impl<'a> Iterator for Parser<'a> { if self.consume('{') { Some(String(self.string(pos + 1))) } else { - let ret = Some(NextArgument(self.argument())); - self.must_consume('}'); - ret + let arg = self.argument(); + if let Some(arg_pos) = self.must_consume('}').map(|end| { + (pos + raw + 1, end + raw + 2) + }) { + self.arg_places.push(arg_pos); + } + Some(NextArgument(arg)) } } '}' => { @@ -160,12 +184,21 @@ impl<'a> Iterator for Parser<'a> { if self.consume('}') { Some(String(self.string(pos + 1))) } else { - self.err_with_note("unmatched `}` found", - "if you intended to print `}`, \ - you can escape it using `}}`"); + let err_pos = pos + raw + 1; + self.err_with_note( + "unmatched `}` found", + "unmatched `}`", + "if you intended to print `}`, you can escape it using `}}`", + err_pos, + err_pos, + ); None } } + '\n' => { + self.seen_newlines += 1; + Some(String(self.string(pos))) + } _ => Some(String(self.string(pos))), } } else { @@ -176,27 +209,55 @@ impl<'a> Iterator for Parser<'a> { impl<'a> Parser<'a> { /// Creates a new parser for the given format string - pub fn new(s: &'a str) -> Parser<'a> { + pub fn new(s: &'a str, style: Option) -> 
Parser<'a> { Parser { input: s, cur: s.char_indices().peekable(), errors: vec![], curarg: 0, + style, + seen_newlines: 0, + arg_places: vec![], } } /// Notifies of an error. The message doesn't actually need to be of type /// String, but I think it does when this eventually uses conditions so it /// might as well start using it now. - fn err(&mut self, msg: &str) { - self.errors.push((msg.to_owned(), None)); + fn err, S2: Into>( + &mut self, + description: S1, + label: S2, + start: usize, + end: usize, + ) { + self.errors.push(ParseError { + description: description.into(), + note: None, + label: label.into(), + start, + end, + }); } /// Notifies of an error. The message doesn't actually need to be of type /// String, but I think it does when this eventually uses conditions so it /// might as well start using it now. - fn err_with_note(&mut self, msg: &str, note: &str) { - self.errors.push((msg.to_owned(), Some(note.to_owned()))); + fn err_with_note, S2: Into, S3: Into>( + &mut self, + description: S1, + label: S2, + note: S3, + start: usize, + end: usize, + ) { + self.errors.push(ParseError { + description: description.into(), + note: Some(note.into()), + label: label.into(), + start, + end, + }); } /// Optionally consumes the specified character. If the character is not at @@ -217,22 +278,40 @@ impl<'a> Parser<'a> { /// Forces consumption of the specified character. If the character is not /// found, an error is emitted. 
- fn must_consume(&mut self, c: char) { + fn must_consume(&mut self, c: char) -> Option { self.ws(); - if let Some(&(_, maybe)) = self.cur.peek() { + let raw = self.style.unwrap_or(0); + + let padding = raw + self.seen_newlines; + if let Some(&(pos, maybe)) = self.cur.peek() { if c == maybe { self.cur.next(); + Some(pos) } else { - self.err(&format!("expected `{:?}`, found `{:?}`", c, maybe)); + let pos = pos + padding + 1; + self.err(format!("expected `{:?}`, found `{:?}`", c, maybe), + format!("expected `{}`", c), + pos, + pos); + None } } else { - let msg = &format!("expected `{:?}` but string was terminated", c); + let msg = format!("expected `{:?}` but string was terminated", c); + // point at closing `"`, unless the last char is `\n` to account for `println` + let pos = match self.input.chars().last() { + Some('\n') => self.input.len(), + _ => self.input.len() + 1, + }; if c == '}' { self.err_with_note(msg, - "if you intended to print `{`, you can escape it using `{{`"); + format!("expected `{:?}`", c), + "if you intended to print `{`, you can escape it using `{{`", + pos + padding, + pos + padding); } else { - self.err(msg); + self.err(msg, format!("expected `{:?}`", c), pos, pos); } + None } } @@ -297,6 +376,15 @@ impl<'a> Parser<'a> { } else { match self.cur.peek() { Some(&(_, c)) if c.is_alphabetic() => Some(ArgumentNamed(self.word())), + Some(&(pos, c)) if c == '_' => { + let invalid_name = self.string(pos); + self.err_with_note(format!("invalid argument name `{}`", invalid_name), + "invalid argument name", + "argument names cannot start with an underscore", + pos + 1, // add 1 to account for leading `{` + pos + 1 + invalid_name.len()); + Some(ArgumentNamed(invalid_name)) + }, // This is an `ArgumentNext`. 
// Record the fact and do the resolution after parsing the @@ -323,7 +411,7 @@ impl<'a> Parser<'a> { // fill character if let Some(&(_, c)) = self.cur.peek() { - match self.cur.clone().skip(1).next() { + match self.cur.clone().nth(1) { Some((_, '>')) | Some((_, '<')) | Some((_, '^')) => { spec.fill = Some(c); self.cur.next(); @@ -377,8 +465,22 @@ impl<'a> Parser<'a> { spec.precision = self.count(); } } - // Finally the actual format specifier - if self.consume('?') { + // Optional radix followed by the actual format specifier + if self.consume('x') { + if self.consume('?') { + spec.flags |= 1 << (FlagDebugLowerHex as u32); + spec.ty = "?"; + } else { + spec.ty = "x"; + } + } else if self.consume('X') { + if self.consume('?') { + spec.flags |= 1 << (FlagDebugUpperHex as u32); + spec.ty = "?"; + } else { + spec.ty = "X"; + } + } else if self.consume('?') { spec.ty = "?"; } else { spec.ty = self.word(); @@ -402,13 +504,11 @@ impl<'a> Parser<'a> { if word.is_empty() { self.cur = tmp; CountImplied + } else if self.consume('$') { + CountIsName(word) } else { - if self.consume('$') { - CountIsName(word) - } else { - self.cur = tmp; - CountImplied - } + self.cur = tmp; + CountImplied } } } @@ -463,7 +563,7 @@ mod tests { use super::*; fn same(fmt: &'static str, p: &[Piece<'static>]) { - let parser = Parser::new(fmt); + let parser = Parser::new(fmt, None); assert!(parser.collect::>>() == p); } @@ -479,7 +579,7 @@ mod tests { } fn musterr(s: &str) { - let mut p = Parser::new(s); + let mut p = Parser::new(s, None); p.next(); assert!(!p.errors.is_empty()); } diff --git a/src/libgetopts/lib.rs b/src/libgetopts/lib.rs deleted file mode 100644 index 81fa0374f549..000000000000 --- a/src/libgetopts/lib.rs +++ /dev/null @@ -1,1622 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Simple getopt alternative. -//! -//! Construct a vector of options, either by using `reqopt`, `optopt`, and `optflag` -//! or by building them from components yourself, and pass them to `getopts`, -//! along with a vector of actual arguments (not including `argv[0]`). You'll -//! either get a failure code back, or a match. You'll have to verify whether -//! the amount of 'free' arguments in the match is what you expect. Use `opt_*` -//! accessors to get argument values out of the matches object. -//! -//! Single-character options are expected to appear on the command line with a -//! single preceding dash; multiple-character options are expected to be -//! proceeded by two dashes. Options that expect an argument accept their -//! argument following either a space or an equals sign. Single-character -//! options don't require the space. -//! -//! # Example -//! -//! The following example shows simple command line parsing for an application -//! that requires an input file to be specified, accepts an optional output -//! file name following `-o`, and accepts both `-h` and `--help` as optional flags. -//! -//! ```{.rust} -//! #![feature(rustc_private)] -//! -//! extern crate getopts; -//! use getopts::{optopt,optflag,getopts,OptGroup,usage}; -//! use std::env; -//! -//! fn do_work(inp: &str, out: Option) { -//! println!("{}", inp); -//! match out { -//! Some(x) => println!("{}", x), -//! None => println!("No Output"), -//! } -//! } -//! -//! fn print_usage(program: &str, opts: &[OptGroup]) { -//! let brief = format!("Usage: {} [options]", program); -//! print!("{}", usage(&brief, opts)); -//! } -//! -//! fn main() { -//! let args: Vec = env::args().collect(); -//! -//! let program = args[0].clone(); -//! -//! let opts = &[ -//! 
optopt("o", "", "set output file name", "NAME"), -//! optflag("h", "help", "print this help menu") -//! ]; -//! let matches = match getopts(&args[1..], opts) { -//! Ok(m) => { m } -//! Err(f) => { panic!(f.to_string()) } -//! }; -//! if matches.opt_present("h") { -//! print_usage(&program, opts); -//! return; -//! } -//! let output = matches.opt_str("o"); -//! let input = if !matches.free.is_empty() { -//! matches.free[0].clone() -//! } else { -//! print_usage(&program, opts); -//! return; -//! }; -//! do_work(&input, output); -//! } -//! ``` - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", - html_playground_url = "https://play.rust-lang.org/", - test(attr(deny(warnings))))] - -#![deny(missing_docs)] -#![deny(warnings)] - -use self::Name::*; -use self::HasArg::*; -use self::Occur::*; -use self::Fail::*; -use self::Optval::*; -use self::SplitWithinState::*; -use self::Whitespace::*; -use self::LengthLimit::*; - -use std::fmt; -use std::iter::repeat; -use std::result; - -/// Name of an option. Either a string or a single char. -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Name { - /// A string representing the long name of an option. - /// For example: "help" - Long(String), - /// A char representing the short name of an option. - /// For example: 'h' - Short(char), -} - -/// Describes whether an option has an argument. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum HasArg { - /// The option requires an argument. - Yes, - /// The option takes no argument. - No, - /// The option argument is optional. - Maybe, -} - -/// Describes how often an option may occur. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum Occur { - /// The option occurs once. - Req, - /// The option occurs at most once. - Optional, - /// The option occurs zero or more times. 
- Multi, -} - -/// A description of a possible option. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Opt { - /// Name of the option - pub name: Name, - /// Whether it has an argument - pub hasarg: HasArg, - /// How often it can occur - pub occur: Occur, - /// Which options it aliases - pub aliases: Vec, -} - -/// One group of options, e.g., both `-h` and `--help`, along with -/// their shared description and properties. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct OptGroup { - /// Short name of the option, e.g. `h` for a `-h` option - pub short_name: String, - /// Long name of the option, e.g. `help` for a `--help` option - pub long_name: String, - /// Hint for argument, e.g. `FILE` for a `-o FILE` option - pub hint: String, - /// Description for usage help text - pub desc: String, - /// Whether option has an argument - pub hasarg: HasArg, - /// How often it can occur - pub occur: Occur, -} - -/// Describes whether an option is given at all or has a value. -#[derive(Clone, PartialEq, Eq, Debug)] -enum Optval { - Val(String), - Given, -} - -/// The result of checking command line arguments. Contains a vector -/// of matches and a vector of free strings. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Matches { - /// Options that matched - opts: Vec, - /// Values of the Options that matched - vals: Vec>, - /// Free string fragments - pub free: Vec, -} - -/// The type returned when the command line does not conform to the -/// expected format. Use the `Debug` implementation to output detailed -/// information. -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Fail { - /// The option requires an argument but none was passed. - ArgumentMissing(String), - /// The passed option is not declared among the possible options. - UnrecognizedOption(String), - /// A required option is not present. - OptionMissing(String), - /// A single occurrence option is being used multiple times. 
- OptionDuplicated(String), - /// There's an argument being passed to a non-argument option. - UnexpectedArgument(String), -} - -/// The type of failure that occurred. -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -#[allow(missing_docs)] -pub enum FailType { - ArgumentMissing_, - UnrecognizedOption_, - OptionMissing_, - OptionDuplicated_, - UnexpectedArgument_, -} - -/// The result of parsing a command line with a set of options. -pub type Result = result::Result; - -impl Name { - fn from_str(nm: &str) -> Name { - if nm.len() == 1 { - Short(nm.chars().next().unwrap()) - } else { - Long(nm.to_owned()) - } - } - - fn to_string(&self) -> String { - match *self { - Short(ch) => ch.to_string(), - Long(ref s) => s.to_owned(), - } - } -} - -impl OptGroup { - /// Translate OptGroup into Opt. - /// (Both short and long names correspond to different Opts). - pub fn long_to_short(&self) -> Opt { - let OptGroup { - short_name, - long_name, - hasarg, - occur, - .. - } = (*self).clone(); - - match (short_name.len(), long_name.len()) { - (0, 0) => panic!("this long-format option was given no name"), - (0, _) => { - Opt { - name: Long((long_name)), - hasarg, - occur, - aliases: Vec::new(), - } - } - (1, 0) => { - Opt { - name: Short(short_name.chars().next().unwrap()), - hasarg, - occur, - aliases: Vec::new(), - } - } - (1, _) => { - Opt { - name: Long((long_name)), - hasarg, - occur, - aliases: vec![Opt { - name: Short(short_name.chars().next().unwrap()), - hasarg, - occur, - aliases: Vec::new(), - }], - } - } - _ => panic!("something is wrong with the long-form opt"), - } - } -} - -impl Matches { - fn opt_vals(&self, nm: &str) -> Vec { - match find_opt(&self.opts[..], Name::from_str(nm)) { - Some(id) => self.vals[id].clone(), - None => panic!("No option '{}' defined", nm), - } - } - - fn opt_val(&self, nm: &str) -> Option { - let vals = self.opt_vals(nm); - if vals.is_empty() { - None - } else { - Some(vals[0].clone()) - } - } - - /// Returns true if an option was matched. 
- pub fn opt_present(&self, nm: &str) -> bool { - !self.opt_vals(nm).is_empty() - } - - /// Returns the number of times an option was matched. - pub fn opt_count(&self, nm: &str) -> usize { - self.opt_vals(nm).len() - } - - /// Returns true if any of several options were matched. - pub fn opts_present(&self, names: &[String]) -> bool { - for nm in names { - match find_opt(&self.opts, Name::from_str(&**nm)) { - Some(id) if !self.vals[id].is_empty() => return true, - _ => (), - }; - } - false - } - - /// Returns the string argument supplied to one of several matching options or `None`. - pub fn opts_str(&self, names: &[String]) -> Option { - for nm in names { - if let Some(Val(ref s)) = self.opt_val(&nm[..]) { - return Some(s.clone()) - } - } - None - } - - /// Returns a vector of the arguments provided to all matches of the given - /// option. - /// - /// Used when an option accepts multiple values. - pub fn opt_strs(&self, nm: &str) -> Vec { - let mut acc: Vec = Vec::new(); - let r = self.opt_vals(nm); - for v in &r { - match *v { - Val(ref s) => acc.push((*s).clone()), - _ => (), - } - } - acc - } - - /// Returns the string argument supplied to a matching option or `None`. - pub fn opt_str(&self, nm: &str) -> Option { - let vals = self.opt_vals(nm); - if vals.is_empty() { - return None::; - } - match vals[0] { - Val(ref s) => Some((*s).clone()), - _ => None, - } - } - - - /// Returns the matching string, a default, or none. - /// - /// Returns none if the option was not present, `def` if the option was - /// present but no argument was provided, and the argument if the option was - /// present and an argument was provided. 
- pub fn opt_default(&self, nm: &str, def: &str) -> Option { - let vals = self.opt_vals(nm); - if vals.is_empty() { - None - } else { - match vals[0] { - Val(ref s) => Some((*s).clone()), - _ => Some(def.to_owned()), - } - } - } -} - -fn is_arg(arg: &str) -> bool { - arg.len() > 1 && arg.as_bytes()[0] == b'-' -} - -fn find_opt(opts: &[Opt], nm: Name) -> Option { - // Search main options. - let pos = opts.iter().position(|opt| opt.name == nm); - if pos.is_some() { - return pos; - } - - // Search in aliases. - for candidate in opts { - if candidate.aliases.iter().position(|opt| opt.name == nm).is_some() { - return opts.iter().position(|opt| opt.name == candidate.name); - } - } - - None -} - -/// Create a long option that is required and takes an argument. -/// -/// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none -/// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none -/// * `desc` - Description for usage help -/// * `hint` - Hint that is used in place of the argument in the usage help, -/// e.g. `"FILE"` for a `-o FILE` option -pub fn reqopt(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: hint.to_owned(), - desc: desc.to_owned(), - hasarg: Yes, - occur: Req, - } -} - -/// Create a long option that is optional and takes an argument. -/// -/// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none -/// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none -/// * `desc` - Description for usage help -/// * `hint` - Hint that is used in place of the argument in the usage help, -/// e.g. 
`"FILE"` for a `-o FILE` option -pub fn optopt(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: hint.to_owned(), - desc: desc.to_owned(), - hasarg: Yes, - occur: Optional, - } -} - -/// Create a long option that is optional and does not take an argument. -/// -/// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none -/// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none -/// * `desc` - Description for usage help -pub fn optflag(short_name: &str, long_name: &str, desc: &str) -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: "".to_owned(), - desc: desc.to_owned(), - hasarg: No, - occur: Optional, - } -} - -/// Create a long option that can occur more than once and does not -/// take an argument. -/// -/// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none -/// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none -/// * `desc` - Description for usage help -pub fn optflagmulti(short_name: &str, long_name: &str, desc: &str) -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: "".to_owned(), - desc: desc.to_owned(), - hasarg: No, - occur: Multi, - } -} - -/// Create a long option that is optional and takes an optional argument. -/// -/// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none -/// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none -/// * `desc` - Description for usage help -/// * `hint` - Hint that is used in place of the argument in the usage help, -/// e.g. 
`"FILE"` for a `-o FILE` option -pub fn optflagopt(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: hint.to_owned(), - desc: desc.to_owned(), - hasarg: Maybe, - occur: Optional, - } -} - -/// Create a long option that is optional, takes an argument, and may occur -/// multiple times. -/// -/// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none -/// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none -/// * `desc` - Description for usage help -/// * `hint` - Hint that is used in place of the argument in the usage help, -/// e.g. `"FILE"` for a `-o FILE` option -pub fn optmulti(short_name: &str, long_name: &str, desc: &str, hint: &str) -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: hint.to_owned(), - desc: desc.to_owned(), - hasarg: Yes, - occur: Multi, - } -} - -/// Create a generic option group, stating all parameters explicitly -pub fn opt(short_name: &str, - long_name: &str, - desc: &str, - hint: &str, - hasarg: HasArg, - occur: Occur) - -> OptGroup { - let len = short_name.len(); - assert!(len == 1 || len == 0); - OptGroup { - short_name: short_name.to_owned(), - long_name: long_name.to_owned(), - hint: hint.to_owned(), - desc: desc.to_owned(), - hasarg, - occur, - } -} - -impl fmt::Display for Fail { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ArgumentMissing(ref nm) => write!(f, "Argument to option '{}' missing.", *nm), - UnrecognizedOption(ref nm) => write!(f, "Unrecognized option: '{}'.", *nm), - OptionMissing(ref nm) => write!(f, "Required option '{}' missing.", *nm), - OptionDuplicated(ref nm) => write!(f, "Option '{}' given more than once.", *nm), - UnexpectedArgument(ref nm) => write!(f, "Option 
'{}' does not take an argument.", *nm), - } - } -} - -/// Parse command line arguments according to the provided options. -/// -/// On success returns `Ok(Matches)`. Use methods such as `opt_present` -/// `opt_str`, etc. to interrogate results. -/// # Panics -/// -/// Returns `Err(Fail)` on failure: use the `Debug` implementation of `Fail` to display -/// information about it. -pub fn getopts(args: &[String], optgrps: &[OptGroup]) -> Result { - let opts: Vec = optgrps.iter().map(|x| x.long_to_short()).collect(); - let n_opts = opts.len(); - - fn f(_x: usize) -> Vec { - Vec::new() - } - - let mut vals: Vec<_> = (0..n_opts).map(f).collect(); - let mut free: Vec = Vec::new(); - let l = args.len(); - let mut i = 0; - while i < l { - let cur = args[i].clone(); - let curlen = cur.len(); - if !is_arg(&cur[..]) { - free.push(cur); - } else if cur == "--" { - let mut j = i + 1; - while j < l { - free.push(args[j].clone()); - j += 1; - } - break; - } else { - let mut names; - let mut i_arg = None; - if cur.as_bytes()[1] == b'-' { - let tail = &cur[2..curlen]; - let tail_eq: Vec<&str> = tail.splitn(2, '=').collect(); - if tail_eq.len() <= 1 { - names = vec![Long(tail.to_owned())]; - } else { - names = vec![Long(tail_eq[0].to_owned())]; - i_arg = Some(tail_eq[1].to_owned()); - } - } else { - let mut j = 1; - names = Vec::new(); - while j < curlen { - let ch = cur[j..].chars().next().unwrap(); - let opt = Short(ch); - - // In a series of potential options (eg. -aheJ), if we - // see one which takes an argument, we assume all - // subsequent characters make up the argument. 
This - // allows options such as -L/usr/local/lib/foo to be - // interpreted correctly - - let opt_id = match find_opt(&opts, opt.clone()) { - Some(id) => id, - None => return Err(UnrecognizedOption(opt.to_string())), - }; - - names.push(opt); - - let arg_follows = match opts[opt_id].hasarg { - Yes | Maybe => true, - No => false, - }; - - let next = j + ch.len_utf8(); - if arg_follows && next < curlen { - i_arg = Some((&cur[next..curlen]).to_owned()); - break; - } - - j = next; - } - } - let mut name_pos = 0; - for nm in &names { - name_pos += 1; - let optid = match find_opt(&opts, (*nm).clone()) { - Some(id) => id, - None => return Err(UnrecognizedOption(nm.to_string())), - }; - match opts[optid].hasarg { - No => { - if name_pos == names.len() && !i_arg.is_none() { - return Err(UnexpectedArgument(nm.to_string())); - } - let v = &mut vals[optid]; - v.push(Given); - } - Maybe => { - if !i_arg.is_none() { - let v = &mut vals[optid]; - v.push(Val((i_arg.clone()).unwrap())); - } else if name_pos < names.len() || i + 1 == l || is_arg(&args[i + 1][..]) { - let v = &mut vals[optid]; - v.push(Given); - } else { - i += 1; - let v = &mut vals[optid]; - v.push(Val(args[i].clone())); - } - } - Yes => { - if !i_arg.is_none() { - let v = &mut vals[optid]; - v.push(Val(i_arg.clone().unwrap())); - } else if i + 1 == l { - return Err(ArgumentMissing(nm.to_string())); - } else { - i += 1; - let v = &mut vals[optid]; - v.push(Val(args[i].clone())); - } - } - } - } - } - i += 1; - } - for i in 0..n_opts { - let n = vals[i].len(); - let occ = opts[i].occur; - if occ == Req && n == 0 { - return Err(OptionMissing(opts[i].name.to_string())); - } - if occ != Multi && n > 1 { - return Err(OptionDuplicated(opts[i].name.to_string())); - } - } - Ok(Matches { - opts, - vals, - free, - }) -} - -/// Derive a usage message from a set of long options. 
-pub fn usage(brief: &str, opts: &[OptGroup]) -> String { - - let desc_sep = format!("\n{}", repeat(" ").take(24).collect::()); - - let rows = opts.iter().map(|optref| { - let OptGroup{short_name, - long_name, - hint, - desc, - hasarg, - ..} = (*optref).clone(); - - let mut row = repeat(" ").take(4).collect::(); - - // short option - match short_name.len() { - 0 => {} - 1 => { - row.push('-'); - row.push_str(&short_name[..]); - row.push(' '); - } - _ => panic!("the short name should only be 1 ascii char long"), - } - - // long option - match long_name.len() { - 0 => {} - _ => { - row.push_str("--"); - row.push_str(&long_name[..]); - row.push(' '); - } - } - - // arg - match hasarg { - No => {} - Yes => row.push_str(&hint[..]), - Maybe => { - row.push('['); - row.push_str(&hint[..]); - row.push(']'); - } - } - - // FIXME(https://github.com/rust-lang-nursery/getopts/issues/7) - // should be graphemes not codepoints - // - // here we just need to indent the start of the description - let rowlen = row.chars().count(); - if rowlen < 24 { - for _ in 0..24 - rowlen { - row.push(' '); - } - } else { - row.push_str(&desc_sep[..]); - } - - // Normalize desc to contain words separated by one space character - let mut desc_normalized_whitespace = String::new(); - for word in desc.split_whitespace() { - desc_normalized_whitespace.push_str(word); - desc_normalized_whitespace.push(' '); - } - - // FIXME(https://github.com/rust-lang-nursery/getopts/issues/7) - // should be graphemes not codepoints - let mut desc_rows = Vec::new(); - each_split_within(&desc_normalized_whitespace[..], 54, |substr| { - desc_rows.push(substr.to_owned()); - true - }); - - // FIXME(https://github.com/rust-lang-nursery/getopts/issues/7) - // should be graphemes not codepoints - // - // wrapped description - row.push_str(&desc_rows.join(&desc_sep[..])); - - row - }); - - format!("{}\n\nOptions:\n{}\n", - brief, - rows.collect::>().join("\n")) -} - -fn format_option(opt: &OptGroup) -> String { - let mut 
line = String::new(); - - if opt.occur != Req { - line.push('['); - } - - // Use short_name is possible, but fallback to long_name. - if !opt.short_name.is_empty() { - line.push('-'); - line.push_str(&opt.short_name[..]); - } else { - line.push_str("--"); - line.push_str(&opt.long_name[..]); - } - - if opt.hasarg != No { - line.push(' '); - if opt.hasarg == Maybe { - line.push('['); - } - line.push_str(&opt.hint[..]); - if opt.hasarg == Maybe { - line.push(']'); - } - } - - if opt.occur != Req { - line.push(']'); - } - if opt.occur == Multi { - line.push_str(".."); - } - - line -} - -/// Derive a short one-line usage summary from a set of long options. -pub fn short_usage(program_name: &str, opts: &[OptGroup]) -> String { - let mut line = format!("Usage: {} ", program_name); - line.push_str(&opts.iter() - .map(format_option) - .collect::>() - .join(" ")[..]); - line -} - -#[derive(Copy, Clone)] -enum SplitWithinState { - A, // leading whitespace, initial state - B, // words - C, // internal and trailing whitespace -} -#[derive(Copy, Clone)] -enum Whitespace { - Ws, // current char is whitespace - Cr, // current char is not whitespace -} -#[derive(Copy, Clone)] -enum LengthLimit { - UnderLim, // current char makes current substring still fit in limit - OverLim, // current char makes current substring no longer fit in limit -} - - -/// Splits a string into substrings with possibly internal whitespace, -/// each of them at most `lim` bytes long. The substrings have leading and trailing -/// whitespace removed, and are only cut at whitespace boundaries. -/// -/// Note: Function was moved here from `std::str` because this module is the only place that -/// uses it, and because it was too specific for a general string function. -/// -/// # Panics -/// -/// Panics during iteration if the string contains a non-whitespace -/// sequence longer than the limit. 
-fn each_split_within(ss: &str, lim: usize, mut it: F) -> bool - where F: FnMut(&str) -> bool -{ - // Just for fun, let's write this as a state machine: - - let mut slice_start = 0; - let mut last_start = 0; - let mut last_end = 0; - let mut state = A; - let mut fake_i = ss.len(); - let mut lim = lim; - - let mut cont = true; - - // if the limit is larger than the string, lower it to save cycles - if lim >= fake_i { - lim = fake_i; - } - - let mut machine = |cont: &mut bool, (i, c): (usize, char)| -> bool { - let whitespace = if c.is_whitespace() { - Ws - } else { - Cr - }; - let limit = if (i - slice_start + 1) <= lim { - UnderLim - } else { - OverLim - }; - - state = match (state, whitespace, limit) { - (A, Ws, _) => A, - (A, Cr, _) => { - slice_start = i; - last_start = i; - B - } - - (B, Cr, UnderLim) => B, - (B, Cr, OverLim) if (i - last_start + 1) > lim => { - panic!("word starting with {} longer than limit!", - &ss[last_start..i + 1]) - } - (B, Cr, OverLim) => { - *cont = it(&ss[slice_start..last_end]); - slice_start = last_start; - B - } - (B, Ws, UnderLim) => { - last_end = i; - C - } - (B, Ws, OverLim) => { - last_end = i; - *cont = it(&ss[slice_start..last_end]); - A - } - - (C, Cr, UnderLim) => { - last_start = i; - B - } - (C, Cr, OverLim) => { - *cont = it(&ss[slice_start..last_end]); - slice_start = i; - last_start = i; - last_end = i; - B - } - (C, Ws, OverLim) => { - *cont = it(&ss[slice_start..last_end]); - A - } - (C, Ws, UnderLim) => C, - }; - - *cont - }; - - ss.char_indices().all(|x| machine(&mut cont, x)); - - // Let the automaton 'run out' by supplying trailing whitespace - while cont && - match state { - B | C => true, - A => false, - } { - machine(&mut cont, (fake_i, ' ')); - fake_i += 1; - } - cont -} - -#[test] -fn test_split_within() { - fn t(s: &str, i: usize, u: &[String]) { - let mut v = Vec::new(); - each_split_within(s, i, |s| { - v.push(s.to_string()); - true - }); - assert!(v.iter().zip(u).all(|(a, b)| a == b)); - } - t("", 0, 
&[]); - t("", 15, &[]); - t("hello", 15, &["hello".to_string()]); - t("\nMary had a little lamb\nLittle lamb\n", - 15, - &["Mary had a".to_string(), "little lamb".to_string(), "Little lamb".to_string()]); - t("\nMary had a little lamb\nLittle lamb\n", - ::std::usize::MAX, - &["Mary had a little lamb\nLittle lamb".to_string()]); -} - -#[cfg(test)] -mod tests { - use super::*; - - use std::result::Result::{Err, Ok}; - use std::result; - - // Tests for reqopt - #[test] - fn test_reqopt() { - let long_args = vec!["--test=20".to_string()]; - let opts = vec![reqopt("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Ok(ref m) => { - assert!(m.opt_present("test")); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!(m.opt_present("t")); - assert_eq!(m.opt_str("t").unwrap(), "20"); - } - _ => { - panic!("test_reqopt failed (long arg)"); - } - } - let short_args = vec!["-t".to_string(), "20".to_string()]; - match getopts(&short_args, &opts) { - Ok(ref m) => { - assert!((m.opt_present("test"))); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!((m.opt_present("t"))); - assert_eq!(m.opt_str("t").unwrap(), "20"); - } - _ => { - panic!("test_reqopt failed (short arg)"); - } - } - } - - #[test] - fn test_reqopt_missing() { - let args = vec!["blah".to_string()]; - let opts = vec![reqopt("t", "test", "testing", "TEST")]; - let rs = getopts(&args, &opts); - match rs { - Err(OptionMissing(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_reqopt_no_arg() { - let long_args = vec!["--test".to_string()]; - let opts = vec![reqopt("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Err(ArgumentMissing(_)) => {} - _ => panic!(), - } - let short_args = vec!["-t".to_string()]; - match getopts(&short_args, &opts) { - Err(ArgumentMissing(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_reqopt_multi() { - let args = vec!["--test=20".to_string(), "-t".to_string(), "30".to_string()]; - let opts = 
vec![reqopt("t", "test", "testing", "TEST")]; - let rs = getopts(&args, &opts); - match rs { - Err(OptionDuplicated(_)) => {} - _ => panic!(), - } - } - - // Tests for optopt - #[test] - fn test_optopt() { - let long_args = vec!["--test=20".to_string()]; - let opts = vec![optopt("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Ok(ref m) => { - assert!(m.opt_present("test")); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!((m.opt_present("t"))); - assert_eq!(m.opt_str("t").unwrap(), "20"); - } - _ => panic!(), - } - let short_args = vec!["-t".to_string(), "20".to_string()]; - match getopts(&short_args, &opts) { - Ok(ref m) => { - assert!((m.opt_present("test"))); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!((m.opt_present("t"))); - assert_eq!(m.opt_str("t").unwrap(), "20"); - } - _ => panic!(), - } - } - - #[test] - fn test_optopt_missing() { - let args = vec!["blah".to_string()]; - let opts = vec![optopt("t", "test", "testing", "TEST")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert!(!m.opt_present("test")); - assert!(!m.opt_present("t")); - } - _ => panic!(), - } - } - - #[test] - fn test_optopt_no_arg() { - let long_args = vec!["--test".to_string()]; - let opts = vec![optopt("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Err(ArgumentMissing(_)) => {} - _ => panic!(), - } - let short_args = vec!["-t".to_string()]; - match getopts(&short_args, &opts) { - Err(ArgumentMissing(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_optopt_multi() { - let args = vec!["--test=20".to_string(), "-t".to_string(), "30".to_string()]; - let opts = vec![optopt("t", "test", "testing", "TEST")]; - let rs = getopts(&args, &opts); - match rs { - Err(OptionDuplicated(_)) => {} - _ => panic!(), - } - } - - // Tests for optflag - #[test] - fn test_optflag() { - let long_args = vec!["--test".to_string()]; - let opts = vec![optflag("t", "test", "testing")]; 
- let rs = getopts(&long_args, &opts); - match rs { - Ok(ref m) => { - assert!(m.opt_present("test")); - assert!(m.opt_present("t")); - } - _ => panic!(), - } - let short_args = vec!["-t".to_string()]; - match getopts(&short_args, &opts) { - Ok(ref m) => { - assert!(m.opt_present("test")); - assert!(m.opt_present("t")); - } - _ => panic!(), - } - } - - #[test] - fn test_optflag_missing() { - let args = vec!["blah".to_string()]; - let opts = vec![optflag("t", "test", "testing")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert!(!m.opt_present("test")); - assert!(!m.opt_present("t")); - } - _ => panic!(), - } - } - - #[test] - fn test_optflag_long_arg() { - let args = vec!["--test=20".to_string()]; - let opts = vec![optflag("t", "test", "testing")]; - let rs = getopts(&args, &opts); - match rs { - Err(UnexpectedArgument(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_optflag_multi() { - let args = vec!["--test".to_string(), "-t".to_string()]; - let opts = vec![optflag("t", "test", "testing")]; - let rs = getopts(&args, &opts); - match rs { - Err(OptionDuplicated(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_optflag_short_arg() { - let args = vec!["-t".to_string(), "20".to_string()]; - let opts = vec![optflag("t", "test", "testing")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - // The next variable after the flag is just a free argument - - assert!(m.free[0] == "20"); - } - _ => panic!(), - } - } - - // Tests for optflagmulti - #[test] - fn test_optflagmulti_short1() { - let args = vec!["-v".to_string()]; - let opts = vec![optflagmulti("v", "verbose", "verbosity")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert_eq!(m.opt_count("v"), 1); - } - _ => panic!(), - } - } - - #[test] - fn test_optflagmulti_short2a() { - let args = vec!["-v".to_string(), "-v".to_string()]; - let opts = vec![optflagmulti("v", "verbose", "verbosity")]; - let rs = getopts(&args, &opts); - match rs { - 
Ok(ref m) => { - assert_eq!(m.opt_count("v"), 2); - } - _ => panic!(), - } - } - - #[test] - fn test_optflagmulti_short2b() { - let args = vec!["-vv".to_string()]; - let opts = vec![optflagmulti("v", "verbose", "verbosity")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert_eq!(m.opt_count("v"), 2); - } - _ => panic!(), - } - } - - #[test] - fn test_optflagmulti_long1() { - let args = vec!["--verbose".to_string()]; - let opts = vec![optflagmulti("v", "verbose", "verbosity")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert_eq!(m.opt_count("verbose"), 1); - } - _ => panic!(), - } - } - - #[test] - fn test_optflagmulti_long2() { - let args = vec!["--verbose".to_string(), "--verbose".to_string()]; - let opts = vec![optflagmulti("v", "verbose", "verbosity")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert_eq!(m.opt_count("verbose"), 2); - } - _ => panic!(), - } - } - - #[test] - fn test_optflagmulti_mix() { - let args = vec!["--verbose".to_string(), - "-v".to_string(), - "-vv".to_string(), - "verbose".to_string()]; - let opts = vec![optflagmulti("v", "verbose", "verbosity")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert_eq!(m.opt_count("verbose"), 4); - assert_eq!(m.opt_count("v"), 4); - } - _ => panic!(), - } - } - - // Tests for optmulti - #[test] - fn test_optmulti() { - let long_args = vec!["--test=20".to_string()]; - let opts = vec![optmulti("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Ok(ref m) => { - assert!((m.opt_present("test"))); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!((m.opt_present("t"))); - assert_eq!(m.opt_str("t").unwrap(), "20"); - } - _ => panic!(), - } - let short_args = vec!["-t".to_string(), "20".to_string()]; - match getopts(&short_args, &opts) { - Ok(ref m) => { - assert!((m.opt_present("test"))); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!((m.opt_present("t"))); - 
assert_eq!(m.opt_str("t").unwrap(), "20"); - } - _ => panic!(), - } - } - - #[test] - fn test_optmulti_missing() { - let args = vec!["blah".to_string()]; - let opts = vec![optmulti("t", "test", "testing", "TEST")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert!(!m.opt_present("test")); - assert!(!m.opt_present("t")); - } - _ => panic!(), - } - } - - #[test] - fn test_optmulti_no_arg() { - let long_args = vec!["--test".to_string()]; - let opts = vec![optmulti("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Err(ArgumentMissing(_)) => {} - _ => panic!(), - } - let short_args = vec!["-t".to_string()]; - match getopts(&short_args, &opts) { - Err(ArgumentMissing(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_optmulti_multi() { - let args = vec!["--test=20".to_string(), "-t".to_string(), "30".to_string()]; - let opts = vec![optmulti("t", "test", "testing", "TEST")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert!(m.opt_present("test")); - assert_eq!(m.opt_str("test").unwrap(), "20"); - assert!(m.opt_present("t")); - assert_eq!(m.opt_str("t").unwrap(), "20"); - let pair = m.opt_strs("test"); - assert!(pair[0] == "20"); - assert!(pair[1] == "30"); - } - _ => panic!(), - } - } - - #[test] - fn test_unrecognized_option() { - let long_args = vec!["--untest".to_string()]; - let opts = vec![optmulti("t", "test", "testing", "TEST")]; - let rs = getopts(&long_args, &opts); - match rs { - Err(UnrecognizedOption(_)) => {} - _ => panic!(), - } - let short_args = vec!["-u".to_string()]; - match getopts(&short_args, &opts) { - Err(UnrecognizedOption(_)) => {} - _ => panic!(), - } - } - - #[test] - fn test_combined() { - let args = vec!["prog".to_string(), - "free1".to_string(), - "-s".to_string(), - "20".to_string(), - "free2".to_string(), - "--flag".to_string(), - "--long=30".to_string(), - "-f".to_string(), - "-m".to_string(), - "40".to_string(), - "-m".to_string(), - 
"50".to_string(), - "-n".to_string(), - "-A B".to_string(), - "-n".to_string(), - "-60 70".to_string()]; - let opts = vec![optopt("s", "something", "something", "SOMETHING"), - optflag("", "flag", "a flag"), - reqopt("", "long", "hi", "LONG"), - optflag("f", "", "another flag"), - optmulti("m", "", "mmmmmm", "YUM"), - optmulti("n", "", "nothing", "NOTHING"), - optopt("", "notpresent", "nothing to see here", "NOPE")]; - let rs = getopts(&args, &opts); - match rs { - Ok(ref m) => { - assert!(m.free[0] == "prog"); - assert!(m.free[1] == "free1"); - assert_eq!(m.opt_str("s").unwrap(), "20"); - assert!(m.free[2] == "free2"); - assert!((m.opt_present("flag"))); - assert_eq!(m.opt_str("long").unwrap(), "30"); - assert!((m.opt_present("f"))); - let pair = m.opt_strs("m"); - assert!(pair[0] == "40"); - assert!(pair[1] == "50"); - let pair = m.opt_strs("n"); - assert!(pair[0] == "-A B"); - assert!(pair[1] == "-60 70"); - assert!((!m.opt_present("notpresent"))); - } - _ => panic!(), - } - } - - #[test] - fn test_multi() { - let opts = vec![optopt("e", "", "encrypt", "ENCRYPT"), - optopt("", "encrypt", "encrypt", "ENCRYPT"), - optopt("f", "", "flag", "FLAG")]; - - let args_single = vec!["-e".to_string(), "foo".to_string()]; - let matches_single = &match getopts(&args_single, &opts) { - result::Result::Ok(m) => m, - result::Result::Err(_) => panic!(), - }; - assert!(matches_single.opts_present(&["e".to_string()])); - assert!(matches_single.opts_present(&["encrypt".to_string(), "e".to_string()])); - assert!(matches_single.opts_present(&["e".to_string(), "encrypt".to_string()])); - assert!(!matches_single.opts_present(&["encrypt".to_string()])); - assert!(!matches_single.opts_present(&["thing".to_string()])); - assert!(!matches_single.opts_present(&[])); - - assert_eq!(matches_single.opts_str(&["e".to_string()]).unwrap(), "foo"); - assert_eq!(matches_single.opts_str(&["e".to_string(), "encrypt".to_string()]).unwrap(), - "foo"); - 
assert_eq!(matches_single.opts_str(&["encrypt".to_string(), "e".to_string()]).unwrap(), - "foo"); - - let args_both = vec!["-e".to_string(), - "foo".to_string(), - "--encrypt".to_string(), - "foo".to_string()]; - let matches_both = &match getopts(&args_both, &opts) { - result::Result::Ok(m) => m, - result::Result::Err(_) => panic!(), - }; - assert!(matches_both.opts_present(&["e".to_string()])); - assert!(matches_both.opts_present(&["encrypt".to_string()])); - assert!(matches_both.opts_present(&["encrypt".to_string(), "e".to_string()])); - assert!(matches_both.opts_present(&["e".to_string(), "encrypt".to_string()])); - assert!(!matches_both.opts_present(&["f".to_string()])); - assert!(!matches_both.opts_present(&["thing".to_string()])); - assert!(!matches_both.opts_present(&[])); - - assert_eq!(matches_both.opts_str(&["e".to_string()]).unwrap(), "foo"); - assert_eq!(matches_both.opts_str(&["encrypt".to_string()]).unwrap(), - "foo"); - assert_eq!(matches_both.opts_str(&["e".to_string(), "encrypt".to_string()]).unwrap(), - "foo"); - assert_eq!(matches_both.opts_str(&["encrypt".to_string(), "e".to_string()]).unwrap(), - "foo"); - } - - #[test] - fn test_nospace() { - let args = vec!["-Lfoo".to_string(), "-M.".to_string()]; - let opts = vec![optmulti("L", "", "library directory", "LIB"), - optmulti("M", "", "something", "MMMM")]; - let matches = &match getopts(&args, &opts) { - result::Result::Ok(m) => m, - result::Result::Err(_) => panic!(), - }; - assert!(matches.opts_present(&["L".to_string()])); - assert_eq!(matches.opts_str(&["L".to_string()]).unwrap(), "foo"); - assert!(matches.opts_present(&["M".to_string()])); - assert_eq!(matches.opts_str(&["M".to_string()]).unwrap(), "."); - - } - - #[test] - fn test_nospace_conflict() { - let args = vec!["-vvLverbose".to_string(), "-v".to_string()]; - let opts = vec![optmulti("L", "", "library directory", "LIB"), - optflagmulti("v", "verbose", "Verbose")]; - let matches = &match getopts(&args, &opts) { - 
result::Result::Ok(m) => m, - result::Result::Err(e) => panic!("{}", e), - }; - assert!(matches.opts_present(&["L".to_string()])); - assert_eq!(matches.opts_str(&["L".to_string()]).unwrap(), "verbose"); - assert!(matches.opts_present(&["v".to_string()])); - assert_eq!(3, matches.opt_count("v")); - } - - #[test] - fn test_long_to_short() { - let mut short = Opt { - name: Name::Long("banana".to_string()), - hasarg: HasArg::Yes, - occur: Occur::Req, - aliases: Vec::new(), - }; - short.aliases = vec![Opt { - name: Name::Short('b'), - hasarg: HasArg::Yes, - occur: Occur::Req, - aliases: Vec::new(), - }]; - let verbose = reqopt("b", "banana", "some bananas", "VAL"); - - assert!(verbose.long_to_short() == short); - } - - #[test] - fn test_aliases_long_and_short() { - let opts = vec![optflagmulti("a", "apple", "Desc")]; - - let args = vec!["-a".to_string(), "--apple".to_string(), "-a".to_string()]; - - let matches = getopts(&args, &opts).unwrap(); - assert_eq!(3, matches.opt_count("a")); - assert_eq!(3, matches.opt_count("apple")); - } - - #[test] - fn test_usage() { - let optgroups = vec![reqopt("b", "banana", "Desc", "VAL"), - optopt("a", "012345678901234567890123456789", "Desc", "VAL"), - optflag("k", "kiwi", "Desc"), - optflagopt("p", "", "Desc", "VAL"), - optmulti("l", "", "Desc", "VAL")]; - - let expected = -"Usage: fruits - -Options: - -b --banana VAL Desc - -a --012345678901234567890123456789 VAL - Desc - -k --kiwi Desc - -p [VAL] Desc - -l VAL Desc -"; - - let generated_usage = usage("Usage: fruits", &optgroups); - - assert_eq!(generated_usage, expected); - } - - #[test] - fn test_usage_description_wrapping() { - // indentation should be 24 spaces - // lines wrap after 78: or rather descriptions wrap after 54 - - let optgroups = vec![optflag("k", - "kiwi", - // 54 - "This is a long description which won't be wrapped..+.."), - optflag("a", - "apple", - "This is a long description which _will_ be wrapped..+..")]; - - let expected = -"Usage: fruits - -Options: - -k 
--kiwi This is a long description which won't be wrapped..+.. - -a --apple This is a long description which _will_ be - wrapped..+.. -"; - - let usage = usage("Usage: fruits", &optgroups); - - assert!(usage == expected) - } - - #[test] - fn test_usage_description_multibyte_handling() { - let optgroups = vec![optflag("k", - "k\u{2013}w\u{2013}", - "The word kiwi is normally spelled with two i's"), - optflag("a", - "apple", - "This \u{201C}description\u{201D} has some characters that \ - could confuse the line wrapping; an apple costs 0.51€ in \ - some parts of Europe.")]; - - let expected = -"Usage: fruits - -Options: - -k --k–w– The word kiwi is normally spelled with two i's - -a --apple This “description” has some characters that could - confuse the line wrapping; an apple costs 0.51€ in - some parts of Europe. -"; - - let usage = usage("Usage: fruits", &optgroups); - - assert!(usage == expected) - } - - #[test] - fn test_short_usage() { - let optgroups = vec![reqopt("b", "banana", "Desc", "VAL"), - optopt("a", "012345678901234567890123456789", "Desc", "VAL"), - optflag("k", "kiwi", "Desc"), - optflagopt("p", "", "Desc", "VAL"), - optmulti("l", "", "Desc", "VAL")]; - - let expected = "Usage: fruits -b VAL [-a VAL] [-k] [-p [VAL]] [-l VAL]..".to_string(); - let generated_usage = short_usage("fruits", &optgroups); - - assert_eq!(generated_usage, expected); - } - - #[test] - fn test_args_with_equals() { - let args = vec!["--one".to_string(), "A=B".to_string(), - "--two=C=D".to_string()]; - let opts = vec![optopt("o", "one", "One", "INFO"), - optopt("t", "two", "Two", "INFO")]; - let matches = &match getopts(&args, &opts) { - result::Result::Ok(m) => m, - result::Result::Err(e) => panic!("{}", e) - }; - assert_eq!(matches.opts_str(&["o".to_string()]).unwrap(), "A=B"); - assert_eq!(matches.opts_str(&["t".to_string()]).unwrap(), "C=D"); - } -} diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs index cd893b9784ab..9fa48adebdf0 100644 --- 
a/src/libgraphviz/lib.rs +++ b/src/libgraphviz/lib.rs @@ -287,8 +287,8 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(allow(unused_variables), deny(warnings))))] -#![deny(warnings)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(str_escape)] use self::LabelText::*; @@ -420,7 +420,8 @@ impl<'a> Id<'a> { if !name.chars().all(|c| c.is_ascii_alphanumeric() || c == '_' ) { return Err(()); } - return Ok(Id { name: name }); + + Ok(Id { name }) } pub fn as_slice(&'a self) -> &'a str { @@ -533,10 +534,10 @@ impl<'a> LabelText<'a> { /// Renders text as string suitable for a label in a .dot file. /// This includes quotes or suitable delimiters. pub fn to_dot_string(&self) -> String { - match self { - &LabelStr(ref s) => format!("\"{}\"", s.escape_default()), - &EscStr(ref s) => format!("\"{}\"", LabelText::escape_str(&s)), - &HtmlStr(ref s) => format!("<{}>", s), + match *self { + LabelStr(ref s) => format!("\"{}\"", s.escape_default()), + EscStr(ref s) => format!("\"{}\"", LabelText::escape_str(&s)), + HtmlStr(ref s) => format!("<{}>", s), } } @@ -711,6 +712,12 @@ impl<'a> IntoCow<'a, str> for &'a str { } } +impl<'a> IntoCow<'a, str> for Cow<'a, str> { + fn into_cow(self) -> Cow<'a, str> { + self + } +} + impl<'a, T: Clone> IntoCow<'a, [T]> for Vec { fn into_cow(self) -> Cow<'a, [T]> { Cow::Owned(self) diff --git a/src/liblibc b/src/liblibc index 2b4cd1016bdb..6bdbf5dc9374 160000 --- a/src/liblibc +++ b/src/liblibc @@ -1 +1 @@ -Subproject commit 2b4cd1016bdba92becb4f982a4dcb18fe6653bc4 +Subproject commit 6bdbf5dc937459bd10e6bc4dc52b0adbd8cf4358 diff --git a/src/libpanic_abort/Cargo.toml b/src/libpanic_abort/Cargo.toml index e0eac41f49ec..633d273b3b93 100644 --- a/src/libpanic_abort/Cargo.toml +++ b/src/libpanic_abort/Cargo.toml @@ -12,3 +12,4 @@ doc = false [dependencies] core = { path = "../libcore" } libc = { path = "../rustc/libc_shim" } +compiler_builtins = { path = 
"../rustc/compiler_builtins_shim" } diff --git a/src/libpanic_abort/lib.rs b/src/libpanic_abort/lib.rs index c3bd6a2bc187..da568fae70e1 100644 --- a/src/libpanic_abort/lib.rs +++ b/src/libpanic_abort/lib.rs @@ -19,18 +19,20 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] -#![deny(warnings)] #![panic_runtime] #![allow(unused_features)] #![feature(core_intrinsics)] #![feature(libc)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(panic_runtime)] #![feature(staged_api)] +#![feature(rustc_attrs)] // Rust's "try" function, but if we're aborting on panics we just call the // function as there's nothing else we need to do here. #[no_mangle] +#[rustc_std_internal_symbol] pub unsafe extern fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, _data_ptr: *mut usize, @@ -50,7 +52,8 @@ pub unsafe extern fn __rust_maybe_catch_panic(f: fn(*mut u8), // will kill us with an illegal instruction, which will do a good enough job for // now hopefully. #[no_mangle] -pub unsafe extern fn __rust_start_panic(_data: usize, _vtable: usize) -> u32 { +#[rustc_std_internal_symbol] +pub unsafe extern fn __rust_start_panic(_payload: usize) -> u32 { abort(); #[cfg(any(unix, target_os = "cloudabi"))] diff --git a/src/libpanic_unwind/Cargo.toml b/src/libpanic_unwind/Cargo.toml index a978ea16e9e7..74aaa4d5ae3d 100644 --- a/src/libpanic_unwind/Cargo.toml +++ b/src/libpanic_unwind/Cargo.toml @@ -14,3 +14,4 @@ alloc = { path = "../liballoc" } core = { path = "../libcore" } libc = { path = "../rustc/libc_shim" } unwind = { path = "../libunwind" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } diff --git a/src/libpanic_unwind/dummy.rs b/src/libpanic_unwind/dummy.rs new file mode 100644 index 000000000000..7150560b4a13 --- /dev/null +++ b/src/libpanic_unwind/dummy.rs @@ -0,0 +1,29 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Unwinding for wasm32 +//! +//! Right now we don't support this, so this is just stubs + +use alloc::boxed::Box; +use core::any::Any; +use core::intrinsics; + +pub fn payload() -> *mut u8 { + 0 as *mut u8 +} + +pub unsafe fn cleanup(_ptr: *mut u8) -> Box { + intrinsics::abort() +} + +pub unsafe fn panic(_data: Box) -> u32 { + intrinsics::abort() +} diff --git a/src/libpanic_unwind/dwarf/eh.rs b/src/libpanic_unwind/dwarf/eh.rs index 0c326ce37184..a24c65968913 100644 --- a/src/libpanic_unwind/dwarf/eh.rs +++ b/src/libpanic_unwind/dwarf/eh.rs @@ -48,8 +48,8 @@ pub const DW_EH_PE_indirect: u8 = 0x80; pub struct EHContext<'a> { pub ip: usize, // Current instruction pointer pub func_start: usize, // Address of the current function - pub get_text_start: &'a Fn() -> usize, // Get address of the code section - pub get_data_start: &'a Fn() -> usize, // Get address of the data section + pub get_text_start: &'a dyn Fn() -> usize, // Get address of the code section + pub get_data_start: &'a dyn Fn() -> usize, // Get address of the data section } pub enum EHAction { diff --git a/src/libpanic_unwind/emcc.rs b/src/libpanic_unwind/emcc.rs index 0e48e37c9235..87efc23abc81 100644 --- a/src/libpanic_unwind/emcc.rs +++ b/src/libpanic_unwind/emcc.rs @@ -29,20 +29,20 @@ pub fn payload() -> *mut u8 { ptr::null_mut() } -pub unsafe fn cleanup(ptr: *mut u8) -> Box { +pub unsafe fn cleanup(ptr: *mut u8) -> Box { assert!(!ptr.is_null()); let ex = ptr::read(ptr as *mut _); __cxa_free_exception(ptr as *mut _); ex } -pub unsafe fn panic(data: Box) -> u32 { +pub unsafe fn panic(data: Box) -> u32 { let sz = mem::size_of_val(&data); let exception = __cxa_allocate_exception(sz); if exception 
== ptr::null_mut() { return uw::_URC_FATAL_PHASE1_ERROR as u32; } - let exception = exception as *mut Box; + let exception = exception as *mut Box; ptr::write(exception, data); __cxa_throw(exception as *mut _, ptr::null_mut(), ptr::null_mut()); diff --git a/src/libpanic_unwind/gcc.rs b/src/libpanic_unwind/gcc.rs index 63e44f71a3a8..11ebcf5c01ea 100644 --- a/src/libpanic_unwind/gcc.rs +++ b/src/libpanic_unwind/gcc.rs @@ -67,10 +67,10 @@ use dwarf::eh::{self, EHContext, EHAction}; #[repr(C)] struct Exception { _uwe: uw::_Unwind_Exception, - cause: Option>, + cause: Option>, } -pub unsafe fn panic(data: Box) -> u32 { +pub unsafe fn panic(data: Box) -> u32 { let exception = Box::new(Exception { _uwe: uw::_Unwind_Exception { exception_class: rust_exception_class(), @@ -94,7 +94,7 @@ pub fn payload() -> *mut u8 { ptr::null_mut() } -pub unsafe fn cleanup(ptr: *mut u8) -> Box { +pub unsafe fn cleanup(ptr: *mut u8) -> Box { let my_ep = ptr as *mut Exception; let cause = (*my_ep).cause.take(); uw::_Unwind_DeleteException(ptr as *mut _); @@ -143,7 +143,7 @@ const UNWIND_DATA_REG: (i32, i32) = (24, 25); // I0, I1 // The personality routine for most of our targets, except ARM, which has a slightly different ABI // (however, iOS goes here as it uses SjLj unwinding). Also, the 64-bit Windows implementation // lives in seh64_gnu.rs -#[cfg(all(any(target_os = "ios", not(target_arch = "arm"))))] +#[cfg(all(any(target_os = "ios", target_os = "netbsd", not(target_arch = "arm"))))] #[lang = "eh_personality"] #[no_mangle] #[allow(unused)] @@ -184,7 +184,7 @@ unsafe extern "C" fn rust_eh_personality(version: c_int, // ARM EHABI personality routine. 
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf -#[cfg(all(target_arch = "arm", not(target_os = "ios")))] +#[cfg(all(target_arch = "arm", not(target_os = "ios"), not(target_os = "netbsd")))] #[lang = "eh_personality"] #[no_mangle] unsafe extern "C" fn rust_eh_personality(state: uw::_Unwind_State, @@ -286,7 +286,7 @@ unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) // See docs in the `unwind` module. #[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] #[lang = "eh_unwind_resume"] -#[unwind] +#[unwind(allowed)] unsafe extern "C" fn rust_eh_unwind_resume(panic_ctx: *mut u8) -> ! { uw::_Unwind_Resume(panic_ctx as *mut uw::_Unwind_Exception); } diff --git a/src/libpanic_unwind/lib.rs b/src/libpanic_unwind/lib.rs index 92e40e8f26d4..9c3fc76c307a 100644 --- a/src/libpanic_unwind/lib.rs +++ b/src/libpanic_unwind/lib.rs @@ -28,17 +28,18 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] -#![deny(warnings)] +#![feature(allocator_api)] #![feature(alloc)] #![feature(core_intrinsics)] #![feature(lang_items)] #![feature(libc)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(panic_unwind)] #![feature(raw)] #![feature(staged_api)] +#![feature(std_internals)] #![feature(unwind_attributes)] -#![cfg_attr(target_env = "msvc", feature(raw))] #![panic_runtime] #![feature(panic_runtime)] @@ -48,40 +49,39 @@ extern crate libc; #[cfg(not(any(target_env = "msvc", all(windows, target_arch = "x86_64", target_env = "gnu"))))] extern crate unwind; +use alloc::boxed::Box; use core::intrinsics; use core::mem; use core::raw; +use core::panic::BoxMeUp; -// Rust runtime's startup objects depend on these symbols, so make them public. 
-#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] -pub use imp::eh_frame_registry::*; +#[macro_use] +mod macros; -// *-pc-windows-msvc -#[cfg(target_env = "msvc")] -#[path = "seh.rs"] -mod imp; - -// x86_64-pc-windows-gnu -#[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] -#[path = "seh64_gnu.rs"] -mod imp; - -// i686-pc-windows-gnu and all others -#[cfg(any(all(unix, not(target_os = "emscripten")), - target_os = "cloudabi", - target_os = "redox", - all(windows, target_arch = "x86", target_env = "gnu")))] -#[path = "gcc.rs"] -mod imp; - -// emscripten -#[cfg(target_os = "emscripten")] -#[path = "emcc.rs"] -mod imp; - -#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))] -#[path = "wasm32.rs"] -mod imp; +cfg_if! { + if #[cfg(target_os = "emscripten")] { + #[path = "emcc.rs"] + mod imp; + } else if #[cfg(target_arch = "wasm32")] { + #[path = "dummy.rs"] + mod imp; + } else if #[cfg(all(target_env = "msvc", target_arch = "aarch64"))] { + #[path = "dummy.rs"] + mod imp; + } else if #[cfg(target_env = "msvc")] { + #[path = "seh.rs"] + mod imp; + } else if #[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] { + #[path = "seh64_gnu.rs"] + mod imp; + } else { + // Rust runtime's startup objects depend on these symbols, so make them public. + #[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] + pub use imp::eh_frame_registry::*; + #[path = "gcc.rs"] + mod imp; + } +} mod dwarf; mod windows; @@ -112,10 +112,8 @@ pub unsafe extern "C" fn __rust_maybe_catch_panic(f: fn(*mut u8), // Entry point for raising an exception, just delegates to the platform-specific // implementation. 
#[no_mangle] -#[unwind] -pub unsafe extern "C" fn __rust_start_panic(data: usize, vtable: usize) -> u32 { - imp::panic(mem::transmute(raw::TraitObject { - data: data as *mut (), - vtable: vtable as *mut (), - })) +#[unwind(allowed)] +pub unsafe extern "C" fn __rust_start_panic(payload: usize) -> u32 { + let payload = payload as *mut &mut dyn BoxMeUp; + imp::panic(Box::from_raw((*payload).box_me_up())) } diff --git a/src/libpanic_unwind/macros.rs b/src/libpanic_unwind/macros.rs new file mode 100644 index 000000000000..6ea79dc862bd --- /dev/null +++ b/src/libpanic_unwind/macros.rs @@ -0,0 +1,45 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// A macro for defining `#[cfg]` if-else statements. +/// +/// This is similar to the `if/elif` C preprocessor macro by allowing definition +/// of a cascade of `#[cfg]` cases, emitting the implementation which matches +/// first. +/// +/// This allows you to conveniently provide a long list `#[cfg]`'d blocks of code +/// without having to rewrite each clause multiple times. +macro_rules! cfg_if { + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + __cfg_if_items! { + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + } +} + +macro_rules! __cfg_if_items { + (($($not:meta,)*) ; ) => {}; + (($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { + __cfg_if_apply! { cfg(all(not(any($($not),*)), $($m,)*)), $($it)* } + __cfg_if_items! { ($($not,)* $($m,)*) ; $($rest)* } + } +} + +macro_rules! 
__cfg_if_apply { + ($m:meta, $($it:item)*) => { + $(#[$m] $it)* + } +} diff --git a/src/libpanic_unwind/seh.rs b/src/libpanic_unwind/seh.rs index 589642149300..8cbc4a623fa0 100644 --- a/src/libpanic_unwind/seh.rs +++ b/src/libpanic_unwind/seh.rs @@ -43,7 +43,7 @@ //! throwing. Note that throwing an exception into Rust is undefined behavior //! anyway, so this should be fine. //! * We've got some data to transmit across the unwinding boundary, -//! specifically a `Box`. Like with Dwarf exceptions +//! specifically a `Box`. Like with Dwarf exceptions //! these two pointers are stored as a payload in the exception itself. On //! MSVC, however, there's no need for an extra heap allocation because the //! call stack is preserved while filter functions are being executed. This @@ -243,7 +243,7 @@ static mut TYPE_DESCRIPTOR2: _TypeDescriptor = _TypeDescriptor { name: imp::NAME2, }; -pub unsafe fn panic(data: Box) -> u32 { +pub unsafe fn panic(data: Box) -> u32 { use core::intrinsics::atomic_store; // _CxxThrowException executes entirely on this stack frame, so there's no @@ -297,7 +297,7 @@ pub fn payload() -> [u64; 2] { [0; 2] } -pub unsafe fn cleanup(payload: [u64; 2]) -> Box { +pub unsafe fn cleanup(payload: [u64; 2]) -> Box { mem::transmute(raw::TraitObject { data: payload[0] as *mut _, vtable: payload[1] as *mut _, diff --git a/src/libpanic_unwind/seh64_gnu.rs b/src/libpanic_unwind/seh64_gnu.rs index 0a9fa7d9a80b..0b08e54c6739 100644 --- a/src/libpanic_unwind/seh64_gnu.rs +++ b/src/libpanic_unwind/seh64_gnu.rs @@ -37,10 +37,10 @@ const RUST_PANIC: c::DWORD = ETYPE | (1 << 24) | MAGIC; #[repr(C)] struct PanicData { - data: Box, + data: Box, } -pub unsafe fn panic(data: Box) -> u32 { +pub unsafe fn panic(data: Box) -> u32 { let panic_ctx = Box::new(PanicData { data: data }); let params = [Box::into_raw(panic_ctx) as c::ULONG_PTR]; c::RaiseException(RUST_PANIC, @@ -54,7 +54,7 @@ pub fn payload() -> *mut u8 { ptr::null_mut() } -pub unsafe fn cleanup(ptr: *mut u8) -> Box 
{ +pub unsafe fn cleanup(ptr: *mut u8) -> Box { let panic_ctx = Box::from_raw(ptr as *mut PanicData); return panic_ctx.data; } @@ -108,7 +108,7 @@ unsafe extern "C" fn rust_eh_personality(exceptionRecord: *mut c::EXCEPTION_RECO } #[lang = "eh_unwind_resume"] -#[unwind] +#[unwind(allowed)] unsafe extern "C" fn rust_eh_unwind_resume(panic_ctx: c::LPVOID) -> ! { let params = [panic_ctx as c::ULONG_PTR]; c::RaiseException(RUST_PANIC, diff --git a/src/libpanic_unwind/wasm32.rs b/src/libpanic_unwind/wasm32.rs deleted file mode 100644 index 8aed61b3c385..000000000000 --- a/src/libpanic_unwind/wasm32.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Unwinding for wasm32 -//! -//! 
Right now we don't support this, so this is just stubs - -use alloc::boxed::Box; -use core::any::Any; -use core::intrinsics; - -pub fn payload() -> *mut u8 { - 0 as *mut u8 -} - -pub unsafe fn cleanup(_ptr: *mut u8) -> Box { - intrinsics::abort() -} - -pub unsafe fn panic(_data: Box) -> u32 { - intrinsics::abort() -} diff --git a/src/libpanic_unwind/windows.rs b/src/libpanic_unwind/windows.rs index a7e90071ceae..5f1dda36a889 100644 --- a/src/libpanic_unwind/windows.rs +++ b/src/libpanic_unwind/windows.rs @@ -79,18 +79,18 @@ pub enum EXCEPTION_DISPOSITION { pub use self::EXCEPTION_DISPOSITION::*; extern "system" { - #[unwind] + #[unwind(allowed)] pub fn RaiseException(dwExceptionCode: DWORD, dwExceptionFlags: DWORD, nNumberOfArguments: DWORD, lpArguments: *const ULONG_PTR); - #[unwind] + #[unwind(allowed)] pub fn RtlUnwindEx(TargetFrame: LPVOID, TargetIp: LPVOID, ExceptionRecord: *const EXCEPTION_RECORD, ReturnValue: LPVOID, OriginalContext: *const CONTEXT, HistoryTable: *const UNWIND_HISTORY_TABLE); - #[unwind] + #[unwind(allowed)] pub fn _CxxThrowException(pExceptionObject: *mut c_void, pThrowInfo: *mut u8); } diff --git a/src/libproc_macro/Cargo.toml b/src/libproc_macro/Cargo.toml index cfd83e348a8e..c1b2622520b1 100644 --- a/src/libproc_macro/Cargo.toml +++ b/src/libproc_macro/Cargo.toml @@ -11,3 +11,4 @@ crate-type = ["dylib"] syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } rustc_errors = { path = "../librustc_errors" } +rustc_data_structures = { path = "../librustc_data_structures" } diff --git a/src/libproc_macro/diagnostic.rs b/src/libproc_macro/diagnostic.rs index c39aec896e6b..af7790164183 100644 --- a/src/libproc_macro/diagnostic.rs +++ b/src/libproc_macro/diagnostic.rs @@ -10,11 +10,13 @@ use Span; -use rustc_errors as rustc; +use rustc_errors as errors; +use syntax_pos::MultiSpan; /// An enum representing a diagnostic level. 
-#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_diagnostic", issue = "38356")] #[derive(Copy, Clone, Debug)] +#[non_exhaustive] pub enum Level { /// An error. Error, @@ -24,13 +26,11 @@ pub enum Level { Note, /// A help message. Help, - #[doc(hidden)] - __Nonexhaustive, } /// A structure representing a diagnostic message and associated children /// messages. -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_diagnostic", issue = "38356")] #[derive(Clone, Debug)] pub struct Diagnostic { level: Level, @@ -43,7 +43,7 @@ macro_rules! diagnostic_child_methods { ($spanned:ident, $regular:ident, $level:expr) => ( /// Add a new child diagnostic message to `self` with the level /// identified by this methods name with the given `span` and `message`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn $spanned>(mut self, span: Span, message: T) -> Diagnostic { self.children.push(Diagnostic::spanned(span, $level, message)); self @@ -51,7 +51,7 @@ macro_rules! diagnostic_child_methods { /// Add a new child diagnostic message to `self` with the level /// identified by this method's name with the given `message`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn $regular>(mut self, message: T) -> Diagnostic { self.children.push(Diagnostic::new($level, message)); self @@ -61,7 +61,7 @@ macro_rules! diagnostic_child_methods { impl Diagnostic { /// Create a new diagnostic with the given `level` and `message`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn new>(level: Level, message: T) -> Diagnostic { Diagnostic { level: level, @@ -73,7 +73,7 @@ impl Diagnostic { /// Create a new diagnostic with the given `level` and `message` pointing to /// the given `span`. 
- #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn spanned>(span: Span, level: Level, message: T) -> Diagnostic { Diagnostic { level: level, @@ -89,46 +89,29 @@ impl Diagnostic { diagnostic_child_methods!(span_help, help, Level::Help); /// Returns the diagnostic `level` for `self`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn level(&self) -> Level { self.level } /// Emit the diagnostic. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn emit(self) { - ::__internal::with_sess(move |(sess, _)| { - let handler = &sess.span_diagnostic; - let level = __internal::level_to_internal_level(self.level); - let mut diag = rustc::DiagnosticBuilder::new(handler, level, &*self.message); + let level = self.level.to_internal(); + let mut diag = errors::Diagnostic::new(level, &*self.message); - if let Some(span) = self.span { - diag.set_span(span.0); - } + if let Some(span) = self.span { + diag.set_span(span.0); + } - for child in self.children { - let span = child.span.map(|s| s.0); - let level = __internal::level_to_internal_level(child.level); - diag.sub(level, &*child.message, span); - } + for child in self.children { + let span = child.span.map_or(MultiSpan::new(), |s| s.0.into()); + let level = child.level.to_internal(); + diag.sub(level, &*child.message, span, None); + } - diag.emit(); + ::__internal::with_sess(move |sess, _| { + errors::DiagnosticBuilder::new_diagnostic(&sess.span_diagnostic, diag).emit(); }); } } - -#[unstable(feature = "proc_macro_internals", issue = "27812")] -#[doc(hidden)] -pub mod __internal { - use super::{Level, rustc}; - - pub fn level_to_internal_level(level: Level) -> rustc::Level { - match level { - Level::Error => rustc::Level::Error, - Level::Warning => rustc::Level::Warning, - Level::Note => rustc::Level::Note, 
- Level::Help => rustc::Level::Help, - Level::__Nonexhaustive => unreachable!("Level::__Nonexhaustive") - } - } -} diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index b9e816baac0d..b54054752eaf 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -11,20 +11,17 @@ //! A support library for macro authors when defining new macros. //! //! This library, provided by the standard distribution, provides the types -//! consumed in the interfaces of procedurally defined macro definitions. -//! Currently the primary use of this crate is to provide the ability to define -//! new custom derive modes through `#[proc_macro_derive]`. +//! consumed in the interfaces of procedurally defined macro definitions such as +//! function-like macros `#[proc_macro]`, macro attribures `#[proc_macro_attribute]` and +//! custom derive attributes`#[proc_macro_derive]`. //! -//! Note that this crate is intentionally very bare-bones currently. The main -//! type, `TokenStream`, only supports `fmt::Display` and `FromStr` -//! implementations, indicating that it can only go to and come from a string. +//! Note that this crate is intentionally bare-bones currently. //! This functionality is intended to be expanded over time as more surface //! area for macro authors is stabilized. //! //! See [the book](../book/first-edition/procedural-macros.html) for more. 
#![stable(feature = "proc_macro_lib", since = "1.15.0")] -#![deny(warnings)] #![deny(missing_docs)] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", @@ -34,47 +31,59 @@ test(no_crate_inject, attr(deny(warnings))), test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))))] -#![feature(i128_type)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(rustc_private)] #![feature(staged_api)] #![feature(lang_items)] +#![feature(optin_builtin_traits)] +#![feature(non_exhaustive)] + +#![recursion_limit="256"] -#[macro_use] extern crate syntax; extern crate syntax_pos; extern crate rustc_errors; +extern crate rustc_data_structures; + +#[unstable(feature = "proc_macro_internals", issue = "27812")] +#[doc(hidden)] +pub mod rustc; mod diagnostic; -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub use diagnostic::{Diagnostic, Level}; use std::{ascii, fmt, iter}; -use std::rc::Rc; +use std::path::PathBuf; +use rustc_data_structures::sync::Lrc; use std::str::FromStr; -use syntax::ast; use syntax::errors::DiagnosticBuilder; use syntax::parse::{self, token}; use syntax::symbol::Symbol; use syntax::tokenstream; -use syntax_pos::DUMMY_SP; -use syntax_pos::{FileMap, Pos, SyntaxContext, FileName}; -use syntax_pos::hygiene::Mark; +use syntax_pos::{FileMap, Pos, FileName}; /// The main type provided by this crate, representing an abstract stream of -/// tokens. +/// tokens, or, more specifically, a sequence of token trees. +/// The type provide interfaces for iterating over those token trees and, conversely, +/// collecting a number of token trees into one stream. /// -/// This is both the input and output of `#[proc_macro_derive]` definitions. -/// Currently it's required to be a list of valid Rust items, but this -/// restriction may be lifted in the future. 
+/// This is both the input and output of `#[proc_macro]`, `#[proc_macro_attribute]` +/// and `#[proc_macro_derive]` definitions. /// /// The API of this type is intentionally bare-bones, but it'll be expanded over /// time! #[stable(feature = "proc_macro_lib", since = "1.15.0")] -#[derive(Clone, Debug)] +#[derive(Clone)] pub struct TokenStream(tokenstream::TokenStream); +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl !Send for TokenStream {} +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl !Sync for TokenStream {} + /// Error returned from `TokenStream::from_str`. #[stable(feature = "proc_macro_lib", since = "1.15.0")] #[derive(Debug)] @@ -82,26 +91,48 @@ pub struct LexError { _inner: (), } +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl !Send for LexError {} +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl !Sync for LexError {} + +impl TokenStream { + /// Returns an empty `TokenStream` containing no token trees. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn new() -> TokenStream { + TokenStream(tokenstream::TokenStream::empty()) + } + + /// Checks if this `TokenStream` is empty. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// Attempts to break the string into tokens and parse those tokens into a token stream. +/// May fail for a number of reasons, for example, if the string contains unbalanced delimiters +/// or characters not existing in the language. +/// All tokens in the parsed stream get `Span::call_site()` spans. +/// +/// NOTE: Some errors may cause panics instead of returning `LexError`. We reserve the right to +/// change these errors into `LexError`s later. 
#[stable(feature = "proc_macro_lib", since = "1.15.0")] impl FromStr for TokenStream { type Err = LexError; fn from_str(src: &str) -> Result { - __internal::with_sess(|(sess, mark)| { - let src = src.to_string(); - let name = FileName::ProcMacroSourceCode; - let expn_info = mark.expn_info().unwrap(); - let call_site = expn_info.call_site; - // notify the expansion info that it is unhygienic - let mark = Mark::fresh(mark); - mark.set_expn_info(expn_info); - let span = call_site.with_ctxt(SyntaxContext::empty().apply_mark(mark)); - let stream = parse::parse_stream_from_source_str(name, src, sess, Some(span)); - Ok(__internal::token_stream_wrap(stream)) + __internal::with_sess(|sess, data| { + Ok(__internal::token_stream_wrap(parse::parse_stream_from_source_str( + FileName::ProcMacroSourceCode, src.to_string(), sess, Some(data.call_site.0) + ))) }) } } +/// Prints the token stream as a string that is supposed to be losslessly convertible back +/// into the same token stream (modulo spans), except for possibly `TokenTree::Group`s +/// with `Delimiter::None` delimiters and negative numeric literals. #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl fmt::Display for TokenStream { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -109,13 +140,126 @@ impl fmt::Display for TokenStream { } } +/// Prints token in a form convenient for debugging. +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl fmt::Debug for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str("TokenStream ")?; + f.debug_list().entries(self.clone()).finish() + } +} + +#[unstable(feature = "proc_macro_quote", issue = "38356")] +pub use quote::{quote, quote_span}; + +/// Creates a token stream containing a single token tree. 
+ #[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl From for TokenStream { + fn from(tree: TokenTree) -> TokenStream { + TokenStream(tree.to_internal()) + } +} + +/// Collects a number of token trees into a single stream. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl iter::FromIterator for TokenStream { + fn from_iter>(trees: I) -> Self { + trees.into_iter().map(TokenStream::from).collect() + } +} + +/// A "flattening" operation on token streams, collects token trees +/// from multiple token streams into a single stream. +#[stable(feature = "proc_macro_lib", since = "1.15.0")] +impl iter::FromIterator for TokenStream { + fn from_iter>(streams: I) -> Self { + let mut builder = tokenstream::TokenStreamBuilder::new(); + for stream in streams { + builder.push(stream.0); + } + TokenStream(builder.build()) + } +} + +#[stable(feature = "token_stream_extend", since = "1.30.0")] +impl Extend for TokenStream { + fn extend>(&mut self, trees: I) { + self.extend(trees.into_iter().map(TokenStream::from)); + } +} + +#[stable(feature = "token_stream_extend", since = "1.30.0")] +impl Extend for TokenStream { + fn extend>(&mut self, streams: I) { + self.0.extend(streams.into_iter().map(|stream| stream.0)); + } +} + +/// Public implementation details for the `TokenStream` type, such as iterators. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +pub mod token_stream { + use syntax::tokenstream; + use {TokenTree, TokenStream, Delimiter}; + + /// An iterator over `TokenStream`'s `TokenTree`s. + /// The iteration is "shallow", e.g. the iterator doesn't recurse into delimited groups, + /// and returns whole groups as token trees. 
+ #[derive(Clone)] + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub struct IntoIter { + cursor: tokenstream::Cursor, + stack: Vec, + } + + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + impl Iterator for IntoIter { + type Item = TokenTree; + + fn next(&mut self) -> Option { + loop { + let tree = self.stack.pop().or_else(|| { + let next = self.cursor.next_as_stream()?; + Some(TokenTree::from_internal(next, &mut self.stack)) + })?; + // HACK: The condition "dummy span + group with empty delimiter" represents an AST + // fragment approximately converted into a token stream. This may happen, for + // example, with inputs to proc macro attributes, including derives. Such "groups" + // need to flattened during iteration over stream's token trees. + // Eventually this needs to be removed in favor of keeping original token trees + // and not doing the roundtrip through AST. + if tree.span().0.is_dummy() { + if let TokenTree::Group(ref group) = tree { + if group.delimiter() == Delimiter::None { + self.cursor.insert(group.stream.clone().0); + continue + } + } + } + return Some(tree); + } + } + } + + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + impl IntoIterator for TokenStream { + type Item = TokenTree; + type IntoIter = IntoIter; + + fn into_iter(self) -> IntoIter { + IntoIter { cursor: self.0.trees(), stack: Vec::new() } + } + } +} + /// `quote!(..)` accepts arbitrary tokens and expands into a `TokenStream` describing the input. /// For example, `quote!(a + b)` will produce a expression, that, when evaluated, constructs -/// the `TokenStream` `[Word("a"), Op('+', Alone), Word("b")]`. +/// the `TokenStream` `[Ident("a"), Punct('+', Alone), Ident("b")]`. /// /// Unquoting is done with `$`, and works by taking the single next ident as the unquoted term. /// To quote `$` itself, use `$$`. 
-#[unstable(feature = "proc_macro", issue = "38356")] +/// +/// This is a dummy macro, the actual implementation is in `quote::quote`.` +#[unstable(feature = "proc_macro_quote", issue = "38356")] #[macro_export] macro_rules! quote { () => {} } @@ -123,83 +267,21 @@ macro_rules! quote { () => {} } #[doc(hidden)] mod quote; -#[unstable(feature = "proc_macro", issue = "38356")] -impl From for TokenStream { - fn from(tree: TokenTree) -> TokenStream { - TokenStream(tree.to_internal()) - } -} - -#[unstable(feature = "proc_macro", issue = "38356")] -impl From for TokenStream { - fn from(kind: TokenNode) -> TokenStream { - TokenTree::from(kind).into() - } -} - -#[unstable(feature = "proc_macro", issue = "38356")] -impl> iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut builder = tokenstream::TokenStreamBuilder::new(); - for stream in streams { - builder.push(stream.into().0); - } - TokenStream(builder.build()) - } -} - -#[unstable(feature = "proc_macro", issue = "38356")] -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - TokenTreeIter { cursor: self.0.trees(), next: None } - } -} - -impl TokenStream { - /// Returns an empty `TokenStream`. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn empty() -> TokenStream { - TokenStream(tokenstream::TokenStream::empty()) - } - - /// Checks if this `TokenStream` is empty. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } -} - /// A region of source code, along with macro expansion information. -#[unstable(feature = "proc_macro", issue = "38356")] -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +#[derive(Copy, Clone)] pub struct Span(syntax_pos::Span); -impl Span { - /// A span that resolves at the macro definition site. 
- #[unstable(feature = "proc_macro", issue = "38356")] - pub fn def_site() -> Span { - ::__internal::with_sess(|(_, mark)| { - let call_site = mark.expn_info().unwrap().call_site; - Span(call_site.with_ctxt(SyntaxContext::empty().apply_mark(mark))) - }) - } -} - -/// Quote a `Span` into a `TokenStream`. -/// This is needed to implement a custom quoter. -#[unstable(feature = "proc_macro", issue = "38356")] -pub fn quote_span(span: Span) -> TokenStream { - quote::Quote::quote(span) -} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Send for Span {} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Sync for Span {} macro_rules! diagnostic_method { ($name:ident, $level:expr) => ( /// Create a new `Diagnostic` with the given `message` at the span /// `self`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_diagnostic", issue = "38356")] pub fn $name>(self, message: T) -> Diagnostic { Diagnostic::spanned(self, $level, message) } @@ -207,14 +289,23 @@ macro_rules! diagnostic_method { } impl Span { + /// A span that resolves at the macro definition site. + #[unstable(feature = "proc_macro_span", issue = "38356")] + pub fn def_site() -> Span { + ::__internal::with_sess(|_, data| data.def_site) + } + /// The span of the invocation of the current procedural macro. - #[unstable(feature = "proc_macro", issue = "38356")] + /// Identifiers created with this span will be resolved as if they were written + /// directly at the macro call location (call-site hygiene) and other code + /// at the macro call site will be able to refer to them as well. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn call_site() -> Span { - ::__internal::with_sess(|(_, mark)| Span(mark.expn_info().unwrap().call_site)) + ::__internal::with_sess(|_, data| data.call_site) } /// The original source file into which this span points. 
- #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn source_file(&self) -> SourceFile { SourceFile { filemap: __internal::lookup_char_pos(self.0.lo()).file, @@ -223,21 +314,21 @@ impl Span { /// The `Span` for the tokens in the previous macro expansion from which /// `self` was generated from, if any. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn parent(&self) -> Option { - self.0.ctxt().outer().expn_info().map(|i| Span(i.call_site)) + self.0.parent().map(Span) } /// The span for the origin source code that `self` was generated from. If /// this `Span` wasn't generated from other macro expansions then the return /// value is the same as `*self`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn source(&self) -> Span { Span(self.0.source_callsite()) } /// Get the starting line/column in the source file for this span. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn start(&self) -> LineColumn { let loc = __internal::lookup_char_pos(self.0.lo()); LineColumn { @@ -247,7 +338,7 @@ impl Span { } /// Get the ending line/column in the source file for this span. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn end(&self) -> LineColumn { let loc = __internal::lookup_char_pos(self.0.hi()); LineColumn { @@ -259,7 +350,7 @@ impl Span { /// Create a new span encompassing `self` and `other`. /// /// Returns `None` if `self` and `other` are from different files. 
- #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn join(&self, other: Span) -> Option { let self_loc = __internal::lookup_char_pos(self.0.lo()); let other_loc = __internal::lookup_char_pos(other.0.lo()); @@ -271,44 +362,71 @@ impl Span { /// Creates a new span with the same line/column information as `self` but /// that resolves symbols as though it were at `other`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn resolved_at(&self, other: Span) -> Span { Span(self.0.with_ctxt(other.0.ctxt())) } /// Creates a new span with the same name resolution behavior as `self` but /// with the line/column information of `other`. - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn located_at(&self, other: Span) -> Span { other.resolved_at(*self) } + /// Compares to spans to see if they're equal. + #[unstable(feature = "proc_macro_span", issue = "38356")] + pub fn eq(&self, other: &Span) -> bool { + self.0 == other.0 + } + diagnostic_method!(error, Level::Error); diagnostic_method!(warning, Level::Warning); diagnostic_method!(note, Level::Note); diagnostic_method!(help, Level::Help); } +/// Prints a span in a form convenient for debugging. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Debug for Span { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?} bytes({}..{})", + self.0.ctxt(), + self.0.lo().0, + self.0.hi().0) + } +} + /// A line-column pair representing the start or end of a `Span`. -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_span", issue = "38356")] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct LineColumn { /// The 1-indexed line in the source file on which the span starts or ends (inclusive). 
- #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub line: usize, /// The 0-indexed column (in UTF-8 characters) in the source file on which /// the span starts or ends (inclusive). - #[unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub column: usize } +#[unstable(feature = "proc_macro_span", issue = "38356")] +impl !Send for LineColumn {} +#[unstable(feature = "proc_macro_span", issue = "38356")] +impl !Sync for LineColumn {} + /// The source file of a given `Span`. -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_span", issue = "38356")] #[derive(Clone)] pub struct SourceFile { - filemap: Rc, + filemap: Lrc, } +#[unstable(feature = "proc_macro_span", issue = "38356")] +impl !Send for SourceFile {} +#[unstable(feature = "proc_macro_span", issue = "38356")] +impl !Sync for SourceFile {} + impl SourceFile { /// Get the path to this source file. /// @@ -316,18 +434,21 @@ impl SourceFile { /// If the code span associated with this `SourceFile` was generated by an external macro, this /// may not be an actual path on the filesystem. Use [`is_real`] to check. /// - /// Also note that even if `is_real` returns `true`, if `-Z remap-path-prefix-*` was passed on + /// Also note that even if `is_real` returns `true`, if `--remap-path-prefix` was passed on /// the command line, the path as given may not actually be valid. /// /// [`is_real`]: #method.is_real - # [unstable(feature = "proc_macro", issue = "38356")] - pub fn path(&self) -> &FileName { - &self.filemap.name + #[unstable(feature = "proc_macro_span", issue = "38356")] + pub fn path(&self) -> PathBuf { + match self.filemap.name { + FileName::Real(ref path) => path.clone(), + _ => PathBuf::from(self.filemap.name.to_string()) + } } /// Returns `true` if this source file is a real source file, and not generated by an external /// macro's expansion. 
- # [unstable(feature = "proc_macro", issue = "38356")] + #[unstable(feature = "proc_macro_span", issue = "38356")] pub fn is_real(&self) -> bool { // This is a hack until intercrate spans are implemented and we can have real source files // for spans generated in external macros. @@ -336,415 +457,705 @@ impl SourceFile { } } -#[unstable(feature = "proc_macro", issue = "38356")] -impl AsRef for SourceFile { - fn as_ref(&self) -> &FileName { - self.path() - } -} -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_span", issue = "38356")] impl fmt::Debug for SourceFile { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("SourceFile") - .field("path", self.path()) + .field("path", &self.path()) .field("is_real", &self.is_real()) .finish() } } -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_span", issue = "38356")] impl PartialEq for SourceFile { fn eq(&self, other: &Self) -> bool { - Rc::ptr_eq(&self.filemap, &other.filemap) + Lrc::ptr_eq(&self.filemap, &other.filemap) } } -#[unstable(feature = "proc_macro", issue = "38356")] +#[unstable(feature = "proc_macro_span", issue = "38356")] impl Eq for SourceFile {} -#[unstable(feature = "proc_macro", issue = "38356")] -impl PartialEq for SourceFile { - fn eq(&self, other: &FileName) -> bool { - self.as_ref() == other - } -} - /// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). -#[unstable(feature = "proc_macro", issue = "38356")] -#[derive(Clone, Debug)] -pub struct TokenTree { - /// The `TokenTree`'s span - pub span: Span, - /// Description of the `TokenTree` - pub kind: TokenNode, +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +#[derive(Clone)] +pub enum TokenTree { + /// A token stream surrounded by bracket delimiters. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Group( + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Group + ), + /// An identifier. 
+ #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Ident( + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Ident + ), + /// A single punctuation character (`+`, `,`, `$`, etc.). + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Punct( + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Punct + ), + /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Literal( + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + Literal + ), } -#[unstable(feature = "proc_macro", issue = "38356")] -impl From for TokenTree { - fn from(kind: TokenNode) -> TokenTree { - TokenTree { span: Span::def_site(), kind: kind } +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Send for TokenTree {} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Sync for TokenTree {} + +impl TokenTree { + /// Returns the span of this tree, delegating to the `span` method of + /// the contained token or a delimited stream. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn span(&self) -> Span { + match *self { + TokenTree::Group(ref t) => t.span(), + TokenTree::Ident(ref t) => t.span(), + TokenTree::Punct(ref t) => t.span(), + TokenTree::Literal(ref t) => t.span(), + } + } + + /// Configures the span for *only this token*. + /// + /// Note that if this token is a `Group` then this method will not configure + /// the span of each of the internal tokens, this will simply delegate to + /// the `set_span` method of each variant. 
+ #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn set_span(&mut self, span: Span) { + match *self { + TokenTree::Group(ref mut t) => t.set_span(span), + TokenTree::Ident(ref mut t) => t.set_span(span), + TokenTree::Punct(ref mut t) => t.set_span(span), + TokenTree::Literal(ref mut t) => t.set_span(span), + } } } -#[unstable(feature = "proc_macro", issue = "38356")] +/// Prints token treee in a form convenient for debugging. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Debug for TokenTree { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Each of these has the name in the struct type in the derived debug, + // so don't bother with an extra layer of indirection + match *self { + TokenTree::Group(ref tt) => tt.fmt(f), + TokenTree::Ident(ref tt) => tt.fmt(f), + TokenTree::Punct(ref tt) => tt.fmt(f), + TokenTree::Literal(ref tt) => tt.fmt(f), + } + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl From for TokenTree { + fn from(g: Group) -> TokenTree { + TokenTree::Group(g) + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl From for TokenTree { + fn from(g: Ident) -> TokenTree { + TokenTree::Ident(g) + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl From for TokenTree { + fn from(g: Punct) -> TokenTree { + TokenTree::Punct(g) + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl From for TokenTree { + fn from(g: Literal) -> TokenTree { + TokenTree::Literal(g) + } +} + +/// Prints the token tree as a string that is supposed to be losslessly convertible back +/// into the same token tree (modulo spans), except for possibly `TokenTree::Group`s +/// with `Delimiter::None` delimiters and negative numeric literals. 
+#[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for TokenTree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - TokenStream::from(self.clone()).fmt(f) + match *self { + TokenTree::Group(ref t) => t.fmt(f), + TokenTree::Ident(ref t) => t.fmt(f), + TokenTree::Punct(ref t) => t.fmt(f), + TokenTree::Literal(ref t) => t.fmt(f), + } } } -/// Description of a `TokenTree` -#[derive(Clone, Debug)] -#[unstable(feature = "proc_macro", issue = "38356")] -pub enum TokenNode { - /// A delimited tokenstream. - Group(Delimiter, TokenStream), - /// A unicode identifier. - Term(Term), - /// A punctuation character (`+`, `,`, `$`, etc.). - Op(char, Spacing), - /// A literal character (`'a'`), string (`"hello"`), or number (`2.3`). - Literal(Literal), +/// A delimited token stream. +/// +/// A `Group` internally contains a `TokenStream` which is surrounded by `Delimiter`s. +#[derive(Clone)] +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +pub struct Group { + delimiter: Delimiter, + stream: TokenStream, + span: Span, } +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Send for Group {} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Sync for Group {} + /// Describes how a sequence of token trees is delimited. #[derive(Copy, Clone, Debug, PartialEq, Eq)] -#[unstable(feature = "proc_macro", issue = "38356")] +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub enum Delimiter { /// `( ... )` + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] Parenthesis, /// `{ ... }` + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] Brace, /// `[ ... ]` + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] Bracket, - /// An implicit delimiter, e.g. `$var`, where $var is `...`. + /// `Ø ... Ø` + /// An implicit delimiter, that may, for example, appear around tokens coming from a + /// "macro variable" `$var`. 
It is important to preserve operator priorities in cases like + /// `$var * 3` where `$var` is `1 + 2`. + /// Implicit delimiters may not survive roundtrip of a token stream through a string. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] None, } -/// An interned string. -#[derive(Copy, Clone, Debug)] -#[unstable(feature = "proc_macro", issue = "38356")] -pub struct Term(Symbol); - -impl Term { - /// Intern a string into a `Term`. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn intern(string: &str) -> Term { - Term(Symbol::intern(string)) +impl Group { + /// Creates a new `Group` with the given delimiter and token stream. + /// + /// This constructor will set the span for this group to + /// `Span::call_site()`. To change the span you can use the `set_span` + /// method below. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { + Group { + delimiter: delimiter, + stream: stream, + span: Span::call_site(), + } } - /// Get a reference to the interned string. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn as_str(&self) -> &str { - unsafe { &*(&*self.0.as_str() as *const str) } + /// Returns the delimiter of this `Group` + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn delimiter(&self) -> Delimiter { + self.delimiter + } + + /// Returns the `TokenStream` of tokens that are delimited in this `Group`. + /// + /// Note that the returned token stream does not include the delimiter + /// returned above. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn stream(&self) -> TokenStream { + self.stream.clone() + } + + /// Returns the span for the delimiters of this token stream, spanning the + /// entire `Group`. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn span(&self) -> Span { + self.span + } + + /// Configures the span for this `Group`'s delimiters, but not its internal + /// tokens. 
+ /// + /// This method will **not** set the span of all the internal tokens spanned + /// by this group, but rather it will only set the span of the delimiter + /// tokens at the level of the `Group`. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn set_span(&mut self, span: Span) { + self.span = span; } } -/// Whether an `Op` is either followed immediately by another `Op` or followed by whitespace. +/// Prints the group as a string that should be losslessly convertible back +/// into the same group (modulo spans), except for possibly `TokenTree::Group`s +/// with `Delimiter::None` delimiters. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Display for Group { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + TokenStream::from(TokenTree::from(self.clone())).fmt(f) + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Debug for Group { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Group") + .field("delimiter", &self.delimiter()) + .field("stream", &self.stream()) + .field("span", &self.span()) + .finish() + } +} + +/// A `Punct` is a single punctuation character like `+`, `-` or `#`. +/// +/// Multicharacter operators like `+=` are represented as two instances of `Punct` with different +/// forms of `Spacing` returned. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +#[derive(Clone)] +pub struct Punct { + ch: char, + spacing: Spacing, + span: Span, +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Send for Punct {} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Sync for Punct {} + +/// Whether a `Punct` is followed immediately by another `Punct` or +/// followed by another token or whitespace. #[derive(Copy, Clone, Debug, PartialEq, Eq)] -#[unstable(feature = "proc_macro", issue = "38356")] +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub enum Spacing { - /// e.g. `+` is `Alone` in `+ =`. + /// E.g.
`+` is `Alone` in `+ =`, `+ident` or `+()`. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] Alone, - /// e.g. `+` is `Joint` in `+=`. + /// E.g. `+` is `Joint` in `+=` or `'#`. + /// Additionally, single quote `'` can join with identifiers to form lifetimes `'ident`. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] Joint, } -/// A literal character (`'a'`), string (`"hello"`), or number (`2.3`). -#[derive(Clone, Debug)] -#[unstable(feature = "proc_macro", issue = "38356")] -pub struct Literal(token::Token); +impl Punct { + /// Creates a new `Punct` from the given character and spacing. + /// The `ch` argument must be a valid punctuation character permitted by the language, + /// otherwise the function will panic. + /// + /// The returned `Punct` will have the default span of `Span::call_site()` + /// which can be further configured with the `set_span` method below. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn new(ch: char, spacing: Spacing) -> Punct { + const LEGAL_CHARS: &[char] = &['=', '<', '>', '!', '~', '+', '-', '*', '/', '%', '^', + '&', '|', '@', '.', ',', ';', ':', '#', '$', '?', '\'']; + if !LEGAL_CHARS.contains(&ch) { + panic!("unsupported character `{:?}`", ch) + } + Punct { + ch: ch, + spacing: spacing, + span: Span::call_site(), + } + } -#[unstable(feature = "proc_macro", issue = "38356")] -impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - TokenTree { kind: TokenNode::Literal(self.clone()), span: Span(DUMMY_SP) }.fmt(f) + /// Returns the value of this punctuation character as `char`. 
+ #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn as_char(&self) -> char { + self.ch + } + + /// Returns the spacing of this punctuation character, indicating whether it's immediately + /// followed by another `Punct` in the token stream, so they can potentially be combined into + /// a multicharacter operator (`Joint`), or it's followed by some other token or whitespace + /// (`Alone`) so the operator has certainly ended. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn spacing(&self) -> Spacing { + self.spacing + } + + /// Returns the span for this punctuation character. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn span(&self) -> Span { + self.span + } + + /// Configure the span for this punctuation character. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn set_span(&mut self, span: Span) { + self.span = span; } } -macro_rules! int_literals { - ($($int_kind:ident),*) => {$( - /// Integer literal. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn $int_kind(n: $int_kind) -> Literal { - Literal::typed_integer(n as i128, stringify!($int_kind)) +/// Prints the punctuation character as a string that should be losslessly convertible +/// back into the same character. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Display for Punct { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + TokenStream::from(TokenTree::from(self.clone())).fmt(f) + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Debug for Punct { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Punct") + .field("ch", &self.as_char()) + .field("spacing", &self.spacing()) + .field("span", &self.span()) + .finish() + } +} + +/// An identifier (`ident`). 
+#[derive(Clone)] +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +pub struct Ident { + sym: Symbol, + span: Span, + is_raw: bool, +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Send for Ident {} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Sync for Ident {} + +impl Ident { + fn is_valid(string: &str) -> bool { + let mut chars = string.chars(); + if let Some(start) = chars.next() { + (start == '_' || start.is_xid_start()) + && chars.all(|cont| cont == '_' || cont.is_xid_continue()) + } else { + false } - )*} + } + + /// Creates a new `Ident` with the given `string` as well as the specified + /// `span`. + /// The `string` argument must be a valid identifier permitted by the + /// language, otherwise the function will panic. + /// + /// Note that `span`, currently in rustc, configures the hygiene information + /// for this identifier. + /// + /// As of this time `Span::call_site()` explicitly opts-in to "call-site" hygiene + /// meaning that identifiers created with this span will be resolved as if they were written + /// directly at the location of the macro call, and other code at the macro call site will be + /// able to refer to them as well. + /// + /// Later spans like `Span::def_site()` will allow to opt-in to "definition-site" hygiene + /// meaning that identifiers created with this span will be resolved at the location of the + /// macro definition and other code at the macro call site will not be able to refer to them. + /// + /// Due to the current importance of hygiene this constructor, unlike other + /// tokens, requires a `Span` to be specified at construction. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn new(string: &str, span: Span) -> Ident { + if !Ident::is_valid(string) { + panic!("`{:?}` is not a valid identifier", string) + } + Ident::new_maybe_raw(string, span, false) + } + + /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). 
+ #[unstable(feature = "proc_macro_raw_ident", issue = "38356")] + pub fn new_raw(string: &str, span: Span) -> Ident { + if !Ident::is_valid(string) { + panic!("`{:?}` is not a valid identifier", string) + } + Ident::new_maybe_raw(string, span, true) + } + + /// Returns the span of this `Ident`, encompassing the entire string returned + /// by `as_str`. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn span(&self) -> Span { + self.span + } + + /// Configures the span of this `Ident`, possibly changing its hygiene context. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn set_span(&mut self, span: Span) { + self.span = span; + } +} + +/// Prints the identifier as a string that should be losslessly convertible +/// back into the same identifier. +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Display for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + TokenStream::from(TokenTree::from(self.clone())).fmt(f) + } +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Debug for Ident { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Ident") + .field("ident", &self.to_string()) + .field("span", &self.span()) + .finish() + } +} + +/// A literal string (`"hello"`), byte string (`b"hello"`), +/// character (`'a'`), byte character (`b'a'`), an integer or floating point number +/// with or without a suffix (`1`, `1u8`, `2.3`, `2.3f32`). +/// Boolean literals like `true` and `false` do not belong here, they are `Ident`s. +// FIXME(eddyb) `Literal` should not expose internal `Debug` impls. +#[derive(Clone, Debug)] +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +pub struct Literal { + lit: token::Lit, + suffix: Option, + span: Span, +} + +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Send for Literal {} +#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl !Sync for Literal {} + +macro_rules! 
suffixed_int_literals { + ($($name:ident => $kind:ident,)*) => ($( + /// Creates a new suffixed integer literal with the specified value. + /// + /// This function will create an integer like `1u32` where the integer + /// value specified is the first part of the token and the integral is + /// also suffixed at the end. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal). + /// + /// Literals created through this method have the `Span::call_site()` + /// span by default, which can be configured with the `set_span` method + /// below. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn $name(n: $kind) -> Literal { + Literal { + lit: token::Lit::Integer(Symbol::intern(&n.to_string())), + suffix: Some(Symbol::intern(stringify!($kind))), + span: Span::call_site(), + } + } + )*) +} + +macro_rules! unsuffixed_int_literals { + ($($name:ident => $kind:ident,)*) => ($( + /// Creates a new unsuffixed integer literal with the specified value. + /// + /// This function will create an integer like `1` where the integer + /// value specified is the first part of the token. No suffix is + /// specified on this token, meaning that invocations like + /// `Literal::i8_unsuffixed(1)` are equivalent to + /// `Literal::u32_unsuffixed(1)`. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal). + /// + /// Literals created through this method have the `Span::call_site()` + /// span by default, which can be configured with the `set_span` method + /// below.
+ #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn $name(n: $kind) -> Literal { + Literal { + lit: token::Lit::Integer(Symbol::intern(&n.to_string())), + suffix: None, + span: Span::call_site(), + } + } + )*) } impl Literal { - /// Integer literal - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn integer(n: i128) -> Literal { - Literal(token::Literal(token::Lit::Integer(Symbol::intern(&n.to_string())), None)) + suffixed_int_literals! { + u8_suffixed => u8, + u16_suffixed => u16, + u32_suffixed => u32, + u64_suffixed => u64, + u128_suffixed => u128, + usize_suffixed => usize, + i8_suffixed => i8, + i16_suffixed => i16, + i32_suffixed => i32, + i64_suffixed => i64, + i128_suffixed => i128, + isize_suffixed => isize, } - int_literals!(u8, i8, u16, i16, u32, i32, u64, i64, usize, isize); - fn typed_integer(n: i128, kind: &'static str) -> Literal { - Literal(token::Literal(token::Lit::Integer(Symbol::intern(&n.to_string())), - Some(Symbol::intern(kind)))) + unsuffixed_int_literals! { + u8_unsuffixed => u8, + u16_unsuffixed => u16, + u32_unsuffixed => u32, + u64_unsuffixed => u64, + u128_unsuffixed => u128, + usize_unsuffixed => usize, + i8_unsuffixed => i8, + i16_unsuffixed => i16, + i32_unsuffixed => i32, + i64_unsuffixed => i64, + i128_unsuffixed => i128, + isize_unsuffixed => isize, } - /// Floating point literal. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn float(n: f64) -> Literal { + /// Creates a new unsuffixed floating-point literal. + /// + /// This constructor is similar to those like `Literal::i8_unsuffixed` where + /// the float's value is emitted directly into the token but no suffix is + /// used, so it may be inferred to be a `f64` later in the compiler. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal).
+ /// + /// # Panics + /// + /// This function requires that the specified float is finite, for + /// example if it is infinity or NaN this function will panic. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn f32_unsuffixed(n: f32) -> Literal { if !n.is_finite() { panic!("Invalid float literal {}", n); } - Literal(token::Literal(token::Lit::Float(Symbol::intern(&n.to_string())), None)) + Literal { + lit: token::Lit::Float(Symbol::intern(&n.to_string())), + suffix: None, + span: Span::call_site(), + } } - /// Floating point literal. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn f32(n: f32) -> Literal { + /// Creates a new suffixed floating-point literal. + /// + /// This constructor will create a literal like `1.0f32` where the value + /// specified is the preceding part of the token and `f32` is the suffix of + /// the token. This token will always be inferred to be an `f32` in the + /// compiler. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for + /// example if it is infinity or NaN this function will panic. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn f32_suffixed(n: f32) -> Literal { if !n.is_finite() { - panic!("Invalid f32 literal {}", n); + panic!("Invalid float literal {}", n); + } + Literal { + lit: token::Lit::Float(Symbol::intern(&n.to_string())), + suffix: Some(Symbol::intern("f32")), + span: Span::call_site(), } - Literal(token::Literal(token::Lit::Float(Symbol::intern(&n.to_string())), - Some(Symbol::intern("f32")))) } - /// Floating point literal. - #[unstable(feature = "proc_macro", issue = "38356")] - pub fn f64(n: f64) -> Literal { + /// Creates a new unsuffixed floating-point literal.
+ /// + /// This constructor is similar to those like `Literal::i8_unsuffixed` where + /// the float's value is emitted directly into the token but no suffix is + /// used, so it may be inferred to be a `f64` later in the compiler. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for + /// example if it is infinity or NaN this function will panic. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn f64_unsuffixed(n: f64) -> Literal { if !n.is_finite() { - panic!("Invalid f64 literal {}", n); + panic!("Invalid float literal {}", n); + } + Literal { + lit: token::Lit::Float(Symbol::intern(&n.to_string())), + suffix: None, + span: Span::call_site(), + } + } + + /// Creates a new suffixed floating-point literal. + /// + /// This constructor will create a literal like `1.0f64` where the value + /// specified is the preceding part of the token and `f64` is the suffix of + /// the token. This token will always be inferred to be an `f64` in the + /// compiler. + /// Literals created from negative numbers may not survive roundtrips through + /// `TokenStream` or strings and may be broken into two tokens (`-` and positive literal). + /// + /// # Panics + /// + /// This function requires that the specified float is finite, for + /// example if it is infinity or NaN this function will panic. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn f64_suffixed(n: f64) -> Literal { + if !n.is_finite() { + panic!("Invalid float literal {}", n); + } + Literal { + lit: token::Lit::Float(Symbol::intern(&n.to_string())), + suffix: Some(Symbol::intern("f64")), + span: Span::call_site(), } - Literal(token::Literal(token::Lit::Float(Symbol::intern(&n.to_string())), - Some(Symbol::intern("f64")))) } /// String literal.
- #[unstable(feature = "proc_macro", issue = "38356")] + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn string(string: &str) -> Literal { let mut escaped = String::new(); for ch in string.chars() { escaped.extend(ch.escape_debug()); } - Literal(token::Literal(token::Lit::Str_(Symbol::intern(&escaped)), None)) + Literal { + lit: token::Lit::Str_(Symbol::intern(&escaped)), + suffix: None, + span: Span::call_site(), + } } /// Character literal. - #[unstable(feature = "proc_macro", issue = "38356")] + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn character(ch: char) -> Literal { let mut escaped = String::new(); escaped.extend(ch.escape_unicode()); - Literal(token::Literal(token::Lit::Char(Symbol::intern(&escaped)), None)) + Literal { + lit: token::Lit::Char(Symbol::intern(&escaped)), + suffix: None, + span: Span::call_site(), + } } /// Byte string literal. - #[unstable(feature = "proc_macro", issue = "38356")] + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn byte_string(bytes: &[u8]) -> Literal { let string = bytes.iter().cloned().flat_map(ascii::escape_default) .map(Into::::into).collect::(); - Literal(token::Literal(token::Lit::ByteStr(Symbol::intern(&string)), None)) + Literal { + lit: token::Lit::ByteStr(Symbol::intern(&string)), + suffix: None, + span: Span::call_site(), + } + } + + /// Returns the span encompassing this literal. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn span(&self) -> Span { + self.span + } + + /// Configures the span associated for this literal. + #[stable(feature = "proc_macro_lib2", since = "1.29.0")] + pub fn set_span(&mut self, span: Span) { + self.span = span; } } -/// An iterator over `TokenTree`s. 
-#[derive(Clone)] -#[unstable(feature = "proc_macro", issue = "38356")] -pub struct TokenTreeIter { - cursor: tokenstream::Cursor, - next: Option, -} - -#[unstable(feature = "proc_macro", issue = "38356")] -impl Iterator for TokenTreeIter { - type Item = TokenTree; - - fn next(&mut self) -> Option { - loop { - let next = - unwrap_or!(self.next.take().or_else(|| self.cursor.next_as_stream()), return None); - let tree = TokenTree::from_internal(next, &mut self.next); - if tree.span.0 == DUMMY_SP { - if let TokenNode::Group(Delimiter::None, stream) = tree.kind { - self.cursor.insert(stream.0); - continue - } - } - return Some(tree); - } - } -} - -impl Delimiter { - fn from_internal(delim: token::DelimToken) -> Delimiter { - match delim { - token::Paren => Delimiter::Parenthesis, - token::Brace => Delimiter::Brace, - token::Bracket => Delimiter::Bracket, - token::NoDelim => Delimiter::None, - } - } - - fn to_internal(self) -> token::DelimToken { - match self { - Delimiter::Parenthesis => token::Paren, - Delimiter::Brace => token::Brace, - Delimiter::Bracket => token::Bracket, - Delimiter::None => token::NoDelim, - } - } -} - -impl TokenTree { - fn from_internal(stream: tokenstream::TokenStream, next: &mut Option) - -> TokenTree { - use syntax::parse::token::*; - - let (tree, is_joint) = stream.as_tree(); - let (mut span, token) = match tree { - tokenstream::TokenTree::Token(span, token) => (span, token), - tokenstream::TokenTree::Delimited(span, delimed) => { - let delimiter = Delimiter::from_internal(delimed.delim); - return TokenTree { - span: Span(span), - kind: TokenNode::Group(delimiter, TokenStream(delimed.tts.into())), - }; - } - }; - - let op_kind = if is_joint { Spacing::Joint } else { Spacing::Alone }; - macro_rules! op { - ($op:expr) => { TokenNode::Op($op, op_kind) } - } - - macro_rules! 
joint { - ($first:expr, $rest:expr) => { joint($first, $rest, is_joint, &mut span, next) } - } - - fn joint(first: char, rest: Token, is_joint: bool, span: &mut syntax_pos::Span, - next: &mut Option) - -> TokenNode { - let (first_span, rest_span) = (*span, *span); - *span = first_span; - let tree = tokenstream::TokenTree::Token(rest_span, rest); - *next = Some(if is_joint { tree.joint() } else { tree.into() }); - TokenNode::Op(first, Spacing::Joint) - } - - let kind = match token { - Eq => op!('='), - Lt => op!('<'), - Le => joint!('<', Eq), - EqEq => joint!('=', Eq), - Ne => joint!('!', Eq), - Ge => joint!('>', Eq), - Gt => op!('>'), - AndAnd => joint!('&', BinOp(And)), - OrOr => joint!('|', BinOp(Or)), - Not => op!('!'), - Tilde => op!('~'), - BinOp(Plus) => op!('+'), - BinOp(Minus) => op!('-'), - BinOp(Star) => op!('*'), - BinOp(Slash) => op!('/'), - BinOp(Percent) => op!('%'), - BinOp(Caret) => op!('^'), - BinOp(And) => op!('&'), - BinOp(Or) => op!('|'), - BinOp(Shl) => joint!('<', Lt), - BinOp(Shr) => joint!('>', Gt), - BinOpEq(Plus) => joint!('+', Eq), - BinOpEq(Minus) => joint!('-', Eq), - BinOpEq(Star) => joint!('*', Eq), - BinOpEq(Slash) => joint!('/', Eq), - BinOpEq(Percent) => joint!('%', Eq), - BinOpEq(Caret) => joint!('^', Eq), - BinOpEq(And) => joint!('&', Eq), - BinOpEq(Or) => joint!('|', Eq), - BinOpEq(Shl) => joint!('<', Le), - BinOpEq(Shr) => joint!('>', Ge), - At => op!('@'), - Dot => op!('.'), - DotDot => joint!('.', Dot), - DotDotDot => joint!('.', DotDot), - DotDotEq => joint!('.', DotEq), - Comma => op!(','), - Semi => op!(';'), - Colon => op!(':'), - ModSep => joint!(':', Colon), - RArrow => joint!('-', Gt), - LArrow => joint!('<', BinOp(Minus)), - FatArrow => joint!('=', Gt), - Pound => op!('#'), - Dollar => op!('$'), - Question => op!('?'), - Underscore => op!('_'), - - Ident(ident) | Lifetime(ident) => TokenNode::Term(Term(ident.name)), - Literal(..) | DocComment(..) 
=> TokenNode::Literal(self::Literal(token)), - - Interpolated(_) => { - __internal::with_sess(|(sess, _)| { - let tts = token.interpolated_to_tokenstream(sess, span); - TokenNode::Group(Delimiter::None, TokenStream(tts)) - }) - } - - DotEq => unreachable!(), - OpenDelim(..) | CloseDelim(..) => unreachable!(), - Whitespace | Comment | Shebang(..) | Eof => unreachable!(), - }; - - TokenTree { span: Span(span), kind: kind } - } - - fn to_internal(self) -> tokenstream::TokenStream { - use syntax::parse::token::*; - use syntax::tokenstream::{TokenTree, Delimited}; - - let (op, kind) = match self.kind { - TokenNode::Op(op, kind) => (op, kind), - TokenNode::Group(delimiter, tokens) => { - return TokenTree::Delimited(self.span.0, Delimited { - delim: delimiter.to_internal(), - tts: tokens.0.into(), - }).into(); - }, - TokenNode::Term(symbol) => { - let ident = ast::Ident { name: symbol.0, ctxt: self.span.0.ctxt() }; - let token = - if symbol.0.as_str().starts_with("'") { Lifetime(ident) } else { Ident(ident) }; - return TokenTree::Token(self.span.0, token).into(); - } - TokenNode::Literal(token) => return TokenTree::Token(self.span.0, token.0).into(), - }; - - let token = match op { - '=' => Eq, - '<' => Lt, - '>' => Gt, - '!' => Not, - '~' => Tilde, - '+' => BinOp(Plus), - '-' => BinOp(Minus), - '*' => BinOp(Star), - '/' => BinOp(Slash), - '%' => BinOp(Percent), - '^' => BinOp(Caret), - '&' => BinOp(And), - '|' => BinOp(Or), - '@' => At, - '.' => Dot, - ',' => Comma, - ';' => Semi, - ':' => Colon, - '#' => Pound, - '$' => Dollar, - '?' => Question, - '_' => Underscore, - _ => panic!("unsupported character {}", op), - }; - - let tree = TokenTree::Token(self.span.0, token); - match kind { - Spacing::Alone => tree.into(), - Spacing::Joint => tree.joint(), - } +/// Prints the literal as a string that should be losslessly convertible +/// back into the same literal (except for possible rounding for floating point literals). 
+#[stable(feature = "proc_macro_lib2", since = "1.29.0")] +impl fmt::Display for Literal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + TokenStream::from(TokenTree::from(self.clone())).fmt(f) } } @@ -760,23 +1171,22 @@ impl TokenTree { #[unstable(feature = "proc_macro_internals", issue = "27812")] #[doc(hidden)] pub mod __internal { - pub use quote::{LiteralKind, Quoter, unquote}; - use std::cell::Cell; + use std::ptr; use syntax::ast; use syntax::ext::base::ExtCtxt; - use syntax::ext::hygiene::Mark; use syntax::ptr::P; use syntax::parse::{self, ParseSess}; use syntax::parse::token::{self, Token}; use syntax::tokenstream; use syntax_pos::{BytePos, Loc, DUMMY_SP}; + use syntax_pos::hygiene::{SyntaxContext, Transparency}; - use super::{TokenStream, LexError}; + use super::{TokenStream, LexError, Span}; pub fn lookup_char_pos(pos: BytePos) -> Loc { - with_sess(|(sess, _)| sess.codemap().lookup_char_pos(pos)) + with_sess(|sess, _| sess.codemap().lookup_char_pos(pos)) } pub fn new_token_stream(item: P) -> TokenStream { @@ -789,7 +1199,7 @@ pub mod __internal { } pub fn token_stream_parse_items(stream: TokenStream) -> Result>, LexError> { - with_sess(move |(sess, _)| { + with_sess(move |sess, _| { let mut parser = parse::stream_to_parser(sess, stream.0); let mut items = Vec::new(); @@ -820,16 +1230,30 @@ pub mod __internal { expand: fn(TokenStream) -> TokenStream); } + #[derive(Clone, Copy)] + pub struct ProcMacroData { + pub def_site: Span, + pub call_site: Span, + } + + #[derive(Clone, Copy)] + struct ProcMacroSess { + parse_sess: *const ParseSess, + data: ProcMacroData, + } + // Emulate scoped_thread_local!() here essentially thread_local! 
{ - static CURRENT_SESS: Cell<(*const ParseSess, Mark)> = - Cell::new((0 as *const _, Mark::root())); + static CURRENT_SESS: Cell = Cell::new(ProcMacroSess { + parse_sess: ptr::null(), + data: ProcMacroData { def_site: Span(DUMMY_SP), call_site: Span(DUMMY_SP) }, + }); } pub fn set_sess(cx: &ExtCtxt, f: F) -> R where F: FnOnce() -> R { - struct Reset { prev: (*const ParseSess, Mark) } + struct Reset { prev: ProcMacroSess } impl Drop for Reset { fn drop(&mut self) { @@ -839,18 +1263,37 @@ pub mod __internal { CURRENT_SESS.with(|p| { let _reset = Reset { prev: p.get() }; - p.set((cx.parse_sess, cx.current_expansion.mark)); + + // No way to determine def location for a proc macro right now, so use call location. + let location = cx.current_expansion.mark.expn_info().unwrap().call_site; + let to_span = |transparency| Span(location.with_ctxt( + SyntaxContext::empty().apply_mark_with_transparency(cx.current_expansion.mark, + transparency)) + ); + p.set(ProcMacroSess { + parse_sess: cx.parse_sess, + data: ProcMacroData { + def_site: to_span(Transparency::Opaque), + call_site: to_span(Transparency::Transparent), + }, + }); f() }) } - pub fn with_sess(f: F) -> R - where F: FnOnce((&ParseSess, Mark)) -> R + pub fn in_sess() -> bool { - let p = CURRENT_SESS.with(|p| p.get()); - assert!(!p.0.is_null(), "proc_macro::__internal::with_sess() called \ - before set_parse_sess()!"); - f(unsafe { (&*p.0, p.1) }) + !CURRENT_SESS.with(|sess| sess.get()).parse_sess.is_null() + } + + pub fn with_sess(f: F) -> R + where F: FnOnce(&ParseSess, &ProcMacroData) -> R + { + let sess = CURRENT_SESS.with(|sess| sess.get()); + if sess.parse_sess.is_null() { + panic!("procedural macro API is used outside of a procedural macro"); + } + f(unsafe { &*sess.parse_sess }, &sess.data) } } diff --git a/src/libproc_macro/quote.rs b/src/libproc_macro/quote.rs index 8b5add1a0f0d..7ae7b13a1521 100644 --- a/src/libproc_macro/quote.rs +++ b/src/libproc_macro/quote.rs @@ -14,252 +14,137 @@ //! 
This quasiquoter uses macros 2.0 hygiene to reliably access //! items from `proc_macro`, to build a `proc_macro::TokenStream`. -use {Delimiter, Literal, Spacing, Span, Term, TokenNode, TokenStream, TokenTree}; +use {Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; -use syntax::ext::base::{ExtCtxt, ProcMacro}; -use syntax::parse::token; -use syntax::tokenstream; - -pub struct Quoter; - -pub fn unquote + Clone>(tokens: &T) -> TokenStream { - T::into(tokens.clone()) +macro_rules! quote_tt { + (($($t:tt)*)) => { Group::new(Delimiter::Parenthesis, quote!($($t)*)) }; + ([$($t:tt)*]) => { Group::new(Delimiter::Bracket, quote!($($t)*)) }; + ({$($t:tt)*}) => { Group::new(Delimiter::Brace, quote!($($t)*)) }; + (,) => { Punct::new(',', Spacing::Alone) }; + (.) => { Punct::new('.', Spacing::Alone) }; + (:) => { Punct::new(':', Spacing::Alone) }; + (;) => { Punct::new(';', Spacing::Alone) }; + (!) => { Punct::new('!', Spacing::Alone) }; + (<) => { Punct::new('<', Spacing::Alone) }; + (>) => { Punct::new('>', Spacing::Alone) }; + (&) => { Punct::new('&', Spacing::Alone) }; + (=) => { Punct::new('=', Spacing::Alone) }; + ($i:ident) => { Ident::new(stringify!($i), Span::def_site()) }; } -pub trait Quote { - fn quote(self) -> TokenStream; -} - -macro_rules! quote_tok { - (,) => { TokenNode::Op(',', Spacing::Alone) }; - (.) => { TokenNode::Op('.', Spacing::Alone) }; - (:) => { TokenNode::Op(':', Spacing::Alone) }; +macro_rules! quote_ts { + ((@ $($t:tt)*)) => { $($t)* }; (::) => { [ - TokenNode::Op(':', Spacing::Joint), - TokenNode::Op(':', Spacing::Alone) - ].iter().cloned().collect::() + TokenTree::from(Punct::new(':', Spacing::Joint)), + TokenTree::from(Punct::new(':', Spacing::Alone)), + ].iter() + .cloned() + .map(|mut x| { + x.set_span(Span::def_site()); + x + }) + .collect::() }; - (!) 
=> { TokenNode::Op('!', Spacing::Alone) }; - (<) => { TokenNode::Op('<', Spacing::Alone) }; - (>) => { TokenNode::Op('>', Spacing::Alone) }; - (_) => { TokenNode::Op('_', Spacing::Alone) }; - (0) => { TokenNode::Literal(::Literal::integer(0)) }; - (&) => { TokenNode::Op('&', Spacing::Alone) }; - ($i:ident) => { TokenNode::Term(Term::intern(stringify!($i))) }; -} - -macro_rules! quote_tree { - ((unquote $($t:tt)*)) => { $($t)* }; - ((quote $($t:tt)*)) => { ($($t)*).quote() }; - (($($t:tt)*)) => { TokenNode::Group(Delimiter::Parenthesis, quote!($($t)*)) }; - ([$($t:tt)*]) => { TokenNode::Group(Delimiter::Bracket, quote!($($t)*)) }; - ({$($t:tt)*}) => { TokenNode::Group(Delimiter::Brace, quote!($($t)*)) }; - ($t:tt) => { quote_tok!($t) }; + ($t:tt) => { TokenTree::from(quote_tt!($t)) }; } +/// Simpler version of the real `quote!` macro, implemented solely +/// through `macro_rules`, for bootstrapping the real implementation +/// (see the `quote` function), which does not have access to the +/// real `quote!` macro due to the `proc_macro` crate not being +/// able to depend on itself. +/// +/// Note: supported tokens are a subset of the real `quote!`, but +/// unquoting is different: instead of `$x`, this uses `(@ expr)`. macro_rules! quote { - () => { TokenStream::empty() }; + () => { TokenStream::new() }; ($($t:tt)*) => { [ - $(TokenStream::from(quote_tree!($t)),)* + $(TokenStream::from(quote_ts!($t)),)* ].iter().cloned().collect::() }; } -impl ProcMacro for Quoter { - fn expand<'cx>(&self, cx: &'cx mut ExtCtxt, - _: ::syntax_pos::Span, - stream: tokenstream::TokenStream) - -> tokenstream::TokenStream { - let mut info = cx.current_expansion.mark.expn_info().unwrap(); - info.callee.allow_internal_unstable = true; - cx.current_expansion.mark.set_expn_info(info); - ::__internal::set_sess(cx, || TokenStream(stream).quote().0) +/// Quote a `TokenStream` into a `TokenStream`. +/// This is the actual `quote!()` proc macro. 
+/// +/// It is manually loaded in `CStore::load_macro_untracked`. +#[unstable(feature = "proc_macro_quote", issue = "38356")] +pub fn quote(stream: TokenStream) -> TokenStream { + if stream.is_empty() { + return quote!(::TokenStream::new()); } -} - -impl Quote for Option { - fn quote(self) -> TokenStream { - match self { - Some(t) => quote!(Some((quote t))), - None => quote!(None), - } - } -} - -impl Quote for TokenStream { - fn quote(self) -> TokenStream { - if self.is_empty() { - return quote!(::TokenStream::empty()); - } - let mut after_dollar = false; - let tokens = self.into_iter().filter_map(|tree| { + let mut after_dollar = false; + let tokens = stream + .into_iter() + .filter_map(|tree| { if after_dollar { after_dollar = false; - match tree.kind { - TokenNode::Term(_) => { - return Some(quote!(::__internal::unquote(&(unquote tree)),)); + match tree { + TokenTree::Ident(_) => { + return Some(quote!(Into::<::TokenStream>::into( + Clone::clone(&(@ tree))),)); } - TokenNode::Op('$', _) => {} + TokenTree::Punct(ref tt) if tt.as_char() == '$' => {} _ => panic!("`$` must be followed by an ident or `$` in `quote!`"), } - } else if let TokenNode::Op('$', _) = tree.kind { - after_dollar = true; - return None; - } - - Some(quote!(::TokenStream::from((quote tree)),)) - }).collect::(); - - if after_dollar { - panic!("unexpected trailing `$` in `quote!`"); - } - - quote!([(unquote tokens)].iter().cloned().collect::<::TokenStream>()) - } -} - -impl Quote for TokenTree { - fn quote(self) -> TokenStream { - quote!(::TokenTree { span: (quote self.span), kind: (quote self.kind) }) - } -} - -impl Quote for TokenNode { - fn quote(self) -> TokenStream { - macro_rules! gen_match { - ($($i:ident($($arg:ident),+)),*) => { - match self { - $(TokenNode::$i($($arg),+) => quote! { - ::TokenNode::$i($((quote $arg)),+) - },)* + } else if let TokenTree::Punct(ref tt) = tree { + if tt.as_char() == '$' { + after_dollar = true; + return None; } } - } - gen_match! 
{ Op(op, kind), Group(delim, tokens), Term(term), Literal(lit) } + Some(quote!(::TokenStream::from((@ match tree { + TokenTree::Punct(tt) => quote!(::TokenTree::Punct(::Punct::new( + (@ TokenTree::from(Literal::character(tt.as_char()))), + (@ match tt.spacing() { + Spacing::Alone => quote!(::Spacing::Alone), + Spacing::Joint => quote!(::Spacing::Joint), + }), + ))), + TokenTree::Group(tt) => quote!(::TokenTree::Group(::Group::new( + (@ match tt.delimiter() { + Delimiter::Parenthesis => quote!(::Delimiter::Parenthesis), + Delimiter::Brace => quote!(::Delimiter::Brace), + Delimiter::Bracket => quote!(::Delimiter::Bracket), + Delimiter::None => quote!(::Delimiter::None), + }), + (@ quote(tt.stream())), + ))), + TokenTree::Ident(tt) => quote!(::TokenTree::Ident(::Ident::new( + (@ TokenTree::from(Literal::string(&tt.to_string()))), + (@ quote_span(tt.span())), + ))), + TokenTree::Literal(tt) => quote!(::TokenTree::Literal({ + let mut iter = (@ TokenTree::from(Literal::string(&tt.to_string()))) + .parse::<::TokenStream>() + .unwrap() + .into_iter(); + if let (Some(::TokenTree::Literal(mut lit)), None) = + (iter.next(), iter.next()) + { + lit.set_span((@ quote_span(tt.span()))); + lit + } else { + unreachable!() + } + })) + })),)) + }) + .collect::(); + + if after_dollar { + panic!("unexpected trailing `$` in `quote!`"); } + + quote!([(@ tokens)].iter().cloned().collect::<::TokenStream>()) } -impl Quote for char { - fn quote(self) -> TokenStream { - TokenNode::Literal(Literal::character(self)).into() - } -} - -impl<'a> Quote for &'a str { - fn quote(self) -> TokenStream { - TokenNode::Literal(Literal::string(self)).into() - } -} - -impl Quote for usize { - fn quote(self) -> TokenStream { - TokenNode::Literal(Literal::integer(self as i128)).into() - } -} - -impl Quote for Term { - fn quote(self) -> TokenStream { - quote!(::Term::intern((quote self.as_str()))) - } -} - -impl Quote for Span { - fn quote(self) -> TokenStream { - quote!(::Span::def_site()) - } -} - 
-macro_rules! literals { - ($($i:ident),*; $($raw:ident),*) => { - pub enum LiteralKind { - $($i,)* - $($raw(usize),)* - } - - impl LiteralKind { - pub fn with_contents_and_suffix(self, contents: Term, suffix: Option) - -> Literal { - let contents = contents.0; - let suffix = suffix.map(|t| t.0); - match self { - $(LiteralKind::$i => { - Literal(token::Literal(token::Lit::$i(contents), suffix)) - })* - $(LiteralKind::$raw(n) => { - Literal(token::Literal(token::Lit::$raw(contents, n), suffix)) - })* - } - } - } - - impl Literal { - fn kind_contents_and_suffix(self) -> (LiteralKind, Term, Option) { - let (lit, suffix) = match self.0 { - token::Literal(lit, suffix) => (lit, suffix), - _ => panic!("unsupported literal {:?}", self.0), - }; - - let (kind, contents) = match lit { - $(token::Lit::$i(contents) => (LiteralKind::$i, contents),)* - $(token::Lit::$raw(contents, n) => (LiteralKind::$raw(n), contents),)* - }; - (kind, Term(contents), suffix.map(Term)) - } - } - - impl Quote for LiteralKind { - fn quote(self) -> TokenStream { - match self { - $(LiteralKind::$i => quote! { - ::__internal::LiteralKind::$i - },)* - $(LiteralKind::$raw(n) => quote! { - ::__internal::LiteralKind::$raw((quote n)) - },)* - } - } - } - - impl Quote for Literal { - fn quote(self) -> TokenStream { - let (kind, contents, suffix) = self.kind_contents_and_suffix(); - quote! { - (quote kind).with_contents_and_suffix((quote contents), (quote suffix)) - } - } - } - } -} - -literals!(Byte, Char, Float, Str_, Integer, ByteStr; StrRaw, ByteStrRaw); - -impl Quote for Delimiter { - fn quote(self) -> TokenStream { - macro_rules! gen_match { - ($($i:ident),*) => { - match self { - $(Delimiter::$i => { quote!(::Delimiter::$i) })* - } - } - } - - gen_match!(Parenthesis, Brace, Bracket, None) - } -} - -impl Quote for Spacing { - fn quote(self) -> TokenStream { - macro_rules! 
gen_match { - ($($i:ident),*) => { - match self { - $(Spacing::$i => { quote!(::Spacing::$i) })* - } - } - } - - gen_match!(Alone, Joint) - } +/// Quote a `Span` into a `TokenStream`. +/// This is needed to implement a custom quoter. +#[unstable(feature = "proc_macro_quote", issue = "38356")] +pub fn quote_span(_: Span) -> TokenStream { + quote!(::Span::def_site()) } diff --git a/src/libproc_macro/rustc.rs b/src/libproc_macro/rustc.rs new file mode 100644 index 000000000000..21229d3299d7 --- /dev/null +++ b/src/libproc_macro/rustc.rs @@ -0,0 +1,283 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use {Delimiter, Level, Spacing, Span, __internal}; +use {Group, Ident, Literal, Punct, TokenTree}; + +use rustc_errors as errors; +use syntax::ast; +use syntax::parse::lexer::comments; +use syntax::parse::token; +use syntax::tokenstream; +use syntax_pos::symbol::{keywords, Symbol}; + +impl Ident { + pub(crate) fn new_maybe_raw(string: &str, span: Span, is_raw: bool) -> Ident { + let sym = Symbol::intern(string); + if is_raw + && (sym == keywords::Underscore.name() + || ast::Ident::with_empty_ctxt(sym).is_path_segment_keyword()) + { + panic!("`{:?}` is not a valid raw identifier", string) + } + Ident { sym, span, is_raw } + } +} + +impl Delimiter { + pub(crate) fn from_internal(delim: token::DelimToken) -> Delimiter { + match delim { + token::Paren => Delimiter::Parenthesis, + token::Brace => Delimiter::Brace, + token::Bracket => Delimiter::Bracket, + token::NoDelim => Delimiter::None, + } + } + + pub(crate) fn to_internal(self) -> token::DelimToken { + match self { + Delimiter::Parenthesis => token::Paren, + Delimiter::Brace => token::Brace, + Delimiter::Bracket => 
token::Bracket, + Delimiter::None => token::NoDelim, + } + } +} + +impl TokenTree { + pub(crate) fn from_internal( + stream: tokenstream::TokenStream, + stack: &mut Vec, + ) -> TokenTree { + use syntax::parse::token::*; + + let (tree, is_joint) = stream.as_tree(); + let (span, token) = match tree { + tokenstream::TokenTree::Token(span, token) => (span, token), + tokenstream::TokenTree::Delimited(span, delimed) => { + let delimiter = Delimiter::from_internal(delimed.delim); + let mut g = Group::new(delimiter, ::TokenStream(delimed.tts.into())); + g.set_span(Span(span)); + return g.into(); + } + }; + + let op_kind = if is_joint { + Spacing::Joint + } else { + Spacing::Alone + }; + macro_rules! tt { + ($e:expr) => {{ + let mut x = TokenTree::from($e); + x.set_span(Span(span)); + x + }}; + } + macro_rules! op { + ($a:expr) => { + tt!(Punct::new($a, op_kind)) + }; + ($a:expr, $b:expr) => {{ + stack.push(tt!(Punct::new($b, op_kind))); + tt!(Punct::new($a, Spacing::Joint)) + }}; + ($a:expr, $b:expr, $c:expr) => {{ + stack.push(tt!(Punct::new($c, op_kind))); + stack.push(tt!(Punct::new($b, Spacing::Joint))); + tt!(Punct::new($a, Spacing::Joint)) + }}; + } + + match token { + Eq => op!('='), + Lt => op!('<'), + Le => op!('<', '='), + EqEq => op!('=', '='), + Ne => op!('!', '='), + Ge => op!('>', '='), + Gt => op!('>'), + AndAnd => op!('&', '&'), + OrOr => op!('|', '|'), + Not => op!('!'), + Tilde => op!('~'), + BinOp(Plus) => op!('+'), + BinOp(Minus) => op!('-'), + BinOp(Star) => op!('*'), + BinOp(Slash) => op!('/'), + BinOp(Percent) => op!('%'), + BinOp(Caret) => op!('^'), + BinOp(And) => op!('&'), + BinOp(Or) => op!('|'), + BinOp(Shl) => op!('<', '<'), + BinOp(Shr) => op!('>', '>'), + BinOpEq(Plus) => op!('+', '='), + BinOpEq(Minus) => op!('-', '='), + BinOpEq(Star) => op!('*', '='), + BinOpEq(Slash) => op!('/', '='), + BinOpEq(Percent) => op!('%', '='), + BinOpEq(Caret) => op!('^', '='), + BinOpEq(And) => op!('&', '='), + BinOpEq(Or) => op!('|', '='), + BinOpEq(Shl) => 
op!('<', '<', '='), + BinOpEq(Shr) => op!('>', '>', '='), + At => op!('@'), + Dot => op!('.'), + DotDot => op!('.', '.'), + DotDotDot => op!('.', '.', '.'), + DotDotEq => op!('.', '.', '='), + Comma => op!(','), + Semi => op!(';'), + Colon => op!(':'), + ModSep => op!(':', ':'), + RArrow => op!('-', '>'), + LArrow => op!('<', '-'), + FatArrow => op!('=', '>'), + Pound => op!('#'), + Dollar => op!('$'), + Question => op!('?'), + SingleQuote => op!('\''), + + Ident(ident, false) => tt!(self::Ident::new(&ident.as_str(), Span(span))), + Ident(ident, true) => tt!(self::Ident::new_raw(&ident.as_str(), Span(span))), + Lifetime(ident) => { + let ident = ident.without_first_quote(); + stack.push(tt!(self::Ident::new(&ident.as_str(), Span(span)))); + tt!(Punct::new('\'', Spacing::Joint)) + } + Literal(lit, suffix) => tt!(self::Literal { + lit, + suffix, + span: Span(span) + }), + DocComment(c) => { + let style = comments::doc_comment_style(&c.as_str()); + let stripped = comments::strip_doc_comment_decoration(&c.as_str()); + let stream = vec![ + tt!(self::Ident::new("doc", Span(span))), + tt!(Punct::new('=', Spacing::Alone)), + tt!(self::Literal::string(&stripped)), + ].into_iter() + .collect(); + stack.push(tt!(Group::new(Delimiter::Bracket, stream))); + if style == ast::AttrStyle::Inner { + stack.push(tt!(Punct::new('!', Spacing::Alone))); + } + tt!(Punct::new('#', Spacing::Alone)) + } + + Interpolated(_) => __internal::with_sess(|sess, _| { + let tts = token.interpolated_to_tokenstream(sess, span); + tt!(Group::new(Delimiter::None, ::TokenStream(tts))) + }), + + DotEq => op!('.', '='), + OpenDelim(..) | CloseDelim(..) => unreachable!(), + Whitespace | Comment | Shebang(..) 
| Eof => unreachable!(), + } + } + + pub(crate) fn to_internal(self) -> tokenstream::TokenStream { + use syntax::parse::token::*; + use syntax::tokenstream::{Delimited, TokenTree}; + + let (ch, kind, span) = match self { + self::TokenTree::Punct(tt) => (tt.as_char(), tt.spacing(), tt.span()), + self::TokenTree::Group(tt) => { + return TokenTree::Delimited( + tt.span.0, + Delimited { + delim: tt.delimiter.to_internal(), + tts: tt.stream.0.into(), + }, + ).into(); + } + self::TokenTree::Ident(tt) => { + let token = Ident(ast::Ident::new(tt.sym, tt.span.0), tt.is_raw); + return TokenTree::Token(tt.span.0, token).into(); + } + self::TokenTree::Literal(self::Literal { + lit: Lit::Integer(ref a), + suffix, + span, + }) + if a.as_str().starts_with("-") => + { + let minus = BinOp(BinOpToken::Minus); + let integer = Symbol::intern(&a.as_str()[1..]); + let integer = Literal(Lit::Integer(integer), suffix); + let a = TokenTree::Token(span.0, minus); + let b = TokenTree::Token(span.0, integer); + return vec![a, b].into_iter().collect(); + } + self::TokenTree::Literal(self::Literal { + lit: Lit::Float(ref a), + suffix, + span, + }) + if a.as_str().starts_with("-") => + { + let minus = BinOp(BinOpToken::Minus); + let float = Symbol::intern(&a.as_str()[1..]); + let float = Literal(Lit::Float(float), suffix); + let a = TokenTree::Token(span.0, minus); + let b = TokenTree::Token(span.0, float); + return vec![a, b].into_iter().collect(); + } + self::TokenTree::Literal(tt) => { + let token = Literal(tt.lit, tt.suffix); + return TokenTree::Token(tt.span.0, token).into(); + } + }; + + let token = match ch { + '=' => Eq, + '<' => Lt, + '>' => Gt, + '!' => Not, + '~' => Tilde, + '+' => BinOp(Plus), + '-' => BinOp(Minus), + '*' => BinOp(Star), + '/' => BinOp(Slash), + '%' => BinOp(Percent), + '^' => BinOp(Caret), + '&' => BinOp(And), + '|' => BinOp(Or), + '@' => At, + '.' => Dot, + ',' => Comma, + ';' => Semi, + ':' => Colon, + '#' => Pound, + '$' => Dollar, + '?' 
=> Question, + '\'' => SingleQuote, + _ => unreachable!(), + }; + + let tree = TokenTree::Token(span.0, token); + match kind { + Spacing::Alone => tree.into(), + Spacing::Joint => tree.joint(), + } + } +} + +impl Level { + pub(crate) fn to_internal(self) -> errors::Level { + match self { + Level::Error => errors::Level::Error, + Level::Warning => errors::Level::Warning, + Level::Note => errors::Level::Note, + Level::Help => errors::Level::Help, + } + } +} diff --git a/src/libprofiler_builtins/Cargo.toml b/src/libprofiler_builtins/Cargo.toml index 04f456917b95..79192fbb6819 100644 --- a/src/libprofiler_builtins/Cargo.toml +++ b/src/libprofiler_builtins/Cargo.toml @@ -13,6 +13,7 @@ doc = false [dependencies] core = { path = "../libcore" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } [build-dependencies] cc = "1.0.1" diff --git a/src/libprofiler_builtins/build.rs b/src/libprofiler_builtins/build.rs index dd88dd933f69..8d6c7d68dfe2 100644 --- a/src/libprofiler_builtins/build.rs +++ b/src/libprofiler_builtins/build.rs @@ -27,6 +27,7 @@ fn main() { "InstrProfilingFile.c", "InstrProfilingMerge.c", "InstrProfilingMergeFile.c", + "InstrProfilingNameVar.c", "InstrProfilingPlatformDarwin.c", "InstrProfilingPlatformLinux.c", "InstrProfilingPlatformOther.c", @@ -42,6 +43,8 @@ fn main() { cfg.define("strdup", Some("_strdup")); cfg.define("open", Some("_open")); cfg.define("fdopen", Some("_fdopen")); + cfg.define("getpid", Some("_getpid")); + cfg.define("fileno", Some("_fileno")); } else { // Turn off various features of gcc and such, mostly copying // compiler-rt's build system already @@ -50,6 +53,7 @@ fn main() { cfg.flag("-fomit-frame-pointer"); cfg.flag("-ffreestanding"); cfg.define("VISIBILITY_HIDDEN", None); + cfg.define("COMPILER_RT_HAS_UNAME", Some("1")); } for src in profile_sources { diff --git a/src/libprofiler_builtins/lib.rs b/src/libprofiler_builtins/lib.rs index 6d0d6d115b71..a85593253b10 100644 --- a/src/libprofiler_builtins/lib.rs +++ 
b/src/libprofiler_builtins/lib.rs @@ -15,4 +15,5 @@ reason = "internal implementation detail of rustc right now", issue = "0")] #![allow(unused_features)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(staged_api)] diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index f95dbcf411cb..59b5b58e61ea 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -14,18 +14,25 @@ bitflags = "1.0" fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } jobserver = "0.1" -log = "0.4" +lazy_static = "1.0.0" +scoped-tls = { version = "0.1.1", features = ["nightly"] } +log = { version = "0.4", features = ["release_max_level_info", "std"] } +polonius-engine = "0.5.0" +proc_macro = { path = "../libproc_macro" } +rustc-rayon = "0.1.1" +rustc-rayon-core = "0.1.1" rustc_apfloat = { path = "../librustc_apfloat" } -rustc_back = { path = "../librustc_back" } -rustc_const_math = { path = "../librustc_const_math" } +rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } backtrace = "0.3.3" +parking_lot = "0.5.5" byteorder = { version = "1.1", features = ["i128"]} - +chalk-engine = { version = "0.7.0", default-features=false } +rustc_fs_util = { path = "../librustc_fs_util" } # Note that these dependencies are a lie, they're just here to get linkage to # work. @@ -35,13 +42,13 @@ byteorder = { version = "1.1", features = ["i128"]} # rlib/dylib pair but all crates.io crates tend to just be rlibs. 
This means # we've got a problem for dependency graphs that look like: # -# foo - rustc_trans +# foo - rustc_codegen_llvm # / \ # rustc ---- rustc_driver # \ / # foo - rustc_metadata # -# Here the crate `foo` is linked into the `rustc_trans` and the +# Here the crate `foo` is linked into the `rustc_codegen_llvm` and the # `rustc_metadata` dylibs, meaning we've got duplicate copies! When we then # go to link `rustc_driver` the compiler notices this and gives us a compiler # error. @@ -49,9 +56,10 @@ byteorder = { version = "1.1", features = ["i128"]} # To work around this problem we just add these crates.io dependencies to the # `rustc` crate which is a shared dependency above. That way the crate `foo` # shows up in the dylib for the `rustc` crate, deduplicating it and allowing -# crates like `rustc_trans` to use `foo` *through* the `rustc` crate. +# crates like `rustc_codegen_llvm` to use `foo` *through* the `rustc` crate. # # tl;dr; this is not needed to get `rustc` to compile, but if you remove it then # later crate stop compiling. If you can remove this and everything # compiles, then please feel free to do so! flate2 = "1.0" +tempfile = "3.0" diff --git a/src/librustc/README.md b/src/librustc/README.md index ddf71a06d607..9909ff91a18a 100644 --- a/src/librustc/README.md +++ b/src/librustc/README.md @@ -1,203 +1,3 @@ -An informal guide to reading and working on the rustc compiler. -================================================================== +For more information about how rustc works, see the [rustc guide]. -If you wish to expand on this document, or have a more experienced -Rust contributor add anything else to it, please get in touch: - -* https://internals.rust-lang.org/ -* https://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust - -or file a bug: - -https://github.com/rust-lang/rust/issues - -Your concerns are probably the same as someone else's. 
- -You may also be interested in the -[Rust Forge](https://forge.rust-lang.org/), which includes a number of -interesting bits of information. - -Finally, at the end of this file is a GLOSSARY defining a number of -common (and not necessarily obvious!) names that are used in the Rust -compiler code. If you see some funky name and you'd like to know what -it stands for, check there! - -The crates of rustc -=================== - -Rustc consists of a number of crates, including `syntax`, -`rustc`, `rustc_back`, `rustc_trans`, `rustc_driver`, and -many more. The source for each crate can be found in a directory -like `src/libXXX`, where `XXX` is the crate name. - -(NB. The names and divisions of these crates are not set in -stone and may change over time -- for the time being, we tend towards -a finer-grained division to help with compilation time, though as -incremental improves that may change.) - -The dependency structure of these crates is roughly a diamond: - -``` - rustc_driver - / | \ - / | \ - / | \ - / v \ -rustc_trans rustc_borrowck ... rustc_metadata - \ | / - \ | / - \ | / - \ v / - rustc - | - v - syntax - / \ - / \ - syntax_pos syntax_ext -``` - -The `rustc_driver` crate, at the top of this lattice, is effectively -the "main" function for the rust compiler. It doesn't have much "real -code", but instead ties together all of the code defined in the other -crates and defines the overall flow of execution. (As we transition -more and more to the [query model](ty/maps/README.md), however, the -"flow" of compilation is becoming less centrally defined.) - -At the other extreme, the `rustc` crate defines the common and -pervasive data structures that all the rest of the compiler uses -(e.g., how to represent types, traits, and the program itself). It -also contains some amount of the compiler itself, although that is -relatively limited. 
- -Finally, all the crates in the bulge in the middle define the bulk of -the compiler -- they all depend on `rustc`, so that they can make use -of the various types defined there, and they export public routines -that `rustc_driver` will invoke as needed (more and more, what these -crates export are "query definitions", but those are covered later -on). - -Below `rustc` lie various crates that make up the parser and error -reporting mechanism. For historical reasons, these crates do not have -the `rustc_` prefix, but they are really just as much an internal part -of the compiler and not intended to be stable (though they do wind up -getting used by some crates in the wild; a practice we hope to -gradually phase out). - -Each crate has a `README.md` file that describes, at a high-level, -what it contains, and tries to give some kind of explanation (some -better than others). - -The compiler process -==================== - -The Rust compiler is in a bit of transition right now. It used to be a -purely "pass-based" compiler, where we ran a number of passes over the -entire program, and each did a particular check of transformation. - -We are gradually replacing this pass-based code with an alternative -setup based on on-demand **queries**. In the query-model, we work -backwards, executing a *query* that expresses our ultimate goal (e.g., -"compile this crate"). This query in turn may make other queries -(e.g., "get me a list of all modules in the crate"). Those queries -make other queries that ultimately bottom out in the base operations, -like parsing the input, running the type-checker, and so forth. This -on-demand model permits us to do exciting things like only do the -minimal amount of work needed to type-check a single function. It also -helps with incremental compilation. (For details on defining queries, -check out `src/librustc/ty/maps/README.md`.) - -Regardless of the general setup, the basic operations that the -compiler must perform are the same. 
The only thing that changes is -whether these operations are invoked front-to-back, or on demand. In -order to compile a Rust crate, these are the general steps that we -take: - -1. **Parsing input** - - this processes the `.rs` files and produces the AST ("abstract syntax tree") - - the AST is defined in `syntax/ast.rs`. It is intended to match the lexical - syntax of the Rust language quite closely. -2. **Name resolution, macro expansion, and configuration** - - once parsing is complete, we process the AST recursively, resolving paths - and expanding macros. This same process also processes `#[cfg]` nodes, and hence - may strip things out of the AST as well. -3. **Lowering to HIR** - - Once name resolution completes, we convert the AST into the HIR, - or "high-level IR". The HIR is defined in `src/librustc/hir/`; that module also includes - the lowering code. - - The HIR is a lightly desugared variant of the AST. It is more processed than the - AST and more suitable for the analyses that follow. It is **not** required to match - the syntax of the Rust language. - - As a simple example, in the **AST**, we preserve the parentheses - that the user wrote, so `((1 + 2) + 3)` and `1 + 2 + 3` parse - into distinct trees, even though they are equivalent. In the - HIR, however, parentheses nodes are removed, and those two - expressions are represented in the same way. -3. **Type-checking and subsequent analyses** - - An important step in processing the HIR is to perform type - checking. This process assigns types to every HIR expression, - for example, and also is responsible for resolving some - "type-dependent" paths, such as field accesses (`x.f` -- we - can't know what field `f` is being accessed until we know the - type of `x`) and associated type references (`T::Item` -- we - can't know what type `Item` is until we know what `T` is). 
- - Type checking creates "side-tables" (`TypeckTables`) that include - the types of expressions, the way to resolve methods, and so forth. - - After type-checking, we can do other analyses, such as privacy checking. -4. **Lowering to MIR and post-processing** - - Once type-checking is done, we can lower the HIR into MIR ("middle IR"), which - is a **very** desugared version of Rust, well suited to the borrowck but also - certain high-level optimizations. -5. **Translation to LLVM and LLVM optimizations** - - From MIR, we can produce LLVM IR. - - LLVM then runs its various optimizations, which produces a number of `.o` files - (one for each "codegen unit"). -6. **Linking** - - Finally, those `.o` files are linked together. - -Glossary -======== - -The compiler uses a number of...idiosyncratic abbreviations and -things. This glossary attempts to list them and give you a few -pointers for understanding them better. - -- AST -- the **abstract syntax tree** produced by the `syntax` crate; reflects user syntax - very closely. -- codegen unit -- when we produce LLVM IR, we group the Rust code into a number of codegen - units. Each of these units is processed by LLVM independently from one another, - enabling parallelism. They are also the unit of incremental re-use. -- cx -- we tend to use "cx" as an abbrevation for context. See also tcx, infcx, etc. -- `DefId` -- an index identifying a **definition** (see `librustc/hir/def_id.rs`). Uniquely - identifies a `DefPath`. -- HIR -- the **High-level IR**, created by lowering and desugaring the AST. See `librustc/hir`. -- `HirId` -- identifies a particular node in the HIR by combining a - def-id with an "intra-definition offset". -- `'gcx` -- the lifetime of the global arena (see `librustc/ty`). -- generics -- the set of generic type parameters defined on a type or item -- ICE -- internal compiler error. When the compiler crashes. 
-- infcx -- the inference context (see `librustc/infer`) -- MIR -- the **Mid-level IR** that is created after type-checking for use by borrowck and trans. - Defined in the `src/librustc/mir/` module, but much of the code that manipulates it is - found in `src/librustc_mir`. -- obligation -- something that must be proven by the trait system; see `librustc/traits`. -- local crate -- the crate currently being compiled. -- node-id or `NodeId` -- an index identifying a particular node in the - AST or HIR; gradually being phased out and replaced with `HirId`. -- query -- perhaps some sub-computation during compilation; see `librustc/maps`. -- provider -- the function that executes a query; see `librustc/maps`. -- sess -- the **compiler session**, which stores global data used throughout compilation -- side tables -- because the AST and HIR are immutable once created, we often carry extra - information about them in the form of hashtables, indexed by the id of a particular node. -- span -- a location in the user's source code, used for error - reporting primarily. These are like a file-name/line-number/column - tuple on steroids: they carry a start/end point, and also track - macro expansions and compiler desugaring. All while being packed - into a few bytes (really, it's an index into a table). See the - `Span` datatype for more. -- substs -- the **substitutions** for a given generic type or item - (e.g., the `i32, u32` in `HashMap`) -- tcx -- the "typing context", main data structure of the compiler (see `librustc/ty`). -- trans -- the code to **translate** MIR into LLVM IR. -- trait reference -- a trait and values for its type parameters (see `librustc/ty`). -- ty -- the internal representation of a **type** (see `librustc/ty`). 
+[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ diff --git a/src/librustc/benches/lib.rs b/src/librustc/benches/lib.rs index 24294ec49cee..5496df1342ff 100644 --- a/src/librustc/benches/lib.rs +++ b/src/librustc/benches/lib.rs @@ -8,9 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(warnings)] - -#![feature(slice_patterns)] #![feature(test)] extern crate test; diff --git a/src/librustc/cfg/construct.rs b/src/librustc/cfg/construct.rs index ff2c36416bfd..98cfa094c169 100644 --- a/src/librustc/cfg/construct.rs +++ b/src/librustc/cfg/construct.rs @@ -8,11 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc_data_structures::graph; use cfg::*; use middle::region; -use ty::{self, TyCtxt}; +use rustc_data_structures::graph::implementation as graph; use syntax::ptr::P; +use ty::{self, TyCtxt}; use hir::{self, PatKind}; use hir::def_id::DefId; @@ -111,13 +111,13 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex { let hir_id = self.tcx.hir.node_to_hir_id(stmt.node.id()); match stmt.node { - hir::StmtDecl(ref decl, _) => { + hir::StmtKind::Decl(ref decl, _) => { let exit = self.decl(&decl, pred); self.add_ast_node(hir_id.local_id, &[exit]) } - hir::StmtExpr(ref expr, _) | - hir::StmtSemi(ref expr, _) => { + hir::StmtKind::Expr(ref expr, _) | + hir::StmtKind::Semi(ref expr, _) => { let exit = self.expr(&expr, pred); self.add_ast_node(hir_id.local_id, &[exit]) } @@ -126,12 +126,12 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn decl(&mut self, decl: &hir::Decl, pred: CFGIndex) -> CFGIndex { match decl.node { - hir::DeclLocal(ref local) => { + hir::DeclKind::Local(ref local) => { let init_exit = self.opt_expr(&local.init, pred); self.pat(&local.pat, init_exit) } - hir::DeclItem(_) => pred, + hir::DeclKind::Item(_) => pred, } } @@ -179,12 +179,12 @@ impl<'a, 'tcx> 
CFGBuilder<'a, 'tcx> { fn expr(&mut self, expr: &hir::Expr, pred: CFGIndex) -> CFGIndex { match expr.node { - hir::ExprBlock(ref blk) => { + hir::ExprKind::Block(ref blk, _) => { let blk_exit = self.block(&blk, pred); self.add_ast_node(expr.hir_id.local_id, &[blk_exit]) } - hir::ExprIf(ref cond, ref then, None) => { + hir::ExprKind::If(ref cond, ref then, None) => { // // [pred] // | @@ -204,7 +204,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_ast_node(expr.hir_id.local_id, &[cond_exit, then_exit]) // 3,4 } - hir::ExprIf(ref cond, ref then, Some(ref otherwise)) => { + hir::ExprKind::If(ref cond, ref then, Some(ref otherwise)) => { // // [pred] // | @@ -225,7 +225,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_ast_node(expr.hir_id.local_id, &[then_exit, else_exit]) // 4, 5 } - hir::ExprWhile(ref cond, ref body, _) => { + hir::ExprKind::While(ref cond, ref body, _) => { // // [pred] // | @@ -267,7 +267,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { expr_exit } - hir::ExprLoop(ref body, _, _) => { + hir::ExprKind::Loop(ref body, _, _) => { // // [pred] // | @@ -295,11 +295,11 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { expr_exit } - hir::ExprMatch(ref discr, ref arms, _) => { + hir::ExprKind::Match(ref discr, ref arms, _) => { self.match_(expr.hir_id.local_id, &discr, &arms, pred) } - hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => { + hir::ExprKind::Binary(op, ref l, ref r) if op.node.is_lazy() => { // // [pred] // | @@ -319,14 +319,14 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_ast_node(expr.hir_id.local_id, &[l_exit, r_exit]) // 3,4 } - hir::ExprRet(ref v) => { + hir::ExprKind::Ret(ref v) => { let v_exit = self.opt_expr(v, pred); let b = self.add_ast_node(expr.hir_id.local_id, &[v_exit]); self.add_returning_edge(expr, b); self.add_unreachable_node() } - hir::ExprBreak(destination, ref opt_expr) => { + hir::ExprKind::Break(destination, ref opt_expr) => { let v = self.opt_expr(opt_expr, pred); let (target_scope, break_dest) = 
self.find_scope_edge(expr, destination, ScopeCfKind::Break); @@ -335,7 +335,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_unreachable_node() } - hir::ExprAgain(destination) => { + hir::ExprKind::Continue(destination) => { let (target_scope, cont_dest) = self.find_scope_edge(expr, destination, ScopeCfKind::Continue); let a = self.add_ast_node(expr.hir_id.local_id, &[pred]); @@ -343,67 +343,66 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_unreachable_node() } - hir::ExprArray(ref elems) => { + hir::ExprKind::Array(ref elems) => { self.straightline(expr, pred, elems.iter().map(|e| &*e)) } - hir::ExprCall(ref func, ref args) => { + hir::ExprKind::Call(ref func, ref args) => { self.call(expr, pred, &func, args.iter().map(|e| &*e)) } - hir::ExprMethodCall(.., ref args) => { + hir::ExprKind::MethodCall(.., ref args) => { self.call(expr, pred, &args[0], args[1..].iter().map(|e| &*e)) } - hir::ExprIndex(ref l, ref r) | - hir::ExprBinary(_, ref l, ref r) if self.tables.is_method_call(expr) => { + hir::ExprKind::Index(ref l, ref r) | + hir::ExprKind::Binary(_, ref l, ref r) if self.tables.is_method_call(expr) => { self.call(expr, pred, &l, Some(&**r).into_iter()) } - hir::ExprUnary(_, ref e) if self.tables.is_method_call(expr) => { + hir::ExprKind::Unary(_, ref e) if self.tables.is_method_call(expr) => { self.call(expr, pred, &e, None::.iter()) } - hir::ExprTup(ref exprs) => { + hir::ExprKind::Tup(ref exprs) => { self.straightline(expr, pred, exprs.iter().map(|e| &*e)) } - hir::ExprStruct(_, ref fields, ref base) => { + hir::ExprKind::Struct(_, ref fields, ref base) => { let field_cfg = self.straightline(expr, pred, fields.iter().map(|f| &*f.expr)); self.opt_expr(base, field_cfg) } - hir::ExprAssign(ref l, ref r) | - hir::ExprAssignOp(_, ref l, ref r) => { + hir::ExprKind::Assign(ref l, ref r) | + hir::ExprKind::AssignOp(_, ref l, ref r) => { self.straightline(expr, pred, [r, l].iter().map(|&e| &**e)) } - hir::ExprIndex(ref l, ref r) | - hir::ExprBinary(_, ref l, 
ref r) => { // NB: && and || handled earlier + hir::ExprKind::Index(ref l, ref r) | + hir::ExprKind::Binary(_, ref l, ref r) => { // NB: && and || handled earlier self.straightline(expr, pred, [l, r].iter().map(|&e| &**e)) } - hir::ExprBox(ref e) | - hir::ExprAddrOf(_, ref e) | - hir::ExprCast(ref e, _) | - hir::ExprType(ref e, _) | - hir::ExprUnary(_, ref e) | - hir::ExprField(ref e, _) | - hir::ExprTupField(ref e, _) | - hir::ExprYield(ref e) | - hir::ExprRepeat(ref e, _) => { + hir::ExprKind::Box(ref e) | + hir::ExprKind::AddrOf(_, ref e) | + hir::ExprKind::Cast(ref e, _) | + hir::ExprKind::Type(ref e, _) | + hir::ExprKind::Unary(_, ref e) | + hir::ExprKind::Field(ref e, _) | + hir::ExprKind::Yield(ref e) | + hir::ExprKind::Repeat(ref e, _) => { self.straightline(expr, pred, Some(&**e).into_iter()) } - hir::ExprInlineAsm(_, ref outputs, ref inputs) => { + hir::ExprKind::InlineAsm(_, ref outputs, ref inputs) => { let post_outputs = self.exprs(outputs.iter().map(|e| &*e), pred); let post_inputs = self.exprs(inputs.iter().map(|e| &*e), post_outputs); self.add_ast_node(expr.hir_id.local_id, &[post_inputs]) } - hir::ExprClosure(..) | - hir::ExprLit(..) | - hir::ExprPath(_) => { + hir::ExprKind::Closure(..) | + hir::ExprKind::Lit(..) | + hir::ExprKind::Path(_) => { self.straightline(expr, pred, None::.iter()) } } @@ -453,13 +452,13 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // The CFG for match expression is quite complex, so no ASCII // art for it (yet). // - // The CFG generated below matches roughly what trans puts - // out. Each pattern and guard is visited in parallel, with + // The CFG generated below matches roughly what MIR contains. + // Each pattern and guard is visited in parallel, with // arms containing multiple patterns generating multiple nodes // for the same guard expression. The guard expressions chain // into each other from top to bottom, with a specific // exception to allow some additional valid programs - // (explained below). 
Trans differs slightly in that the + // (explained below). MIR differs slightly in that the // pattern matching may continue after a guard but the visible // behaviour should be the same. // @@ -473,8 +472,6 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // Keep track of the previous guard expressions let mut prev_guards = Vec::new(); - // Track if the previous pattern contained bindings or wildcards - let mut prev_has_bindings = false; for arm in arms { // Add an exit node for when we've visited all the @@ -493,40 +490,16 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // Visit the guard expression let guard_exit = self.expr(&guard, guard_start); - let this_has_bindings = pat.contains_bindings_or_wild(); - - // If both this pattern and the previous pattern - // were free of bindings, they must consist only - // of "constant" patterns. Note we cannot match an - // all-constant pattern, fail the guard, and then - // match *another* all-constant pattern. This is - // because if the previous pattern matches, then - // we *cannot* match this one, unless all the - // constants are the same (which is rejected by - // `check_match`). - // - // We can use this to be smarter about the flow - // along guards. If the previous pattern matched, - // then we know we will not visit the guard in - // this one (whether or not the guard succeeded), - // if the previous pattern failed, then we know - // the guard for that pattern will not have been - // visited. Thus, it is not possible to visit both - // the previous guard and the current one when - // both patterns consist only of constant - // sub-patterns. - // - // However, if the above does not hold, then all - // previous guards need to be wired to visit the - // current guard pattern. 
- if prev_has_bindings || this_has_bindings { - while let Some(prev) = prev_guards.pop() { - self.add_contained_edge(prev, guard_start); - } + // #47295: We used to have very special case code + // here for when a pair of arms are both formed + // solely from constants, and if so, not add these + // edges. But this was not actually sound without + // other constraints that we stopped enforcing at + // some point. + while let Some(prev) = prev_guards.pop() { + self.add_contained_edge(prev, guard_start); } - prev_has_bindings = this_has_bindings; - // Push the guard onto the list of previous guards prev_guards.push(guard_exit); @@ -594,12 +567,12 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn add_returning_edge(&mut self, _from_expr: &hir::Expr, from_index: CFGIndex) { - let mut data = CFGEdgeData { - exiting_scopes: vec![], + let data = CFGEdgeData { + exiting_scopes: self.loop_scopes.iter() + .rev() + .map(|&LoopScope { loop_id: id, .. }| id) + .collect() }; - for &LoopScope { loop_id: id, .. 
} in self.loop_scopes.iter().rev() { - data.exiting_scopes.push(id); - } self.graph.add_edge(from_index, self.fn_exit, data); } @@ -609,19 +582,16 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { scope_cf_kind: ScopeCfKind) -> (region::Scope, CFGIndex) { match destination.target_id { - hir::ScopeTarget::Block(block_expr_id) => { + Ok(loop_id) => { for b in &self.breakable_block_scopes { - if b.block_expr_id == self.tcx.hir.node_to_hir_id(block_expr_id).local_id { - let scope_id = self.tcx.hir.node_to_hir_id(block_expr_id).local_id; + if b.block_expr_id == self.tcx.hir.node_to_hir_id(loop_id).local_id { + let scope_id = self.tcx.hir.node_to_hir_id(loop_id).local_id; return (region::Scope::Node(scope_id), match scope_cf_kind { ScopeCfKind::Break => b.break_index, ScopeCfKind::Continue => bug!("can't continue to block"), }); } } - span_bug!(expr.span, "no block expr for id {}", block_expr_id); - } - hir::ScopeTarget::Loop(hir::LoopIdResult::Ok(loop_id)) => { for l in &self.loop_scopes { if l.loop_id == self.tcx.hir.node_to_hir_id(loop_id).local_id { let scope_id = self.tcx.hir.node_to_hir_id(loop_id).local_id; @@ -631,10 +601,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { }); } } - span_bug!(expr.span, "no loop scope for id {}", loop_id); + span_bug!(expr.span, "no scope for id {}", loop_id); } - hir::ScopeTarget::Loop(hir::LoopIdResult::Err(err)) => - span_bug!(expr.span, "loop scope error: {}", err), + Err(err) => span_bug!(expr.span, "scope error: {}", err), } } } diff --git a/src/librustc/cfg/mod.rs b/src/librustc/cfg/mod.rs index b379d3956e94..cf9c24cc58a6 100644 --- a/src/librustc/cfg/mod.rs +++ b/src/librustc/cfg/mod.rs @@ -11,7 +11,7 @@ //! Module that constructs a control-flow graph representing an item. //! Uses `Graph` as the underlying representation. 
-use rustc_data_structures::graph; +use rustc_data_structures::graph::implementation as graph; use ty::TyCtxt; use hir; use hir::def_id::DefId; diff --git a/src/librustc/dep_graph/README.md b/src/librustc/dep_graph/README.md index c8d0362f17c8..f1f383d7ad12 100644 --- a/src/librustc/dep_graph/README.md +++ b/src/librustc/dep_graph/README.md @@ -1,295 +1,4 @@ -# Dependency graph for incremental compilation +To learn more about how dependency tracking works in rustc, see the [rustc +guide]. -This module contains the infrastructure for managing the incremental -compilation dependency graph. This README aims to explain how it ought -to be used. In this document, we'll first explain the overall -strategy, and then share some tips for handling specific scenarios. - -The high-level idea is that we want to instrument the compiler to -track which parts of the AST and other IR are read/written by what. -This way, when we come back later, we can look at this graph and -determine what work needs to be redone. - -### The dependency graph - -The nodes of the graph are defined by the enum `DepNode`. They represent -one of three things: - -1. HIR nodes (like `Hir(DefId)`) represent the HIR input itself. -2. Data nodes (like `TypeOfItem(DefId)`) represent some computed - information about a particular item. -3. Procedure nodes (like `CoherenceCheckTrait(DefId)`) represent some - procedure that is executing. Usually this procedure is - performing some kind of check for errors. You can think of them as - computed values where the value being computed is `()` (and the - value may fail to be computed, if an error results). - -An edge `N1 -> N2` is added between two nodes if either: - -- the value of `N1` is used to compute `N2`; -- `N1` is read by the procedure `N2`; -- the procedure `N1` writes the value `N2`. - -The latter two conditions are equivalent to the first one if you think -of procedures as values. 
- -### Basic tracking - -There is a very general strategy to ensure that you have a correct, if -sometimes overconservative, dependency graph. The two main things you have -to do are (a) identify shared state and (b) identify the current tasks. - -### Identifying shared state - -Identify "shared state" that will be written by one pass and read by -another. In particular, we need to identify shared state that will be -read "across items" -- that is, anything where changes in one item -could invalidate work done for other items. So, for example: - -1. The signature for a function is "shared state". -2. The computed type of some expression in the body of a function is - not shared state, because if it changes it does not itself - invalidate other functions (though it may be that it causes new - monomorphizations to occur, but that's handled independently). - -Put another way: if the HIR for an item changes, we are going to -recompile that item for sure. But we need the dep tracking map to tell -us what *else* we have to recompile. Shared state is anything that is -used to communicate results from one item to another. - -### Identifying the current task, tracking reads/writes, etc - -FIXME(#42293). This text needs to be rewritten for the new red-green -system, which doesn't fully exist yet. - -#### Dependency tracking map - -`DepTrackingMap` is a particularly convenient way to correctly store -shared state. A `DepTrackingMap` is a special hashmap that will add -edges automatically when `get` and `insert` are called. The idea is -that, when you get/insert a value for the key `K`, we will add an edge -from/to the node `DepNode::Variant(K)` (for some variant specific to -the map). - -Each `DepTrackingMap` is parameterized by a special type `M` that -implements `DepTrackingMapConfig`; this trait defines the key and value -types of the map, and also defines a fn for converting from the key to -a `DepNode` label. 
You don't usually have to muck about with this by -hand, there is a macro for creating it. You can see the complete set -of `DepTrackingMap` definitions in `librustc/middle/ty/maps.rs`. - -As an example, let's look at the `adt_defs` map. The `adt_defs` map -maps from the def-id of a struct/enum to its `AdtDef`. It is defined -using this macro: - -```rust -dep_map_ty! { AdtDefs: ItemSignature(DefId) -> ty::AdtDefMaster<'tcx> } -// ~~~~~~~ ~~~~~~~~~~~~~ ~~~~~ ~~~~~~~~~~~~~~~~~~~~~~ -// | | Key type Value type -// | DepNode variant -// Name of map id type -``` - -this indicates that a map id type `AdtDefs` will be created. The key -of the map will be a `DefId` and value will be -`ty::AdtDefMaster<'tcx>`. The `DepNode` will be created by -`DepNode::ItemSignature(K)` for a given key. - -Once that is done, you can just use the `DepTrackingMap` like any -other map: - -```rust -let mut map: DepTrackingMap = DepTrackingMap::new(dep_graph); -map.insert(key, value); // registers dep_graph.write -map.get(key; // registers dep_graph.read -``` - -#### Memoization - -One particularly interesting case is memoization. If you have some -shared state that you compute in a memoized fashion, the correct thing -to do is to define a `RefCell` for it and use the -`memoize` helper: - -```rust -map.memoize(key, || /* compute value */) -``` - -This will create a graph that looks like - - ... -> MapVariant(key) -> CurrentTask - -where `MapVariant` is the `DepNode` variant that the map is associated with, -and `...` are whatever edges the `/* compute value */` closure creates. - -In particular, using the memoize helper is much better than writing -the obvious code yourself: - -```rust -if let Some(result) = map.get(key) { - return result; -} -let value = /* compute value */; -map.insert(key, value); -``` - -If you write that code manually, the dependency graph you get will -include artificial edges that are not necessary. 
For example, imagine that -two tasks, A and B, both invoke the manual memoization code, but A happens -to go first. The resulting graph will be: - - ... -> A -> MapVariant(key) -> B - ~~~~~~~~~~~~~~~~~~~~~~~~~~~ // caused by A writing to MapVariant(key) - ~~~~~~~~~~~~~~~~~~~~ // caused by B reading from MapVariant(key) - -This graph is not *wrong*, but it encodes a path from A to B that -should not exist. In contrast, using the memoized helper, you get: - - ... -> MapVariant(key) -> A - | - +----------> B - -which is much cleaner. - -**Be aware though that the closure is executed with `MapVariant(key)` -pushed onto the stack as the current task!** That means that you must -add explicit `read` calls for any shared state that it accesses -implicitly from its environment. See the section on "explicit calls to -read and write when starting a new subtask" above for more details. - -### How to decide where to introduce a new task - -Certainly, you need at least one task on the stack: any attempt to -`read` or `write` shared state will panic if there is no current -task. But where does it make sense to introduce subtasks? The basic -rule is that a subtask makes sense for any discrete unit of work you -may want to skip in the future. Adding a subtask separates out the -reads/writes from *that particular subtask* versus the larger -context. An example: you might have a 'meta' task for all of borrow -checking, and then subtasks for borrow checking individual fns. (Seen -in this light, memoized computations are just a special case where we -may want to avoid redoing the work even within the context of one -compilation.) - -The other case where you might want a subtask is to help with refining -the reads/writes for some later bit of work that needs to be memoized. -For example, we create a subtask for type-checking the body of each -fn. However, in the initial version of incr. comp. at least, we do -not expect to actually *SKIP* type-checking -- we only expect to skip -trans. 
However, it's still useful to create subtasks for type-checking -individual items, because, otherwise, if a fn sig changes, we won't -know which callers are affected -- in fact, because the graph would be -so coarse, we'd just have to retrans everything, since we can't -distinguish which fns used which fn sigs. - -### Testing the dependency graph - -There are various ways to write tests against the dependency graph. -The simplest mechanism are the -`#[rustc_if_this_changed]` and `#[rustc_then_this_would_need]` -annotations. These are used in compile-fail tests to test whether the -expected set of paths exist in the dependency graph. As an example, -see `src/test/compile-fail/dep-graph-caller-callee.rs`. - -The idea is that you can annotate a test like: - -```rust -#[rustc_if_this_changed] -fn foo() { } - -#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK -fn bar() { foo(); } - -#[rustc_then_this_would_need(TypeckTables)] //~ ERROR no path -fn baz() { } -``` - -This will check whether there is a path in the dependency graph from -`Hir(foo)` to `TypeckTables(bar)`. An error is reported for each -`#[rustc_then_this_would_need]` annotation that indicates whether a -path exists. `//~ ERROR` annotations can then be used to test if a -path is found (as demonstrated above). - -### Debugging the dependency graph - -#### Dumping the graph - -The compiler is also capable of dumping the dependency graph for your -debugging pleasure. To do so, pass the `-Z dump-dep-graph` flag. The -graph will be dumped to `dep_graph.{txt,dot}` in the current -directory. You can override the filename with the `RUST_DEP_GRAPH` -environment variable. - -Frequently, though, the full dep graph is quite overwhelming and not -particularly helpful. Therefore, the compiler also allows you to filter -the graph. You can filter in three ways: - -1. All edges originating in a particular set of nodes (usually a single node). -2. All edges reaching a particular set of nodes. -3. 
All edges that lie between given start and end nodes. - -To filter, use the `RUST_DEP_GRAPH_FILTER` environment variable, which should -look like one of the following: - -``` -source_filter // nodes originating from source_filter --> target_filter // nodes that can reach target_filter -source_filter -> target_filter // nodes in between source_filter and target_filter -``` - -`source_filter` and `target_filter` are a `&`-separated list of strings. -A node is considered to match a filter if all of those strings appear in its -label. So, for example: - -``` -RUST_DEP_GRAPH_FILTER='-> TypeckTables' -``` - -would select the predecessors of all `TypeckTables` nodes. Usually though you -want the `TypeckTables` node for some particular fn, so you might write: - -``` -RUST_DEP_GRAPH_FILTER='-> TypeckTables & bar' -``` - -This will select only the `TypeckTables` nodes for fns with `bar` in their name. - -Perhaps you are finding that when you change `foo` you need to re-type-check `bar`, -but you don't think you should have to. In that case, you might do: - -``` -RUST_DEP_GRAPH_FILTER='Hir&foo -> TypeckTables & bar' -``` - -This will dump out all the nodes that lead from `Hir(foo)` to -`TypeckTables(bar)`, from which you can (hopefully) see the source -of the erroneous edge. - -#### Tracking down incorrect edges - -Sometimes, after you dump the dependency graph, you will find some -path that should not exist, but you will not be quite sure how it came -to be. **When the compiler is built with debug assertions,** it can -help you track that down. Simply set the `RUST_FORBID_DEP_GRAPH_EDGE` -environment variable to a filter. Every edge created in the dep-graph -will be tested against that filter -- if it matches, a `bug!` is -reported, so you can easily see the backtrace (`RUST_BACKTRACE=1`). - -The syntax for these filters is the same as described in the previous -section. 
However, note that this filter is applied to every **edge** -and doesn't handle longer paths in the graph, unlike the previous -section. - -Example: - -You find that there is a path from the `Hir` of `foo` to the type -check of `bar` and you don't think there should be. You dump the -dep-graph as described in the previous section and open `dep-graph.txt` -to see something like: - - Hir(foo) -> Collect(bar) - Collect(bar) -> TypeckTables(bar) - -That first edge looks suspicious to you. So you set -`RUST_FORBID_DEP_GRAPH_EDGE` to `Hir&foo -> Collect&bar`, re-run, and -then observe the backtrace. Voila, bug fixed! +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/query.html diff --git a/src/librustc/dep_graph/debug.rs b/src/librustc/dep_graph/debug.rs index e22552008d5a..f0e43e78a50a 100644 --- a/src/librustc/dep_graph/debug.rs +++ b/src/librustc/dep_graph/debug.rs @@ -40,7 +40,7 @@ impl DepNodeFilter { /// Tests whether `node` meets the filter, returning true if so. pub fn test(&self, node: &DepNode) -> bool { let debug_str = format!("{:?}", node); - self.text.split("&") + self.text.split('&') .map(|s| s.trim()) .all(|f| debug_str.contains(f)) } @@ -54,7 +54,7 @@ pub struct EdgeFilter { } impl EdgeFilter { - pub fn new(test: &str) -> Result> { + pub fn new(test: &str) -> Result> { let parts: Vec<_> = test.split("->").collect(); if parts.len() != 2 { Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into()) diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 14f54fbffac2..b7281cf0445c 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -60,18 +60,23 @@ //! user of the `DepNode` API of having to know how to compute the expected //! fingerprint for a given set of node parameters. 
+use mir::interpret::GlobalId; use hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; use hir::map::DefPathHash; use hir::{HirId, ItemLocalId}; -use ich::Fingerprint; -use ty::{TyCtxt, Instance, InstanceDef, ParamEnv, ParamEnvAnd, PolyTraitRef, Ty}; -use ty::subst::Substs; +use ich::{Fingerprint, StableHashingContext}; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; -use ich::StableHashingContext; use std::fmt; use std::hash::Hash; use syntax_pos::symbol::InternedString; +use traits::query::{ + CanonicalProjectionGoal, CanonicalTyGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpSubtypeGoal, + CanonicalPredicateGoal, CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpNormalizeGoal, +}; +use ty::{TyCtxt, FnSig, Instance, InstanceDef, + ParamEnv, ParamEnvAnd, Predicate, PolyFnSig, PolyTraitRef, Ty, self}; +use ty::subst::Substs; // erase!() just makes tokens go away. It's used to specify which macro argument // is repeated (i.e. which sub-expression of the macro we are in) but don't need @@ -80,6 +85,10 @@ macro_rules! erase { ($x:tt) => ({}) } +macro_rules! replace { + ($x:tt with $($y:tt)*) => ($($y)*) +} + macro_rules! is_anon_attr { (anon) => (true); ($attr:ident) => (false); @@ -111,7 +120,7 @@ macro_rules! define_dep_nodes { (<$tcx:tt> $( [$($attr:ident),* ] - $variant:ident $(( $($tuple_arg:tt),* ))* + $variant:ident $(( $tuple_arg_ty:ty $(,)* ))* $({ $($struct_arg_name:ident : $struct_arg_ty:ty),* })* ,)* ) => ( @@ -134,7 +143,7 @@ macro_rules! define_dep_nodes { // tuple args $({ - return <( $($tuple_arg,)* ) as DepNodeParams> + return <$tuple_arg_ty as DepNodeParams> ::CAN_RECONSTRUCT_QUERY_KEY; })* @@ -186,7 +195,7 @@ macro_rules! define_dep_nodes { DepKind :: $variant => { // tuple args $({ - $(erase!($tuple_arg);)* + erase!($tuple_arg_ty); return true; })* @@ -205,7 +214,7 @@ macro_rules! 
define_dep_nodes { pub enum DepConstructor<$tcx> { $( - $variant $(( $($tuple_arg),* ))* + $variant $(( $tuple_arg_ty ))* $({ $($struct_arg_name : $struct_arg_ty),* })* ),* } @@ -227,15 +236,14 @@ macro_rules! define_dep_nodes { { match dep { $( - DepConstructor :: $variant $(( $($tuple_arg),* ))* + DepConstructor :: $variant $(( replace!(($tuple_arg_ty) with arg) ))* $({ $($struct_arg_name),* })* => { // tuple args $({ - let tupled_args = ( $($tuple_arg,)* ); - let hash = DepNodeParams::to_fingerprint(&tupled_args, - tcx); + erase!($tuple_arg_ty); + let hash = DepNodeParams::to_fingerprint(&arg, tcx); let dep_node = DepNode { kind: DepKind::$variant, hash @@ -247,7 +255,7 @@ macro_rules! define_dep_nodes { tcx.sess.opts.debugging_opts.query_dep_graph) { tcx.dep_graph.register_dep_node_debug_str(dep_node, || { - tupled_args.to_debug_str(tcx) + arg.to_debug_str(tcx) }); } @@ -326,11 +334,8 @@ macro_rules! define_dep_nodes { pub fn extract_def_id(&self, tcx: TyCtxt) -> Option { if self.kind.can_reconstruct_query_key() { let def_path_hash = DefPathHash(self.hash); - if let Some(ref def_path_map) = tcx.def_path_hash_to_def_id.as_ref() { - def_path_map.get(&def_path_hash).cloned() - } else { - None - } + tcx.def_path_hash_to_def_id.as_ref()? + .get(&def_path_hash).cloned() } else { None } @@ -436,6 +441,9 @@ impl DepKind { } define_dep_nodes!( <'tcx> + // We use this for most things when incr. comp. is turned off. + [] Null, + // Represents the `Krate` as a whole (the `hir::Krate` value) (as // distinct from the krate module). 
This is basically a hash of // the entire krate, so if you read from `Krate` (e.g., by calling @@ -492,7 +500,10 @@ define_dep_nodes!( <'tcx> [] TypeOfItem(DefId), [] GenericsOfItem(DefId), [] PredicatesOfItem(DefId), + [] ExplicitPredicatesOfItem(DefId), + [] PredicatesDefinedOnItem(DefId), [] InferredOutlivesOf(DefId), + [] InferredOutlivesCrate(CrateNum), [] SuperPredicatesOfItem(DefId), [] TraitDefOfItem(DefId), [] AdtDefOfItem(DefId), @@ -515,7 +526,7 @@ define_dep_nodes!( <'tcx> [] TypeckTables(DefId), [] UsedTraitImports(DefId), [] HasTypeckTables(DefId), - [] ConstEval { param_env: ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)> }, + [] ConstEval { param_env: ParamEnvAnd<'tcx, GlobalId<'tcx>> }, [] CheckMatch(DefId), [] SymbolName(DefId), [] InstanceSymbolName { instance: Instance<'tcx> }, @@ -548,15 +559,17 @@ define_dep_nodes!( <'tcx> [input] DefSpan(DefId), [] LookupStability(DefId), [] LookupDeprecationEntry(DefId), - [] ItemBodyNestedBodies(DefId), [] ConstIsRvaluePromotableToStatic(DefId), [] RvaluePromotableMap(DefId), [] ImplParent(DefId), [] TraitOfItem(DefId), - [] IsExportedSymbol(DefId), + [] IsReachableNonGeneric(DefId), + [] IsUnreachableLocalDefinition(DefId), [] IsMirAvailable(DefId), [] ItemAttrs(DefId), + [] CodegenFnAttrs(DefId), [] FnArgNames(DefId), + [] RenderedConst(DefId), [] DylibDepFormats(CrateNum), [] IsPanicRuntime(CrateNum), [] IsCompilerBuiltins(CrateNum), @@ -571,17 +584,22 @@ define_dep_nodes!( <'tcx> [] GetPanicStrategy(CrateNum), [] IsNoBuiltins(CrateNum), [] ImplDefaultness(DefId), - [] ExportedSymbolIds(CrateNum), + [] CheckItemWellFormed(DefId), + [] CheckTraitItemWellFormed(DefId), + [] CheckImplItemWellFormed(DefId), + [] ReachableNonGenerics(CrateNum), [] NativeLibraries(CrateNum), [] PluginRegistrarFn(CrateNum), [] DeriveRegistrarFn(CrateNum), [input] CrateDisambiguator(CrateNum), [input] CrateHash(CrateNum), [input] OriginalCrateName(CrateNum), + [input] ExtraFileName(CrateNum), [] ImplementationsOfTrait { krate: 
CrateNum, trait_id: DefId }, [] AllTraitImplementations(CrateNum), + [] DllimportForeignItems(CrateNum), [] IsDllimportForeignItem(DefId), [] IsStaticallyIncludedForeignItem(DefId), [] NativeLibraryKind(DefId), @@ -597,47 +615,65 @@ define_dep_nodes!( <'tcx> [input] CrateName(CrateNum), [] ItemChildren(DefId), [] ExternModStmtCnum(DefId), - [input] GetLangItems, + [eval_always] GetLibFeatures, + [] DefinedLibFeatures(CrateNum), + [eval_always] GetLangItems, [] DefinedLangItems(CrateNum), [] MissingLangItems(CrateNum), - [] ExternConstBody(DefId), [] VisibleParentMap, [input] MissingExternCrateItem(CrateNum), [input] UsedCrateSource(CrateNum), [input] PostorderCnums, - [input] HasCloneClosures(CrateNum), - [input] HasCopyClosures(CrateNum), - // This query is not expected to have inputs -- as a result, it's - // not a good candidate for "replay" because it's essentially a - // pure function of its input (and hence the expectation is that - // no caller would be green **apart** from just this - // query). Making it anonymous avoids hashing the result, which + // These queries are not expected to have inputs -- as a result, they + // are not good candidates for "replay" because they are essentially + // pure functions of their input (and hence the expectation is that + // no caller would be green **apart** from just these + // queries). Making them anonymous avoids hashing the result, which // may save a bit of time. 
[anon] EraseRegionsTy { ty: Ty<'tcx> }, + [anon] ConstValueToAllocation { val: &'tcx ty::Const<'tcx> }, [input] Freevars(DefId), [input] MaybeUnusedTraitImport(DefId), [input] MaybeUnusedExternCrates, [eval_always] StabilityIndex, + [eval_always] AllTraits, [input] AllCrateNums, [] ExportedSymbols(CrateNum), - [eval_always] CollectAndPartitionTranslationItems, - [] ExportName(DefId), - [] ContainsExternIndicator(DefId), - [] IsTranslatedFunction(DefId), + [eval_always] CollectAndPartitionMonoItems, + [] IsCodegenedItem(DefId), [] CodegenUnit(InternedString), [] CompileCodegenUnit(InternedString), [input] OutputFilenames, - [anon] NormalizeTy, - // We use this for most things when incr. comp. is turned off. - [] Null, + [] NormalizeProjectionTy(CanonicalProjectionGoal<'tcx>), + [] NormalizeTyAfterErasingRegions(ParamEnvAnd<'tcx, Ty<'tcx>>), + [] ImpliedOutlivesBounds(CanonicalTyGoal<'tcx>), + [] DropckOutlives(CanonicalTyGoal<'tcx>), + [] EvaluateObligation(CanonicalPredicateGoal<'tcx>), + [] TypeOpEq(CanonicalTypeOpEqGoal<'tcx>), + [] TypeOpSubtype(CanonicalTypeOpSubtypeGoal<'tcx>), + [] TypeOpProvePredicate(CanonicalTypeOpProvePredicateGoal<'tcx>), + [] TypeOpNormalizeTy(CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>), + [] TypeOpNormalizePredicate(CanonicalTypeOpNormalizeGoal<'tcx, Predicate<'tcx>>), + [] TypeOpNormalizePolyFnSig(CanonicalTypeOpNormalizeGoal<'tcx, PolyFnSig<'tcx>>), + [] TypeOpNormalizeFnSig(CanonicalTypeOpNormalizeGoal<'tcx, FnSig<'tcx>>), [] SubstituteNormalizeAndTestPredicates { key: (DefId, &'tcx Substs<'tcx>) }, [input] TargetFeaturesWhitelist, - [] TargetFeaturesEnabled(DefId), + [] InstanceDefSizeEstimate { instance_def: InstanceDef<'tcx> }, + + [input] Features, + + [] ProgramClausesFor(DefId), + [] ProgramClausesForEnv(ParamEnv<'tcx>), + [] WasmImportModuleMap(CrateNum), + [] ForeignModules(CrateNum), + + [] UpstreamMonomorphizations(CrateNum), + [] UpstreamMonomorphizationsFor(DefId), ); trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> 
: fmt::Debug { @@ -657,7 +693,7 @@ trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug { } impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a, T> DepNodeParams<'a, 'gcx, 'tcx> for T - where T: HashStable> + fmt::Debug + where T: HashStable> + fmt::Debug { default const CAN_RECONSTRUCT_QUERY_KEY: bool = false; @@ -675,43 +711,43 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a, T> DepNodeParams<'a, 'gcx, 'tcx> for T } } -impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId,) { +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for DefId { const CAN_RECONSTRUCT_QUERY_KEY: bool = true; fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { - tcx.def_path_hash(self.0).0 + tcx.def_path_hash(*self).0 } fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { - tcx.item_path_str(self.0) + tcx.item_path_str(*self) } } -impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefIndex,) { +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for DefIndex { const CAN_RECONSTRUCT_QUERY_KEY: bool = true; fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { - tcx.hir.definitions().def_path_hash(self.0).0 + tcx.hir.definitions().def_path_hash(*self).0 } fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { - tcx.item_path_str(DefId::local(self.0)) + tcx.item_path_str(DefId::local(*self)) } } -impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (CrateNum,) { +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for CrateNum { const CAN_RECONSTRUCT_QUERY_KEY: bool = true; fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { let def_id = DefId { - krate: self.0, + krate: *self, index: CRATE_DEF_INDEX, }; tcx.def_path_hash(def_id).0 } fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { - tcx.crate_name(self.0).as_str().to_string() + tcx.crate_name(*self).as_str().to_string() } } @@ -739,17 +775,17 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for 
(DefId, De } } -impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (HirId,) { +impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for HirId { const CAN_RECONSTRUCT_QUERY_KEY: bool = false; // We actually would not need to specialize the implementation of this // method but it's faster to combine the hashes than to instantiate a full // hashing context and stable-hashing state. fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint { - let (HirId { + let HirId { owner, local_id: ItemLocalId(local_id), - },) = *self; + } = *self; let def_path_hash = tcx.def_path_hash(DefId::local(owner)); let local_id = Fingerprint::from_smaller_hash(local_id as u64); diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs index 55ec8adb5fbf..e308f2924a05 100644 --- a/src/librustc/dep_graph/graph.rs +++ b/src/librustc/dep_graph/graph.rs @@ -9,39 +9,36 @@ // except according to those terms. use errors::DiagnosticBuilder; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher, - StableHashingContextProvider}; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use std::cell::{Ref, RefCell}; +use rustc_data_structures::small_vec::SmallVec; +use rustc_data_structures::sync::{Lrc, Lock}; use std::env; use std::hash::Hash; -use std::rc::Rc; -use ty::TyCtxt; +use ty::{self, TyCtxt}; use util::common::{ProfileQueriesMsg, profq_msg}; -use ich::Fingerprint; +use ich::{StableHashingContext, StableHashingContextProvider, Fingerprint}; use super::debug::EdgeFilter; use super::dep_node::{DepNode, DepKind, WorkProductId}; use super::query::DepGraphQuery; -use super::raii; use super::safe::DepGraphSafe; use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; use super::prev::PreviousDepGraph; #[derive(Clone)] pub struct DepGraph { - data: Option>, + data: Option>, // A vector mapping 
depnodes from the current graph to their associated // result value fingerprints. Do not rely on the length of this vector // being the same as the number of nodes in the graph. The vector can // contain an arbitrary number of zero-entries at the end. - fingerprints: Rc>> + fingerprints: Lrc>> } - newtype_index!(DepNodeIndex); impl DepNodeIndex { @@ -68,55 +65,54 @@ struct DepGraphData { /// tracking. The `current` field is the dependency graph of only the /// current compilation session: We don't merge the previous dep-graph into /// current one anymore. - current: RefCell, + current: Lock, /// The dep-graph from the previous compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. previous: PreviousDepGraph, - colors: RefCell>, + colors: Lock, /// When we load, there may be `.o` files, cached mir, or other such /// things available to us. If we find that they are not dirty, we /// load the path to the file storing those work-products here into /// this map. We can later look for and extract that data. - previous_work_products: RefCell>, + previous_work_products: FxHashMap, - /// Work-products that we generate in this run. - work_products: RefCell>, - - dep_node_debug: RefCell>, + dep_node_debug: Lock>, // Used for testing, only populated when -Zquery-dep-graph is specified. - loaded_from_cache: RefCell>, + loaded_from_cache: Lock>, } impl DepGraph { - pub fn new(prev_graph: PreviousDepGraph) -> DepGraph { + pub fn new(prev_graph: PreviousDepGraph, + prev_work_products: FxHashMap) -> DepGraph { // Pre-allocate the fingerprints array. We over-allocate a little so // that we hopefully don't have to re-allocate during this compilation // session. 
+ let prev_graph_node_count = prev_graph.node_count(); + let fingerprints = IndexVec::from_elem_n(Fingerprint::ZERO, - (prev_graph.node_count() * 115) / 100); + (prev_graph_node_count * 115) / 100); DepGraph { - data: Some(Rc::new(DepGraphData { - previous_work_products: RefCell::new(FxHashMap()), - work_products: RefCell::new(FxHashMap()), - dep_node_debug: RefCell::new(FxHashMap()), - current: RefCell::new(CurrentDepGraph::new()), + data: Some(Lrc::new(DepGraphData { + previous_work_products: prev_work_products, + dep_node_debug: Lock::new(FxHashMap()), + current: Lock::new(CurrentDepGraph::new()), previous: prev_graph, - colors: RefCell::new(FxHashMap()), - loaded_from_cache: RefCell::new(FxHashMap()), + colors: Lock::new(DepNodeColorMap::new(prev_graph_node_count)), + loaded_from_cache: Lock::new(FxHashMap()), })), - fingerprints: Rc::new(RefCell::new(fingerprints)), + fingerprints: Lrc::new(Lock::new(fingerprints)), } } pub fn new_disabled() -> DepGraph { DepGraph { data: None, - fingerprints: Rc::new(RefCell::new(IndexVec::new())), + fingerprints: Lrc::new(Lock::new(IndexVec::new())), } } @@ -132,7 +128,7 @@ impl DepGraph { let mut edges = Vec::new(); for (index, edge_targets) in current_dep_graph.edges.iter_enumerated() { let from = current_dep_graph.nodes[index]; - for &edge_target in edge_targets { + for &edge_target in edge_targets.iter() { let to = current_dep_graph.nodes[edge_target]; edges.push((from, to)); } @@ -143,21 +139,32 @@ impl DepGraph { pub fn assert_ignored(&self) { - if let Some(ref data) = self.data { - match data.current.borrow().task_stack.last() { - Some(&OpenTask::Ignore) | None => { - // ignored + if let Some(..) 
= self.data { + ty::tls::with_context_opt(|icx| { + let icx = if let Some(icx) = icx { icx } else { return }; + match *icx.task { + OpenTask::Ignore => { + // ignored + } + _ => panic!("expected an ignore context") } - _ => panic!("expected an ignore context") - } + }) } } pub fn with_ignore(&self, op: OP) -> R where OP: FnOnce() -> R { - let _task = self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.current)); - op() + ty::tls::with_context(|icx| { + let icx = ty::tls::ImplicitCtxt { + task: &OpenTask::Ignore, + ..icx.clone() + }; + + ty::tls::enter_context(&icx, |_| { + op() + }) + }) } /// Starts a new dep-graph task. Dep-graph tasks are specified @@ -166,7 +173,7 @@ impl DepGraph { /// what state they have access to. In particular, we want to /// prevent implicit 'leaks' of tracked state into the task (which /// could then be read without generating correct edges in the - /// dep-graph -- see the module-level [README] for more details on + /// dep-graph -- see the [rustc guide] for more details on /// the dep-graph). To this end, the task function gets exactly two /// pieces of state: the context `cx` and an argument `arg`. Both /// of these bits of state must be of some type that implements @@ -186,53 +193,93 @@ impl DepGraph { /// - If you need 3+ arguments, use a tuple for the /// `arg` parameter. 
/// - /// [README]: https://github.com/rust-lang/rust/blob/master/src/librustc/dep_graph/README.md - pub fn with_task(&self, + /// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/incremental-compilation.html + pub fn with_task<'gcx, C, A, R>(&self, key: DepNode, cx: C, arg: A, task: fn(C, A) -> R) -> (R, DepNodeIndex) - where C: DepGraphSafe + StableHashingContextProvider, - R: HashStable, + where C: DepGraphSafe + StableHashingContextProvider<'gcx>, + R: HashStable>, { - self.with_task_impl(key, cx, arg, task, - |data, key| data.borrow_mut().push_task(key), - |data, key| data.borrow_mut().pop_task(key)) + self.with_task_impl(key, cx, arg, false, task, + |key| OpenTask::Regular(Lock::new(RegularOpenTask { + node: key, + reads: SmallVec::new(), + read_set: FxHashSet(), + })), + |data, key, task| data.borrow_mut().complete_task(key, task)) } - fn with_task_impl(&self, - key: DepNode, - cx: C, - arg: A, - task: fn(C, A) -> R, - push: fn(&RefCell, DepNode), - pop: fn(&RefCell, DepNode) -> DepNodeIndex) - -> (R, DepNodeIndex) - where C: DepGraphSafe + StableHashingContextProvider, - R: HashStable, + /// Creates a new dep-graph input with value `input` + pub fn input_task<'gcx, C, R>(&self, + key: DepNode, + cx: C, + input: R) + -> (R, DepNodeIndex) + where C: DepGraphSafe + StableHashingContextProvider<'gcx>, + R: HashStable>, + { + fn identity_fn(_: C, arg: A) -> A { + arg + } + + self.with_task_impl(key, cx, input, true, identity_fn, + |_| OpenTask::Ignore, + |data, key, _| data.borrow_mut().alloc_node(key, SmallVec::new())) + } + + fn with_task_impl<'gcx, C, A, R>( + &self, + key: DepNode, + cx: C, + arg: A, + no_tcx: bool, + task: fn(C, A) -> R, + create_task: fn(DepNode) -> OpenTask, + finish_task_and_alloc_depnode: fn(&Lock, + DepNode, + OpenTask) -> DepNodeIndex + ) -> (R, DepNodeIndex) + where + C: DepGraphSafe + StableHashingContextProvider<'gcx>, + R: HashStable>, { if let Some(ref data) = self.data { - 
debug_assert!(!data.colors.borrow().contains_key(&key)); - - push(&data.current, key); - if cfg!(debug_assertions) { - profq_msg(ProfileQueriesMsg::TaskBegin(key.clone())) - }; + let open_task = create_task(key); // In incremental mode, hash the result of the task. We don't // do anything with the hash yet, but we are computing it // anyway so that // - we make sure that the infrastructure works and // - we can get an idea of the runtime cost. - let mut hcx = cx.create_stable_hashing_context(); + let mut hcx = cx.get_stable_hashing_context(); - let result = task(cx, arg); if cfg!(debug_assertions) { - profq_msg(ProfileQueriesMsg::TaskEnd) + profq_msg(hcx.sess(), ProfileQueriesMsg::TaskBegin(key.clone())) }; - let dep_node_index = pop(&data.current, key); + let result = if no_tcx { + task(cx, arg) + } else { + ty::tls::with_context(|icx| { + let icx = ty::tls::ImplicitCtxt { + task: &open_task, + ..icx.clone() + }; + + ty::tls::enter_context(&icx, |_| { + task(cx, arg) + }) + }) + }; + + if cfg!(debug_assertions) { + profq_msg(hcx.sess(), ProfileQueriesMsg::TaskEnd) + }; + + let dep_node_index = finish_task_and_alloc_depnode(&data.current, key, open_task); let mut stable_hasher = StableHasher::new(); result.hash_stable(&mut hcx, &mut stable_hasher); @@ -254,25 +301,27 @@ impl DepGraph { } // Determine the color of the new DepNode. 
- { - let prev_fingerprint = data.previous.fingerprint_of(&key); + if let Some(prev_index) = data.previous.node_to_index_opt(&key) { + let prev_fingerprint = data.previous.fingerprint_by_index(prev_index); - let color = if Some(current_fingerprint) == prev_fingerprint { + let color = if current_fingerprint == prev_fingerprint { DepNodeColor::Green(dep_node_index) } else { DepNodeColor::Red }; - let old_value = data.colors.borrow_mut().insert(key, color); - debug_assert!(old_value.is_none(), + let mut colors = data.colors.borrow_mut(); + debug_assert!(colors.get(prev_index).is_none(), "DepGraph::with_task() - Duplicate DepNodeColor \ insertion for {:?}", key); + + colors.insert(prev_index, color); } (result, dep_node_index) } else { if key.kind.fingerprint_needed_for_crate_hash() { - let mut hcx = cx.create_stable_hashing_context(); + let mut hcx = cx.get_stable_hashing_context(); let result = task(cx, arg); let mut stable_hasher = StableHasher::new(); result.hash_stable(&mut hcx, &mut stable_hasher); @@ -281,9 +330,11 @@ impl DepGraph { let mut fingerprints = self.fingerprints.borrow_mut(); let dep_node_index = DepNodeIndex::new(fingerprints.len()); fingerprints.push(fingerprint); + debug_assert!(fingerprints[dep_node_index] == fingerprint, "DepGraph::with_task() - Assigned fingerprint to \ unexpected index for {:?}", key); + (result, dep_node_index) } else { (task(cx, arg), DepNodeIndex::INVALID) @@ -297,11 +348,28 @@ impl DepGraph { where OP: FnOnce() -> R { if let Some(ref data) = self.data { - data.current.borrow_mut().push_anon_task(); - let result = op(); + let (result, open_task) = ty::tls::with_context(|icx| { + let task = OpenTask::Anon(Lock::new(AnonOpenTask { + reads: SmallVec::new(), + read_set: FxHashSet(), + })); + + let r = { + let icx = ty::tls::ImplicitCtxt { + task: &task, + ..icx.clone() + }; + + ty::tls::enter_context(&icx, |_| { + op() + }) + }; + + (r, task) + }); let dep_node_index = data.current .borrow_mut() - .pop_anon_task(dep_kind); + 
.pop_anon_task(dep_kind, open_task); (result, dep_node_index) } else { (op(), DepNodeIndex::INVALID) @@ -310,18 +378,18 @@ impl DepGraph { /// Execute something within an "eval-always" task which is a task // that runs whenever anything changes. - pub fn with_eval_always_task(&self, + pub fn with_eval_always_task<'gcx, C, A, R>(&self, key: DepNode, cx: C, arg: A, task: fn(C, A) -> R) -> (R, DepNodeIndex) - where C: DepGraphSafe + StableHashingContextProvider, - R: HashStable, + where C: DepGraphSafe + StableHashingContextProvider<'gcx>, + R: HashStable>, { - self.with_task_impl(key, cx, arg, task, - |data, key| data.borrow_mut().push_eval_always_task(key), - |data, key| data.borrow_mut().pop_eval_always_task(key)) + self.with_task_impl(key, cx, arg, false, task, + |key| OpenTask::EvalAlways { node: key }, + |data, key, task| data.borrow_mut().complete_eval_always_task(key, task)) } #[inline] @@ -356,6 +424,15 @@ impl DepGraph { .unwrap() } + #[inline] + pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool { + if let Some(ref data) = self.data { + data.current.borrow_mut().node_to_node_index.contains_key(dep_node) + } else { + false + } + } + #[inline] pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { match self.fingerprints.borrow().get(dep_node_index) { @@ -380,52 +457,20 @@ impl DepGraph { self.data.as_ref().unwrap().previous.node_to_index(dep_node) } - /// Indicates that a previous work product exists for `v`. This is - /// invoked during initial start-up based on what nodes are clean - /// (and what files exist in the incr. directory). - pub fn insert_previous_work_product(&self, v: &WorkProductId, data: WorkProduct) { - debug!("insert_previous_work_product({:?}, {:?})", v, data); - self.data - .as_ref() - .unwrap() - .previous_work_products - .borrow_mut() - .insert(v.clone(), data); - } - - /// Indicates that we created the given work-product in this run - /// for `v`. 
This record will be preserved and loaded in the next - /// run. - pub fn insert_work_product(&self, v: &WorkProductId, data: WorkProduct) { - debug!("insert_work_product({:?}, {:?})", v, data); - self.data - .as_ref() - .unwrap() - .work_products - .borrow_mut() - .insert(v.clone(), data); - } - /// Check whether a previous work product exists for `v` and, if /// so, return the path that leads to it. Used to skip doing work. pub fn previous_work_product(&self, v: &WorkProductId) -> Option { self.data .as_ref() .and_then(|data| { - data.previous_work_products.borrow().get(v).cloned() + data.previous_work_products.get(v).cloned() }) } - /// Access the map of work-products created during this run. Only - /// used during saving of the dep-graph. - pub fn work_products(&self) -> Ref> { - self.data.as_ref().unwrap().work_products.borrow() - } - /// Access the map of work-products created during the cached run. Only /// used during saving of the dep-graph. - pub fn previous_work_products(&self) -> Ref> { - self.data.as_ref().unwrap().previous_work_products.borrow() + pub fn previous_work_products(&self) -> &FxHashMap { + &self.data.as_ref().unwrap().previous_work_products } #[inline(always)] @@ -444,7 +489,12 @@ impl DepGraph { } pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option { - self.data.as_ref().and_then(|t| t.dep_node_debug.borrow().get(&dep_node).cloned()) + self.data + .as_ref()? + .dep_node_debug + .borrow() + .get(&dep_node) + .cloned() } pub fn edge_deduplication_data(&self) -> (u64, u64) { @@ -454,18 +504,10 @@ impl DepGraph { } pub fn serialize(&self) -> SerializedDepGraph { - let mut fingerprints = self.fingerprints.borrow_mut(); let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); - // Make sure we don't run out of bounds below. 
- if current_dep_graph.nodes.len() > fingerprints.len() { - fingerprints.resize(current_dep_graph.nodes.len(), Fingerprint::ZERO); - } - - let nodes: IndexVec<_, (DepNode, Fingerprint)> = - current_dep_graph.nodes.iter_enumerated().map(|(idx, &dep_node)| { - (dep_node, fingerprints[idx]) - }).collect(); + let fingerprints = self.fingerprints.borrow().clone().convert_index_type(); + let nodes = current_dep_graph.nodes.clone().convert_index_type(); let total_edge_count: usize = current_dep_graph.edges.iter() .map(|v| v.len()) @@ -489,13 +531,24 @@ impl DepGraph { SerializedDepGraph { nodes, + fingerprints, edge_list_indices, edge_list_data, } } pub fn node_color(&self, dep_node: &DepNode) -> Option { - self.data.as_ref().and_then(|data| data.colors.borrow().get(dep_node).cloned()) + if let Some(ref data) = self.data { + if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { + return data.colors.borrow().get(prev_index) + } else { + // This is a node that did not exist in the previous compilation + // session, so we consider it to be red. 
+ return Some(DepNodeColor::Red) + } + } + + None } pub fn try_mark_green<'tcx>(&self, @@ -505,7 +558,7 @@ impl DepGraph { debug!("try_mark_green({:?}) - BEGIN", dep_node); let data = self.data.as_ref().unwrap(); - debug_assert!(!data.colors.borrow().contains_key(dep_node)); + #[cfg(not(parallel_queries))] debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node)); if dep_node.kind.is_input() { @@ -535,19 +588,22 @@ impl DepGraph { } }; - let mut current_deps = Vec::new(); + debug_assert!(data.colors.borrow().get(prev_dep_node_index).is_none()); + + let mut current_deps = SmallVec::new(); for &dep_dep_node_index in prev_deps { - let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index); + let dep_dep_node_color = data.colors.borrow().get(dep_dep_node_index); - let dep_dep_node_color = data.colors.borrow().get(dep_dep_node).cloned(); match dep_dep_node_color { Some(DepNodeColor::Green(node_index)) => { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. debug!("try_mark_green({:?}) --- found dependency {:?} to \ - be immediately green", dep_node, dep_dep_node); + be immediately green", + dep_node, + data.previous.index_to_node(dep_dep_node_index)); current_deps.push(node_index); } Some(DepNodeColor::Red) => { @@ -556,10 +612,14 @@ impl DepGraph { // mark the DepNode as green and also don't need to bother // with checking any of the other dependencies. debug!("try_mark_green({:?}) - END - dependency {:?} was \ - immediately red", dep_node, dep_dep_node); + immediately red", + dep_node, + data.previous.index_to_node(dep_dep_node_index)); return None } None => { + let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index); + // We don't know the state of this dependency. If it isn't // an input node, let's try to mark it green recursively. 
if !dep_dep_node.kind.is_input() { @@ -600,11 +660,9 @@ impl DepGraph { // We failed to mark it green, so we try to force the query. debug!("try_mark_green({:?}) --- trying to force \ dependency {:?}", dep_node, dep_dep_node); - if ::ty::maps::force_from_dep_node(tcx, dep_dep_node) { - let dep_dep_node_color = data.colors - .borrow() - .get(dep_dep_node) - .cloned(); + if ::ty::query::force_from_dep_node(tcx, dep_dep_node) { + let dep_dep_node_color = data.colors.borrow().get(dep_dep_node_index); + match dep_dep_node_color { Some(DepNodeColor::Green(node_index)) => { debug!("try_mark_green({:?}) --- managed to \ @@ -620,8 +678,15 @@ impl DepGraph { return None } None => { - bug!("try_mark_green() - Forcing the DepNode \ - should have set its color") + if !tcx.sess.has_errors() { + bug!("try_mark_green() - Forcing the DepNode \ + should have set its color") + } else { + // If the query we just forced has resulted + // in some kind of compilation error, we + // don't expect that the corresponding + // dep-node color has been updated. + } } } } else { @@ -634,16 +699,24 @@ impl DepGraph { } } - // If we got here without hitting a `return` that means that all // dependencies of this DepNode could be marked as green. Therefore we - // can also mark this DepNode as green. We do so by... + // can also mark this DepNode as green. - // ... allocating an entry for it in the current dependency graph and - // adding all the appropriate edges imported from the previous graph ... 
- let dep_node_index = data.current - .borrow_mut() - .alloc_node(*dep_node, current_deps); + // There may be multiple threads trying to mark the same dep node green concurrently + + let (dep_node_index, did_allocation) = { + let mut current = data.current.borrow_mut(); + + if let Some(&dep_node_index) = current.node_to_node_index.get(&dep_node) { + // Someone else allocated it before us + (dep_node_index, false) + } else { + // We allocating an entry for the node in the current dependency graph and + // adding all the appropriate edges imported from the previous graph + (current.alloc_node(*dep_node, current_deps), true) + } + }; // ... copying the fingerprint from the previous graph too, so we don't // have to recompute it ... @@ -655,6 +728,8 @@ impl DepGraph { fingerprints.resize(dep_node_index.index() + 1, Fingerprint::ZERO); } + // Multiple threads can all write the same fingerprint here + #[cfg(not(parallel_queries))] debug_assert!(fingerprints[dep_node_index] == Fingerprint::ZERO, "DepGraph::try_mark_green() - Duplicate fingerprint \ insertion for {:?}", dep_node); @@ -663,15 +738,22 @@ impl DepGraph { } // ... emitting any stored diagnostic ... - { - let diagnostics = tcx.on_disk_query_result_cache + if did_allocation { + // Only the thread which did the allocation emits the error messages + + // FIXME: Ensure that these are printed before returning for all threads. + // Currently threads where did_allocation = false can continue on + // and emit other diagnostics before these diagnostics are emitted. + // Such diagnostics should be emitted after these. + // See https://github.com/rust-lang/rust/issues/48685 + let diagnostics = tcx.queries.on_disk_cache .load_diagnostics(tcx, prev_dep_node_index); if diagnostics.len() > 0 { let handle = tcx.sess.diagnostic(); // Promote the previous diagnostics to the current session. 
- tcx.on_disk_query_result_cache + tcx.queries.on_disk_cache .store_diagnostics(dep_node_index, diagnostics.clone()); for diagnostic in diagnostics { @@ -681,26 +763,23 @@ impl DepGraph { } // ... and finally storing a "Green" entry in the color map. - let old_color = data.colors - .borrow_mut() - .insert(*dep_node, DepNodeColor::Green(dep_node_index)); - debug_assert!(old_color.is_none(), + let mut colors = data.colors.borrow_mut(); + // Multiple threads can all write the same color here + #[cfg(not(parallel_queries))] + debug_assert!(colors.get(prev_dep_node_index).is_none(), "DepGraph::try_mark_green() - Duplicate DepNodeColor \ insertion for {:?}", dep_node); + colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); + debug!("try_mark_green({:?}) - END - successfully marked as green", dep_node); Some(dep_node_index) } - // Used in various assertions - pub fn is_green(&self, dep_node_index: DepNodeIndex) -> bool { - let dep_node = self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index]; - self.data.as_ref().unwrap().colors.borrow().get(&dep_node).map(|&color| { - match color { - DepNodeColor::Red => false, - DepNodeColor::Green(_) => true, - } - }).unwrap_or(false) + // Returns true if the given node has been marked as green during the + // current compilation session. 
Used in various assertions + pub fn is_green(&self, dep_node: &DepNode) -> bool { + self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false) } // This method loads all on-disk cacheable query results into memory, so @@ -714,20 +793,25 @@ impl DepGraph { pub fn exec_cache_promotions<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { let green_nodes: Vec = { let data = self.data.as_ref().unwrap(); - data.colors.borrow().iter().filter_map(|(dep_node, color)| match color { - DepNodeColor::Green(_) => { - if dep_node.cache_on_disk(tcx) { - Some(*dep_node) - } else { + let colors = data.colors.borrow(); + colors.values.indices().filter_map(|prev_index| { + match colors.get(prev_index) { + Some(DepNodeColor::Green(_)) => { + let dep_node = data.previous.index_to_node(prev_index); + if dep_node.cache_on_disk(tcx) { + Some(dep_node) + } else { + None + } + } + None | + Some(DepNodeColor::Red) => { + // We can skip red nodes because a node can only be marked + // as red if the query result was recomputed and thus is + // already in memory. None } } - DepNodeColor::Red => { - // We can skip red nodes because a node can only be marked - // as red if the query result was recomputed and thus is - // already in memory. - None - } }).collect() }; @@ -776,10 +860,10 @@ impl DepGraph { /// each partition. In the first run, we create partitions based on /// the symbols that need to be compiled. For each partition P, we /// hash the symbols in P and create a `WorkProduct` record associated -/// with `DepNode::TransPartition(P)`; the hash is the set of symbols +/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols /// in P. /// -/// The next time we compile, if the `DepNode::TransPartition(P)` is +/// The next time we compile, if the `DepNode::CodegenUnit(P)` is /// judged to be clean (which means none of the things we read to /// generate the partition were found to be dirty), it will be loaded /// into previous work products. 
We will then regenerate the set of @@ -803,9 +887,8 @@ pub enum WorkProductFileKind { pub(super) struct CurrentDepGraph { nodes: IndexVec, - edges: IndexVec>, + edges: IndexVec>, node_to_node_index: FxHashMap, - task_stack: Vec, forbidden_edge: Option, // Anonymous DepNodes are nodes the ID of which we compute from the list of @@ -854,38 +937,19 @@ impl CurrentDepGraph { edges: IndexVec::new(), node_to_node_index: FxHashMap(), anon_id_seed: stable_hasher.finish(), - task_stack: Vec::new(), forbidden_edge, total_read_count: 0, total_duplicate_read_count: 0, } } - pub(super) fn push_ignore(&mut self) { - self.task_stack.push(OpenTask::Ignore); - } - - pub(super) fn pop_ignore(&mut self) { - let popped_node = self.task_stack.pop().unwrap(); - debug_assert_eq!(popped_node, OpenTask::Ignore); - } - - pub(super) fn push_task(&mut self, key: DepNode) { - self.task_stack.push(OpenTask::Regular { - node: key, - reads: Vec::new(), - read_set: FxHashSet(), - }); - } - - pub(super) fn pop_task(&mut self, key: DepNode) -> DepNodeIndex { - let popped_node = self.task_stack.pop().unwrap(); - - if let OpenTask::Regular { - node, - read_set: _, - reads - } = popped_node { + fn complete_task(&mut self, key: DepNode, task: OpenTask) -> DepNodeIndex { + if let OpenTask::Regular(task) = task { + let RegularOpenTask { + node, + read_set: _, + reads + } = task.into_inner(); assert_eq!(node, key); // If this is an input node, we expect that it either has no @@ -912,24 +976,16 @@ impl CurrentDepGraph { self.alloc_node(node, reads) } else { - bug!("pop_task() - Expected regular task to be popped") + bug!("complete_task() - Expected regular task to be popped") } } - fn push_anon_task(&mut self) { - self.task_stack.push(OpenTask::Anon { - reads: Vec::new(), - read_set: FxHashSet(), - }); - } - - fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex { - let popped_node = self.task_stack.pop().unwrap(); - - if let OpenTask::Anon { - read_set: _, - reads - } = popped_node { + fn 
pop_anon_task(&mut self, kind: DepKind, task: OpenTask) -> DepNodeIndex { + if let OpenTask::Anon(task) = task { + let AnonOpenTask { + read_set: _, + reads + } = task.into_inner(); debug_assert!(!kind.is_input()); let mut fingerprint = self.anon_id_seed; @@ -963,67 +1019,59 @@ impl CurrentDepGraph { } } - fn push_eval_always_task(&mut self, key: DepNode) { - self.task_stack.push(OpenTask::EvalAlways { node: key }); - } - - fn pop_eval_always_task(&mut self, key: DepNode) -> DepNodeIndex { - let popped_node = self.task_stack.pop().unwrap(); - + fn complete_eval_always_task(&mut self, key: DepNode, task: OpenTask) -> DepNodeIndex { if let OpenTask::EvalAlways { node, - } = popped_node { + } = task { debug_assert_eq!(node, key); let krate_idx = self.node_to_node_index[&DepNode::new_no_params(DepKind::Krate)]; - self.alloc_node(node, vec![krate_idx]) + self.alloc_node(node, SmallVec::one(krate_idx)) } else { - bug!("pop_eval_always_task() - Expected eval always task to be popped"); + bug!("complete_eval_always_task() - Expected eval always task to be popped"); } } fn read_index(&mut self, source: DepNodeIndex) { - match self.task_stack.last_mut() { - Some(&mut OpenTask::Regular { - ref mut reads, - ref mut read_set, - node: ref target, - }) => { - self.total_read_count += 1; - if read_set.insert(source) { - reads.push(source); + ty::tls::with_context_opt(|icx| { + let icx = if let Some(icx) = icx { icx } else { return }; + match *icx.task { + OpenTask::Regular(ref task) => { + let mut task = task.lock(); + self.total_read_count += 1; + if task.read_set.insert(source) { + task.reads.push(source); - if cfg!(debug_assertions) { - if let Some(ref forbidden_edge) = self.forbidden_edge { - let source = self.nodes[source]; - if forbidden_edge.test(&source, &target) { - bug!("forbidden edge {:?} -> {:?} created", - source, - target) + if cfg!(debug_assertions) { + if let Some(ref forbidden_edge) = self.forbidden_edge { + let target = &task.node; + let source = 
self.nodes[source]; + if forbidden_edge.test(&source, &target) { + bug!("forbidden edge {:?} -> {:?} created", + source, + target) + } } } + } else { + self.total_duplicate_read_count += 1; } - } else { - self.total_duplicate_read_count += 1; + } + OpenTask::Anon(ref task) => { + let mut task = task.lock(); + if task.read_set.insert(source) { + task.reads.push(source); + } + } + OpenTask::Ignore | OpenTask::EvalAlways { .. } => { + // ignore } } - Some(&mut OpenTask::Anon { - ref mut reads, - ref mut read_set, - }) => { - if read_set.insert(source) { - reads.push(source); - } - } - Some(&mut OpenTask::Ignore) | - Some(&mut OpenTask::EvalAlways { .. }) | None => { - // ignore - } - } + }) } fn alloc_node(&mut self, dep_node: DepNode, - edges: Vec) + edges: SmallVec<[DepNodeIndex; 8]>) -> DepNodeIndex { debug_assert_eq!(self.edges.len(), self.nodes.len()); debug_assert_eq!(self.node_to_node_index.len(), self.nodes.len()); @@ -1036,19 +1084,55 @@ impl CurrentDepGraph { } } -#[derive(Clone, Debug, PartialEq)] -enum OpenTask { - Regular { - node: DepNode, - reads: Vec, - read_set: FxHashSet, - }, - Anon { - reads: Vec, - read_set: FxHashSet, - }, +pub struct RegularOpenTask { + node: DepNode, + reads: SmallVec<[DepNodeIndex; 8]>, + read_set: FxHashSet, +} + +pub struct AnonOpenTask { + reads: SmallVec<[DepNodeIndex; 8]>, + read_set: FxHashSet, +} + +pub enum OpenTask { + Regular(Lock), + Anon(Lock), Ignore, EvalAlways { node: DepNode, }, } + +// A data structure that stores Option values as a contiguous +// array, using one u32 per entry. 
+struct DepNodeColorMap { + values: IndexVec, +} + +const COMPRESSED_NONE: u32 = 0; +const COMPRESSED_RED: u32 = 1; +const COMPRESSED_FIRST_GREEN: u32 = 2; + +impl DepNodeColorMap { + fn new(size: usize) -> DepNodeColorMap { + DepNodeColorMap { + values: IndexVec::from_elem_n(COMPRESSED_NONE, size) + } + } + + fn get(&self, index: SerializedDepNodeIndex) -> Option { + match self.values[index] { + COMPRESSED_NONE => None, + COMPRESSED_RED => Some(DepNodeColor::Red), + value => Some(DepNodeColor::Green(DepNodeIndex(value - COMPRESSED_FIRST_GREEN))) + } + } + + fn insert(&mut self, index: SerializedDepNodeIndex, color: DepNodeColor) { + self.values[index] = match color { + DepNodeColor::Red => COMPRESSED_RED, + DepNodeColor::Green(index) => index.0 + COMPRESSED_FIRST_GREEN, + } + } +} diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index a472183698ab..8a6f66911ece 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -14,13 +14,12 @@ mod dep_tracking_map; mod graph; mod prev; mod query; -mod raii; mod safe; mod serialized; pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig}; pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId, label_strs}; -pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor}; +pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor, OpenTask}; pub use self::graph::WorkProductFileKind; pub use self::prev::PreviousDepGraph; pub use self::query::DepGraphQuery; diff --git a/src/librustc/dep_graph/prev.rs b/src/librustc/dep_graph/prev.rs index 50e1ee88a461..669a99019aa6 100644 --- a/src/librustc/dep_graph/prev.rs +++ b/src/librustc/dep_graph/prev.rs @@ -23,7 +23,7 @@ impl PreviousDepGraph { pub fn new(data: SerializedDepGraph) -> PreviousDepGraph { let index: FxHashMap<_, _> = data.nodes .iter_enumerated() - .map(|(idx, &(dep_node, _))| (dep_node, idx)) + .map(|(idx, &dep_node)| (dep_node, idx)) .collect(); PreviousDepGraph { 
data, index } } @@ -41,7 +41,7 @@ impl PreviousDepGraph { #[inline] pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { - self.data.nodes[dep_node_index].0 + self.data.nodes[dep_node_index] } #[inline] @@ -49,18 +49,23 @@ impl PreviousDepGraph { self.index[dep_node] } + #[inline] + pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option { + self.index.get(dep_node).cloned() + } + #[inline] pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option { self.index .get(dep_node) - .map(|&node_index| self.data.nodes[node_index].1) + .map(|&node_index| self.data.fingerprints[node_index]) } #[inline] pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint { - self.data.nodes[dep_node_index].1 + self.data.fingerprints[dep_node_index] } pub fn node_count(&self) -> usize { diff --git a/src/librustc/dep_graph/query.rs b/src/librustc/dep_graph/query.rs index ea83a4f8b310..ce0b5557a34b 100644 --- a/src/librustc/dep_graph/query.rs +++ b/src/librustc/dep_graph/query.rs @@ -9,7 +9,9 @@ // except according to those terms. use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::graph::{Direction, INCOMING, Graph, NodeIndex, OUTGOING}; +use rustc_data_structures::graph::implementation::{ + Direction, INCOMING, Graph, NodeIndex, OUTGOING +}; use super::DepNode; diff --git a/src/librustc/dep_graph/raii.rs b/src/librustc/dep_graph/raii.rs deleted file mode 100644 index 5728bcc7d277..000000000000 --- a/src/librustc/dep_graph/raii.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::graph::CurrentDepGraph; - -use std::cell::RefCell; - -pub struct IgnoreTask<'graph> { - graph: &'graph RefCell, -} - -impl<'graph> IgnoreTask<'graph> { - pub(super) fn new(graph: &'graph RefCell) -> IgnoreTask<'graph> { - graph.borrow_mut().push_ignore(); - IgnoreTask { - graph, - } - } -} - -impl<'graph> Drop for IgnoreTask<'graph> { - fn drop(&mut self) { - self.graph.borrow_mut().pop_ignore(); - } -} - diff --git a/src/librustc/dep_graph/serialized.rs b/src/librustc/dep_graph/serialized.rs index c96040ab9b6e..60fc813a25d5 100644 --- a/src/librustc/dep_graph/serialized.rs +++ b/src/librustc/dep_graph/serialized.rs @@ -20,7 +20,10 @@ newtype_index!(SerializedDepNodeIndex); #[derive(Debug, RustcEncodable, RustcDecodable)] pub struct SerializedDepGraph { /// The set of all DepNodes in the graph - pub nodes: IndexVec, + pub nodes: IndexVec, + /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to + /// the DepNode at the same index in the nodes vector. + pub fingerprints: IndexVec, /// For each DepNode, stores the list of edges originating from that /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, /// which holds the actual DepNodeIndices of the target nodes. @@ -35,6 +38,7 @@ impl SerializedDepGraph { pub fn new() -> SerializedDepGraph { SerializedDepGraph { nodes: IndexVec::new(), + fingerprints: IndexVec::new(), edge_list_indices: IndexVec::new(), edge_list_data: Vec::new(), } diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index 7b48e7801dfb..a3c0688dcc8e 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -14,19 +14,6 @@ // Each message should start and end with a new line, and be wrapped to 80 characters. // In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable. register_long_diagnostics! 
{ -E0020: r##" -This error indicates that an attempt was made to divide by zero (or take the -remainder of a zero divisor) in a static or constant expression. Erroneous -code example: - -```compile_fail -#[deny(const_err)] - -const X: i32 = 42 / 0; -// error: attempt to divide by zero in a constant expression -``` -"##, - E0038: r##" Trait objects like `Box` can only be constructed when certain requirements are satisfied by the trait in question. @@ -256,6 +243,28 @@ trait Foo { } ``` +### The trait cannot contain associated constants + +Just like static functions, associated constants aren't stored on the method +table. If the trait or any subtrait contain an associated constant, they cannot +be made into an object. + +```compile_fail,E0038 +trait Foo { + const X: i32; +} + +impl Foo {} +``` + +A simple workaround is to use a helper method instead: + +``` +trait Foo { + fn x(&self) -> i32; +} +``` + ### The trait cannot use `Self` as a type parameter in the supertrait listing This is similar to the second sub-error, but subtler. It happens in situations @@ -628,8 +637,8 @@ Erroneous code example: ```compile_fail,E0152 #![feature(lang_items)] -#[lang = "panic_fmt"] -struct Foo; // error: duplicate lang item found: `panic_fmt` +#[lang = "panic_impl"] +struct Foo; // error: duplicate lang item found: `panic_impl` ``` Lang items are already implemented in the standard library. Unless you are @@ -815,7 +824,7 @@ A list of available external lang items is available in #![feature(lang_items)] extern "C" { - #[lang = "panic_fmt"] // ok! + #[lang = "panic_impl"] // ok! fn cake(); } ``` @@ -878,65 +887,6 @@ foo(3_i8); // therefore the type-checker complains with this error code. ``` -Here is a more subtle instance of the same problem, that can -arise with for-loops in Rust: - -```compile_fail -let vs: Vec = vec![1, 2, 3, 4]; -for v in &vs { - match v { - 1 => {}, - _ => {}, - } -} -``` - -The above fails because of an analogous type mismatch, -though may be harder to see. 
Again, here are some -explanatory comments for the same example: - -```compile_fail -{ - let vs = vec![1, 2, 3, 4]; - - // `for`-loops use a protocol based on the `Iterator` - // trait. Each item yielded in a `for` loop has the - // type `Iterator::Item` -- that is, `Item` is the - // associated type of the concrete iterator impl. - for v in &vs { -// ~ ~~~ -// | | -// | We borrow `vs`, iterating over a sequence of -// | *references* of type `&Elem` (where `Elem` is -// | vector's element type). Thus, the associated -// | type `Item` must be a reference `&`-type ... -// | -// ... and `v` has the type `Iterator::Item`, as dictated by -// the `for`-loop protocol ... - - match v { - 1 => {} -// ~ -// | -// ... but *here*, `v` is forced to have some integral type; -// only types like `u8`,`i8`,`u16`,`i16`, et cetera can -// match the pattern `1` ... - - _ => {} - } - -// ... therefore, the compiler complains, because it sees -// an attempt to solve the equations -// `some integral-type` = type-of-`v` -// = `Iterator::Item` -// = `&Elem` (i.e. `some reference type`) -// -// which cannot possibly all be true. - - } -} -``` - To avoid those issues, you have to make the types match correctly. So we can fix the previous examples like this: @@ -1755,12 +1705,12 @@ The `main` function was incorrectly declared. Erroneous code example: ```compile_fail,E0580 -fn main() -> i32 { // error: main function has wrong type - 0 +fn main(x: i32) { // error: main function has wrong type + println!("{}", x); } ``` -The `main` function prototype should never take arguments or return type. +The `main` function prototype should never take arguments. Example: ``` @@ -1780,8 +1730,6 @@ allowed as function return types. 
Erroneous code example: ```compile_fail,E0562 -#![feature(conservative_impl_trait)] - fn main() { let count_to_ten: impl Iterator = 0..10; // error: `impl Trait` not allowed outside of function and inherent method @@ -1795,8 +1743,6 @@ fn main() { Make sure `impl Trait` only appears in return-type position. ``` -#![feature(conservative_impl_trait)] - fn count_to_n(n: usize) -> impl Iterator { 0..n } @@ -1869,7 +1815,7 @@ is a function pointer, which is not zero-sized. This pattern should be rewritten. There are a few possible ways to do this: - change the original fn declaration to match the expected signature, - and do the cast in the fn body (the prefered option) + and do the cast in the fn body (the preferred option) - cast the fn item fo a fn pointer before calling transmute, as shown here: ``` @@ -1972,6 +1918,30 @@ fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 { ``` "##, +E0635: r##" +The `#![feature]` attribute specified an unknown feature. + +Erroneous code example: + +```compile_fail,E0635 +#![feature(nonexistent_rust_feature)] // error: unknown feature +``` + +"##, + +E0636: r##" +A `#![feature]` attribute was declared multiple times. + +Erroneous code example: + +```compile_fail,E0636 +#![allow(stable_features)] +#![feature(rust1)] +#![feature(rust1)] // error: the feature `rust1` has already been declared +``` + +"##, + E0644: r##" A closure or generator was constructed that references its own type. @@ -2003,7 +1973,136 @@ that refers to itself. That is permitting, since the closure would be invoking itself via a virtual call, and hence does not directly reference its own *type*. -"##, } +"##, + +E0692: r##" +A `repr(transparent)` type was also annotated with other, incompatible +representation hints. 
+ +Erroneous code example: + +```compile_fail,E0692 +#[repr(transparent, C)] // error: incompatible representation hints +struct Grams(f32); +``` + +A type annotated as `repr(transparent)` delegates all representation concerns to +another type, so adding more representation hints is contradictory. Remove +either the `transparent` hint or the other hints, like this: + +``` +#[repr(transparent)] +struct Grams(f32); +``` + +Alternatively, move the other attributes to the contained type: + +``` +#[repr(C)] +struct Foo { + x: i32, + // ... +} + +#[repr(transparent)] +struct FooWrapper(Foo); +``` + +Note that introducing another `struct` just to have a place for the other +attributes may have unintended side effects on the representation: + +``` +#[repr(transparent)] +struct Grams(f32); + +#[repr(C)] +struct Float(f32); + +#[repr(transparent)] +struct Grams2(Float); // this is not equivalent to `Grams` above +``` + +Here, `Grams2` is a not equivalent to `Grams` -- the former transparently wraps +a (non-transparent) struct containing a single float, while `Grams` is a +transparent wrapper around a float. This can make a difference for the ABI. +"##, + +E0700: r##" +The `impl Trait` return type captures lifetime parameters that do not +appear within the `impl Trait` itself. + +Erroneous code example: + +```compile-fail,E0700 +use std::cell::Cell; + +trait Trait<'a> { } + +impl<'a, 'b> Trait<'b> for Cell<&'a u32> { } + +fn foo<'x, 'y>(x: Cell<&'x u32>) -> impl Trait<'y> +where 'x: 'y +{ + x +} +``` + +Here, the function `foo` returns a value of type `Cell<&'x u32>`, +which references the lifetime `'x`. However, the return type is +declared as `impl Trait<'y>` -- this indicates that `foo` returns +"some type that implements `Trait<'y>`", but it also indicates that +the return type **only captures data referencing the lifetime `'y`**. +In this case, though, we are referencing data with lifetime `'x`, so +this function is in error. 
+ +To fix this, you must reference the lifetime `'x` from the return +type. For example, changing the return type to `impl Trait<'y> + 'x` +would work: + +``` +use std::cell::Cell; + +trait Trait<'a> { } + +impl<'a,'b> Trait<'b> for Cell<&'a u32> { } + +fn foo<'x, 'y>(x: Cell<&'x u32>) -> impl Trait<'y> + 'x +where 'x: 'y +{ + x +} +``` +"##, + +E0701: r##" +This error indicates that a `#[non_exhaustive]` attribute was incorrectly placed +on something other than a struct or enum. + +Examples of erroneous code: + +```compile_fail,E0701 +# #![feature(non_exhaustive)] + +#[non_exhaustive] +trait Foo { } +``` +"##, + +E0702: r##" +This error indicates that a `#[non_exhaustive]` attribute had a value. The +`#[non_exhaustive]` should be empty. + +Examples of erroneous code: + +```compile_fail,E0702 +# #![feature(non_exhaustive)] + +#[non_exhaustive(anything)] +struct Foo; +``` +"##, + +} register_diagnostics! { @@ -2056,4 +2155,12 @@ register_diagnostics! { E0657, // `impl Trait` can only capture lifetimes bound at the fn level E0687, // in-band lifetimes cannot be used in `fn`/`Fn` syntax E0688, // in-band lifetimes cannot be mixed with explicit lifetime binders + + E0697, // closures cannot be static + + E0707, // multiple elided lifetimes used in arguments of `async fn` + E0708, // `async` non-`move` closures with arguments are not currently supported + E0709, // multiple different lifetimes used in arguments of `async fn` + E0710, // an unknown tool name found in scoped lint + E0711, // a feature has been declared with conflicting stability attributes } diff --git a/src/librustc/hir/README.md b/src/librustc/hir/README.md deleted file mode 100644 index e283fc40c50a..000000000000 --- a/src/librustc/hir/README.md +++ /dev/null @@ -1,119 +0,0 @@ -# Introduction to the HIR - -The HIR -- "High-level IR" -- is the primary IR used in most of -rustc. 
It is a desugared version of the "abstract syntax tree" (AST) -that is generated after parsing, macro expansion, and name resolution -have completed. Many parts of HIR resemble Rust surface syntax quite -closely, with the exception that some of Rust's expression forms have -been desugared away (as an example, `for` loops are converted into a -`loop` and do not appear in the HIR). - -This README covers the main concepts of the HIR. - -### Out-of-band storage and the `Crate` type - -The top-level data-structure in the HIR is the `Crate`, which stores -the contents of the crate currently being compiled (we only ever -construct HIR for the current crate). Whereas in the AST the crate -data structure basically just contains the root module, the HIR -`Crate` structure contains a number of maps and other things that -serve to organize the content of the crate for easier access. - -For example, the contents of individual items (e.g., modules, -functions, traits, impls, etc) in the HIR are not immediately -accessible in the parents. So, for example, if had a module item `foo` -containing a function `bar()`: - -``` -mod foo { - fn bar() { } -} -``` - -Then in the HIR the representation of module `foo` (the `Mod` -stuct) would have only the **`ItemId`** `I` of `bar()`. To get the -details of the function `bar()`, we would lookup `I` in the -`items` map. - -One nice result from this representation is that one can iterate -over all items in the crate by iterating over the key-value pairs -in these maps (without the need to trawl through the IR in total). -There are similar maps for things like trait items and impl items, -as well as "bodies" (explained below). - -The other reason to setup the representation this way is for better -integration with incremental compilation. This way, if you gain access -to a `&hir::Item` (e.g. for the mod `foo`), you do not immediately -gain access to the contents of the function `bar()`. 
Instead, you only -gain access to the **id** for `bar()`, and you must invoke some -function to lookup the contents of `bar()` given its id; this gives us -a chance to observe that you accessed the data for `bar()` and record -the dependency. - -### Identifiers in the HIR - -Most of the code that has to deal with things in HIR tends not to -carry around references into the HIR, but rather to carry around -*identifier numbers* (or just "ids"). Right now, you will find four -sorts of identifiers in active use: - -- `DefId`, which primarily names "definitions" or top-level items. - - You can think of a `DefId` as being shorthand for a very explicit - and complete path, like `std::collections::HashMap`. However, - these paths are able to name things that are not nameable in - normal Rust (e.g., impls), and they also include extra information - about the crate (such as its version number, as two versions of - the same crate can co-exist). - - A `DefId` really consists of two parts, a `CrateNum` (which - identifies the crate) and a `DefIndex` (which indixes into a list - of items that is maintained per crate). -- `HirId`, which combines the index of a particular item with an - offset within that item. - - the key point of a `HirId` is that it is *relative* to some item (which is named - via a `DefId`). -- `BodyId`, this is an absolute identifier that refers to a specific - body (definition of a function or constant) in the crate. It is currently - effectively a "newtype'd" `NodeId`. -- `NodeId`, which is an absolute id that identifies a single node in the HIR tree. - - While these are still in common use, **they are being slowly phased out**. - - Since they are absolute within the crate, adding a new node - anywhere in the tree causes the node-ids of all subsequent code in - the crate to change. This is terrible for incremental compilation, - as you can perhaps imagine. 
- -### HIR Map - -Most of the time when you are working with the HIR, you will do so via -the **HIR Map**, accessible in the tcx via `tcx.hir` (and defined in -the `hir::map` module). The HIR map contains a number of methods to -convert between ids of various kinds and to lookup data associated -with a HIR node. - -For example, if you have a `DefId`, and you would like to convert it -to a `NodeId`, you can use `tcx.hir.as_local_node_id(def_id)`. This -returns an `Option` -- this will be `None` if the def-id -refers to something outside of the current crate (since then it has no -HIR node), but otherwise returns `Some(n)` where `n` is the node-id of -the definition. - -Similarly, you can use `tcx.hir.find(n)` to lookup the node for a -`NodeId`. This returns a `Option>`, where `Node` is an enum -defined in the map; by matching on this you can find out what sort of -node the node-id referred to and also get a pointer to the data -itself. Often, you know what sort of node `n` is -- e.g., if you know -that `n` must be some HIR expression, you can do -`tcx.hir.expect_expr(n)`, which will extract and return the -`&hir::Expr`, panicking if `n` is not in fact an expression. - -Finally, you can use the HIR map to find the parents of nodes, via -calls like `tcx.hir.get_parent_node(n)`. - -### HIR Bodies - -A **body** represents some kind of executable code, such as the body -of a function/closure or the definition of a constant. Bodies are -associated with an **owner**, which is typically some kind of item -(e.g., a `fn()` or `const`), but could also be a closure expression -(e.g., `|x, y| x + y`). You can use the HIR map to find the body -associated with a given def-id (`maybe_body_owned_by()`) or to find -the owner of a body (`body_owner_def_id()`). diff --git a/src/librustc/hir/check_attr.rs b/src/librustc/hir/check_attr.rs index 4b528a0fdc77..9ded7bc28b30 100644 --- a/src/librustc/hir/check_attr.rs +++ b/src/librustc/hir/check_attr.rs @@ -14,6 +14,7 @@ //! 
conflicts between multiple such attributes attached to the same //! item. +use syntax_pos::Span; use ty::TyCtxt; use hir; @@ -25,16 +26,25 @@ enum Target { Struct, Union, Enum, + Const, + ForeignMod, + Expression, + Statement, + Closure, + Static, Other, } impl Target { fn from_item(item: &hir::Item) -> Target { match item.node { - hir::ItemFn(..) => Target::Fn, - hir::ItemStruct(..) => Target::Struct, - hir::ItemUnion(..) => Target::Union, - hir::ItemEnum(..) => Target::Enum, + hir::ItemKind::Fn(..) => Target::Fn, + hir::ItemKind::Struct(..) => Target::Struct, + hir::ItemKind::Union(..) => Target::Union, + hir::ItemKind::Enum(..) => Target::Enum, + hir::ItemKind::Const(..) => Target::Const, + hir::ItemKind::ForeignMod(..) => Target::ForeignMod, + hir::ItemKind::Static(..) => Target::Static, _ => Target::Other, } } @@ -47,27 +57,59 @@ struct CheckAttrVisitor<'a, 'tcx: 'a> { impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { /// Check any attribute. fn check_attributes(&self, item: &hir::Item, target: Target) { - self.tcx.target_features_enabled(self.tcx.hir.local_def_id(item.id)); + if target == Target::Fn || target == Target::Const { + self.tcx.codegen_fn_attrs(self.tcx.hir.local_def_id(item.id)); + } else if let Some(a) = item.attrs.iter().find(|a| a.check_name("target_feature")) { + self.tcx.sess.struct_span_err(a.span, "attribute should be applied to a function") + .span_label(item.span, "not a function") + .emit(); + } for attr in &item.attrs { - if let Some(name) = attr.name() { - if name == "inline" { - self.check_inline(attr, item, target) - } + if attr.check_name("inline") { + self.check_inline(attr, &item.span, target) + } else if attr.check_name("non_exhaustive") { + self.check_non_exhaustive(attr, item, target) } } self.check_repr(item, target); + self.check_used(item, target); } - /// Check if an `#[inline]` is applied to a function. 
- fn check_inline(&self, attr: &hir::Attribute, item: &hir::Item, target: Target) { - if target != Target::Fn { + /// Check if an `#[inline]` is applied to a function or a closure. + fn check_inline(&self, attr: &hir::Attribute, span: &Span, target: Target) { + if target != Target::Fn && target != Target::Closure { struct_span_err!(self.tcx.sess, attr.span, E0518, - "attribute should be applied to function") - .span_label(item.span, "not a function") + "attribute should be applied to function or closure") + .span_label(*span, "not a function or closure") + .emit(); + } + } + + /// Check if the `#[non_exhaustive]` attribute on an `item` is valid. + fn check_non_exhaustive(&self, attr: &hir::Attribute, item: &hir::Item, target: Target) { + match target { + Target::Struct | Target::Enum => { /* Valid */ }, + _ => { + struct_span_err!(self.tcx.sess, + attr.span, + E0701, + "attribute can only be applied to a struct or enum") + .span_label(item.span, "not a struct or enum") + .emit(); + return; + } + } + + if attr.meta_item_list().is_some() || attr.value_str().is_some() { + struct_span_err!(self.tcx.sess, + attr.span, + E0702, + "attribute should be empty") + .span_label(item.span, "not empty") .emit(); } } @@ -81,10 +123,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { // ``` let hints: Vec<_> = item.attrs .iter() - .filter(|attr| match attr.name() { - Some(name) => name == "repr", - None => false, - }) + .filter(|attr| attr.name() == "repr") .filter_map(|attr| attr.meta_item_list()) .flat_map(|hints| hints) .collect(); @@ -92,6 +131,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { let mut int_reprs = 0; let mut is_c = false; let mut is_simd = false; + let mut is_transparent = false; for hint in &hints { let name = if let Some(name) = hint.name() { @@ -137,6 +177,14 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { continue } } + "transparent" => { + is_transparent = true; + if target != Target::Struct { + ("a", "struct") + } else { + continue + } + } "i8" | "u8" | "i16" 
| "u16" | "i32" | "u32" | "i64" | "u64" | "isize" | "usize" => { @@ -149,34 +197,115 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { } _ => continue, }; - struct_span_err!(self.tcx.sess, hint.span, E0517, - "attribute should be applied to {}", allowed_targets) - .span_label(item.span, format!("not {} {}", article, allowed_targets)) - .emit(); + self.emit_repr_error( + hint.span, + item.span, + &format!("attribute should be applied to {}", allowed_targets), + &format!("not {} {}", article, allowed_targets), + ) } + // Just point at all repr hints if there are any incompatibilities. + // This is not ideal, but tracking precisely which ones are at fault is a huge hassle. + let hint_spans = hints.iter().map(|hint| hint.span); + + // Error on repr(transparent, ). + if is_transparent && hints.len() > 1 { + let hint_spans: Vec<_> = hint_spans.clone().collect(); + span_err!(self.tcx.sess, hint_spans, E0692, + "transparent struct cannot have other repr hints"); + } // Warn on repr(u8, u16), repr(C, simd), and c-like-enum-repr(C, u8) if (int_reprs > 1) || (is_simd && is_c) || (int_reprs == 1 && is_c && is_c_like_enum(item)) { - // Just point at all repr hints. This is not ideal, but tracking - // precisely which ones are at fault is a huge hassle. 
- let spans: Vec<_> = hints.iter().map(|hint| hint.span).collect(); - span_warn!(self.tcx.sess, spans, E0566, + let hint_spans: Vec<_> = hint_spans.collect(); + span_warn!(self.tcx.sess, hint_spans, E0566, "conflicting representation hints"); } } + + fn emit_repr_error( + &self, + hint_span: Span, + label_span: Span, + hint_message: &str, + label_message: &str, + ) { + struct_span_err!(self.tcx.sess, hint_span, E0517, "{}", hint_message) + .span_label(label_span, label_message) + .emit(); + } + + fn check_stmt_attributes(&self, stmt: &hir::Stmt) { + // When checking statements ignore expressions, they will be checked later + if let hir::StmtKind::Decl(_, _) = stmt.node { + for attr in stmt.node.attrs() { + if attr.check_name("inline") { + self.check_inline(attr, &stmt.span, Target::Statement); + } + if attr.check_name("repr") { + self.emit_repr_error( + attr.span, + stmt.span, + "attribute should not be applied to a statement", + "not a struct, enum or union", + ); + } + } + } + } + + fn check_expr_attributes(&self, expr: &hir::Expr) { + let target = match expr.node { + hir::ExprKind::Closure(..) 
=> Target::Closure, + _ => Target::Expression, + }; + for attr in expr.attrs.iter() { + if attr.check_name("inline") { + self.check_inline(attr, &expr.span, target); + } + if attr.check_name("repr") { + self.emit_repr_error( + attr.span, + expr.span, + "attribute should not be applied to an expression", + "not defining a struct, enum or union", + ); + } + } + } + + fn check_used(&self, item: &hir::Item, target: Target) { + for attr in &item.attrs { + if attr.name() == "used" && target != Target::Static { + self.tcx.sess + .span_err(attr.span, "attribute must be applied to a `static` variable"); + } + } + } } impl<'a, 'tcx> Visitor<'tcx> for CheckAttrVisitor<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { - NestedVisitorMap::None + NestedVisitorMap::OnlyBodies(&self.tcx.hir) } fn visit_item(&mut self, item: &'tcx hir::Item) { let target = Target::from_item(item); self.check_attributes(item, target); - intravisit::walk_item(self, item); + intravisit::walk_item(self, item) + } + + + fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt) { + self.check_stmt_attributes(stmt); + intravisit::walk_stmt(self, stmt) + } + + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { + self.check_expr_attributes(expr); + intravisit::walk_expr(self, expr) } } @@ -186,7 +315,7 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { } fn is_c_like_enum(item: &hir::Item) -> bool { - if let hir::ItemEnum(ref def, _) = item.node { + if let hir::ItemKind::Enum(ref def, _) = item.node { for variant in &def.variants { match variant.node.data { hir::VariantData::Unit(_) => { /* continue */ } diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs index 8e4a4d32c0ba..4a14223eb88f 100644 --- a/src/librustc/hir/def.rs +++ b/src/librustc/hir/def.rs @@ -16,6 +16,8 @@ use syntax_pos::Span; use hir; use ty; +use self::Namespace::*; + #[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub enum CtorKind { /// Constructor 
function automatically created by a tuple struct/variant. @@ -26,6 +28,18 @@ pub enum CtorKind { Fictive, } +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum NonMacroAttrKind { + /// Single-segment attribute defined by the language (`#[inline]`) + Builtin, + /// Multi-segment custom attribute living in a "tool module" (`#[rustfmt::skip]`). + Tool, + /// Single-segment custom attribute registered by a derive macro (`#[serde(default)]`). + DeriveHelper, + /// Single-segment custom attribute not registered in any way (`#[my_attr]`). + Custom, +} + #[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] pub enum Def { // Type namespace @@ -35,13 +49,19 @@ pub enum Def { Enum(DefId), Variant(DefId), Trait(DefId), + /// `existential type Foo: Bar;` + Existential(DefId), + /// `type Foo = Bar;` TyAlias(DefId), TyForeign(DefId), TraitAlias(DefId), AssociatedTy(DefId), + /// `existential type Foo: Bar;` + AssociatedExistential(DefId), PrimTy(hir::PrimTy), TyParam(DefId), SelfTy(Option /* trait */, Option /* impl */), + ToolMod, // e.g. `rustfmt` in `#[rustfmt::skip]` // Value namespace Fn(DefId), @@ -60,8 +80,7 @@ pub enum Def { // Macro namespace Macro(DefId, MacroKind), - - GlobalAsm(DefId), + NonMacroAttr(NonMacroAttrKind), // e.g. `#[inline]` or `#[rustfmt::skip]` // Both namespaces Err, @@ -116,6 +135,82 @@ impl PathResolution { } } +/// Different kinds of symbols don't influence each other. +/// +/// Therefore, they have a separate universe (namespace). +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum Namespace { + TypeNS, + ValueNS, + MacroNS, +} + +impl Namespace { + pub fn descr(self) -> &'static str { + match self { + TypeNS => "type", + ValueNS => "value", + MacroNS => "macro", + } + } +} + +/// Just a helper ‒ separate structure for each namespace. 
+#[derive(Copy, Clone, Default, Debug)] +pub struct PerNS { + pub value_ns: T, + pub type_ns: T, + pub macro_ns: T, +} + +impl PerNS { + pub fn map U>(self, mut f: F) -> PerNS { + PerNS { + value_ns: f(self.value_ns), + type_ns: f(self.type_ns), + macro_ns: f(self.macro_ns), + } + } +} + +impl ::std::ops::Index for PerNS { + type Output = T; + fn index(&self, ns: Namespace) -> &T { + match ns { + ValueNS => &self.value_ns, + TypeNS => &self.type_ns, + MacroNS => &self.macro_ns, + } + } +} + +impl ::std::ops::IndexMut for PerNS { + fn index_mut(&mut self, ns: Namespace) -> &mut T { + match ns { + ValueNS => &mut self.value_ns, + TypeNS => &mut self.type_ns, + MacroNS => &mut self.macro_ns, + } + } +} + +impl PerNS> { + /// Returns whether all the items in this collection are `None`. + pub fn is_empty(&self) -> bool { + self.type_ns.is_none() && self.value_ns.is_none() && self.macro_ns.is_none() + } + + /// Returns an iterator over the items which are `Some`. + pub fn present_items(self) -> impl Iterator { + use std::iter::once; + + once(self.type_ns) + .chain(once(self.value_ns)) + .chain(once(self.macro_ns)) + .filter_map(|it| it) + } +} + /// Definition mapping pub type DefMap = NodeMap; @@ -123,6 +218,10 @@ pub type DefMap = NodeMap; /// within. pub type ExportMap = DefIdMap>; +/// Map used to track the `use` statements within a scope, matching it with all the items in every +/// namespace. +pub type ImportMap = NodeMap>>; + #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] pub struct Export { /// The name of the target. @@ -134,9 +233,6 @@ pub struct Export { /// The visibility of the export. /// We include non-`pub` exports for hygienic macros that get used from extern crates. pub vis: ty::Visibility, - /// True if from a `use` or and `extern crate`. - /// Used in rustdoc. 
- pub is_import: bool, } impl CtorKind { @@ -156,6 +252,17 @@ impl CtorKind { } } +impl NonMacroAttrKind { + fn descr(self) -> &'static str { + match self { + NonMacroAttrKind::Builtin => "built-in attribute", + NonMacroAttrKind::Tool => "tool attribute", + NonMacroAttrKind::DeriveHelper => "derive helper attribute", + NonMacroAttrKind::Custom => "custom attribute", + } + } +} + impl Def { pub fn def_id(&self) -> DefId { match *self { @@ -165,7 +272,7 @@ impl Def { Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) | Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) | Def::AssociatedConst(id) | Def::Macro(id, ..) | - Def::GlobalAsm(id) | Def::TyForeign(id) => { + Def::Existential(id) | Def::AssociatedExistential(id) | Def::TyForeign(id) => { id } @@ -174,6 +281,8 @@ impl Def { Def::Label(..) | Def::PrimTy(..) | Def::SelfTy(..) | + Def::ToolMod | + Def::NonMacroAttr(..) | Def::Err => { bug!("attempted .def_id() on invalid def: {:?}", self) } @@ -191,9 +300,11 @@ impl Def { Def::VariantCtor(.., CtorKind::Const) => "unit variant", Def::VariantCtor(.., CtorKind::Fictive) => "struct variant", Def::Enum(..) => "enum", + Def::Existential(..) => "existential type", Def::TyAlias(..) => "type alias", Def::TraitAlias(..) => "trait alias", Def::AssociatedTy(..) => "associated type", + Def::AssociatedExistential(..) => "associated existential type", Def::Struct(..) => "struct", Def::StructCtor(.., CtorKind::Fn) => "tuple struct", Def::StructCtor(.., CtorKind::Const) => "unit struct", @@ -210,8 +321,9 @@ impl Def { Def::Upvar(..) => "closure capture", Def::Label(..) => "label", Def::SelfTy(..) => "self type", - Def::Macro(..) => "macro", - Def::GlobalAsm(..) 
=> "global asm", + Def::Macro(.., macro_kind) => macro_kind.descr(), + Def::ToolMod => "tool module", + Def::NonMacroAttr(attr_kind) => attr_kind.descr(), Def::Err => "unresolved item", } } diff --git a/src/librustc/hir/def_id.rs b/src/librustc/hir/def_id.rs index 637b156ceef5..7acfe6839540 100644 --- a/src/librustc/hir/def_id.rs +++ b/src/librustc/hir/def_id.rs @@ -81,7 +81,7 @@ impl serialize::UseSpecializedDecodable for CrateNum {} /// Since the DefIndex is mostly treated as an opaque ID, you probably /// don't have to care about these address spaces. -#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, Hash, Copy)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy)] pub struct DefIndex(u32); /// The crate root is always assigned index 0 by the AST Map code, @@ -150,7 +150,7 @@ impl DefIndex { impl serialize::UseSpecializedEncodable for DefIndex {} impl serialize::UseSpecializedDecodable for DefIndex {} -#[derive(Copy, Clone, Eq, PartialEq, Hash)] +#[derive(Copy, Clone, Hash)] pub enum DefIndexAddressSpace { Low = 0, High = 1, @@ -165,7 +165,7 @@ impl DefIndexAddressSpace { /// A DefId identifies a particular *definition*, by combining a crate /// index and a def index. -#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, Hash, Copy)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy)] pub struct DefId { pub krate: CrateNum, pub index: DefIndex, @@ -216,11 +216,10 @@ impl serialize::UseSpecializedDecodable for DefId {} /// few cases where we know that only DefIds from the local crate are expected /// and a DefId from a different crate would signify a bug somewhere. This /// is when LocalDefId comes in handy. 
-#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct LocalDefId(DefIndex); impl LocalDefId { - #[inline] pub fn from_def_id(def_id: DefId) -> LocalDefId { assert!(def_id.is_local()); diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index ce35e6552ca8..d853d3d9a7fb 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -41,9 +41,7 @@ //! This order consistency is required in a few places in rustc, for //! example generator inference, and possibly also HIR borrowck. -use syntax::abi::Abi; -use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute}; -use syntax::codemap::Spanned; +use syntax::ast::{NodeId, CRATE_NODE_ID, Ident, Name, Attribute}; use syntax_pos::Span; use hir::*; use hir::def::Def; @@ -52,14 +50,15 @@ use super::itemlikevisit::DeepVisitor; use std::cmp; use std::u32; +use std::result::Result::Err; -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone)] pub enum FnKind<'a> { - /// fn foo() or extern "Abi" fn foo() - ItemFn(Name, &'a Generics, Unsafety, Constness, Abi, &'a Visibility, &'a [Attribute]), + /// #[xxx] pub async/const/extern "Abi" fn foo() + ItemFn(Name, &'a Generics, FnHeader, &'a Visibility, &'a [Attribute]), /// fn foo(&self) - Method(Name, &'a MethodSig, Option<&'a Visibility>, &'a [Attribute]), + Method(Ident, &'a MethodSig, Option<&'a Visibility>, &'a [Attribute]), /// |x, y| {} Closure(&'a [Attribute]), @@ -249,6 +248,9 @@ pub trait Visitor<'v> : Sized { fn visit_name(&mut self, _span: Span, _name: Name) { // Nothing to do. 
} + fn visit_ident(&mut self, ident: Ident) { + walk_ident(self, ident) + } fn visit_mod(&mut self, m: &'v Mod, _s: Span, n: NodeId) { walk_mod(self, m, n) } @@ -273,6 +275,9 @@ pub trait Visitor<'v> : Sized { fn visit_decl(&mut self, d: &'v Decl) { walk_decl(self, d) } + fn visit_anon_const(&mut self, c: &'v AnonConst) { + walk_anon_const(self, c) + } fn visit_expr(&mut self, ex: &'v Expr) { walk_expr(self, ex) } @@ -309,8 +314,8 @@ pub trait Visitor<'v> : Sized { fn visit_trait_ref(&mut self, t: &'v TraitRef) { walk_trait_ref(self, t) } - fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) { - walk_ty_param_bound(self, bounds) + fn visit_param_bound(&mut self, bounds: &'v GenericBound) { + walk_param_bound(self, bounds) } fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: TraitBoundModifier) { walk_poly_trait_ref(self, t, m) @@ -336,20 +341,29 @@ pub trait Visitor<'v> : Sized { fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) { walk_variant(self, v, g, item_id) } + fn visit_label(&mut self, label: &'v Label) { + walk_label(self, label) + } + fn visit_generic_arg(&mut self, generic_arg: &'v GenericArg) { + match generic_arg { + GenericArg::Lifetime(lt) => self.visit_lifetime(lt), + GenericArg::Type(ty) => self.visit_ty(ty), + } + } fn visit_lifetime(&mut self, lifetime: &'v Lifetime) { walk_lifetime(self, lifetime) } - fn visit_qpath(&mut self, qpath: &'v QPath, id: NodeId, span: Span) { + fn visit_qpath(&mut self, qpath: &'v QPath, id: HirId, span: Span) { walk_qpath(self, qpath, id, span) } - fn visit_path(&mut self, path: &'v Path, _id: NodeId) { + fn visit_path(&mut self, path: &'v Path, _id: HirId) { walk_path(self, path) } fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) { walk_path_segment(self, path_span, path_segment) } - fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'v PathParameters) { - walk_path_parameters(self, path_span, path_parameters) + fn 
visit_generic_args(&mut self, path_span: Span, generic_args: &'v GenericArgs) { + walk_generic_args(self, path_span, generic_args) } fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding) { walk_assoc_type_binding(self, type_binding) @@ -370,18 +384,6 @@ pub trait Visitor<'v> : Sized { } } -pub fn walk_opt_name<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_name: Option) { - if let Some(name) = opt_name { - visitor.visit_name(span, name); - } -} - -pub fn walk_opt_sp_name<'v, V: Visitor<'v>>(visitor: &mut V, opt_sp_name: &Option>) { - if let Some(ref sp_name) = *opt_sp_name { - visitor.visit_name(sp_name.span, sp_name.node); - } -} - /// Walks the contents of a crate. See also `Crate::visit_all_items`. pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) { visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID); @@ -414,19 +416,30 @@ pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) { // Intentionally visiting the expr first - the initialization expr // dominates the local's definition. 
walk_list!(visitor, visit_expr, &local.init); - + walk_list!(visitor, visit_attribute, local.attrs.iter()); visitor.visit_id(local.id); visitor.visit_pat(&local.pat); walk_list!(visitor, visit_ty, &local.ty); } +pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) { + visitor.visit_name(ident.span, ident.name); +} + +pub fn walk_label<'v, V: Visitor<'v>>(visitor: &mut V, label: &'v Label) { + visitor.visit_ident(label.ident); +} + pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) { visitor.visit_id(lifetime.id); match lifetime.name { - LifetimeName::Name(name) => { - visitor.visit_name(lifetime.span, name); + LifetimeName::Param(ParamName::Plain(ident)) => { + visitor.visit_ident(ident); } - LifetimeName::Static | LifetimeName::Implicit | LifetimeName::Underscore => {} + LifetimeName::Param(ParamName::Fresh(_)) | + LifetimeName::Static | + LifetimeName::Implicit | + LifetimeName::Underscore => {} } } @@ -443,33 +456,33 @@ pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef) where V: Visitor<'v> { visitor.visit_id(trait_ref.ref_id); - visitor.visit_path(&trait_ref.path, trait_ref.ref_id) + visitor.visit_path(&trait_ref.path, trait_ref.hir_ref_id) } pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { visitor.visit_vis(&item.vis); visitor.visit_name(item.span, item.name); match item.node { - ItemExternCrate(opt_name) => { + ItemKind::ExternCrate(orig_name) => { visitor.visit_id(item.id); - walk_opt_name(visitor, item.span, opt_name) + if let Some(orig_name) = orig_name { + visitor.visit_name(item.span, orig_name); + } } - ItemUse(ref path, _) => { + ItemKind::Use(ref path, _) => { visitor.visit_id(item.id); - visitor.visit_path(path, item.id); + visitor.visit_path(path, item.hir_id); } - ItemStatic(ref typ, _, body) | - ItemConst(ref typ, body) => { + ItemKind::Static(ref typ, _, body) | + ItemKind::Const(ref typ, body) => { visitor.visit_id(item.id); visitor.visit_ty(typ); 
visitor.visit_nested_body(body); } - ItemFn(ref declaration, unsafety, constness, abi, ref generics, body_id) => { + ItemKind::Fn(ref declaration, header, ref generics, body_id) => { visitor.visit_fn(FnKind::ItemFn(item.name, generics, - unsafety, - constness, - abi, + header, &item.vis, &item.attrs), declaration, @@ -477,50 +490,64 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { item.span, item.id) } - ItemMod(ref module) => { + ItemKind::Mod(ref module) => { // visit_mod() takes care of visiting the Item's NodeId visitor.visit_mod(module, item.span, item.id) } - ItemForeignMod(ref foreign_module) => { + ItemKind::ForeignMod(ref foreign_module) => { visitor.visit_id(item.id); walk_list!(visitor, visit_foreign_item, &foreign_module.items); } - ItemGlobalAsm(_) => { + ItemKind::GlobalAsm(_) => { visitor.visit_id(item.id); } - ItemTy(ref typ, ref type_parameters) => { + ItemKind::Ty(ref typ, ref type_parameters) => { visitor.visit_id(item.id); visitor.visit_ty(typ); visitor.visit_generics(type_parameters) } - ItemEnum(ref enum_definition, ref type_parameters) => { + ItemKind::Existential(ExistTy {ref generics, ref bounds, impl_trait_fn}) => { + visitor.visit_id(item.id); + walk_generics(visitor, generics); + walk_list!(visitor, visit_param_bound, bounds); + if let Some(impl_trait_fn) = impl_trait_fn { + visitor.visit_def_mention(Def::Fn(impl_trait_fn)) + } + } + ItemKind::Enum(ref enum_definition, ref type_parameters) => { visitor.visit_generics(type_parameters); // visit_enum_def() takes care of visiting the Item's NodeId visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span) } - ItemImpl(.., ref type_parameters, ref opt_trait_reference, ref typ, ref impl_item_refs) => { + ItemKind::Impl( + .., + ref type_parameters, + ref opt_trait_reference, + ref typ, + ref impl_item_refs + ) => { visitor.visit_id(item.id); visitor.visit_generics(type_parameters); walk_list!(visitor, visit_trait_ref, opt_trait_reference); 
visitor.visit_ty(typ); walk_list!(visitor, visit_impl_item_ref, impl_item_refs); } - ItemStruct(ref struct_definition, ref generics) | - ItemUnion(ref struct_definition, ref generics) => { + ItemKind::Struct(ref struct_definition, ref generics) | + ItemKind::Union(ref struct_definition, ref generics) => { visitor.visit_generics(generics); visitor.visit_id(item.id); visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span); } - ItemTrait(.., ref generics, ref bounds, ref trait_item_refs) => { + ItemKind::Trait(.., ref generics, ref bounds, ref trait_item_refs) => { visitor.visit_id(item.id); visitor.visit_generics(generics); - walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_trait_item_ref, trait_item_refs); } - ItemTraitAlias(ref generics, ref bounds) => { + ItemKind::TraitAlias(ref generics, ref bounds) => { visitor.visit_id(item.id); visitor.visit_generics(generics); - walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_param_bound, bounds); } } walk_list!(visitor, visit_attribute, &item.attrs); @@ -548,7 +575,7 @@ pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, generics, parent_item_id, variant.span); - walk_list!(visitor, visit_nested_body, variant.node.disr_expr); + walk_list!(visitor, visit_anon_const, &variant.node.disr_expr); walk_list!(visitor, visit_attribute, &variant.node.attrs); } @@ -556,51 +583,45 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) { visitor.visit_id(typ.id); match typ.node { - TySlice(ref ty) => { + TyKind::Slice(ref ty) => { visitor.visit_ty(ty) } - TyPtr(ref mutable_type) => { + TyKind::Ptr(ref mutable_type) => { visitor.visit_ty(&mutable_type.ty) } - TyRptr(ref lifetime, ref mutable_type) => { + TyKind::Rptr(ref lifetime, ref mutable_type) => { visitor.visit_lifetime(lifetime); visitor.visit_ty(&mutable_type.ty) } - TyNever => {}, - TyTup(ref tuple_element_types) => { + 
TyKind::Never => {}, + TyKind::Tup(ref tuple_element_types) => { walk_list!(visitor, visit_ty, tuple_element_types); } - TyBareFn(ref function_declaration) => { - visitor.visit_fn_decl(&function_declaration.decl); + TyKind::BareFn(ref function_declaration) => { walk_list!(visitor, visit_generic_param, &function_declaration.generic_params); + visitor.visit_fn_decl(&function_declaration.decl); } - TyPath(ref qpath) => { - visitor.visit_qpath(qpath, typ.id, typ.span); + TyKind::Path(ref qpath) => { + visitor.visit_qpath(qpath, typ.hir_id, typ.span); } - TyArray(ref ty, length) => { + TyKind::Array(ref ty, ref length) => { visitor.visit_ty(ty); - visitor.visit_nested_body(length) + visitor.visit_anon_const(length) } - TyTraitObject(ref bounds, ref lifetime) => { + TyKind::TraitObject(ref bounds, ref lifetime) => { for bound in bounds { visitor.visit_poly_trait_ref(bound, TraitBoundModifier::None); } visitor.visit_lifetime(lifetime); } - TyImplTraitExistential(ref existty, ref lifetimes) => { - let ExistTy { ref generics, ref bounds } = *existty; - walk_generics(visitor, generics); - walk_list!(visitor, visit_ty_param_bound, bounds); - walk_list!(visitor, visit_lifetime, lifetimes); + TyKind::Typeof(ref expression) => { + visitor.visit_anon_const(expression) } - TyTypeof(expression) => { - visitor.visit_nested_body(expression) - } - TyInfer | TyErr => {} + TyKind::Infer | TyKind::Err => {} } } -pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V, qpath: &'v QPath, id: NodeId, span: Span) { +pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V, qpath: &'v QPath, id: HirId, span: Span) { match *qpath { QPath::Resolved(ref maybe_qself, ref path) => { if let Some(ref qself) = *maybe_qself { @@ -625,24 +646,23 @@ pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) { pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, path_span: Span, segment: &'v PathSegment) { - visitor.visit_name(path_span, segment.name); - if let Some(ref parameters) = 
segment.parameters { - visitor.visit_path_parameters(path_span, parameters); + visitor.visit_ident(segment.ident); + if let Some(ref args) = segment.args { + visitor.visit_generic_args(path_span, args); } } -pub fn walk_path_parameters<'v, V: Visitor<'v>>(visitor: &mut V, - _path_span: Span, - path_parameters: &'v PathParameters) { - walk_list!(visitor, visit_lifetime, &path_parameters.lifetimes); - walk_list!(visitor, visit_ty, &path_parameters.types); - walk_list!(visitor, visit_assoc_type_binding, &path_parameters.bindings); +pub fn walk_generic_args<'v, V: Visitor<'v>>(visitor: &mut V, + _path_span: Span, + generic_args: &'v GenericArgs) { + walk_list!(visitor, visit_generic_arg, &generic_args.args); + walk_list!(visitor, visit_assoc_type_binding, &generic_args.bindings); } pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V, type_binding: &'v TypeBinding) { visitor.visit_id(type_binding.id); - visitor.visit_name(type_binding.span, type_binding.name); + visitor.visit_ident(type_binding.ident); visitor.visit_ty(&type_binding.ty); } @@ -650,16 +670,17 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { visitor.visit_id(pattern.id); match pattern.node { PatKind::TupleStruct(ref qpath, ref children, _) => { - visitor.visit_qpath(qpath, pattern.id, pattern.span); + visitor.visit_qpath(qpath, pattern.hir_id, pattern.span); walk_list!(visitor, visit_pat, children); } PatKind::Path(ref qpath) => { - visitor.visit_qpath(qpath, pattern.id, pattern.span); + visitor.visit_qpath(qpath, pattern.hir_id, pattern.span); } PatKind::Struct(ref qpath, ref fields, _) => { - visitor.visit_qpath(qpath, pattern.id, pattern.span); + visitor.visit_qpath(qpath, pattern.hir_id, pattern.span); for field in fields { - visitor.visit_name(field.span, field.node.name); + visitor.visit_id(field.node.id); + visitor.visit_ident(field.node.ident); visitor.visit_pat(&field.node.pat) } } @@ -670,9 +691,9 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, 
pattern: &'v Pat) { PatKind::Ref(ref subpattern, _) => { visitor.visit_pat(subpattern) } - PatKind::Binding(_, canonical_id, ref pth1, ref optional_subpattern) => { + PatKind::Binding(_, canonical_id, ident, ref optional_subpattern) => { visitor.visit_def_mention(Def::Local(canonical_id)); - visitor.visit_name(pth1.span, pth1.node); + visitor.visit_ident(ident); walk_list!(visitor, visit_pat, optional_subpattern); } PatKind::Lit(ref expression) => visitor.visit_expr(expression), @@ -695,44 +716,41 @@ pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v visitor.visit_name(foreign_item.span, foreign_item.name); match foreign_item.node { - ForeignItemFn(ref function_declaration, ref names, ref generics) => { + ForeignItemKind::Fn(ref function_declaration, ref param_names, ref generics) => { visitor.visit_generics(generics); visitor.visit_fn_decl(function_declaration); - for name in names { - visitor.visit_name(name.span, name.node); + for ¶m_name in param_names { + visitor.visit_ident(param_name); } } - ForeignItemStatic(ref typ, _) => visitor.visit_ty(typ), - ForeignItemType => (), + ForeignItemKind::Static(ref typ, _) => visitor.visit_ty(typ), + ForeignItemKind::Type => (), } walk_list!(visitor, visit_attribute, &foreign_item.attrs); } -pub fn walk_ty_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v TyParamBound) { +pub fn walk_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v GenericBound) { match *bound { - TraitTyParamBound(ref typ, modifier) => { + GenericBound::Trait(ref typ, modifier) => { visitor.visit_poly_trait_ref(typ, modifier); } - RegionTyParamBound(ref lifetime) => { - visitor.visit_lifetime(lifetime); - } + GenericBound::Outlives(ref lifetime) => visitor.visit_lifetime(lifetime), } } pub fn walk_generic_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v GenericParam) { - match *param { - GenericParam::Lifetime(ref ld) => { - visitor.visit_lifetime(&ld.lifetime); - walk_list!(visitor, visit_lifetime, 
&ld.bounds); - } - GenericParam::Type(ref ty_param) => { - visitor.visit_id(ty_param.id); - visitor.visit_name(ty_param.span, ty_param.name); - walk_list!(visitor, visit_ty_param_bound, &ty_param.bounds); - walk_list!(visitor, visit_ty, &ty_param.default); - } + visitor.visit_id(param.id); + walk_list!(visitor, visit_attribute, ¶m.attrs); + match param.name { + ParamName::Plain(ident) => visitor.visit_ident(ident), + ParamName::Fresh(_) => {} } + match param.kind { + GenericParamKind::Lifetime { .. } => {} + GenericParamKind::Type { ref default, .. } => walk_list!(visitor, visit_ty, default), + } + walk_list!(visitor, visit_param_bound, ¶m.bounds); } pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) { @@ -751,14 +769,14 @@ pub fn walk_where_predicate<'v, V: Visitor<'v>>( ref bound_generic_params, ..}) => { visitor.visit_ty(bounded_ty); - walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_generic_param, bound_generic_params); } &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime, ref bounds, ..}) => { visitor.visit_lifetime(lifetime); - walk_list!(visitor, visit_lifetime, bounds); + walk_list!(visitor, visit_param_bound, bounds); } &WherePredicate::EqPredicate(WhereEqPredicate{id, ref lhs_ty, @@ -807,7 +825,7 @@ pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V, } pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem) { - visitor.visit_name(trait_item.span, trait_item.name); + visitor.visit_ident(trait_item.ident); walk_list!(visitor, visit_attribute, &trait_item.attrs); visitor.visit_generics(&trait_item.generics); match trait_item.node { @@ -816,15 +834,15 @@ pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v Trai visitor.visit_ty(ty); walk_list!(visitor, visit_nested_body, default); } - TraitItemKind::Method(ref sig, TraitMethod::Required(ref names)) => { + TraitItemKind::Method(ref 
sig, TraitMethod::Required(ref param_names)) => { visitor.visit_id(trait_item.id); visitor.visit_fn_decl(&sig.decl); - for name in names { - visitor.visit_name(name.span, name.node); + for ¶m_name in param_names { + visitor.visit_ident(param_name); } } TraitItemKind::Method(ref sig, TraitMethod::Provided(body_id)) => { - visitor.visit_fn(FnKind::Method(trait_item.name, + visitor.visit_fn(FnKind::Method(trait_item.ident, sig, None, &trait_item.attrs), @@ -835,7 +853,7 @@ pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v Trai } TraitItemKind::Type(ref bounds, ref default) => { visitor.visit_id(trait_item.id); - walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_ty, default); } } @@ -843,9 +861,9 @@ pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v Trai pub fn walk_trait_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_item_ref: &'v TraitItemRef) { // NB: Deliberately force a compilation error if/when new fields are added. 
- let TraitItemRef { id, name, ref kind, span, ref defaultness } = *trait_item_ref; + let TraitItemRef { id, ident, ref kind, span: _, ref defaultness } = *trait_item_ref; visitor.visit_nested_trait_item(id); - visitor.visit_name(span, name); + visitor.visit_ident(ident); visitor.visit_associated_item_kind(kind); visitor.visit_defaultness(defaultness); } @@ -855,16 +873,16 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt let ImplItem { id: _, hir_id: _, - name, + ident, ref vis, ref defaultness, ref attrs, ref generics, ref node, - span + span: _, } = *impl_item; - visitor.visit_name(span, name); + visitor.visit_ident(ident); visitor.visit_vis(vis); visitor.visit_defaultness(defaultness); walk_list!(visitor, visit_attribute, attrs); @@ -876,7 +894,7 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt visitor.visit_nested_body(body); } ImplItemKind::Method(ref sig, body_id) => { - visitor.visit_fn(FnKind::Method(impl_item.name, + visitor.visit_fn(FnKind::Method(impl_item.ident, sig, Some(&impl_item.vis), &impl_item.attrs), @@ -889,14 +907,18 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt visitor.visit_id(impl_item.id); visitor.visit_ty(ty); } + ImplItemKind::Existential(ref bounds) => { + visitor.visit_id(impl_item.id); + walk_list!(visitor, visit_param_bound, bounds); + } } } pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &'v ImplItemRef) { // NB: Deliberately force a compilation error if/when new fields are added. 
- let ImplItemRef { id, name, ref kind, span, ref vis, ref defaultness } = *impl_item_ref; + let ImplItemRef { id, ident, ref kind, span: _, ref vis, ref defaultness } = *impl_item_ref; visitor.visit_nested_impl_item(id); - visitor.visit_name(span, name); + visitor.visit_ident(ident); visitor.visit_associated_item_kind(kind); visitor.visit_vis(vis); visitor.visit_defaultness(defaultness); @@ -911,7 +933,7 @@ pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: & pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) { visitor.visit_id(struct_field.id); visitor.visit_vis(&struct_field.vis); - visitor.visit_name(struct_field.span, struct_field.name); + visitor.visit_ident(struct_field.ident); visitor.visit_ty(&struct_field.ty); walk_list!(visitor, visit_attribute, &struct_field.attrs); } @@ -924,12 +946,12 @@ pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) { pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) { match statement.node { - StmtDecl(ref declaration, id) => { + StmtKind::Decl(ref declaration, id) => { visitor.visit_id(id); visitor.visit_decl(declaration) } - StmtExpr(ref expression, id) | - StmtSemi(ref expression, id) => { + StmtKind::Expr(ref expression, id) | + StmtKind::Semi(ref expression, id) => { visitor.visit_id(id); visitor.visit_expr(expression) } @@ -938,131 +960,133 @@ pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) { pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) { match declaration.node { - DeclLocal(ref local) => visitor.visit_local(local), - DeclItem(item) => visitor.visit_nested_item(item), + DeclKind::Local(ref local) => visitor.visit_local(local), + DeclKind::Item(item) => visitor.visit_nested_item(item), } } +pub fn walk_anon_const<'v, V: Visitor<'v>>(visitor: &mut V, constant: &'v AnonConst) { + visitor.visit_id(constant.id); + visitor.visit_nested_body(constant.body); 
+} + pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { visitor.visit_id(expression.id); walk_list!(visitor, visit_attribute, expression.attrs.iter()); match expression.node { - ExprBox(ref subexpression) => { + ExprKind::Box(ref subexpression) => { visitor.visit_expr(subexpression) } - ExprArray(ref subexpressions) => { + ExprKind::Array(ref subexpressions) => { walk_list!(visitor, visit_expr, subexpressions); } - ExprRepeat(ref element, count) => { + ExprKind::Repeat(ref element, ref count) => { visitor.visit_expr(element); - visitor.visit_nested_body(count) + visitor.visit_anon_const(count) } - ExprStruct(ref qpath, ref fields, ref optional_base) => { - visitor.visit_qpath(qpath, expression.id, expression.span); + ExprKind::Struct(ref qpath, ref fields, ref optional_base) => { + visitor.visit_qpath(qpath, expression.hir_id, expression.span); for field in fields { - visitor.visit_name(field.name.span, field.name.node); + visitor.visit_id(field.id); + visitor.visit_ident(field.ident); visitor.visit_expr(&field.expr) } walk_list!(visitor, visit_expr, optional_base); } - ExprTup(ref subexpressions) => { + ExprKind::Tup(ref subexpressions) => { walk_list!(visitor, visit_expr, subexpressions); } - ExprCall(ref callee_expression, ref arguments) => { + ExprKind::Call(ref callee_expression, ref arguments) => { + visitor.visit_expr(callee_expression); walk_list!(visitor, visit_expr, arguments); - visitor.visit_expr(callee_expression) } - ExprMethodCall(ref segment, _, ref arguments) => { + ExprKind::MethodCall(ref segment, _, ref arguments) => { visitor.visit_path_segment(expression.span, segment); walk_list!(visitor, visit_expr, arguments); } - ExprBinary(_, ref left_expression, ref right_expression) => { + ExprKind::Binary(_, ref left_expression, ref right_expression) => { visitor.visit_expr(left_expression); visitor.visit_expr(right_expression) } - ExprAddrOf(_, ref subexpression) | ExprUnary(_, ref subexpression) => { + ExprKind::AddrOf(_, 
ref subexpression) | ExprKind::Unary(_, ref subexpression) => { visitor.visit_expr(subexpression) } - ExprLit(_) => {} - ExprCast(ref subexpression, ref typ) | ExprType(ref subexpression, ref typ) => { + ExprKind::Lit(_) => {} + ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => { visitor.visit_expr(subexpression); visitor.visit_ty(typ) } - ExprIf(ref head_expression, ref if_block, ref optional_else) => { + ExprKind::If(ref head_expression, ref if_block, ref optional_else) => { visitor.visit_expr(head_expression); visitor.visit_expr(if_block); walk_list!(visitor, visit_expr, optional_else); } - ExprWhile(ref subexpression, ref block, ref opt_sp_name) => { + ExprKind::While(ref subexpression, ref block, ref opt_label) => { + walk_list!(visitor, visit_label, opt_label); visitor.visit_expr(subexpression); visitor.visit_block(block); - walk_opt_sp_name(visitor, opt_sp_name); } - ExprLoop(ref block, ref opt_sp_name, _) => { + ExprKind::Loop(ref block, ref opt_label, _) => { + walk_list!(visitor, visit_label, opt_label); visitor.visit_block(block); - walk_opt_sp_name(visitor, opt_sp_name); } - ExprMatch(ref subexpression, ref arms, _) => { + ExprKind::Match(ref subexpression, ref arms, _) => { visitor.visit_expr(subexpression); walk_list!(visitor, visit_arm, arms); } - ExprClosure(_, ref function_declaration, body, _fn_decl_span, _gen) => { + ExprKind::Closure(_, ref function_declaration, body, _fn_decl_span, _gen) => { visitor.visit_fn(FnKind::Closure(&expression.attrs), function_declaration, body, expression.span, expression.id) } - ExprBlock(ref block) => visitor.visit_block(block), - ExprAssign(ref left_hand_expression, ref right_hand_expression) => { + ExprKind::Block(ref block, ref opt_label) => { + walk_list!(visitor, visit_label, opt_label); + visitor.visit_block(block); + } + ExprKind::Assign(ref left_hand_expression, ref right_hand_expression) => { visitor.visit_expr(right_hand_expression); 
visitor.visit_expr(left_hand_expression) } - ExprAssignOp(_, ref left_expression, ref right_expression) => { + ExprKind::AssignOp(_, ref left_expression, ref right_expression) => { visitor.visit_expr(right_expression); visitor.visit_expr(left_expression) } - ExprField(ref subexpression, ref name) => { + ExprKind::Field(ref subexpression, ident) => { visitor.visit_expr(subexpression); - visitor.visit_name(name.span, name.node); + visitor.visit_ident(ident); } - ExprTupField(ref subexpression, _) => { - visitor.visit_expr(subexpression); - } - ExprIndex(ref main_expression, ref index_expression) => { + ExprKind::Index(ref main_expression, ref index_expression) => { visitor.visit_expr(main_expression); visitor.visit_expr(index_expression) } - ExprPath(ref qpath) => { - visitor.visit_qpath(qpath, expression.id, expression.span); + ExprKind::Path(ref qpath) => { + visitor.visit_qpath(qpath, expression.hir_id, expression.span); } - ExprBreak(label, ref opt_expr) => { - label.ident.map(|ident| { - match label.target_id { - ScopeTarget::Block(node_id) | - ScopeTarget::Loop(LoopIdResult::Ok(node_id)) => - visitor.visit_def_mention(Def::Label(node_id)), - ScopeTarget::Loop(LoopIdResult::Err(_)) => {}, + ExprKind::Break(ref destination, ref opt_expr) => { + if let Some(ref label) = destination.label { + visitor.visit_label(label); + match destination.target_id { + Ok(node_id) => visitor.visit_def_mention(Def::Label(node_id)), + Err(_) => {}, }; - visitor.visit_name(ident.span, ident.node.name); - }); + } walk_list!(visitor, visit_expr, opt_expr); } - ExprAgain(label) => { - label.ident.map(|ident| { - match label.target_id { - ScopeTarget::Block(_) => bug!("can't `continue` to a non-loop block"), - ScopeTarget::Loop(LoopIdResult::Ok(node_id)) => - visitor.visit_def_mention(Def::Label(node_id)), - ScopeTarget::Loop(LoopIdResult::Err(_)) => {}, + ExprKind::Continue(ref destination) => { + if let Some(ref label) = destination.label { + visitor.visit_label(label); + match 
destination.target_id { + Ok(node_id) => visitor.visit_def_mention(Def::Label(node_id)), + Err(_) => {}, }; - visitor.visit_name(ident.span, ident.node.name); - }); + } } - ExprRet(ref optional_expression) => { + ExprKind::Ret(ref optional_expression) => { walk_list!(visitor, visit_expr, optional_expression); } - ExprInlineAsm(_, ref outputs, ref inputs) => { + ExprKind::InlineAsm(_, ref outputs, ref inputs) => { for output in outputs { visitor.visit_expr(output) } @@ -1070,7 +1094,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { visitor.visit_expr(input) } } - ExprYield(ref subexpression) => { + ExprKind::Yield(ref subexpression) => { visitor.visit_expr(subexpression); } } @@ -1084,9 +1108,9 @@ pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) { } pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility) { - if let Visibility::Restricted { ref path, id } = *vis { + if let VisibilityKind::Restricted { ref path, id, hir_id } = vis.node { visitor.visit_id(id); - visitor.visit_path(path, id) + visitor.visit_path(path, hir_id) } } @@ -1102,7 +1126,7 @@ pub fn walk_defaultness<'v, V: Visitor<'v>>(_: &mut V, _: &'v Defaultness) { // would be to walk it. 
} -#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)] pub struct IdRange { pub min: NodeId, pub max: NodeId, diff --git a/src/librustc/hir/itemlikevisit.rs b/src/librustc/hir/itemlikevisit.rs index 2221ecf07b43..a62000e10c79 100644 --- a/src/librustc/hir/itemlikevisit.rs +++ b/src/librustc/hir/itemlikevisit.rs @@ -88,3 +88,33 @@ impl<'v, 'hir, V> ItemLikeVisitor<'hir> for DeepVisitor<'v, V> self.visitor.visit_impl_item(impl_item); } } + +/// A parallel variant of ItemLikeVisitor +pub trait ParItemLikeVisitor<'hir> { + fn visit_item(&self, item: &'hir Item); + fn visit_trait_item(&self, trait_item: &'hir TraitItem); + fn visit_impl_item(&self, impl_item: &'hir ImplItem); +} + +pub trait IntoVisitor<'hir> { + type Visitor: Visitor<'hir>; + fn into_visitor(&self) -> Self::Visitor; +} + +pub struct ParDeepVisitor(pub V); + +impl<'hir, V> ParItemLikeVisitor<'hir> for ParDeepVisitor + where V: IntoVisitor<'hir> +{ + fn visit_item(&self, item: &'hir Item) { + self.0.into_visitor().visit_item(item); + } + + fn visit_trait_item(&self, trait_item: &'hir TraitItem) { + self.0.into_visitor().visit_trait_item(trait_item); + } + + fn visit_impl_item(&self, impl_item: &'hir ImplItem) { + self.0.into_visitor().visit_impl_item(impl_item); + } +} diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index 238145a061f5..09f76552279f 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -41,36 +41,40 @@ //! in the HIR, especially for multiple identifiers. 
use dep_graph::DepGraph; -use hir; +use hir::{self, ParamName}; use hir::HirVec; -use hir::map::{Definitions, DefKey, DefPathData}; -use hir::def_id::{DefIndex, DefId, CRATE_DEF_INDEX, DefIndexAddressSpace}; -use hir::def::{Def, PathResolution}; -use lint::builtin::PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES; +use hir::map::{DefKey, DefPathData, Definitions}; +use hir::def_id::{DefId, DefIndex, DefIndexAddressSpace, CRATE_DEF_INDEX}; +use hir::def::{Def, PathResolution, PerNS}; +use hir::GenericArg; +use lint::builtin::{self, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, + ELIDED_LIFETIMES_IN_PATHS}; use middle::cstore::CrateStore; use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::small_vec::OneVector; +use rustc_data_structures::thin_vec::ThinVec; use session::Session; use util::common::FN_OUTPUT_NAME; -use util::nodemap::{DefIdMap, FxHashMap, NodeMap}; +use util::nodemap::{DefIdMap, NodeMap}; use std::collections::{BTreeMap, HashSet}; use std::fmt::Debug; use std::iter; use std::mem; use syntax::attr; +use syntax::ast; use syntax::ast::*; use syntax::errors; use syntax::ext::hygiene::{Mark, SyntaxContext}; use syntax::print::pprust; use syntax::ptr::P; -use syntax::codemap::{self, respan, Spanned, CompilerDesugaringKind}; +use syntax::codemap::{self, respan, CompilerDesugaringKind, Spanned}; use syntax::std_inject; -use syntax::symbol::{Symbol, keywords}; -use syntax::tokenstream::{TokenStream, TokenTree, Delimited}; +use syntax::symbol::{keywords, Symbol}; +use syntax::tokenstream::{Delimited, TokenStream, TokenTree}; use syntax::parse::token::Token; -use syntax::util::small_vector::SmallVector; use syntax::visit::{self, Visitor}; -use syntax_pos::Span; +use syntax_pos::{Span, MultiSpan}; const HIR_ID_COUNTER_LOCKED: u32 = 0xFFFFFFFF; @@ -80,14 +84,9 @@ pub struct LoweringContext<'a> { // Use to assign ids to hir nodes that do not directly correspond to an ast node sess: &'a Session, - cstore: &'a CrateStore, + cstore: &'a dyn CrateStore, 
- // As we walk the AST we must keep track of the current 'parent' def id (in - // the form of a DefIndex) so that if we create a new node which introduces - // a definition, then we can properly create the def id. - parent_def: Option, - resolver: &'a mut Resolver, - name_map: FxHashMap, + resolver: &'a mut dyn Resolver, /// The items being lowered are collected here. items: BTreeMap, @@ -107,12 +106,11 @@ pub struct LoweringContext<'a> { is_in_loop_condition: bool, is_in_trait_impl: bool, - // This is a list of in-band type definitions being generated by - // Argument-position `impl Trait`. - // When traversing a signature such as `fn foo(x: impl Trait)`, - // we record `impl Trait` as a new type parameter, then later - // add it on to `foo`s generics. - in_band_ty_params: Vec, + /// What to do when we encounter either an "anonymous lifetime + /// reference". The term "anonymous" is meant to encompass both + /// `'_` lifetimes as well as fully elided cases where nothing is + /// written at all (e.g., `&T` or `std::cell::Ref`). + anonymous_lifetime_mode: AnonymousLifetimeMode, // Used to create lifetime definitions from in-band lifetime usages. // e.g. `fn foo(x: &'x u8) -> &'x u8` to `fn foo<'x>(x: &'x u8) -> &'x u8` @@ -121,18 +119,20 @@ pub struct LoweringContext<'a> { // (i.e. it doesn't appear in the in_scope_lifetimes list), it is added // to this list. The results of this list are then added to the list of // lifetime definitions in the corresponding impl or function generics. - lifetimes_to_define: Vec<(Span, Name)>, + lifetimes_to_define: Vec<(Span, ParamName)>, + // Whether or not in-band lifetimes are being collected. This is used to // indicate whether or not we're in a place where new lifetimes will result // in in-band lifetime definitions, such a function or an impl header. - // This will always be false unless the `in_band_lifetimes` feature is - // enabled. 
+ // This will always be false unless the `in_band_lifetimes` or + // `impl_header_lifetime_elision` feature is enabled. is_collecting_in_band_lifetimes: bool, + // Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB. // When `is_collectin_in_band_lifetimes` is true, each lifetime is checked // against this list to see if it is already in-scope, or if a definition // needs to be created for it. - in_scope_lifetimes: Vec, + in_scope_lifetimes: Vec, type_def_lifetime_params: DefIdMap, @@ -148,35 +148,63 @@ pub trait Resolver { /// Obtain the resolution for a node id fn get_resolution(&mut self, id: NodeId) -> Option; + /// Obtain the possible resolutions for the given `use` statement. + fn get_import(&mut self, id: NodeId) -> PerNS>; + /// We must keep the set of definitions up to date as we add nodes that weren't in the AST. /// This should only return `None` during testing. fn definitions(&mut self) -> &mut Definitions; + + /// Given suffix ["b","c","d"], creates a HIR path for `[::crate_root]::b::c::d` and resolves + /// it based on `is_value`. + fn resolve_str_path( + &mut self, + span: Span, + crate_root: Option<&str>, + components: &[&str], + params: Option>, + is_value: bool, + ) -> hir::Path; } -#[derive(Clone, Copy, Debug)] -enum ImplTraitContext { +#[derive(Debug)] +enum ImplTraitContext<'a> { /// Treat `impl Trait` as shorthand for a new universal generic parameter. /// Example: `fn foo(x: impl Debug)`, where `impl Debug` is conceptually /// equivalent to a fresh universal parameter like `fn foo(x: T)`. /// - /// We store a DefId here so we can look up necessary information later - Universal(DefId), + /// Newly generated parameters should be inserted into the given `Vec` + Universal(&'a mut Vec), /// Treat `impl Trait` as shorthand for a new universal existential parameter. /// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually /// equivalent to a fresh existential parameter like `abstract type T; fn foo() -> T`. 
- Existential, + /// + /// We store a DefId here so we can look up necessary information later + Existential(DefId), /// `impl Trait` is not accepted in this position. Disallowed, } -pub fn lower_crate(sess: &Session, - cstore: &CrateStore, - dep_graph: &DepGraph, - krate: &Crate, - resolver: &mut Resolver) - -> hir::Crate { +impl<'a> ImplTraitContext<'a> { + fn reborrow(&'b mut self) -> ImplTraitContext<'b> { + use self::ImplTraitContext::*; + match self { + Universal(params) => Universal(params), + Existential(did) => Existential(*did), + Disallowed => Disallowed, + } + } +} + +pub fn lower_crate( + sess: &Session, + cstore: &dyn CrateStore, + dep_graph: &DepGraph, + krate: &Crate, + resolver: &mut dyn Resolver, +) -> hir::Crate { // We're constructing the HIR here; we don't care what we will // read, since we haven't even constructed the *input* to // incr. comp. yet. @@ -186,9 +214,7 @@ pub fn lower_crate(sess: &Session, crate_root: std_inject::injected_crate_name(), sess, cstore, - parent_def: None, resolver, - name_map: FxHashMap(), items: BTreeMap::new(), trait_items: BTreeMap::new(), impl_items: BTreeMap::new(), @@ -199,27 +225,28 @@ pub fn lower_crate(sess: &Session, catch_scopes: Vec::new(), loop_scopes: Vec::new(), is_in_loop_condition: false, + anonymous_lifetime_mode: AnonymousLifetimeMode::PassThrough, type_def_lifetime_params: DefIdMap(), current_hir_id_owner: vec![(CRATE_DEF_INDEX, 0)], item_local_id_counters: NodeMap(), node_id_to_hir_id: IndexVec::new(), is_generator: false, is_in_trait_impl: false, - in_band_ty_params: Vec::new(), lifetimes_to_define: Vec::new(), is_collecting_in_band_lifetimes: false, in_scope_lifetimes: Vec::new(), }.lower_crate(krate) } -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, PartialEq)] enum ParamMode { /// Any path in a type context. Explicit, /// The `module::Type` in `module::Type::method` in an expression. 
- Optional + Optional, } +#[derive(Debug)] struct LoweredNodeId { node_id: NodeId, hir_id: hir::HirId, @@ -231,6 +258,51 @@ enum ParenthesizedGenericArgs { Err, } +/// What to do when we encounter an **anonymous** lifetime +/// reference. Anonymous lifetime references come in two flavors. You +/// have implicit, or fully elided, references to lifetimes, like the +/// one in `&T` or `Ref`, and you have `'_` lifetimes, like `&'_ T` +/// or `Ref<'_, T>`. These often behave the same, but not always: +/// +/// - certain usages of implicit references are deprecated, like +/// `Ref`, and we sometimes just give hard errors in those cases +/// as well. +/// - for object bounds there is a difference: `Box` is not +/// the same as `Box`. +/// +/// We describe the effects of the various modes in terms of three cases: +/// +/// - **Modern** -- includes all uses of `'_`, but also the lifetime arg +/// of a `&` (e.g., the missing lifetime in something like `&T`) +/// - **Dyn Bound** -- if you have something like `Box`, +/// there is an elided lifetime bound (`Box`). These +/// elided bounds follow special rules. Note that this only covers +/// cases where *nothing* is written; the `'_` in `Box` is a case of "modern" elision. +/// - **Deprecated** -- this coverse cases like `Ref`, where the lifetime +/// parameter to ref is completely elided. `Ref<'_, T>` would be the modern, +/// non-deprecated equivalent. +/// +/// Currently, the handling of lifetime elision is somewhat spread out +/// between HIR lowering and -- as described below -- the +/// `resolve_lifetime` module. Often we "fallthrough" to that code by generating +/// an "elided" or "underscore" lifetime name. In the future, we probably want to move +/// everything into HIR lowering. +#[derive(Copy, Clone)] +enum AnonymousLifetimeMode { + /// For **Modern** cases, create a new anonymous region parameter + /// and reference that. + /// + /// For **Dyn Bound** cases, pass responsibility to + /// `resolve_lifetime` code. 
+ /// + /// For **Deprecated** cases, report an error. + CreateParameter, + + /// Pass responsibility to `resolve_lifetime` code for all cases. + PassThrough, +} + impl<'a> LoweringContext<'a> { fn lower_crate(mut self, c: &Crate) -> hir::Crate { /// Full-crate AST visitor that inserts into a fresh @@ -247,14 +319,20 @@ impl<'a> LoweringContext<'a> { self.lctx.allocate_hir_id_counter(item.id, item); match item.node { - ItemKind::Struct(_, ref generics) | - ItemKind::Union(_, ref generics) | - ItemKind::Enum(_, ref generics) | - ItemKind::Ty(_, ref generics) | - ItemKind::Trait(_, _, ref generics, ..) => { + ItemKind::Struct(_, ref generics) + | ItemKind::Union(_, ref generics) + | ItemKind::Enum(_, ref generics) + | ItemKind::Ty(_, ref generics) + | ItemKind::Existential(_, ref generics) + | ItemKind::Trait(_, _, ref generics, ..) => { let def_id = self.lctx.resolver.definitions().local_def_id(item.id); - let count = generics.params.iter() - .filter(|param| param.is_lifetime_param()) + let count = generics + .params + .iter() + .filter(|param| match param.kind { + ast::GenericParamKind::Lifetime { .. } => true, + _ => false, + }) .count(); self.lctx.type_def_lifetime_params.insert(def_id, count); } @@ -280,7 +358,8 @@ impl<'a> LoweringContext<'a> { impl<'lcx, 'interner> ItemLowerer<'lcx, 'interner> { fn with_trait_impl_ref(&mut self, trait_impl_ref: &Option, f: F) - where F: FnOnce(&mut Self) + where + F: FnOnce(&mut Self), { let old = self.lctx.is_in_trait_impl; self.lctx.is_in_trait_impl = if let &None = trait_impl_ref { @@ -305,16 +384,17 @@ impl<'a> LoweringContext<'a> { }); if item_lowered { - let item_lifetimes = match self.lctx.items.get(&item.id).unwrap().node { - hir::Item_::ItemImpl(_,_,_,ref generics,..) | - hir::Item_::ItemTrait(_,_,ref generics,..) => - generics.lifetimes().cloned().collect::>(), - _ => Vec::new(), + let item_generics = match self.lctx.items.get(&item.id).unwrap().node { + hir::ItemKind::Impl(_, _, _, ref generics, ..) 
+ | hir::ItemKind::Trait(_, _, ref generics, ..) => { + generics.params.clone() + } + _ => HirVec::new(), }; - self.lctx.with_parent_impl_lifetime_defs(&item_lifetimes, |this| { + self.lctx.with_parent_impl_lifetime_defs(&item_generics, |this| { let this = &mut ItemLowerer { lctx: this }; - if let ItemKind::Impl(_,_,_,_,ref opt_trait_ref,_,_) = item.node { + if let ItemKind::Impl(_, _, _, _, ref opt_trait_ref, _, _) = item.node { this.with_trait_impl_ref(opt_trait_ref, |this| { visit::walk_item(this, item) }); @@ -374,27 +454,26 @@ impl<'a> LoweringContext<'a> { } } - fn allocate_hir_id_counter(&mut self, - owner: NodeId, - debug: &T) { + fn allocate_hir_id_counter(&mut self, owner: NodeId, debug: &T) -> LoweredNodeId { if self.item_local_id_counters.insert(owner, 0).is_some() { - bug!("Tried to allocate item_local_id_counter for {:?} twice", debug); + bug!( + "Tried to allocate item_local_id_counter for {:?} twice", + debug + ); } // Always allocate the first HirId for the owner itself - self.lower_node_id_with_owner(owner, owner); + self.lower_node_id_with_owner(owner, owner) } - fn lower_node_id_generic(&mut self, - ast_node_id: NodeId, - alloc_hir_id: F) - -> LoweredNodeId - where F: FnOnce(&mut Self) -> hir::HirId + fn lower_node_id_generic(&mut self, ast_node_id: NodeId, alloc_hir_id: F) -> LoweredNodeId + where + F: FnOnce(&mut Self) -> hir::HirId, { if ast_node_id == DUMMY_NODE_ID { return LoweredNodeId { node_id: DUMMY_NODE_ID, hir_id: hir::DUMMY_HIR_ID, - } + }; } let min_size = ast_node_id.as_usize() + 1; @@ -421,22 +500,26 @@ impl<'a> LoweringContext<'a> { } } - fn with_hir_id_owner(&mut self, owner: NodeId, f: F) - where F: FnOnce(&mut Self) + fn with_hir_id_owner(&mut self, owner: NodeId, f: F) -> T + where + F: FnOnce(&mut Self) -> T, { let counter = self.item_local_id_counters - .insert(owner, HIR_ID_COUNTER_LOCKED) - .unwrap(); + .insert(owner, HIR_ID_COUNTER_LOCKED) + .unwrap_or_else(|| panic!("No item_local_id_counters entry for {:?}", owner)); 
let def_index = self.resolver.definitions().opt_def_index(owner).unwrap(); self.current_hir_id_owner.push((def_index, counter)); - f(self); + let ret = f(self); let (new_def_index, new_counter) = self.current_hir_id_owner.pop().unwrap(); debug_assert!(def_index == new_def_index); debug_assert!(new_counter >= counter); - let prev = self.item_local_id_counters.insert(owner, new_counter).unwrap(); + let prev = self.item_local_id_counters + .insert(owner, new_counter) + .unwrap(); debug_assert!(prev == HIR_ID_COUNTER_LOCKED); + ret } /// This method allocates a new HirId for the given NodeId and stores it in @@ -447,9 +530,8 @@ impl<'a> LoweringContext<'a> { /// properly. Calling the method twice with the same NodeId is fine though. fn lower_node_id(&mut self, ast_node_id: NodeId) -> LoweredNodeId { self.lower_node_id_generic(ast_node_id, |this| { - let &mut (def_index, ref mut local_id_counter) = this.current_hir_id_owner - .last_mut() - .unwrap(); + let &mut (def_index, ref mut local_id_counter) = + this.current_hir_id_owner.last_mut().unwrap(); let local_id = *local_id_counter; *local_id_counter += 1; hir::HirId { @@ -459,14 +541,12 @@ impl<'a> LoweringContext<'a> { }) } - fn lower_node_id_with_owner(&mut self, - ast_node_id: NodeId, - owner: NodeId) - -> LoweredNodeId { + fn lower_node_id_with_owner(&mut self, ast_node_id: NodeId, owner: NodeId) -> LoweredNodeId { self.lower_node_id_generic(ast_node_id, |this| { - let local_id_counter = this.item_local_id_counters - .get_mut(&owner) - .unwrap(); + let local_id_counter = this + .item_local_id_counters + .get_mut(&owner) + .expect("called lower_node_id_with_owner before allocate_hir_id_counter"); let local_id = *local_id_counter; // We want to be sure not to modify the counter in the map while it @@ -475,7 +555,12 @@ impl<'a> LoweringContext<'a> { debug_assert!(local_id != HIR_ID_COUNTER_LOCKED); *local_id_counter += 1; - let def_index = this.resolver.definitions().opt_def_index(owner).unwrap(); + let def_index = 
this + .resolver + .definitions() + .opt_def_index(owner) + .expect("You forgot to call `create_def_with_parent` or are lowering node ids \ + that do not belong to the current owner"); hir::HirId { owner: def_index, @@ -484,8 +569,7 @@ impl<'a> LoweringContext<'a> { }) } - fn record_body(&mut self, value: hir::Expr, decl: Option<&FnDecl>) - -> hir::BodyId { + fn record_body(&mut self, value: hir::Expr, decl: Option<&FnDecl>) -> hir::BodyId { let body = hir::Body { arguments: decl.map_or(hir_vec![], |decl| { decl.inputs.iter().map(|x| self.lower_arg(x)).collect() @@ -511,95 +595,173 @@ impl<'a> LoweringContext<'a> { }) } + fn expect_full_def_from_use(&mut self, id: NodeId) -> impl Iterator { + self.resolver.get_import(id).present_items().map(|pr| { + if pr.unresolved_segments() != 0 { + bug!("path not fully resolved: {:?}", pr); + } + pr.base_def() + }) + } + fn diagnostic(&self) -> &errors::Handler { self.sess.diagnostic() } - fn str_to_ident(&self, s: &'static str) -> Name { - Symbol::gensym(s) + fn str_to_ident(&self, s: &'static str) -> Ident { + Ident::with_empty_ctxt(Symbol::gensym(s)) } - fn allow_internal_unstable(&self, reason: CompilerDesugaringKind, span: Span) -> Span - { + fn allow_internal_unstable(&self, reason: CompilerDesugaringKind, span: Span) -> Span { let mark = Mark::fresh(Mark::root()); mark.set_expn_info(codemap::ExpnInfo { call_site: span, - callee: codemap::NameAndSpan { - format: codemap::CompilerDesugaring(reason), - span: Some(span), - allow_internal_unstable: true, - allow_internal_unsafe: false, - }, + def_site: Some(span), + format: codemap::CompilerDesugaring(reason), + allow_internal_unstable: true, + allow_internal_unsafe: false, + local_inner_macros: false, + edition: codemap::hygiene::default_edition(), }); span.with_ctxt(SyntaxContext::empty().apply_mark(mark)) } - // Creates a new hir::GenericParam for every new lifetime and type parameter - // encountered while evaluating `f`. 
Definitions are created with the parent - // provided. If no `parent_id` is provided, no definitions will be returned. + fn with_anonymous_lifetime_mode( + &mut self, + anonymous_lifetime_mode: AnonymousLifetimeMode, + op: impl FnOnce(&mut Self) -> R, + ) -> R { + let old_anonymous_lifetime_mode = self.anonymous_lifetime_mode; + self.anonymous_lifetime_mode = anonymous_lifetime_mode; + let result = op(self); + self.anonymous_lifetime_mode = old_anonymous_lifetime_mode; + result + } + + /// Creates a new hir::GenericParam for every new lifetime and + /// type parameter encountered while evaluating `f`. Definitions + /// are created with the parent provided. If no `parent_id` is + /// provided, no definitions will be returned. + /// + /// Presuming that in-band lifetimes are enabled, then + /// `self.anonymous_lifetime_mode` will be updated to match the + /// argument while `f` is running (and restored afterwards). fn collect_in_band_defs( &mut self, - parent_id: Option, - f: F - ) -> (Vec, T) where F: FnOnce(&mut LoweringContext) -> T + parent_id: DefId, + anonymous_lifetime_mode: AnonymousLifetimeMode, + f: F, + ) -> (Vec, T) + where + F: FnOnce(&mut LoweringContext) -> (Vec, T), { assert!(!self.is_collecting_in_band_lifetimes); assert!(self.lifetimes_to_define.is_empty()); - self.is_collecting_in_band_lifetimes = self.sess.features.borrow().in_band_lifetimes; + let old_anonymous_lifetime_mode = self.anonymous_lifetime_mode; - assert!(self.in_band_ty_params.is_empty()); + if self.sess.features_untracked().impl_header_lifetime_elision { + self.anonymous_lifetime_mode = anonymous_lifetime_mode; + self.is_collecting_in_band_lifetimes = true; + } else if self.sess.features_untracked().in_band_lifetimes { + self.is_collecting_in_band_lifetimes = true; + } - let res = f(self); + let (in_band_ty_params, res) = f(self); self.is_collecting_in_band_lifetimes = false; + self.anonymous_lifetime_mode = old_anonymous_lifetime_mode; - let in_band_ty_params = 
self.in_band_ty_params.split_off(0); let lifetimes_to_define = self.lifetimes_to_define.split_off(0); - let mut params = match parent_id { - Some(parent_id) => lifetimes_to_define.into_iter().map(|(span, name)| { - let def_node_id = self.next_id().node_id; + let params = lifetimes_to_define + .into_iter() + .map(|(span, hir_name)| { + let def_node_id = self.next_id().node_id; - // Add a definition for the in-band lifetime def - self.resolver.definitions().create_def_with_parent( - parent_id.index, - def_node_id, - DefPathData::LifetimeDef(name.as_str()), - DefIndexAddressSpace::High, - Mark::root() - ); + // Get the name we'll use to make the def-path. Note + // that collisions are ok here and this shouldn't + // really show up for end-user. + let str_name = match hir_name { + ParamName::Plain(ident) => ident.as_interned_str(), + ParamName::Fresh(_) => keywords::UnderscoreLifetime.name().as_interned_str(), + }; - hir::GenericParam::Lifetime(hir::LifetimeDef { - lifetime: hir::Lifetime { - id: def_node_id, - span, - name: hir::LifetimeName::Name(name), - }, - bounds: Vec::new().into(), - pure_wrt_drop: false, - in_band: true, - }) - }).collect(), - None => Vec::new(), - }; + // Add a definition for the in-band lifetime def + self.resolver.definitions().create_def_with_parent( + parent_id.index, + def_node_id, + DefPathData::LifetimeParam(str_name), + DefIndexAddressSpace::High, + Mark::root(), + span, + ); - params.extend(in_band_ty_params.into_iter().map(|tp| hir::GenericParam::Type(tp))); + hir::GenericParam { + id: def_node_id, + name: hir_name, + attrs: hir_vec![], + bounds: hir_vec![], + span, + pure_wrt_drop: false, + kind: hir::GenericParamKind::Lifetime { in_band: true } + } + }) + .chain(in_band_ty_params.into_iter()) + .collect(); (params, res) } - // Evaluates `f` with the lifetimes in `lt_defs` in-scope. 
+ /// When there is a reference to some lifetime `'a`, and in-band + /// lifetimes are enabled, then we want to push that lifetime into + /// the vector of names to define later. In that case, it will get + /// added to the appropriate generics. + fn maybe_collect_in_band_lifetime(&mut self, ident: Ident) { + if !self.is_collecting_in_band_lifetimes { + return; + } + + if !self.sess.features_untracked().in_band_lifetimes { + return; + } + + if self.in_scope_lifetimes.contains(&ident.modern()) { + return; + } + + let hir_name = ParamName::Plain(ident); + + if self.lifetimes_to_define.iter() + .any(|(_, lt_name)| lt_name.modern() == hir_name.modern()) { + return; + } + + self.lifetimes_to_define.push((ident.span, hir_name)); + } + + /// When we have either an elided or `'_` lifetime in an impl + /// header, we convert it to + fn collect_fresh_in_band_lifetime(&mut self, span: Span) -> ParamName { + assert!(self.is_collecting_in_band_lifetimes); + let index = self.lifetimes_to_define.len(); + let hir_name = ParamName::Fresh(index); + self.lifetimes_to_define.push((span, hir_name)); + hir_name + } + + // Evaluates `f` with the lifetimes in `params` in-scope. // This is used to track which lifetimes have already been defined, and // which are new in-band lifetimes that need to have a definition created // for them. - fn with_in_scope_lifetime_defs( - &mut self, - lt_defs: &[LifetimeDef], - f: F - ) -> T where F: FnOnce(&mut LoweringContext) -> T + fn with_in_scope_lifetime_defs(&mut self, params: &[GenericParam], f: F) -> T + where + F: FnOnce(&mut LoweringContext) -> T, { let old_len = self.in_scope_lifetimes.len(); - let lt_def_names = lt_defs.iter().map(|lt_def| lt_def.lifetime.ident.name); + let lt_def_names = params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. 
} => Some(param.ident.modern()), + _ => None, + }); self.in_scope_lifetimes.extend(lt_def_names); let res = f(self); @@ -608,19 +770,22 @@ impl<'a> LoweringContext<'a> { res } - // Same as the method above, but accepts `hir::LifetimeDef`s - // instead of `ast::LifetimeDef`s. + // Same as the method above, but accepts `hir::GenericParam`s + // instead of `ast::GenericParam`s. // This should only be used with generics that have already had their // in-band lifetimes added. In practice, this means that this function is // only used when lowering a child item of a trait or impl. - fn with_parent_impl_lifetime_defs( - &mut self, - lt_defs: &[hir::LifetimeDef], + fn with_parent_impl_lifetime_defs(&mut self, + params: &HirVec, f: F - ) -> T where F: FnOnce(&mut LoweringContext) -> T + ) -> T where + F: FnOnce(&mut LoweringContext) -> T, { let old_len = self.in_scope_lifetimes.len(); - let lt_def_names = lt_defs.iter().map(|lt_def| lt_def.lifetime.name.name()); + let lt_def_names = params.iter().filter_map(|param| match param.kind { + hir::GenericParamKind::Lifetime { .. } => Some(param.name.ident().modern()), + _ => None, + }); self.in_scope_lifetimes.extend(lt_def_names); let res = f(self); @@ -629,65 +794,120 @@ impl<'a> LoweringContext<'a> { res } - // Appends in-band lifetime defs and argument-position `impl Trait` defs - // to the existing set of generics. + /// Appends in-band lifetime defs and argument-position `impl + /// Trait` defs to the existing set of generics. + /// + /// Presuming that in-band lifetimes are enabled, then + /// `self.anonymous_lifetime_mode` will be updated to match the + /// argument while `f` is running (and restored afterwards). 
fn add_in_band_defs( &mut self, generics: &Generics, - parent_id: Option, - f: F + parent_id: DefId, + anonymous_lifetime_mode: AnonymousLifetimeMode, + f: F, ) -> (hir::Generics, T) - where F: FnOnce(&mut LoweringContext) -> T + where + F: FnOnce(&mut LoweringContext, &mut Vec) -> T, { - let (in_band_defs, (mut lowered_generics, res)) = - self.with_in_scope_lifetime_defs( - &generics.params - .iter() - .filter_map(|p| match *p { - GenericParam::Lifetime(ref ld) => Some(ld.clone()), - _ => None, - }) - .collect::>(), - |this| { - this.collect_in_band_defs(parent_id, |this| { - (this.lower_generics(generics), f(this)) - }) - } - ); + let (in_band_defs, (mut lowered_generics, res)) = self.with_in_scope_lifetime_defs( + &generics.params, + |this| { + this.collect_in_band_defs(parent_id, anonymous_lifetime_mode, |this| { + let mut params = Vec::new(); + let generics = this.lower_generics( + generics, + ImplTraitContext::Universal(&mut params), + ); + let res = f(this, &mut params); + (params, (generics, res)) + }) + }, + ); - lowered_generics.params = - lowered_generics.params.iter().cloned().chain(in_band_defs).collect(); + lowered_generics.params = lowered_generics + .params + .iter() + .cloned() + .chain(in_band_defs) + .collect(); (lowered_generics, res) } fn with_catch_scope(&mut self, catch_id: NodeId, f: F) -> T - where F: FnOnce(&mut LoweringContext) -> T + where + F: FnOnce(&mut LoweringContext) -> T, { let len = self.catch_scopes.len(); self.catch_scopes.push(catch_id); let result = f(self); - assert_eq!(len + 1, self.catch_scopes.len(), - "catch scopes should be added and removed in stack order"); + assert_eq!( + len + 1, + self.catch_scopes.len(), + "catch scopes should be added and removed in stack order" + ); self.catch_scopes.pop().unwrap(); result } + fn make_async_expr( + &mut self, + capture_clause: CaptureBy, + closure_node_id: NodeId, + ret_ty: Option<&Ty>, + body: impl FnOnce(&mut LoweringContext) -> hir::Expr, + ) -> hir::ExprKind { + let 
prev_is_generator = mem::replace(&mut self.is_generator, true); + let body_expr = body(self); + let span = body_expr.span; + let output = match ret_ty { + Some(ty) => FunctionRetTy::Ty(P(ty.clone())), + None => FunctionRetTy::Default(span), + }; + let decl = FnDecl { + inputs: vec![], + output, + variadic: false + }; + let body_id = self.record_body(body_expr, Some(&decl)); + self.is_generator = prev_is_generator; + + let capture_clause = self.lower_capture_clause(capture_clause); + let closure_hir_id = self.lower_node_id(closure_node_id).hir_id; + let decl = self.lower_fn_decl(&decl, None, /* impl trait allowed */ false, None); + let generator = hir::Expr { + id: closure_node_id, + hir_id: closure_hir_id, + node: hir::ExprKind::Closure(capture_clause, decl, body_id, span, + Some(hir::GeneratorMovability::Static)), + span, + attrs: ThinVec::new(), + }; + + let unstable_span = self.allow_internal_unstable(CompilerDesugaringKind::Async, span); + let gen_future = self.expr_std_path( + unstable_span, &["future", "from_generator"], None, ThinVec::new()); + hir::ExprKind::Call(P(gen_future), hir_vec![generator]) + } + fn lower_body(&mut self, decl: Option<&FnDecl>, f: F) -> hir::BodyId - where F: FnOnce(&mut LoweringContext) -> hir::Expr + where + F: FnOnce(&mut LoweringContext) -> hir::Expr, { let prev = mem::replace(&mut self.is_generator, false); let result = f(self); let r = self.record_body(result, decl); self.is_generator = prev; - return r + return r; } fn with_loop_scope(&mut self, loop_id: NodeId, f: F) -> T - where F: FnOnce(&mut LoweringContext) -> T + where + F: FnOnce(&mut LoweringContext) -> T, { // We're no longer in the base loop's condition; we're in another loop. 
let was_in_loop_condition = self.is_in_loop_condition; @@ -697,8 +917,11 @@ impl<'a> LoweringContext<'a> { self.loop_scopes.push(loop_id); let result = f(self); - assert_eq!(len + 1, self.loop_scopes.len(), - "Loop scopes should be added and removed in stack order"); + assert_eq!( + len + 1, + self.loop_scopes.len(), + "Loop scopes should be added and removed in stack order" + ); self.loop_scopes.pop().unwrap(); @@ -708,7 +931,8 @@ impl<'a> LoweringContext<'a> { } fn with_loop_condition_scope(&mut self, f: F) -> T - where F: FnOnce(&mut LoweringContext) -> T + where + F: FnOnce(&mut LoweringContext) -> T, { let was_in_loop_condition = self.is_in_loop_condition; self.is_in_loop_condition = true; @@ -721,7 +945,8 @@ impl<'a> LoweringContext<'a> { } fn with_new_scopes(&mut self, f: F) -> T - where F: FnOnce(&mut LoweringContext) -> T + where + F: FnOnce(&mut LoweringContext) -> T, { let was_in_loop_condition = self.is_in_loop_condition; self.is_in_loop_condition = false; @@ -737,21 +962,6 @@ impl<'a> LoweringContext<'a> { result } - fn with_parent_def(&mut self, parent_id: NodeId, f: F) -> T - where F: FnOnce(&mut LoweringContext) -> T - { - let old_def = self.parent_def; - self.parent_def = { - let defs = self.resolver.definitions(); - Some(defs.opt_def_index(parent_id).unwrap()) - }; - - let result = f(self); - - self.parent_def = old_def; - result - } - fn def_key(&mut self, id: DefId) -> DefKey { if id.is_local() { self.resolver.definitions().def_key(id.index) @@ -760,51 +970,42 @@ impl<'a> LoweringContext<'a> { } } - fn lower_ident(&mut self, ident: Ident) -> Name { - let ident = ident.modern(); - if ident.ctxt == SyntaxContext::empty() { - return ident.name; - } - *self.name_map.entry(ident).or_insert_with(|| Symbol::from_ident(ident)) + fn lower_label(&mut self, label: Option, D}` - ItemEnum(EnumDef, Generics), + Enum(EnumDef, Generics), /// A struct definition, e.g. 
`struct Foo {x: A}` - ItemStruct(VariantData, Generics), + Struct(VariantData, Generics), /// A union definition, e.g. `union Foo {x: A, y: B}` - ItemUnion(VariantData, Generics), + Union(VariantData, Generics), /// Represents a Trait Declaration - ItemTrait(IsAuto, Unsafety, Generics, TyParamBounds, HirVec), + Trait(IsAuto, Unsafety, Generics, GenericBounds, HirVec), /// Represents a Trait Alias Declaration - ItemTraitAlias(Generics, TyParamBounds), + TraitAlias(Generics, GenericBounds), /// An implementation, eg `impl Trait for Foo { .. }` - ItemImpl(Unsafety, - ImplPolarity, - Defaultness, - Generics, - Option, // (optional) trait this impl implements - P, // self - HirVec), + Impl(Unsafety, + ImplPolarity, + Defaultness, + Generics, + Option, // (optional) trait this impl implements + P, // self + HirVec), } -impl Item_ { +impl ItemKind { pub fn descriptive_variant(&self) -> &str { match *self { - ItemExternCrate(..) => "extern crate", - ItemUse(..) => "use", - ItemStatic(..) => "static item", - ItemConst(..) => "constant item", - ItemFn(..) => "function", - ItemMod(..) => "module", - ItemForeignMod(..) => "foreign module", - ItemGlobalAsm(..) => "global asm", - ItemTy(..) => "type alias", - ItemEnum(..) => "enum", - ItemStruct(..) => "struct", - ItemUnion(..) => "union", - ItemTrait(..) => "trait", - ItemTraitAlias(..) => "trait alias", - ItemImpl(..) => "item", + ItemKind::ExternCrate(..) => "extern crate", + ItemKind::Use(..) => "use", + ItemKind::Static(..) => "static item", + ItemKind::Const(..) => "constant item", + ItemKind::Fn(..) => "function", + ItemKind::Mod(..) => "module", + ItemKind::ForeignMod(..) => "foreign module", + ItemKind::GlobalAsm(..) => "global asm", + ItemKind::Ty(..) => "type alias", + ItemKind::Existential(..) => "existential type", + ItemKind::Enum(..) => "enum", + ItemKind::Struct(..) => "struct", + ItemKind::Union(..) => "union", + ItemKind::Trait(..) => "trait", + ItemKind::TraitAlias(..) => "trait alias", + ItemKind::Impl(..) 
=> "item", } } pub fn adt_kind(&self) -> Option { match *self { - ItemStruct(..) => Some(AdtKind::Struct), - ItemUnion(..) => Some(AdtKind::Union), - ItemEnum(..) => Some(AdtKind::Enum), + ItemKind::Struct(..) => Some(AdtKind::Struct), + ItemKind::Union(..) => Some(AdtKind::Union), + ItemKind::Enum(..) => Some(AdtKind::Enum), _ => None, } } pub fn generics(&self) -> Option<&Generics> { Some(match *self { - ItemFn(_, _, _, _, ref generics, _) | - ItemTy(_, ref generics) | - ItemEnum(_, ref generics) | - ItemStruct(_, ref generics) | - ItemUnion(_, ref generics) | - ItemTrait(_, _, ref generics, _, _) | - ItemImpl(_, _, _, ref generics, _, _, _)=> generics, + ItemKind::Fn(_, _, ref generics, _) | + ItemKind::Ty(_, ref generics) | + ItemKind::Existential(ExistTy { ref generics, impl_trait_fn: None, .. }) | + ItemKind::Enum(_, ref generics) | + ItemKind::Struct(_, ref generics) | + ItemKind::Union(_, ref generics) | + ItemKind::Trait(_, _, ref generics, _, _) | + ItemKind::Impl(_, _, _, ref generics, _, _, _)=> generics, _ => return None }) } @@ -2025,10 +2157,10 @@ impl Item_ { /// type or method, and whether it is public). This allows other /// passes to find the impl they want without loading the id (which /// means fewer edges in the incremental compilation graph). -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct TraitItemRef { pub id: TraitItemId, - pub name: Name, + pub ident: Ident, pub kind: AssociatedItemKind, pub span: Span, pub defaultness: Defaultness, @@ -2040,51 +2172,52 @@ pub struct TraitItemRef { /// type or method, and whether it is public). This allows other /// passes to find the impl they want without loading the id (which /// means fewer edges in the incremental compilation graph). 
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct ImplItemRef { pub id: ImplItemId, - pub name: Name, + pub ident: Ident, pub kind: AssociatedItemKind, pub span: Span, pub vis: Visibility, pub defaultness: Defaultness, } -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, RustcEncodable, RustcDecodable, Debug)] pub enum AssociatedItemKind { Const, Method { has_self: bool }, Type, + Existential, } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct ForeignItem { pub name: Name, pub attrs: HirVec, - pub node: ForeignItem_, + pub node: ForeignItemKind, pub id: NodeId, pub span: Span, pub vis: Visibility, } /// An item within an `extern` block -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum ForeignItem_ { +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +pub enum ForeignItemKind { /// A foreign function - ForeignItemFn(P, HirVec>, Generics), + Fn(P, HirVec, Generics), /// A foreign static item (`static ext: u8`), with optional mutability /// (the boolean is true when mutable) - ForeignItemStatic(P, bool), + Static(P, bool), /// A foreign type - ForeignItemType, + Type, } -impl ForeignItem_ { +impl ForeignItemKind { pub fn descriptive_variant(&self) -> &str { match *self { - ForeignItemFn(..) => "foreign function", - ForeignItemStatic(..) => "foreign static item", - ForeignItemType => "foreign type", + ForeignItemKind::Fn(..) => "foreign function", + ForeignItemKind::Static(..) => "foreign static item", + ForeignItemKind::Type => "foreign type", } } } @@ -2124,3 +2257,60 @@ pub type TraitMap = NodeMap>; // Map from the NodeId of a glob import to a list of items which are actually // imported. 
pub type GlobMap = NodeMap>; + + +pub fn provide(providers: &mut Providers) { + providers.describe_def = map::describe_def; +} + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct CodegenFnAttrs { + pub flags: CodegenFnAttrFlags, + pub inline: InlineAttr, + pub export_name: Option, + pub target_features: Vec, + pub linkage: Option, + pub link_section: Option, +} + +bitflags! { + #[derive(RustcEncodable, RustcDecodable)] + pub struct CodegenFnAttrFlags: u32 { + const COLD = 1 << 0; + const ALLOCATOR = 1 << 1; + const UNWIND = 1 << 2; + const RUSTC_ALLOCATOR_NOUNWIND = 1 << 3; + const NAKED = 1 << 4; + const NO_MANGLE = 1 << 5; + const RUSTC_STD_INTERNAL_SYMBOL = 1 << 6; + const NO_DEBUG = 1 << 7; + const THREAD_LOCAL = 1 << 8; + const USED = 1 << 9; + } +} + +impl CodegenFnAttrs { + pub fn new() -> CodegenFnAttrs { + CodegenFnAttrs { + flags: CodegenFnAttrFlags::empty(), + inline: InlineAttr::None, + export_name: None, + target_features: vec![], + linkage: None, + link_section: None, + } + } + + /// True if `#[inline]` or `#[inline(always)]` is present. + pub fn requests_inline(&self) -> bool { + match self.inline { + InlineAttr::Hint | InlineAttr::Always => true, + InlineAttr::None | InlineAttr::Never => false, + } + } + + /// True if `#[no_mangle]` or `#[export_name(...)]` is present. 
+ pub fn contains_extern_indicator(&self) -> bool { + self.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || self.export_name.is_some() + } +} diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs index 2bec224362ea..8a714a5fbd84 100644 --- a/src/librustc/hir/pat_util.rs +++ b/src/librustc/hir/pat_util.rs @@ -10,9 +10,8 @@ use hir::def::Def; use hir::def_id::DefId; -use hir::{self, PatKind}; +use hir::{self, HirId, PatKind}; use syntax::ast; -use syntax::codemap::Spanned; use syntax_pos::Span; use std::iter::{Enumerate, ExactSizeIterator}; @@ -31,6 +30,10 @@ impl Iterator for EnumerateAndAdjust where I: Iterator { (if i < self.gap_pos { i } else { i + self.gap_len }, elem) }) } + + fn size_hint(&self) -> (usize, Option) { + self.enumerate.size_hint() + } } pub trait EnumerateAndAdjustIterator { @@ -44,7 +47,7 @@ impl EnumerateAndAdjustIterator for T { let actual_len = self.len(); EnumerateAndAdjust { enumerate: self.enumerate(), - gap_pos: if let Some(gap_pos) = gap_pos { gap_pos } else { expected_len }, + gap_pos: gap_pos.unwrap_or(expected_len), gap_len: expected_len - actual_len, } } @@ -87,11 +90,11 @@ impl hir::Pat { /// Call `f` on every "binding" in a pattern, e.g., on `a` in /// `match foo() { Some(a) => (), None => () }` pub fn each_binding(&self, mut f: F) - where F: FnMut(hir::BindingAnnotation, ast::NodeId, Span, &Spanned), + where F: FnMut(hir::BindingAnnotation, HirId, Span, ast::Ident), { self.walk(|p| { - if let PatKind::Binding(binding_mode, _, ref pth, _) = p.node { - f(binding_mode, p.id, p.span, pth); + if let PatKind::Binding(binding_mode, _, ident, _) = p.node { + f(binding_mode, p.hir_id, p.span, ident); } true }); @@ -128,11 +131,10 @@ impl hir::Pat { contains_bindings } - pub fn simple_name(&self) -> Option { + pub fn simple_ident(&self) -> Option { match self.node { - PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ref path1, None) | - PatKind::Binding(hir::BindingAnnotation::Mutable, _, ref path1, None) => - 
Some(path1.node), + PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ident, None) | + PatKind::Binding(hir::BindingAnnotation::Mutable, _, ident, None) => Some(ident), _ => None, } } diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs index a8e55674ae52..4499a378be21 100644 --- a/src/librustc/hir/print.rs +++ b/src/librustc/hir/print.rs @@ -10,7 +10,7 @@ pub use self::AnnNode::*; -use syntax::abi::Abi; +use rustc_target::spec::abi::Abi; use syntax::ast; use syntax::codemap::{CodeMap, Spanned}; use syntax::parse::ParseSess; @@ -24,7 +24,8 @@ use syntax::util::parser::{self, AssocOp, Fixity}; use syntax_pos::{self, BytePos, FileName}; use hir; -use hir::{PatKind, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier, RangeEnd}; +use hir::{PatKind, GenericBound, TraitBoundModifier, RangeEnd}; +use hir::{GenericParam, GenericParamKind, GenericArg}; use std::cell::Cell; use std::io::{self, Write, Read}; @@ -58,13 +59,19 @@ pub trait PpAnn { fn post(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> { Ok(()) } + fn try_fetch_item(&self, _: ast::NodeId) -> Option<&hir::Item> { + None + } } pub struct NoAnn; impl PpAnn for NoAnn {} -pub const NO_ANN: &'static PpAnn = &NoAnn; +pub const NO_ANN: &'static dyn PpAnn = &NoAnn; impl PpAnn for hir::Crate { + fn try_fetch_item(&self, item: ast::NodeId) -> Option<&hir::Item> { + Some(self.item(item)) + } fn nested(&self, state: &mut State, nested: Nested) -> io::Result<()> { match nested { Nested::Item(id) => state.print_item(self.item(id.id)), @@ -83,7 +90,7 @@ pub struct State<'a> { literals: Peekable>, cur_cmnt: usize, boxes: Vec, - ann: &'a (PpAnn + 'a), + ann: &'a (dyn PpAnn + 'a), } impl<'a> PrintState<'a> for State<'a> { @@ -126,9 +133,9 @@ pub fn print_crate<'a>(cm: &'a CodeMap, sess: &ParseSess, krate: &hir::Crate, filename: FileName, - input: &mut Read, - out: Box, - ann: &'a PpAnn, + input: &mut dyn Read, + out: Box, + ann: &'a dyn PpAnn, is_expanded: bool) -> io::Result<()> { let mut 
s = State::new_from_input(cm, sess, filename, input, out, ann, is_expanded); @@ -145,9 +152,9 @@ impl<'a> State<'a> { pub fn new_from_input(cm: &'a CodeMap, sess: &ParseSess, filename: FileName, - input: &mut Read, - out: Box, - ann: &'a PpAnn, + input: &mut dyn Read, + out: Box, + ann: &'a dyn PpAnn, is_expanded: bool) -> State<'a> { let (cmnts, lits) = comments::gather_comments_and_literals(sess, filename, input); @@ -167,8 +174,8 @@ impl<'a> State<'a> { } pub fn new(cm: &'a CodeMap, - out: Box, - ann: &'a PpAnn, + out: Box, + ann: &'a dyn PpAnn, comments: Option>, literals: Option>) -> State<'a> { @@ -184,7 +191,7 @@ impl<'a> State<'a> { } } -pub fn to_string(ann: &PpAnn, f: F) -> String +pub fn to_string(ann: &dyn PpAnn, f: F) -> String where F: FnOnce(&mut State) -> io::Result<()> { let mut wr = Vec::new(); @@ -360,12 +367,12 @@ impl<'a> State<'a> { self.maybe_print_comment(ty.span.lo())?; self.ibox(0)?; match ty.node { - hir::TySlice(ref ty) => { + hir::TyKind::Slice(ref ty) => { self.s.word("[")?; self.print_type(&ty)?; self.s.word("]")?; } - hir::TyPtr(ref mt) => { + hir::TyKind::Ptr(ref mt) => { self.s.word("*")?; match mt.mutbl { hir::MutMutable => self.word_nbsp("mut")?, @@ -373,15 +380,15 @@ impl<'a> State<'a> { } self.print_type(&mt.ty)?; } - hir::TyRptr(ref lifetime, ref mt) => { + hir::TyKind::Rptr(ref lifetime, ref mt) => { self.s.word("&")?; self.print_opt_lifetime(lifetime)?; self.print_mt(mt)?; } - hir::TyNever => { + hir::TyKind::Never => { self.s.word("!")?; }, - hir::TyTup(ref elts) => { + hir::TyKind::Tup(ref elts) => { self.popen()?; self.commasep(Inconsistent, &elts[..], |s, ty| s.print_type(&ty))?; if elts.len() == 1 { @@ -389,14 +396,14 @@ impl<'a> State<'a> { } self.pclose()?; } - hir::TyBareFn(ref f) => { + hir::TyKind::BareFn(ref f) => { self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &f.generic_params, &f.arg_names[..])?; } - hir::TyPath(ref qpath) => { + hir::TyKind::Path(ref qpath) => { self.print_qpath(qpath, false)? 
} - hir::TyTraitObject(ref bounds, ref lifetime) => { + hir::TyKind::TraitObject(ref bounds, ref lifetime) => { let mut first = true; for bound in bounds { if first { @@ -413,25 +420,22 @@ impl<'a> State<'a> { self.print_lifetime(lifetime)?; } } - hir::TyImplTraitExistential(ref existty, ref _lifetimes) => { - self.print_bounds("impl", &existty.bounds[..])?; - } - hir::TyArray(ref ty, v) => { + hir::TyKind::Array(ref ty, ref length) => { self.s.word("[")?; self.print_type(&ty)?; self.s.word("; ")?; - self.ann.nested(self, Nested::Body(v))?; + self.print_anon_const(length)?; self.s.word("]")?; } - hir::TyTypeof(e) => { + hir::TyKind::Typeof(ref e) => { self.s.word("typeof(")?; - self.ann.nested(self, Nested::Body(e))?; + self.print_anon_const(e)?; self.s.word(")")?; } - hir::TyInfer => { + hir::TyKind::Infer => { self.s.word("_")?; } - hir::TyErr => { + hir::TyKind::Err => { self.s.word("?")?; } } @@ -443,12 +447,15 @@ impl<'a> State<'a> { self.maybe_print_comment(item.span.lo())?; self.print_outer_attributes(&item.attrs)?; match item.node { - hir::ForeignItemFn(ref decl, ref arg_names, ref generics) => { + hir::ForeignItemKind::Fn(ref decl, ref arg_names, ref generics) => { self.head("")?; self.print_fn(decl, - hir::Unsafety::Normal, - hir::Constness::NotConst, - Abi::Rust, + hir::FnHeader { + unsafety: hir::Unsafety::Normal, + constness: hir::Constness::NotConst, + abi: Abi::Rust, + asyncness: hir::IsAsync::NotAsync, + }, Some(item.name), generics, &item.vis, @@ -458,7 +465,7 @@ impl<'a> State<'a> { self.s.word(";")?; self.end() // end the outer fn box } - hir::ForeignItemStatic(ref t, m) => { + hir::ForeignItemKind::Static(ref t, m) => { self.head(&visibility_qualified(&item.vis, "static"))?; if m { self.word_space("mut")?; @@ -470,7 +477,7 @@ impl<'a> State<'a> { self.end()?; // end the head-ibox self.end() // end the outer cbox } - hir::ForeignItemType => { + hir::ForeignItemKind::Type => { self.head(&visibility_qualified(&item.vis, "type"))?; 
self.print_name(item.name)?; self.s.word(";")?; @@ -481,14 +488,14 @@ impl<'a> State<'a> { } fn print_associated_const(&mut self, - name: ast::Name, + ident: ast::Ident, ty: &hir::Ty, default: Option, vis: &hir::Visibility) -> io::Result<()> { self.s.word(&visibility_qualified(vis, ""))?; self.word_space("const")?; - self.print_name(name)?; + self.print_ident(ident)?; self.word_space(":")?; self.print_type(ty)?; if let Some(expr) = default { @@ -500,12 +507,12 @@ impl<'a> State<'a> { } fn print_associated_type(&mut self, - name: ast::Name, - bounds: Option<&hir::TyParamBounds>, + ident: ast::Ident, + bounds: Option<&hir::GenericBounds>, ty: Option<&hir::Ty>) -> io::Result<()> { self.word_space("type")?; - self.print_name(name)?; + self.print_ident(ident)?; if let Some(bounds) = bounds { self.print_bounds(":", bounds)?; } @@ -524,15 +531,10 @@ impl<'a> State<'a> { self.print_outer_attributes(&item.attrs)?; self.ann.pre(self, NodeItem(item))?; match item.node { - hir::ItemExternCrate(ref optional_path) => { + hir::ItemKind::ExternCrate(orig_name) => { self.head(&visibility_qualified(&item.vis, "extern crate"))?; - if let Some(p) = *optional_path { - let val = p.as_str(); - if val.contains("-") { - self.print_string(&val, ast::StrStyle::Cooked)?; - } else { - self.print_name(p)?; - } + if let Some(orig_name) = orig_name { + self.print_name(orig_name)?; self.s.space()?; self.s.word("as")?; self.s.space()?; @@ -542,13 +544,13 @@ impl<'a> State<'a> { self.end()?; // end inner head-block self.end()?; // end outer head-block } - hir::ItemUse(ref path, kind) => { + hir::ItemKind::Use(ref path, kind) => { self.head(&visibility_qualified(&item.vis, "use"))?; self.print_path(path, false)?; match kind { hir::UseKind::Single => { - if path.segments.last().unwrap().name != item.name { + if path.segments.last().unwrap().ident.name != item.name { self.s.space()?; self.word_space("as")?; self.print_name(item.name)?; @@ -561,7 +563,7 @@ impl<'a> State<'a> { self.end()?; // end inner 
head-block self.end()?; // end outer head-block } - hir::ItemStatic(ref ty, m, expr) => { + hir::ItemKind::Static(ref ty, m, expr) => { self.head(&visibility_qualified(&item.vis, "static"))?; if m == hir::MutMutable { self.word_space("mut")?; @@ -577,7 +579,7 @@ impl<'a> State<'a> { self.s.word(";")?; self.end()?; // end the outer cbox } - hir::ItemConst(ref ty, expr) => { + hir::ItemKind::Const(ref ty, expr) => { self.head(&visibility_qualified(&item.vis, "const"))?; self.print_name(item.name)?; self.word_space(":")?; @@ -590,12 +592,10 @@ impl<'a> State<'a> { self.s.word(";")?; self.end()?; // end the outer cbox } - hir::ItemFn(ref decl, unsafety, constness, abi, ref typarams, body) => { + hir::ItemKind::Fn(ref decl, header, ref typarams, body) => { self.head("")?; self.print_fn(decl, - unsafety, - constness, - abi, + header, Some(item.name), typarams, &item.vis, @@ -606,7 +606,7 @@ impl<'a> State<'a> { self.end()?; // need to close a box self.ann.nested(self, Nested::Body(body))?; } - hir::ItemMod(ref _mod) => { + hir::ItemKind::Mod(ref _mod) => { self.head(&visibility_qualified(&item.vis, "mod"))?; self.print_name(item.name)?; self.nbsp()?; @@ -614,22 +614,20 @@ impl<'a> State<'a> { self.print_mod(_mod, &item.attrs)?; self.bclose(item.span)?; } - hir::ItemForeignMod(ref nmod) => { + hir::ItemKind::ForeignMod(ref nmod) => { self.head("extern")?; self.word_nbsp(&nmod.abi.to_string())?; self.bopen()?; self.print_foreign_mod(nmod, &item.attrs)?; self.bclose(item.span)?; } - hir::ItemGlobalAsm(ref ga) => { + hir::ItemKind::GlobalAsm(ref ga) => { self.head(&visibility_qualified(&item.vis, "global asm"))?; self.s.word(&ga.asm.as_str())?; self.end()? 
} - hir::ItemTy(ref ty, ref generics) => { - self.ibox(indent_unit)?; - self.ibox(0)?; - self.word_nbsp(&visibility_qualified(&item.vis, "type"))?; + hir::ItemKind::Ty(ref ty, ref generics) => { + self.head(&visibility_qualified(&item.vis, "type"))?; self.print_name(item.name)?; self.print_generic_params(&generics.params)?; self.end()?; // end the inner ibox @@ -641,18 +639,41 @@ impl<'a> State<'a> { self.s.word(";")?; self.end()?; // end the outer ibox } - hir::ItemEnum(ref enum_definition, ref params) => { + hir::ItemKind::Existential(ref exist) => { + self.head(&visibility_qualified(&item.vis, "existential type"))?; + self.print_name(item.name)?; + self.print_generic_params(&exist.generics.params)?; + self.end()?; // end the inner ibox + + self.print_where_clause(&exist.generics.where_clause)?; + self.s.space()?; + self.word_space(":")?; + let mut real_bounds = Vec::with_capacity(exist.bounds.len()); + for b in exist.bounds.iter() { + if let GenericBound::Trait(ref ptr, hir::TraitBoundModifier::Maybe) = *b { + self.s.space()?; + self.word_space("for ?")?; + self.print_trait_ref(&ptr.trait_ref)?; + } else { + real_bounds.push(b.clone()); + } + } + self.print_bounds(":", &real_bounds[..])?; + self.s.word(";")?; + self.end()?; // end the outer ibox + } + hir::ItemKind::Enum(ref enum_definition, ref params) => { self.print_enum_def(enum_definition, params, item.name, item.span, &item.vis)?; } - hir::ItemStruct(ref struct_def, ref generics) => { + hir::ItemKind::Struct(ref struct_def, ref generics) => { self.head(&visibility_qualified(&item.vis, "struct"))?; self.print_struct(struct_def, generics, item.name, item.span, true)?; } - hir::ItemUnion(ref struct_def, ref generics) => { + hir::ItemKind::Union(ref struct_def, ref generics) => { self.head(&visibility_qualified(&item.vis, "union"))?; self.print_struct(struct_def, generics, item.name, item.span, true)?; } - hir::ItemImpl(unsafety, + hir::ItemKind::Impl(unsafety, polarity, defaultness, ref generics, @@ -697,7 
+718,7 @@ impl<'a> State<'a> { } self.bclose(item.span)?; } - hir::ItemTrait(is_auto, unsafety, ref generics, ref bounds, ref trait_items) => { + hir::ItemKind::Trait(is_auto, unsafety, ref generics, ref bounds, ref trait_items) => { self.head("")?; self.print_visibility(&item.vis)?; self.print_is_auto(is_auto)?; @@ -707,7 +728,7 @@ impl<'a> State<'a> { self.print_generic_params(&generics.params)?; let mut real_bounds = Vec::with_capacity(bounds.len()); for b in bounds.iter() { - if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b { + if let GenericBound::Trait(ref ptr, hir::TraitBoundModifier::Maybe) = *b { self.s.space()?; self.word_space("for ?")?; self.print_trait_ref(&ptr.trait_ref)?; @@ -724,7 +745,7 @@ impl<'a> State<'a> { } self.bclose(item.span)?; } - hir::ItemTraitAlias(ref generics, ref bounds) => { + hir::ItemKind::TraitAlias(ref generics, ref bounds) => { self.head("")?; self.print_visibility(&item.vis)?; self.word_nbsp("trait")?; @@ -733,7 +754,7 @@ impl<'a> State<'a> { let mut real_bounds = Vec::with_capacity(bounds.len()); // FIXME(durka) this seems to be some quite outdated syntax for b in bounds.iter() { - if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b { + if let GenericBound::Trait(ref ptr, hir::TraitBoundModifier::Maybe) = *b { self.s.space()?; self.word_space("for ?")?; self.print_trait_ref(&ptr.trait_ref)?; @@ -805,16 +826,27 @@ impl<'a> State<'a> { } pub fn print_visibility(&mut self, vis: &hir::Visibility) -> io::Result<()> { - match *vis { - hir::Public => self.word_nbsp("pub"), - hir::Visibility::Crate => self.word_nbsp("pub(crate)"), - hir::Visibility::Restricted { ref path, .. } => { + match vis.node { + hir::VisibilityKind::Public => self.word_nbsp("pub")?, + hir::VisibilityKind::Crate(ast::CrateSugar::JustCrate) => self.word_nbsp("crate")?, + hir::VisibilityKind::Crate(ast::CrateSugar::PubCrate) => self.word_nbsp("pub(crate)")?, + hir::VisibilityKind::Restricted { ref path, .. 
} => { self.s.word("pub(")?; - self.print_path(path, false)?; - self.word_nbsp(")") + if path.segments.len() == 1 && + path.segments[0].ident.name == keywords::Super.name() { + // Special case: `super` can print like `pub(super)`. + self.s.word("super")?; + } else { + // Everything else requires `in` at present. + self.word_nbsp("in")?; + self.print_path(path, false)?; + } + self.word_nbsp(")")?; } - hir::Inherited => Ok(()), + hir::VisibilityKind::Inherited => () } + + Ok(()) } pub fn print_defaultness(&mut self, defaultness: hir::Defaultness) -> io::Result<()> { @@ -862,7 +894,7 @@ impl<'a> State<'a> { self.maybe_print_comment(field.span.lo())?; self.print_outer_attributes(&field.attrs)?; self.print_visibility(&field.vis)?; - self.print_name(field.name)?; + self.print_ident(field.ident)?; self.word_nbsp(":")?; self.print_type(&field.ty)?; self.s.word(",")?; @@ -876,26 +908,24 @@ impl<'a> State<'a> { self.head("")?; let generics = hir::Generics::empty(); self.print_struct(&v.node.data, &generics, v.node.name, v.span, false)?; - if let Some(d) = v.node.disr_expr { + if let Some(ref d) = v.node.disr_expr { self.s.space()?; self.word_space("=")?; - self.ann.nested(self, Nested::Body(d))?; + self.print_anon_const(d)?; } Ok(()) } pub fn print_method_sig(&mut self, - name: ast::Name, + ident: ast::Ident, m: &hir::MethodSig, generics: &hir::Generics, vis: &hir::Visibility, - arg_names: &[Spanned], + arg_names: &[ast::Ident], body_id: Option) -> io::Result<()> { self.print_fn(&m.decl, - m.unsafety, - m.constness, - m.abi, - Some(name), + m.header, + Some(ident.name), generics, vis, arg_names, @@ -909,24 +939,28 @@ impl<'a> State<'a> { self.print_outer_attributes(&ti.attrs)?; match ti.node { hir::TraitItemKind::Const(ref ty, default) => { - self.print_associated_const(ti.name, &ty, default, &hir::Inherited)?; + let vis = Spanned { span: syntax_pos::DUMMY_SP, + node: hir::VisibilityKind::Inherited }; + self.print_associated_const(ti.ident, &ty, default, &vis)?; } 
hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Required(ref arg_names)) => { - self.print_method_sig(ti.name, sig, &ti.generics, &hir::Inherited, arg_names, - None)?; + let vis = Spanned { span: syntax_pos::DUMMY_SP, + node: hir::VisibilityKind::Inherited }; + self.print_method_sig(ti.ident, sig, &ti.generics, &vis, arg_names, None)?; self.s.word(";")?; } hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Provided(body)) => { + let vis = Spanned { span: syntax_pos::DUMMY_SP, + node: hir::VisibilityKind::Inherited }; self.head("")?; - self.print_method_sig(ti.name, sig, &ti.generics, &hir::Inherited, &[], - Some(body))?; + self.print_method_sig(ti.ident, sig, &ti.generics, &vis, &[], Some(body))?; self.nbsp()?; self.end()?; // need to close a box self.end()?; // need to close a box self.ann.nested(self, Nested::Body(body))?; } hir::TraitItemKind::Type(ref bounds, ref default) => { - self.print_associated_type(ti.name, + self.print_associated_type(ti.ident, Some(bounds), default.as_ref().map(|ty| &**ty))?; } @@ -943,18 +977,22 @@ impl<'a> State<'a> { match ii.node { hir::ImplItemKind::Const(ref ty, expr) => { - self.print_associated_const(ii.name, &ty, Some(expr), &ii.vis)?; + self.print_associated_const(ii.ident, &ty, Some(expr), &ii.vis)?; } hir::ImplItemKind::Method(ref sig, body) => { self.head("")?; - self.print_method_sig(ii.name, sig, &ii.generics, &ii.vis, &[], Some(body))?; + self.print_method_sig(ii.ident, sig, &ii.generics, &ii.vis, &[], Some(body))?; self.nbsp()?; self.end()?; // need to close a box self.end()?; // need to close a box self.ann.nested(self, Nested::Body(body))?; } hir::ImplItemKind::Type(ref ty) => { - self.print_associated_type(ii.name, None, Some(ty))?; + self.print_associated_type(ii.ident, None, Some(ty))?; + } + hir::ImplItemKind::Existential(ref bounds) => { + self.word_space("existential")?; + self.print_associated_type(ii.ident, Some(bounds), None)?; } } self.ann.post(self, NodeSubItem(ii.id)) @@ -963,14 +1001,14 @@ 
impl<'a> State<'a> { pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> { self.maybe_print_comment(st.span.lo())?; match st.node { - hir::StmtDecl(ref decl, _) => { + hir::StmtKind::Decl(ref decl, _) => { self.print_decl(&decl)?; } - hir::StmtExpr(ref expr, _) => { + hir::StmtKind::Expr(ref expr, _) => { self.space_if_not_bol()?; self.print_expr(&expr)?; } - hir::StmtSemi(ref expr, _) => { + hir::StmtKind::Semi(ref expr, _) => { self.space_if_not_bol()?; self.print_expr(&expr)?; self.s.word(";")?; @@ -1042,7 +1080,7 @@ impl<'a> State<'a> { Some(_else) => { match _else.node { // "another else-if" - hir::ExprIf(ref i, ref then, ref e) => { + hir::ExprKind::If(ref i, ref then, ref e) => { self.cbox(indent_unit - 1)?; self.ibox(0)?; self.s.word(" else if ")?; @@ -1052,7 +1090,7 @@ impl<'a> State<'a> { self.print_else(e.as_ref().map(|e| &**e)) } // "final else" - hir::ExprBlock(ref b) => { + hir::ExprKind::Block(ref b, _) => { self.cbox(indent_unit - 1)?; self.ibox(0)?; self.s.word(" else ")?; @@ -1096,6 +1134,9 @@ impl<'a> State<'a> { self.print_else(elseopt) } + pub fn print_anon_const(&mut self, constant: &hir::AnonConst) -> io::Result<()> { + self.ann.nested(self, Nested::Body(constant.body)) + } fn print_call_post(&mut self, args: &[hir::Expr]) -> io::Result<()> { self.popen()?; @@ -1104,7 +1145,7 @@ impl<'a> State<'a> { } pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr, prec: i8) -> io::Result<()> { - let needs_par = expr_precedence(expr) < prec; + let needs_par = expr.precedence().order() < prec; if needs_par { self.popen()?; } @@ -1121,9 +1162,9 @@ impl<'a> State<'a> { let needs_par = match expr.node { // These cases need parens due to the parse error observed in #26461: `if return {}` // parses as the erroneous construct `if (return {})`, not `if (return) {}`. - hir::ExprClosure(..) | - hir::ExprRet(..) | - hir::ExprBreak(..) => true, + hir::ExprKind::Closure(..) | + hir::ExprKind::Ret(..) | + hir::ExprKind::Break(..) 
=> true, _ => contains_exterior_struct_lit(expr), }; @@ -1146,12 +1187,12 @@ impl<'a> State<'a> { self.end() } - fn print_expr_repeat(&mut self, element: &hir::Expr, count: hir::BodyId) -> io::Result<()> { + fn print_expr_repeat(&mut self, element: &hir::Expr, count: &hir::AnonConst) -> io::Result<()> { self.ibox(indent_unit)?; self.s.word("[")?; self.print_expr(element)?; self.word_space(";")?; - self.ann.nested(self, Nested::Body(count))?; + self.print_anon_const(count)?; self.s.word("]")?; self.end() } @@ -1168,7 +1209,7 @@ impl<'a> State<'a> { |s, field| { s.ibox(indent_unit)?; if !field.is_shorthand { - s.print_name(field.name.node)?; + s.print_ident(field.ident)?; s.word_space(":")?; } s.print_expr(&field.expr)?; @@ -1206,8 +1247,7 @@ impl<'a> State<'a> { fn print_expr_call(&mut self, func: &hir::Expr, args: &[hir::Expr]) -> io::Result<()> { let prec = match func.node { - hir::ExprField(..) | - hir::ExprTupField(..) => parser::PREC_FORCE_PAREN, + hir::ExprKind::Field(..) => parser::PREC_FORCE_PAREN, _ => parser::PREC_POSTFIX, }; @@ -1222,17 +1262,13 @@ impl<'a> State<'a> { let base_args = &args[1..]; self.print_expr_maybe_paren(&args[0], parser::PREC_POSTFIX)?; self.s.word(".")?; - self.print_name(segment.name)?; + self.print_ident(segment.ident)?; - segment.with_parameters(|parameters| { - if !parameters.lifetimes.is_empty() || - !parameters.types.is_empty() || - !parameters.bindings.is_empty() - { - self.print_path_parameters(¶meters, segment.infer_types, true) - } else { - Ok(()) + segment.with_generic_args(|generic_args| { + if !generic_args.args.is_empty() || !generic_args.bindings.is_empty() { + return self.print_generic_args(&generic_args, segment.infer_types, true); } + Ok(()) })?; self.print_call_post(base_args) } @@ -1256,8 +1292,8 @@ impl<'a> State<'a> { // These cases need parens: `x as i32 < y` has the parser thinking that `i32 < y` is // the beginning of a path type. It starts trying to parse `x as (i32 < y ...` instead // of `(x as i32) < ...`. 
We need to convince it _not_ to do that. - (&hir::ExprCast { .. }, hir::BinOp_::BiLt) | - (&hir::ExprCast { .. }, hir::BinOp_::BiShl) => parser::PREC_FORCE_PAREN, + (&hir::ExprKind::Cast { .. }, hir::BinOpKind::Lt) | + (&hir::ExprKind::Cast { .. }, hir::BinOpKind::Shl) => parser::PREC_FORCE_PAREN, _ => left_prec, }; @@ -1287,59 +1323,59 @@ impl<'a> State<'a> { self.ibox(indent_unit)?; self.ann.pre(self, NodeExpr(expr))?; match expr.node { - hir::ExprBox(ref expr) => { + hir::ExprKind::Box(ref expr) => { self.word_space("box")?; self.print_expr_maybe_paren(expr, parser::PREC_PREFIX)?; } - hir::ExprArray(ref exprs) => { + hir::ExprKind::Array(ref exprs) => { self.print_expr_vec(exprs)?; } - hir::ExprRepeat(ref element, count) => { + hir::ExprKind::Repeat(ref element, ref count) => { self.print_expr_repeat(&element, count)?; } - hir::ExprStruct(ref qpath, ref fields, ref wth) => { + hir::ExprKind::Struct(ref qpath, ref fields, ref wth) => { self.print_expr_struct(qpath, &fields[..], wth)?; } - hir::ExprTup(ref exprs) => { + hir::ExprKind::Tup(ref exprs) => { self.print_expr_tup(exprs)?; } - hir::ExprCall(ref func, ref args) => { + hir::ExprKind::Call(ref func, ref args) => { self.print_expr_call(&func, args)?; } - hir::ExprMethodCall(ref segment, _, ref args) => { + hir::ExprKind::MethodCall(ref segment, _, ref args) => { self.print_expr_method_call(segment, args)?; } - hir::ExprBinary(op, ref lhs, ref rhs) => { + hir::ExprKind::Binary(op, ref lhs, ref rhs) => { self.print_expr_binary(op, &lhs, &rhs)?; } - hir::ExprUnary(op, ref expr) => { + hir::ExprKind::Unary(op, ref expr) => { self.print_expr_unary(op, &expr)?; } - hir::ExprAddrOf(m, ref expr) => { + hir::ExprKind::AddrOf(m, ref expr) => { self.print_expr_addr_of(m, &expr)?; } - hir::ExprLit(ref lit) => { + hir::ExprKind::Lit(ref lit) => { self.print_literal(&lit)?; } - hir::ExprCast(ref expr, ref ty) => { + hir::ExprKind::Cast(ref expr, ref ty) => { let prec = AssocOp::As.precedence() as i8; 
self.print_expr_maybe_paren(&expr, prec)?; self.s.space()?; self.word_space("as")?; self.print_type(&ty)?; } - hir::ExprType(ref expr, ref ty) => { + hir::ExprKind::Type(ref expr, ref ty) => { let prec = AssocOp::Colon.precedence() as i8; self.print_expr_maybe_paren(&expr, prec)?; self.word_space(":")?; self.print_type(&ty)?; } - hir::ExprIf(ref test, ref blk, ref elseopt) => { + hir::ExprKind::If(ref test, ref blk, ref elseopt) => { self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e))?; } - hir::ExprWhile(ref test, ref blk, opt_sp_name) => { - if let Some(sp_name) = opt_sp_name { - self.print_name(sp_name.node)?; + hir::ExprKind::While(ref test, ref blk, opt_label) => { + if let Some(label) = opt_label { + self.print_ident(label.ident)?; self.word_space(":")?; } self.head("while")?; @@ -1347,16 +1383,16 @@ impl<'a> State<'a> { self.s.space()?; self.print_block(&blk)?; } - hir::ExprLoop(ref blk, opt_sp_name, _) => { - if let Some(sp_name) = opt_sp_name { - self.print_name(sp_name.node)?; + hir::ExprKind::Loop(ref blk, opt_label, _) => { + if let Some(label) = opt_label { + self.print_ident(label.ident)?; self.word_space(":")?; } self.head("loop")?; self.s.space()?; self.print_block(&blk)?; } - hir::ExprMatch(ref expr, ref arms, _) => { + hir::ExprKind::Match(ref expr, ref arms, _) => { self.cbox(indent_unit)?; self.ibox(4)?; self.word_nbsp("match")?; @@ -1368,7 +1404,7 @@ impl<'a> State<'a> { } self.bclose_(expr.span, indent_unit)?; } - hir::ExprClosure(capture_clause, ref decl, body, _fn_decl_span, _gen) => { + hir::ExprKind::Closure(capture_clause, ref decl, body, _fn_decl_span, _gen) => { self.print_capture_clause(capture_clause)?; self.print_closure_args(&decl, body)?; @@ -1383,21 +1419,25 @@ impl<'a> State<'a> { // empty box to satisfy the close. 
self.ibox(0)?; } - hir::ExprBlock(ref blk) => { + hir::ExprKind::Block(ref blk, opt_label) => { + if let Some(label) = opt_label { + self.print_ident(label.ident)?; + self.word_space(":")?; + } // containing cbox, will be closed by print-block at } self.cbox(indent_unit)?; // head-box, will be closed by print-block after { self.ibox(0)?; self.print_block(&blk)?; } - hir::ExprAssign(ref lhs, ref rhs) => { + hir::ExprKind::Assign(ref lhs, ref rhs) => { let prec = AssocOp::Assign.precedence() as i8; self.print_expr_maybe_paren(&lhs, prec + 1)?; self.s.space()?; self.word_space("=")?; self.print_expr_maybe_paren(&rhs, prec)?; } - hir::ExprAssignOp(op, ref lhs, ref rhs) => { + hir::ExprKind::AssignOp(op, ref lhs, ref rhs) => { let prec = AssocOp::Assign.precedence() as i8; self.print_expr_maybe_paren(&lhs, prec + 1)?; self.s.space()?; @@ -1405,30 +1445,25 @@ impl<'a> State<'a> { self.word_space("=")?; self.print_expr_maybe_paren(&rhs, prec)?; } - hir::ExprField(ref expr, name) => { + hir::ExprKind::Field(ref expr, ident) => { self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX)?; self.s.word(".")?; - self.print_name(name.node)?; + self.print_ident(ident)?; } - hir::ExprTupField(ref expr, id) => { - self.print_expr_maybe_paren(&expr, parser::PREC_POSTFIX)?; - self.s.word(".")?; - self.print_usize(id.node)?; - } - hir::ExprIndex(ref expr, ref index) => { + hir::ExprKind::Index(ref expr, ref index) => { self.print_expr_maybe_paren(&expr, parser::PREC_POSTFIX)?; self.s.word("[")?; self.print_expr(&index)?; self.s.word("]")?; } - hir::ExprPath(ref qpath) => { + hir::ExprKind::Path(ref qpath) => { self.print_qpath(qpath, true)? 
} - hir::ExprBreak(label, ref opt_expr) => { + hir::ExprKind::Break(destination, ref opt_expr) => { self.s.word("break")?; self.s.space()?; - if let Some(label_ident) = label.ident { - self.print_name(label_ident.node.name)?; + if let Some(label) = destination.label { + self.print_ident(label.ident)?; self.s.space()?; } if let Some(ref expr) = *opt_expr { @@ -1436,15 +1471,15 @@ impl<'a> State<'a> { self.s.space()?; } } - hir::ExprAgain(label) => { + hir::ExprKind::Continue(destination) => { self.s.word("continue")?; self.s.space()?; - if let Some(label_ident) = label.ident { - self.print_name(label_ident.node.name)?; + if let Some(label) = destination.label { + self.print_ident(label.ident)?; self.s.space()? } } - hir::ExprRet(ref result) => { + hir::ExprKind::Ret(ref result) => { self.s.word("return")?; match *result { Some(ref expr) => { @@ -1454,7 +1489,7 @@ impl<'a> State<'a> { _ => (), } } - hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => { + hir::ExprKind::InlineAsm(ref a, ref outputs, ref inputs) => { self.s.word("asm!")?; self.popen()?; self.print_string(&a.asm.as_str(), a.asm_str_style)?; @@ -1519,7 +1554,7 @@ impl<'a> State<'a> { self.pclose()?; } - hir::ExprYield(ref expr) => { + hir::ExprKind::Yield(ref expr) => { self.word_space("yield")?; self.print_expr_maybe_paren(&expr, parser::PREC_JUMP)?; } @@ -1540,7 +1575,7 @@ impl<'a> State<'a> { pub fn print_decl(&mut self, decl: &hir::Decl) -> io::Result<()> { self.maybe_print_comment(decl.span.lo())?; match decl.node { - hir::DeclLocal(ref loc) => { + hir::DeclKind::Local(ref loc) => { self.space_if_not_bol()?; self.ibox(indent_unit)?; self.word_nbsp("let")?; @@ -1555,7 +1590,7 @@ impl<'a> State<'a> { } self.end() } - hir::DeclItem(item) => { + hir::DeclKind::Item(item) => { self.ann.nested(self, Nested::Item(item)) } } @@ -1565,9 +1600,17 @@ impl<'a> State<'a> { self.s.word(&i.to_string()) } + pub fn print_ident(&mut self, ident: ast::Ident) -> io::Result<()> { + if ident.is_raw_guess() { + 
self.s.word(&format!("r#{}", ident.name))?; + } else { + self.s.word(&ident.as_str())?; + } + self.ann.post(self, NodeName(&ident.name)) + } + pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> { - self.s.word(&name.as_str())?; - self.ann.post(self, NodeName(&name)) + self.print_ident(ast::Ident::with_empty_ctxt(name)) } pub fn print_for_decl(&mut self, loc: &hir::Local, coll: &hir::Expr) -> io::Result<()> { @@ -1587,13 +1630,12 @@ impl<'a> State<'a> { if i > 0 { self.s.word("::")? } - if segment.name != keywords::CrateRoot.name() && - segment.name != keywords::DollarCrate.name() { - self.print_name(segment.name)?; - segment.with_parameters(|parameters| { - self.print_path_parameters(parameters, - segment.infer_types, - colons_before_params) + if segment.ident.name != keywords::CrateRoot.name() && + segment.ident.name != keywords::DollarCrate.name() { + self.print_ident(segment.ident)?; + segment.with_generic_args(|generic_args| { + self.print_generic_args(generic_args, segment.infer_types, + colons_before_params) })?; } } @@ -1619,13 +1661,13 @@ impl<'a> State<'a> { if i > 0 { self.s.word("::")? 
} - if segment.name != keywords::CrateRoot.name() && - segment.name != keywords::DollarCrate.name() { - self.print_name(segment.name)?; - segment.with_parameters(|parameters| { - self.print_path_parameters(parameters, - segment.infer_types, - colons_before_params) + if segment.ident.name != keywords::CrateRoot.name() && + segment.ident.name != keywords::DollarCrate.name() { + self.print_ident(segment.ident)?; + segment.with_generic_args(|generic_args| { + self.print_generic_args(generic_args, + segment.infer_types, + colons_before_params) })?; } } @@ -1633,11 +1675,11 @@ impl<'a> State<'a> { self.s.word(">")?; self.s.word("::")?; let item_segment = path.segments.last().unwrap(); - self.print_name(item_segment.name)?; - item_segment.with_parameters(|parameters| { - self.print_path_parameters(parameters, - item_segment.infer_types, - colons_before_params) + self.print_ident(item_segment.ident)?; + item_segment.with_generic_args(|generic_args| { + self.print_generic_args(generic_args, + item_segment.infer_types, + colons_before_params) }) } hir::QPath::TypeRelative(ref qself, ref item_segment) => { @@ -1645,29 +1687,29 @@ impl<'a> State<'a> { self.print_type(qself)?; self.s.word(">")?; self.s.word("::")?; - self.print_name(item_segment.name)?; - item_segment.with_parameters(|parameters| { - self.print_path_parameters(parameters, - item_segment.infer_types, - colons_before_params) + self.print_ident(item_segment.ident)?; + item_segment.with_generic_args(|generic_args| { + self.print_generic_args(generic_args, + item_segment.infer_types, + colons_before_params) }) } } } - fn print_path_parameters(&mut self, - parameters: &hir::PathParameters, + fn print_generic_args(&mut self, + generic_args: &hir::GenericArgs, infer_types: bool, colons_before_params: bool) -> io::Result<()> { - if parameters.parenthesized { + if generic_args.parenthesized { self.s.word("(")?; - self.commasep(Inconsistent, parameters.inputs(), |s, ty| s.print_type(&ty))?; + self.commasep(Inconsistent, 
generic_args.inputs(), |s, ty| s.print_type(&ty))?; self.s.word(")")?; self.space_if_not_bol()?; self.word_space("->")?; - self.print_type(¶meters.bindings[0].ty)?; + self.print_type(&generic_args.bindings[0].ty)?; } else { let start = if colons_before_params { "::<" } else { "<" }; let empty = Cell::new(true); @@ -1680,16 +1722,31 @@ impl<'a> State<'a> { } }; - if !parameters.lifetimes.iter().all(|lt| lt.is_elided()) { - for lifetime in ¶meters.lifetimes { - start_or_comma(self)?; - self.print_lifetime(lifetime)?; + let mut types = vec![]; + let mut elide_lifetimes = true; + for arg in &generic_args.args { + match arg { + GenericArg::Lifetime(lt) => { + if !lt.is_elided() { + elide_lifetimes = false; + } + } + GenericArg::Type(ty) => { + types.push(ty); + } } } - - if !parameters.types.is_empty() { + if !elide_lifetimes { start_or_comma(self)?; - self.commasep(Inconsistent, ¶meters.types, |s, ty| s.print_type(&ty))?; + self.commasep(Inconsistent, &generic_args.args, |s, generic_arg| { + match generic_arg { + GenericArg::Lifetime(lt) => s.print_lifetime(lt), + GenericArg::Type(ty) => s.print_type(ty), + } + })?; + } else if !types.is_empty() { + start_or_comma(self)?; + self.commasep(Inconsistent, &types, |s, ty| s.print_type(&ty))?; } // FIXME(eddyb) This would leak into error messages, e.g.: @@ -1699,9 +1756,9 @@ impl<'a> State<'a> { self.s.word("..")?; } - for binding in parameters.bindings.iter() { + for binding in generic_args.bindings.iter() { start_or_comma(self)?; - self.print_name(binding.name)?; + self.print_ident(binding.ident)?; self.s.space()?; self.word_space("=")?; self.print_type(&binding.ty)?; @@ -1722,7 +1779,7 @@ impl<'a> State<'a> { // is that it doesn't matter match pat.node { PatKind::Wild => self.s.word("_")?, - PatKind::Binding(binding_mode, _, ref path1, ref sub) => { + PatKind::Binding(binding_mode, _, ident, ref sub) => { match binding_mode { hir::BindingAnnotation::Ref => { self.word_nbsp("ref")?; @@ -1737,7 +1794,7 @@ impl<'a> State<'a> 
{ self.word_nbsp("mut")?; } } - self.print_name(path1.node)?; + self.print_ident(ident)?; if let Some(ref p) = *sub { self.s.word("@")?; self.print_pat(&p)?; @@ -1773,7 +1830,7 @@ impl<'a> State<'a> { |s, f| { s.cbox(indent_unit)?; if !f.node.is_shorthand { - s.print_name(f.node.name)?; + s.print_ident(f.node.ident)?; s.word_nbsp(":")?; } s.print_pat(&f.node.pat)?; @@ -1810,15 +1867,35 @@ impl<'a> State<'a> { self.pclose()?; } PatKind::Box(ref inner) => { + let is_range_inner = match inner.node { + PatKind::Range(..) => true, + _ => false, + }; self.s.word("box ")?; + if is_range_inner { + self.popen()?; + } self.print_pat(&inner)?; + if is_range_inner { + self.pclose()?; + } } PatKind::Ref(ref inner, mutbl) => { + let is_range_inner = match inner.node { + PatKind::Range(..) => true, + _ => false, + }; self.s.word("&")?; if mutbl == hir::MutMutable { self.s.word("mut ")?; } + if is_range_inner { + self.popen()?; + } self.print_pat(&inner)?; + if is_range_inner { + self.pclose()?; + } } PatKind::Lit(ref e) => self.print_expr(&e)?, PatKind::Range(ref begin, ref end, ref end_kind) => { @@ -1837,7 +1914,9 @@ impl<'a> State<'a> { if !before.is_empty() { self.word_space(",")?; } - if p.node != PatKind::Wild { + if let PatKind::Wild = p.node { + // Print nothing + } else { self.print_pat(&p)?; } self.s.word("..")?; @@ -1880,7 +1959,11 @@ impl<'a> State<'a> { self.word_space("=>")?; match arm.body.node { - hir::ExprBlock(ref blk) => { + hir::ExprKind::Block(ref blk, opt_label) => { + if let Some(label) = opt_label { + self.print_ident(label.ident)?; + self.word_space(":")?; + } // the block will close the pattern's ibox self.print_block_unclosed_indent(&blk, indent_unit)?; @@ -1900,16 +1983,14 @@ impl<'a> State<'a> { pub fn print_fn(&mut self, decl: &hir::FnDecl, - unsafety: hir::Unsafety, - constness: hir::Constness, - abi: Abi, + header: hir::FnHeader, name: Option, generics: &hir::Generics, vis: &hir::Visibility, - arg_names: &[Spanned], + arg_names: &[ast::Ident], 
body_id: Option) -> io::Result<()> { - self.print_fn_header_info(unsafety, constness, abi, vis)?; + self.print_fn_header_info(header, vis)?; if let Some(name) = name { self.nbsp()?; @@ -1923,8 +2004,8 @@ impl<'a> State<'a> { assert!(arg_names.is_empty() || body_id.is_none()); self.commasep(Inconsistent, &decl.inputs, |s, ty| { s.ibox(indent_unit)?; - if let Some(name) = arg_names.get(i) { - s.s.word(&name.node.as_str())?; + if let Some(arg_name) = arg_names.get(i) { + s.s.word(&arg_name.as_str())?; s.s.word(":")?; s.s.space()?; } else if let Some(body_id) = body_id { @@ -1954,7 +2035,9 @@ impl<'a> State<'a> { s.ann.nested(s, Nested::BodyArgPat(body_id, i))?; i += 1; - if ty.node != hir::TyInfer { + if let hir::TyKind::Infer = ty.node { + // Print nothing + } else { s.s.word(":")?; s.s.space()?; s.print_type(ty)?; @@ -1985,7 +2068,7 @@ impl<'a> State<'a> { } } - pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::TyParamBound]) -> io::Result<()> { + pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::GenericBound]) -> io::Result<()> { if !bounds.is_empty() { self.s.word(prefix)?; let mut first = true; @@ -2000,13 +2083,13 @@ impl<'a> State<'a> { } match bound { - TraitTyParamBound(tref, modifier) => { + GenericBound::Trait(tref, modifier) => { if modifier == &TraitBoundModifier::Maybe { self.s.word("?")?; } self.print_poly_trait_ref(tref)?; } - RegionTyParamBound(lt) => { + GenericBound::Outlives(lt) => { self.print_lifetime(lt)?; } } @@ -2015,30 +2098,12 @@ impl<'a> State<'a> { Ok(()) } - pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> { - self.print_name(lifetime.name.name()) - } - - pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> { - self.print_lifetime(&lifetime.lifetime)?; - let mut sep = ":"; - for v in &lifetime.bounds { - self.s.word(sep)?; - self.print_lifetime(v)?; - sep = "+"; - } - Ok(()) - } - - pub fn print_generic_params(&mut self, generic_params: &[hir::GenericParam]) -> 
io::Result<()> { + pub fn print_generic_params(&mut self, generic_params: &[GenericParam]) -> io::Result<()> { if !generic_params.is_empty() { self.s.word("<")?; self.commasep(Inconsistent, generic_params, |s, param| { - match *param { - hir::GenericParam::Lifetime(ref ld) => s.print_lifetime_def(ld), - hir::GenericParam::Type(ref tp) => s.print_ty_param(tp), - } + s.print_generic_param(param) })?; self.s.word(">")?; @@ -2046,19 +2111,41 @@ impl<'a> State<'a> { Ok(()) } - pub fn print_ty_param(&mut self, param: &hir::TyParam) -> io::Result<()> { - self.print_name(param.name)?; - self.print_bounds(":", ¶m.bounds)?; - match param.default { - Some(ref default) => { - self.s.space()?; - self.word_space("=")?; - self.print_type(&default) + pub fn print_generic_param(&mut self, param: &GenericParam) -> io::Result<()> { + self.print_ident(param.name.ident())?; + match param.kind { + GenericParamKind::Lifetime { .. } => { + let mut sep = ":"; + for bound in ¶m.bounds { + match bound { + GenericBound::Outlives(lt) => { + self.s.word(sep)?; + self.print_lifetime(lt)?; + sep = "+"; + } + _ => bug!(), + } + } + Ok(()) + } + GenericParamKind::Type { ref default, .. 
} => { + self.print_bounds(":", ¶m.bounds)?; + match default { + Some(default) => { + self.s.space()?; + self.word_space("=")?; + self.print_type(&default) + } + _ => Ok(()), + } } - _ => Ok(()), } } + pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> { + self.print_ident(lifetime.name.ident()) + } + pub fn print_where_clause(&mut self, where_clause: &hir::WhereClause) -> io::Result<()> { if where_clause.predicates.is_empty() { return Ok(()); @@ -2090,7 +2177,12 @@ impl<'a> State<'a> { self.s.word(":")?; for (i, bound) in bounds.iter().enumerate() { - self.print_lifetime(bound)?; + match bound { + GenericBound::Outlives(lt) => { + self.print_lifetime(lt)?; + } + _ => bug!(), + } if i != 0 { self.s.word(":")?; @@ -2149,7 +2241,7 @@ impl<'a> State<'a> { decl: &hir::FnDecl, name: Option, generic_params: &[hir::GenericParam], - arg_names: &[Spanned]) + arg_names: &[ast::Ident]) -> io::Result<()> { self.ibox(indent_unit)?; if !generic_params.is_empty() { @@ -2165,12 +2257,16 @@ impl<'a> State<'a> { span: syntax_pos::DUMMY_SP, }; self.print_fn(decl, - unsafety, - hir::Constness::NotConst, - abi, + hir::FnHeader { + unsafety, + abi, + constness: hir::Constness::NotConst, + asyncness: hir::IsAsync::NotAsync, + }, name, &generics, - &hir::Inherited, + &Spanned { span: syntax_pos::DUMMY_SP, + node: hir::VisibilityKind::Inherited }, arg_names, None)?; self.end() @@ -2208,13 +2304,8 @@ impl<'a> State<'a> { if self.next_comment().is_none() { self.s.hardbreak()?; } - loop { - match self.next_comment() { - Some(ref cmnt) => { - self.print_comment(cmnt)?; - } - _ => break, - } + while let Some(ref cmnt) = self.next_comment() { + self.print_comment(cmnt)? 
} Ok(()) } @@ -2243,22 +2334,26 @@ impl<'a> State<'a> { } pub fn print_fn_header_info(&mut self, - unsafety: hir::Unsafety, - constness: hir::Constness, - abi: Abi, + header: hir::FnHeader, vis: &hir::Visibility) -> io::Result<()> { self.s.word(&visibility_qualified(vis, ""))?; - self.print_unsafety(unsafety)?; - match constness { + match header.constness { hir::Constness::NotConst => {} hir::Constness::Const => self.word_nbsp("const")?, } - if abi != Abi::Rust { + match header.asyncness { + hir::IsAsync::NotAsync => {} + hir::IsAsync::Async => self.word_nbsp("async")?, + } + + self.print_unsafety(header.unsafety)?; + + if header.abi != Abi::Rust { self.word_nbsp("extern")?; - self.word_nbsp(&abi.to_string())?; + self.word_nbsp(&header.abi.to_string())?; } self.s.word("fn") @@ -2289,11 +2384,11 @@ impl<'a> State<'a> { /// isn't parsed as (if true {...} else {...} | x) | 5 fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool { match e.node { - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprBlock(_) | - hir::ExprWhile(..) | - hir::ExprLoop(..) => false, + hir::ExprKind::If(..) | + hir::ExprKind::Match(..) | + hir::ExprKind::Block(..) | + hir::ExprKind::While(..) | + hir::ExprKind::Loop(..) => false, _ => true, } } @@ -2301,96 +2396,47 @@ fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool { /// this statement requires a semicolon after it. /// note that in one case (stmt_semi), we've already /// seen the semicolon, and thus don't need another. -fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool { +fn stmt_ends_with_semi(stmt: &hir::StmtKind) -> bool { match *stmt { - hir::StmtDecl(ref d, _) => { + hir::StmtKind::Decl(ref d, _) => { match d.node { - hir::DeclLocal(_) => true, - hir::DeclItem(_) => false, + hir::DeclKind::Local(_) => true, + hir::DeclKind::Item(_) => false, } } - hir::StmtExpr(ref e, _) => { + hir::StmtKind::Expr(ref e, _) => { expr_requires_semi_to_be_stmt(&e) } - hir::StmtSemi(..) => { + hir::StmtKind::Semi(..) 
=> { false } } } - -fn expr_precedence(expr: &hir::Expr) -> i8 { - use syntax::util::parser::*; - - match expr.node { - hir::ExprClosure(..) => PREC_CLOSURE, - - hir::ExprBreak(..) | - hir::ExprAgain(..) | - hir::ExprRet(..) | - hir::ExprYield(..) => PREC_JUMP, - - // Binop-like expr kinds, handled by `AssocOp`. - hir::ExprBinary(op, _, _) => bin_op_to_assoc_op(op.node).precedence() as i8, - - hir::ExprCast(..) => AssocOp::As.precedence() as i8, - hir::ExprType(..) => AssocOp::Colon.precedence() as i8, - - hir::ExprAssign(..) | - hir::ExprAssignOp(..) => AssocOp::Assign.precedence() as i8, - - // Unary, prefix - hir::ExprBox(..) | - hir::ExprAddrOf(..) | - hir::ExprUnary(..) => PREC_PREFIX, - - // Unary, postfix - hir::ExprCall(..) | - hir::ExprMethodCall(..) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprIndex(..) | - hir::ExprInlineAsm(..) => PREC_POSTFIX, - - // Never need parens - hir::ExprArray(..) | - hir::ExprRepeat(..) | - hir::ExprTup(..) | - hir::ExprLit(..) | - hir::ExprPath(..) | - hir::ExprIf(..) | - hir::ExprWhile(..) | - hir::ExprLoop(..) | - hir::ExprMatch(..) | - hir::ExprBlock(..) | - hir::ExprStruct(..) 
=> PREC_PAREN, - } -} - -fn bin_op_to_assoc_op(op: hir::BinOp_) -> AssocOp { - use hir::BinOp_::*; +fn bin_op_to_assoc_op(op: hir::BinOpKind) -> AssocOp { + use hir::BinOpKind::*; match op { - BiAdd => AssocOp::Add, - BiSub => AssocOp::Subtract, - BiMul => AssocOp::Multiply, - BiDiv => AssocOp::Divide, - BiRem => AssocOp::Modulus, + Add => AssocOp::Add, + Sub => AssocOp::Subtract, + Mul => AssocOp::Multiply, + Div => AssocOp::Divide, + Rem => AssocOp::Modulus, - BiAnd => AssocOp::LAnd, - BiOr => AssocOp::LOr, + And => AssocOp::LAnd, + Or => AssocOp::LOr, - BiBitXor => AssocOp::BitXor, - BiBitAnd => AssocOp::BitAnd, - BiBitOr => AssocOp::BitOr, - BiShl => AssocOp::ShiftLeft, - BiShr => AssocOp::ShiftRight, + BitXor => AssocOp::BitXor, + BitAnd => AssocOp::BitAnd, + BitOr => AssocOp::BitOr, + Shl => AssocOp::ShiftLeft, + Shr => AssocOp::ShiftRight, - BiEq => AssocOp::Equal, - BiLt => AssocOp::Less, - BiLe => AssocOp::LessEqual, - BiNe => AssocOp::NotEqual, - BiGe => AssocOp::GreaterEqual, - BiGt => AssocOp::Greater, + Eq => AssocOp::Equal, + Lt => AssocOp::Less, + Le => AssocOp::LessEqual, + Ne => AssocOp::NotEqual, + Ge => AssocOp::GreaterEqual, + Gt => AssocOp::Greater, } } @@ -2399,25 +2445,24 @@ fn bin_op_to_assoc_op(op: hir::BinOp_) -> AssocOp { /// `X { y: 1 } == foo` all do, but `(X { y: 1 }) == foo` does not. fn contains_exterior_struct_lit(value: &hir::Expr) -> bool { match value.node { - hir::ExprStruct(..) => true, + hir::ExprKind::Struct(..) 
=> true, - hir::ExprAssign(ref lhs, ref rhs) | - hir::ExprAssignOp(_, ref lhs, ref rhs) | - hir::ExprBinary(_, ref lhs, ref rhs) => { + hir::ExprKind::Assign(ref lhs, ref rhs) | + hir::ExprKind::AssignOp(_, ref lhs, ref rhs) | + hir::ExprKind::Binary(_, ref lhs, ref rhs) => { // X { y: 1 } + X { y: 2 } contains_exterior_struct_lit(&lhs) || contains_exterior_struct_lit(&rhs) } - hir::ExprUnary(_, ref x) | - hir::ExprCast(ref x, _) | - hir::ExprType(ref x, _) | - hir::ExprField(ref x, _) | - hir::ExprTupField(ref x, _) | - hir::ExprIndex(ref x, _) => { + hir::ExprKind::Unary(_, ref x) | + hir::ExprKind::Cast(ref x, _) | + hir::ExprKind::Type(ref x, _) | + hir::ExprKind::Field(ref x, _) | + hir::ExprKind::Index(ref x, _) => { // &X { y: 1 }, X { y: 1 }.y contains_exterior_struct_lit(&x) } - hir::ExprMethodCall(.., ref exprs) => { + hir::ExprKind::MethodCall(.., ref exprs) => { // X { y: 1 }.bar(...) contains_exterior_struct_lit(&exprs[0]) } diff --git a/src/librustc/ich/caching_codemap_view.rs b/src/librustc/ich/caching_codemap_view.rs index 3caf308d6526..e5bf384d253c 100644 --- a/src/librustc/ich/caching_codemap_view.rs +++ b/src/librustc/ich/caching_codemap_view.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::rc::Rc; +use rustc_data_structures::sync::Lrc; use syntax::codemap::CodeMap; use syntax_pos::{BytePos, FileMap}; @@ -18,7 +18,7 @@ struct CacheEntry { line_number: usize, line_start: BytePos, line_end: BytePos, - file: Rc, + file: Lrc, file_index: usize, } @@ -51,7 +51,7 @@ impl<'cm> CachingCodemapView<'cm> { pub fn byte_pos_to_line_and_col(&mut self, pos: BytePos) - -> Option<(Rc, usize, BytePos)> { + -> Option<(Lrc, usize, BytePos)> { self.time_stamp += 1; // Check if the position is in one of the cached lines diff --git a/src/librustc/ich/hcx.rs b/src/librustc/ich/hcx.rs index 2945b1ab9124..329cc2216a49 100644 --- a/src/librustc/ich/hcx.rs +++ b/src/librustc/ich/hcx.rs @@ -15,12 +15,13 @@ use hir::map::definitions::Definitions; use ich::{self, CachingCodemapView, Fingerprint}; use middle::cstore::CrateStore; use ty::{TyCtxt, fast_reject}; +use mir::interpret::AllocId; use session::Session; use std::cmp::Ord; use std::hash as std_hash; -use std::cell::RefCell; use std::collections::HashMap; +use std::cell::RefCell; use syntax::ast; @@ -30,33 +31,37 @@ use syntax::symbol::Symbol; use syntax_pos::{Span, DUMMY_SP}; use syntax_pos::hygiene; -use rustc_data_structures::stable_hasher::{HashStable, StableHashingContextProvider, +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult, ToStableHashKey}; use rustc_data_structures::accumulate_vec::AccumulateVec; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; -thread_local!(static IGNORED_ATTR_NAMES: RefCell> = - RefCell::new(FxHashSet())); +fn compute_ignored_attr_names() -> FxHashSet { + debug_assert!(ich::IGNORED_ATTRIBUTES.len() > 0); + ich::IGNORED_ATTRIBUTES.iter().map(|&s| Symbol::intern(s)).collect() +} /// This is the context state available during incr. comp. hashing. It contains /// enough information to transform DefIds and HirIds into stable DefPaths (i.e. /// a reference to the TyCtxt) and it holds a few caches for speeding up various /// things (e.g. 
each DefId/DefPath is only hashed once). #[derive(Clone)] -pub struct StableHashingContext<'gcx> { - sess: &'gcx Session, - definitions: &'gcx Definitions, - cstore: &'gcx CrateStore, - body_resolver: BodyResolver<'gcx>, +pub struct StableHashingContext<'a> { + sess: &'a Session, + definitions: &'a Definitions, + cstore: &'a dyn CrateStore, + body_resolver: BodyResolver<'a>, hash_spans: bool, hash_bodies: bool, node_id_hashing_mode: NodeIdHashingMode, // Very often, we are hashing something that does not need the // CachingCodemapView, so we initialize it lazily. - raw_codemap: &'gcx CodeMap, - caching_codemap: Option>, + raw_codemap: &'a CodeMap, + caching_codemap: Option>, + + pub(super) alloc_id_recursion_tracker: FxHashSet, } #[derive(PartialEq, Eq, Clone, Copy)] @@ -79,26 +84,17 @@ impl<'gcx> BodyResolver<'gcx> { } } -impl<'gcx> StableHashingContext<'gcx> { +impl<'a> StableHashingContext<'a> { // The `krate` here is only used for mapping BodyIds to Bodies. // Don't use it for anything else or you'll run the risk of // leaking data out of the tracking system. 
- pub fn new(sess: &'gcx Session, - krate: &'gcx hir::Crate, - definitions: &'gcx Definitions, - cstore: &'gcx CrateStore) + pub fn new(sess: &'a Session, + krate: &'a hir::Crate, + definitions: &'a Definitions, + cstore: &'a dyn CrateStore) -> Self { let hash_spans_initial = !sess.opts.debugging_opts.incremental_ignore_spans; - debug_assert!(ich::IGNORED_ATTRIBUTES.len() > 0); - IGNORED_ATTR_NAMES.with(|names| { - let mut names = names.borrow_mut(); - if names.is_empty() { - names.extend(ich::IGNORED_ATTRIBUTES.iter() - .map(|&s| Symbol::intern(s))); - } - }); - StableHashingContext { sess, body_resolver: BodyResolver(krate), @@ -109,11 +105,12 @@ impl<'gcx> StableHashingContext<'gcx> { hash_spans: hash_spans_initial, hash_bodies: true, node_id_hashing_mode: NodeIdHashingMode::HashDefPath, + alloc_id_recursion_tracker: Default::default(), } } #[inline] - pub fn sess(&self) -> &'gcx Session { + pub fn sess(&self) -> &'a Session { self.sess } @@ -172,7 +169,7 @@ impl<'gcx> StableHashingContext<'gcx> { } #[inline] - pub fn codemap(&mut self) -> &mut CachingCodemapView<'gcx> { + pub fn codemap(&mut self) -> &mut CachingCodemapView<'a> { match self.caching_codemap { Some(ref mut cm) => { cm @@ -186,9 +183,10 @@ impl<'gcx> StableHashingContext<'gcx> { #[inline] pub fn is_ignored_attr(&self, name: Symbol) -> bool { - IGNORED_ATTR_NAMES.with(|names| { - names.borrow().contains(&name) - }) + thread_local! { + static IGNORED_ATTRIBUTES: FxHashSet = compute_ignored_attr_names(); + } + IGNORED_ATTRIBUTES.with(|attrs| attrs.contains(&name)) } pub fn hash_hir_item_like(&mut self, f: F) { @@ -201,28 +199,44 @@ impl<'gcx> StableHashingContext<'gcx> { } } -impl<'a, 'gcx, 'lcx> StableHashingContextProvider for TyCtxt<'a, 'gcx, 'lcx> { - type ContextType = StableHashingContext<'gcx>; - fn create_stable_hashing_context(&self) -> Self::ContextType { +/// Something that can provide a stable hashing context. 
+pub trait StableHashingContextProvider<'a> { + fn get_stable_hashing_context(&self) -> StableHashingContext<'a>; +} + +impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> +for &'b T { + fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { + (**self).get_stable_hashing_context() + } +} + +impl<'a, 'b, T: StableHashingContextProvider<'a>> StableHashingContextProvider<'a> +for &'b mut T { + fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { + (**self).get_stable_hashing_context() + } +} + +impl<'a, 'gcx, 'lcx> StableHashingContextProvider<'a> for TyCtxt<'a, 'gcx, 'lcx> { + fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { (*self).create_stable_hashing_context() } } - -impl<'gcx> StableHashingContextProvider for StableHashingContext<'gcx> { - type ContextType = StableHashingContext<'gcx>; - fn create_stable_hashing_context(&self) -> Self::ContextType { +impl<'a> StableHashingContextProvider<'a> for StableHashingContext<'a> { + fn get_stable_hashing_context(&self) -> StableHashingContext<'a> { self.clone() } } -impl<'gcx> ::dep_graph::DepGraphSafe for StableHashingContext<'gcx> { +impl<'a> ::dep_graph::DepGraphSafe for StableHashingContext<'a> { } -impl<'gcx> HashStable> for hir::BodyId { +impl<'a> HashStable> for hir::BodyId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { if hcx.hash_bodies() { hcx.body_resolver.body(*self).hash_stable(hcx, hasher); @@ -230,10 +244,10 @@ impl<'gcx> HashStable> for hir::BodyId { } } -impl<'gcx> HashStable> for hir::HirId { +impl<'a> HashStable> for hir::HirId { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { match hcx.node_id_hashing_mode { NodeIdHashingMode::Ignore => { @@ -252,21 +266,21 @@ impl<'gcx> HashStable> for hir::HirId { } } -impl<'gcx> ToStableHashKey> for hir::HirId { 
+impl<'a> ToStableHashKey> for hir::HirId { type KeyType = (DefPathHash, hir::ItemLocalId); #[inline] fn to_stable_hash_key(&self, - hcx: &StableHashingContext<'gcx>) + hcx: &StableHashingContext<'a>) -> (DefPathHash, hir::ItemLocalId) { let def_path_hash = hcx.local_def_path_hash(self.owner); (def_path_hash, self.local_id) } } -impl<'gcx> HashStable> for ast::NodeId { +impl<'a> HashStable> for ast::NodeId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { match hcx.node_id_hashing_mode { NodeIdHashingMode::Ignore => { @@ -279,18 +293,18 @@ impl<'gcx> HashStable> for ast::NodeId { } } -impl<'gcx> ToStableHashKey> for ast::NodeId { +impl<'a> ToStableHashKey> for ast::NodeId { type KeyType = (DefPathHash, hir::ItemLocalId); #[inline] fn to_stable_hash_key(&self, - hcx: &StableHashingContext<'gcx>) + hcx: &StableHashingContext<'a>) -> (DefPathHash, hir::ItemLocalId) { hcx.definitions.node_to_hir_id(*self).to_stable_hash_key(hcx) } } -impl<'gcx> HashStable> for Span { +impl<'a> HashStable> for Span { // Hash a span in a stable way. We can't directly hash the span's BytePos // fields (that would be similar to hashing pointers, since those are just @@ -302,7 +316,7 @@ impl<'gcx> HashStable> for Span { // Also, hashing filenames is expensive so we avoid doing it twice when the // span starts and ends in the same file, which is almost always the case. 
fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { const TAG_VALID_SPAN: u8 = 0; const TAG_INVALID_SPAN: u8 = 1; @@ -382,10 +396,10 @@ impl<'gcx> HashStable> for Span { } } -pub fn hash_stable_trait_impls<'gcx, W, R>( - hcx: &mut StableHashingContext<'gcx>, +pub fn hash_stable_trait_impls<'a, 'gcx, W, R>( + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher, - blanket_impls: &Vec, + blanket_impls: &[DefId], non_blanket_impls: &HashMap, R>) where W: StableHasherResult, R: std_hash::BuildHasher, diff --git a/src/librustc/ich/impls_const_math.rs b/src/librustc/ich/impls_const_math.rs deleted file mode 100644 index 6790c2ac7dec..000000000000 --- a/src/librustc/ich/impls_const_math.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This module contains `HashStable` implementations for various data types -//! from `rustc_const_math` in no particular order. 
- -impl_stable_hash_for!(struct ::rustc_const_math::ConstFloat { - ty, - bits -}); - -impl_stable_hash_for!(enum ::rustc_const_math::ConstInt { - I8(val), - I16(val), - I32(val), - I64(val), - I128(val), - Isize(val), - U8(val), - U16(val), - U32(val), - U64(val), - U128(val), - Usize(val) -}); - -impl_stable_hash_for!(enum ::rustc_const_math::ConstIsize { - Is16(i16), - Is32(i32), - Is64(i64) -}); - -impl_stable_hash_for!(enum ::rustc_const_math::ConstUsize { - Us16(i16), - Us32(i32), - Us64(i64) -}); - -impl_stable_hash_for!(enum ::rustc_const_math::ConstMathErr { - NotInRange, - CmpBetweenUnequalTypes, - UnequalTypes(op), - Overflow(op), - ShiftNegative, - DivisionByZero, - RemainderByZero, - UnsignedNegation, - ULitOutOfRange(int_ty), - LitOutOfRange(int_ty) -}); - -impl_stable_hash_for!(enum ::rustc_const_math::Op { - Add, - Sub, - Mul, - Div, - Rem, - Shr, - Shl, - Neg, - BitAnd, - BitOr, - BitXor -}); diff --git a/src/librustc/ich/impls_cstore.rs b/src/librustc/ich/impls_cstore.rs index 18a02ff5c588..f8cd3b8a18a3 100644 --- a/src/librustc/ich/impls_cstore.rs +++ b/src/librustc/ich/impls_cstore.rs @@ -11,8 +11,6 @@ //! This module contains `HashStable` implementations for various data types //! from rustc::middle::cstore in no particular order. 
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; - use middle; impl_stable_hash_for!(enum middle::cstore::DepKind { @@ -33,7 +31,13 @@ impl_stable_hash_for!(struct middle::cstore::NativeLibrary { kind, name, cfg, - foreign_items + foreign_module, + wasm_import_module +}); + +impl_stable_hash_for!(struct middle::cstore::ForeignModule { + foreign_items, + def_id }); impl_stable_hash_for!(enum middle::cstore::LinkagePreference { @@ -42,10 +46,16 @@ impl_stable_hash_for!(enum middle::cstore::LinkagePreference { }); impl_stable_hash_for!(struct middle::cstore::ExternCrate { - def_id, + src, span, - direct, - path_len + path_len, + direct +}); + +impl_stable_hash_for!(enum middle::cstore::ExternCrateSource { + Extern(def_id), + Use, + Path, }); impl_stable_hash_for!(struct middle::cstore::CrateSource { @@ -53,29 +63,3 @@ impl_stable_hash_for!(struct middle::cstore::CrateSource { rlib, rmeta }); - -impl HashStable for middle::cstore::ExternBodyNestedBodies { - fn hash_stable(&self, - hcx: &mut HCX, - hasher: &mut StableHasher) { - let middle::cstore::ExternBodyNestedBodies { - nested_bodies: _, - fingerprint, - } = *self; - - fingerprint.hash_stable(hcx, hasher); - } -} - -impl<'a, HCX> HashStable for middle::cstore::ExternConstBody<'a> { - fn hash_stable(&self, - hcx: &mut HCX, - hasher: &mut StableHasher) { - let middle::cstore::ExternConstBody { - body: _, - fingerprint, - } = *self; - - fingerprint.hash_stable(hcx, hasher); - } -} diff --git a/src/librustc/ich/impls_hir.rs b/src/librustc/ich/impls_hir.rs index cc1b028480e4..76e57558bfe0 100644 --- a/src/librustc/ich/impls_hir.rs +++ b/src/librustc/ich/impls_hir.rs @@ -14,52 +14,53 @@ use hir; use hir::map::DefPathHash; use hir::def_id::{DefId, LocalDefId, CrateNum, CRATE_DEF_INDEX}; -use ich::{StableHashingContext, NodeIdHashingMode}; +use ich::{StableHashingContext, NodeIdHashingMode, Fingerprint}; use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, 
StableHasher, StableHasherResult}; use std::mem; use syntax::ast; +use syntax::attr; -impl<'gcx> HashStable> for DefId { +impl<'a> HashStable> for DefId { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.def_path_hash(*self).hash_stable(hcx, hasher); } } -impl<'gcx> ToStableHashKey> for DefId { +impl<'a> ToStableHashKey> for DefId { type KeyType = DefPathHash; #[inline] - fn to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash { hcx.def_path_hash(*self) } } -impl<'gcx> HashStable> for LocalDefId { +impl<'a> HashStable> for LocalDefId { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.def_path_hash(self.to_def_id()).hash_stable(hcx, hasher); } } -impl<'gcx> ToStableHashKey> for LocalDefId { +impl<'a> ToStableHashKey> for LocalDefId { type KeyType = DefPathHash; #[inline] - fn to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash { hcx.def_path_hash(self.to_def_id()) } } -impl<'gcx> HashStable> for CrateNum { +impl<'a> HashStable> for CrateNum { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.def_path_hash(DefId { krate: *self, @@ -68,11 +69,11 @@ impl<'gcx> HashStable> for CrateNum { } } -impl<'gcx> ToStableHashKey> for CrateNum { +impl<'a> ToStableHashKey> for CrateNum { type KeyType = DefPathHash; #[inline] - fn to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash { let def_id = DefId { krate: *self, index: CRATE_DEF_INDEX }; def_id.to_stable_hash_key(hcx) } @@ -80,13 +81,13 @@ 
impl<'gcx> ToStableHashKey> for CrateNum { impl_stable_hash_for!(tuple_struct hir::ItemLocalId { index }); -impl<'gcx> ToStableHashKey> +impl<'a> ToStableHashKey> for hir::ItemLocalId { type KeyType = hir::ItemLocalId; #[inline] fn to_stable_hash_key(&self, - _: &StableHashingContext<'gcx>) + _: &StableHashingContext<'a>) -> hir::ItemLocalId { *self } @@ -99,9 +100,9 @@ for hir::ItemLocalId { // want to pick up on a reference changing its target, so we hash the NodeIds // in "DefPath Mode". -impl<'gcx> HashStable> for hir::ItemId { +impl<'a> HashStable> for hir::ItemId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::ItemId { id @@ -113,9 +114,9 @@ impl<'gcx> HashStable> for hir::ItemId { } } -impl<'gcx> HashStable> for hir::TraitItemId { +impl<'a> HashStable> for hir::TraitItemId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::TraitItemId { node_id @@ -127,9 +128,9 @@ impl<'gcx> HashStable> for hir::TraitItemId { } } -impl<'gcx> HashStable> for hir::ImplItemId { +impl<'a> HashStable> for hir::ImplItemId { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::ImplItemId { node_id @@ -141,11 +142,20 @@ impl<'gcx> HashStable> for hir::ImplItemId { } } +impl_stable_hash_for!(enum hir::ParamName { + Plain(name), + Fresh(index) +}); + impl_stable_hash_for!(enum hir::LifetimeName { + Param(param_name), Implicit, Underscore, Static, - Name(name) +}); + +impl_stable_hash_for!(struct hir::Label { + ident }); impl_stable_hash_for!(struct hir::Lifetime { @@ -154,13 +164,6 @@ impl_stable_hash_for!(struct hir::Lifetime { name }); -impl_stable_hash_for!(struct hir::LifetimeDef { - lifetime, - bounds, - pure_wrt_drop, - in_band -}); - impl_stable_hash_for!(struct hir::Path { span, def, @@ -168,21 +171,25 @@ 
impl_stable_hash_for!(struct hir::Path { }); impl_stable_hash_for!(struct hir::PathSegment { - name, + ident -> (ident.name), infer_types, - parameters + args }); -impl_stable_hash_for!(struct hir::PathParameters { - lifetimes, - types, +impl_stable_hash_for!(enum hir::GenericArg { + Lifetime(lt), + Type(ty) +}); + +impl_stable_hash_for!(struct hir::GenericArgs { + args, bindings, parenthesized }); -impl_stable_hash_for!(enum hir::TyParamBound { - TraitTyParamBound(poly_trait_ref, trait_bound_modifier), - RegionTyParamBound(lifetime) +impl_stable_hash_for!(enum hir::GenericBound { + Trait(poly_trait_ref, trait_bound_modifier), + Outlives(lifetime) }); impl_stable_hash_for!(enum hir::TraitBoundModifier { @@ -190,20 +197,32 @@ impl_stable_hash_for!(enum hir::TraitBoundModifier { Maybe }); -impl_stable_hash_for!(struct hir::TyParam { - name, +impl_stable_hash_for!(struct hir::GenericParam { id, - bounds, - default, - span, + name, pure_wrt_drop, - synthetic + attrs, + bounds, + span, + kind }); -impl_stable_hash_for!(enum hir::GenericParam { - Lifetime(lifetime_def), - Type(ty_param) -}); +impl<'a> HashStable> for hir::GenericParamKind { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + hir::GenericParamKind::Lifetime { in_band } => { + in_band.hash_stable(hcx, hasher); + } + hir::GenericParamKind::Type { ref default, synthetic } => { + default.hash_stable(hcx, hasher); + synthetic.hash_stable(hcx, hasher); + } + } + } +} impl_stable_hash_for!(struct hir::Generics { params, @@ -252,22 +271,27 @@ impl_stable_hash_for!(struct hir::MutTy { }); impl_stable_hash_for!(struct hir::MethodSig { - unsafety, - constness, - abi, + header, decl }); impl_stable_hash_for!(struct hir::TypeBinding { id, - name, + ident -> (ident.name), ty, span }); -impl<'gcx> HashStable> for hir::Ty { +impl_stable_hash_for!(struct hir::FnHeader { + unsafety, + constness, + asyncness, + abi 
+}); + +impl<'a> HashStable> for hir::Ty { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.while_hashing_hir_bodies(true, |hcx| { let hir::Ty { @@ -302,23 +326,23 @@ impl_stable_hash_for!(struct hir::BareFnTy { impl_stable_hash_for!(struct hir::ExistTy { generics, + impl_trait_fn, bounds }); -impl_stable_hash_for!(enum hir::Ty_ { - TySlice(t), - TyArray(t, body_id), - TyPtr(t), - TyRptr(lifetime, t), - TyBareFn(t), - TyNever, - TyTup(ts), - TyPath(qpath), - TyTraitObject(trait_refs, lifetime), - TyImplTraitExistential(existty, lifetimes), - TyTypeof(body_id), - TyErr, - TyInfer +impl_stable_hash_for!(enum hir::TyKind { + Slice(t), + Array(t, body_id), + Ptr(t), + Rptr(lifetime, t), + BareFn(t), + Never, + Tup(ts), + Path(qpath), + TraitObject(trait_refs, lifetime), + Typeof(body_id), + Err, + Infer }); impl_stable_hash_for!(struct hir::FnDecl { @@ -333,20 +357,12 @@ impl_stable_hash_for!(enum hir::FunctionRetTy { Return(t) }); -impl<'gcx> HashStable> for hir::TraitRef { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let hir::TraitRef { - ref path, - // Don't hash the ref_id. It is tracked via the thing it is used to access - ref_id: _, - } = *self; - - path.hash_stable(hcx, hasher); - } -} - +impl_stable_hash_for!(struct hir::TraitRef { + // Don't hash the ref_id. 
It is tracked via the thing it is used to access + ref_id -> _, + hir_ref_id -> _, + path, +}); impl_stable_hash_for!(struct hir::PolyTraitRef { bound_generic_params, @@ -369,53 +385,31 @@ impl_stable_hash_for!(struct hir::MacroDef { body }); +impl_stable_hash_for!(struct hir::Block { + stmts, + expr, + id -> _, + hir_id -> _, + rules, + span, + targeted_by_break, + recovered, +}); -impl<'gcx> HashStable> for hir::Block { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let hir::Block { - ref stmts, - ref expr, - id: _, - hir_id: _, - rules, - span, - targeted_by_break, - recovered, - } = *self; - - stmts.hash_stable(hcx, hasher); - expr.hash_stable(hcx, hasher); - rules.hash_stable(hcx, hasher); - span.hash_stable(hcx, hasher); - recovered.hash_stable(hcx, hasher); - targeted_by_break.hash_stable(hcx, hasher); - } -} - -impl<'gcx> HashStable> for hir::Pat { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let hir::Pat { - id: _, - hir_id: _, - ref node, - ref span - } = *self; - - - node.hash_stable(hcx, hasher); - span.hash_stable(hcx, hasher); - } -} +impl_stable_hash_for!(struct hir::Pat { + id -> _, + hir_id -> _, + node, + span, +}); impl_stable_hash_for_spanned!(hir::FieldPat); + impl_stable_hash_for!(struct hir::FieldPat { - name, + id -> _, + ident -> (ident.name), pat, - is_shorthand + is_shorthand, }); impl_stable_hash_for!(enum hir::BindingAnnotation { @@ -444,28 +438,28 @@ impl_stable_hash_for!(enum hir::PatKind { Slice(one, two, three) }); -impl_stable_hash_for!(enum hir::BinOp_ { - BiAdd, - BiSub, - BiMul, - BiDiv, - BiRem, - BiAnd, - BiOr, - BiBitXor, - BiBitAnd, - BiBitOr, - BiShl, - BiShr, - BiEq, - BiLt, - BiLe, - BiNe, - BiGe, - BiGt +impl_stable_hash_for!(enum hir::BinOpKind { + Add, + Sub, + Mul, + Div, + Rem, + And, + Or, + BitXor, + BitAnd, + BitOr, + Shl, + Shr, + Eq, + Lt, + Le, + Ne, + Ge, + Gt }); -impl_stable_hash_for_spanned!(hir::BinOp_); 
+impl_stable_hash_for_spanned!(hir::BinOpKind); impl_stable_hash_for!(enum hir::UnOp { UnDeref, @@ -473,7 +467,7 @@ impl_stable_hash_for!(enum hir::UnOp { UnNeg }); -impl_stable_hash_for_spanned!(hir::Stmt_); +impl_stable_hash_for_spanned!(hir::StmtKind); impl_stable_hash_for!(struct hir::Local { pat, @@ -486,10 +480,10 @@ impl_stable_hash_for!(struct hir::Local { source }); -impl_stable_hash_for_spanned!(hir::Decl_); -impl_stable_hash_for!(enum hir::Decl_ { - DeclLocal(local), - DeclItem(item_id) +impl_stable_hash_for_spanned!(hir::DeclKind); +impl_stable_hash_for!(enum hir::DeclKind { + Local(local), + Item(item_id) }); impl_stable_hash_for!(struct hir::Arm { @@ -500,10 +494,11 @@ impl_stable_hash_for!(struct hir::Arm { }); impl_stable_hash_for!(struct hir::Field { - name, + id -> _, + ident, expr, span, - is_shorthand + is_shorthand, }); impl_stable_hash_for_spanned!(ast::Name); @@ -521,9 +516,15 @@ impl_stable_hash_for!(enum hir::UnsafeSource { UserProvided }); -impl<'gcx> HashStable> for hir::Expr { +impl_stable_hash_for!(struct hir::AnonConst { + id, + hir_id, + body +}); + +impl<'a> HashStable> for hir::Expr { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.while_hashing_hir_bodies(true, |hcx| { let hir::Expr { @@ -541,37 +542,36 @@ impl<'gcx> HashStable> for hir::Expr { } } -impl_stable_hash_for!(enum hir::Expr_ { - ExprBox(sub), - ExprArray(subs), - ExprCall(callee, args), - ExprMethodCall(segment, span, args), - ExprTup(fields), - ExprBinary(op, lhs, rhs), - ExprUnary(op, operand), - ExprLit(value), - ExprCast(expr, t), - ExprType(expr, t), - ExprIf(cond, then, els), - ExprWhile(cond, body, label), - ExprLoop(body, label, loop_src), - ExprMatch(matchee, arms, match_src), - ExprClosure(capture_clause, decl, body_id, span, gen), - ExprBlock(blk), - ExprAssign(lhs, rhs), - ExprAssignOp(op, lhs, rhs), - ExprField(owner, field_name), - ExprTupField(owner, idx), - 
ExprIndex(lhs, rhs), - ExprPath(path), - ExprAddrOf(mutability, sub), - ExprBreak(destination, sub), - ExprAgain(destination), - ExprRet(val), - ExprInlineAsm(asm, inputs, outputs), - ExprStruct(path, fields, base), - ExprRepeat(val, times), - ExprYield(val) +impl_stable_hash_for!(enum hir::ExprKind { + Box(sub), + Array(subs), + Call(callee, args), + MethodCall(segment, span, args), + Tup(fields), + Binary(op, lhs, rhs), + Unary(op, operand), + Lit(value), + Cast(expr, t), + Type(expr, t), + If(cond, then, els), + While(cond, body, label), + Loop(body, label, loop_src), + Match(matchee, arms, match_src), + Closure(capture_clause, decl, body_id, span, gen), + Block(blk, label), + Assign(lhs, rhs), + AssignOp(op, lhs, rhs), + Field(owner, ident), + Index(lhs, rhs), + Path(path), + AddrOf(mutability, sub), + Break(destination, sub), + Continue(destination), + Ret(val), + InlineAsm(asm, inputs, outputs), + Struct(path, fields, base), + Repeat(val, times), + Yield(val) }); impl_stable_hash_for!(enum hir::LocalSource { @@ -585,9 +585,9 @@ impl_stable_hash_for!(enum hir::LoopSource { ForLoop }); -impl<'gcx> HashStable> for hir::MatchSource { +impl<'a> HashStable> for hir::MatchSource { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use hir::MatchSource; @@ -606,6 +606,11 @@ impl<'gcx> HashStable> for hir::MatchSource { } } +impl_stable_hash_for!(enum hir::GeneratorMovability { + Static, + Movable +}); + impl_stable_hash_for!(enum hir::CaptureClause { CaptureByValue, CaptureByRef @@ -614,49 +619,31 @@ impl_stable_hash_for!(enum hir::CaptureClause { impl_stable_hash_for_spanned!(usize); impl_stable_hash_for!(struct hir::Destination { - ident, + label, target_id }); impl_stable_hash_for_spanned!(ast::Ident); -impl_stable_hash_for!(enum hir::LoopIdResult { - Ok(node_id), - Err(loop_id_error) -}); - impl_stable_hash_for!(enum hir::LoopIdError { OutsideLoopScope, UnlabeledCfInWhileCondition, 
UnresolvedLabel }); -impl_stable_hash_for!(enum hir::ScopeTarget { - Block(node_id), - Loop(loop_id_result) +impl_stable_hash_for!(struct ast::Ident { + name, + span, }); -impl<'gcx> HashStable> for ast::Ident { +impl<'a> HashStable> for hir::TraitItem { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let ast::Ident { - ref name, - ctxt: _ // Ignore this - } = *self; - - name.hash_stable(hcx, hasher); - } -} - -impl<'gcx> HashStable> for hir::TraitItem { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::TraitItem { id: _, hir_id: _, - name, + ident, ref attrs, ref generics, ref node, @@ -664,7 +651,7 @@ impl<'gcx> HashStable> for hir::TraitItem { } = *self; hcx.hash_hir_item_like(|hcx| { - name.hash_stable(hcx, hasher); + ident.name.hash_stable(hcx, hasher); attrs.hash_stable(hcx, hasher); generics.hash_stable(hcx, hasher); node.hash_stable(hcx, hasher); @@ -684,14 +671,14 @@ impl_stable_hash_for!(enum hir::TraitItemKind { Type(bounds, rhs) }); -impl<'gcx> HashStable> for hir::ImplItem { +impl<'a> HashStable> for hir::ImplItem { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::ImplItem { id: _, hir_id: _, - name, + ident, ref vis, defaultness, ref attrs, @@ -701,7 +688,7 @@ impl<'gcx> HashStable> for hir::ImplItem { } = *self; hcx.hash_hir_item_like(|hcx| { - name.hash_stable(hcx, hasher); + ident.name.hash_stable(hcx, hasher); vis.hash_stable(hcx, hasher); defaultness.hash_stable(hcx, hasher); attrs.hash_stable(hcx, hasher); @@ -715,23 +702,32 @@ impl<'gcx> HashStable> for hir::ImplItem { impl_stable_hash_for!(enum hir::ImplItemKind { Const(t, body), Method(sig, body), + Existential(bounds), Type(t) }); -impl<'gcx> HashStable> for hir::Visibility { +impl_stable_hash_for!(enum ::syntax::ast::CrateSugar { + JustCrate, + PubCrate, +}); + 
+impl<'a> HashStable> for hir::VisibilityKind { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { - hir::Visibility::Public | - hir::Visibility::Crate | - hir::Visibility::Inherited => { + hir::VisibilityKind::Public | + hir::VisibilityKind::Inherited => { // No fields to hash. } - hir::Visibility::Restricted { ref path, id } => { + hir::VisibilityKind::Crate(sugar) => { + sugar.hash_stable(hcx, hasher); + } + hir::VisibilityKind::Restricted { ref path, id, hir_id } => { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { id.hash_stable(hcx, hasher); + hir_id.hash_stable(hcx, hasher); }); path.hash_stable(hcx, hasher); } @@ -739,9 +735,11 @@ impl<'gcx> HashStable> for hir::Visibility { } } -impl<'gcx> HashStable> for hir::Defaultness { +impl_stable_hash_for_spanned!(hir::VisibilityKind); + +impl<'a> HashStable> for hir::Defaultness { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -760,19 +758,32 @@ impl_stable_hash_for!(enum hir::ImplPolarity { Negative }); -impl<'gcx> HashStable> for hir::Mod { +impl<'a> HashStable> for hir::Mod { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::Mod { - inner, - // We are not hashing the IDs of the items contained in the module. - // This is harmless and matches the current behavior but it's not - // actually correct. See issue #40876. - item_ids: _, + inner: ref inner_span, + ref item_ids, } = *self; - inner.hash_stable(hcx, hasher); + inner_span.hash_stable(hcx, hasher); + + // Combining the DefPathHashes directly is faster than feeding them + // into the hasher. 
Because we use a commutative combine, we also don't + // have to sort the array. + let item_ids_hash = item_ids + .iter() + .map(|id| { + let (def_path_hash, local_id) = id.id.to_stable_hash_key(hcx); + debug_assert_eq!(local_id, hir::ItemLocalId(0)); + def_path_hash.0 + }).fold(Fingerprint::ZERO, |a, b| { + a.combine_commutative(b) + }); + + item_ids.len().hash_stable(hcx, hasher); + item_ids_hash.hash_stable(hcx, hasher); } } @@ -785,14 +796,14 @@ impl_stable_hash_for!(struct hir::EnumDef { variants }); -impl_stable_hash_for!(struct hir::Variant_ { +impl_stable_hash_for!(struct hir::VariantKind { name, attrs, data, disr_expr }); -impl_stable_hash_for_spanned!(hir::Variant_); +impl_stable_hash_for_spanned!(hir::VariantKind); impl_stable_hash_for!(enum hir::UseKind { Single, @@ -802,7 +813,7 @@ impl_stable_hash_for!(enum hir::UseKind { impl_stable_hash_for!(struct hir::StructField { span, - name, + ident -> (ident.name), vis, id, ty, @@ -815,9 +826,9 @@ impl_stable_hash_for!(enum hir::VariantData { Unit(id) }); -impl<'gcx> HashStable> for hir::Item { +impl<'a> HashStable> for hir::Item { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::Item { name, @@ -839,27 +850,28 @@ impl<'gcx> HashStable> for hir::Item { } } -impl_stable_hash_for!(enum hir::Item_ { - ItemExternCrate(name), - ItemUse(path, use_kind), - ItemStatic(ty, mutability, body_id), - ItemConst(ty, body_id), - ItemFn(fn_decl, unsafety, constness, abi, generics, body_id), - ItemMod(module), - ItemForeignMod(foreign_mod), - ItemGlobalAsm(global_asm), - ItemTy(ty, generics), - ItemEnum(enum_def, generics), - ItemStruct(variant_data, generics), - ItemUnion(variant_data, generics), - ItemTrait(is_auto, unsafety, generics, bounds, item_refs), - ItemTraitAlias(generics, bounds), - ItemImpl(unsafety, impl_polarity, impl_defaultness, generics, trait_ref, ty, impl_item_refs) +impl_stable_hash_for!(enum hir::ItemKind { + 
ExternCrate(orig_name), + Use(path, use_kind), + Static(ty, mutability, body_id), + Const(ty, body_id), + Fn(fn_decl, header, generics, body_id), + Mod(module), + ForeignMod(foreign_mod), + GlobalAsm(global_asm), + Ty(ty, generics), + Existential(exist), + Enum(enum_def, generics), + Struct(variant_data, generics), + Union(variant_data, generics), + Trait(is_auto, unsafety, generics, bounds, item_refs), + TraitAlias(generics, bounds), + Impl(unsafety, impl_polarity, impl_defaultness, generics, trait_ref, ty, impl_item_refs) }); impl_stable_hash_for!(struct hir::TraitItemRef { id, - name, + ident -> (ident.name), kind, span, defaultness @@ -867,21 +879,21 @@ impl_stable_hash_for!(struct hir::TraitItemRef { impl_stable_hash_for!(struct hir::ImplItemRef { id, - name, + ident -> (ident.name), kind, span, vis, defaultness }); -impl<'gcx> HashStable> -for hir::AssociatedItemKind { +impl<'a> HashStable> for hir::AssociatedItemKind { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { hir::AssociatedItemKind::Const | + hir::AssociatedItemKind::Existential | hir::AssociatedItemKind::Type => { // No fields to hash. 
} @@ -901,16 +913,16 @@ impl_stable_hash_for!(struct hir::ForeignItem { vis }); -impl_stable_hash_for!(enum hir::ForeignItem_ { - ForeignItemFn(fn_decl, arg_names, generics), - ForeignItemStatic(ty, is_mutbl), - ForeignItemType +impl_stable_hash_for!(enum hir::ForeignItemKind { + Fn(fn_decl, arg_names, generics), + Static(ty, is_mutbl), + Type }); -impl_stable_hash_for!(enum hir::Stmt_ { - StmtDecl(decl, id), - StmtExpr(expr, id), - StmtSemi(expr, id) +impl_stable_hash_for!(enum hir::StmtKind { + Decl(decl, id), + Expr(expr, id), + Semi(expr, id) }); impl_stable_hash_for!(struct hir::Arg { @@ -919,9 +931,9 @@ impl_stable_hash_for!(struct hir::Arg { hir_id }); -impl<'gcx> HashStable> for hir::Body { +impl<'a> HashStable> for hir::Body { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let hir::Body { ref arguments, @@ -937,12 +949,12 @@ impl<'gcx> HashStable> for hir::Body { } } -impl<'gcx> ToStableHashKey> for hir::BodyId { +impl<'a> ToStableHashKey> for hir::BodyId { type KeyType = (DefPathHash, hir::ItemLocalId); #[inline] fn to_stable_hash_key(&self, - hcx: &StableHashingContext<'gcx>) + hcx: &StableHashingContext<'a>) -> (DefPathHash, hir::ItemLocalId) { let hir::BodyId { node_id } = *self; node_id.to_stable_hash_key(hcx) @@ -955,45 +967,22 @@ impl_stable_hash_for!(struct hir::InlineAsmOutput { is_indirect }); -impl<'gcx> HashStable> for hir::GlobalAsm { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let hir::GlobalAsm { - asm, - ctxt: _ - } = *self; +impl_stable_hash_for!(struct hir::GlobalAsm { + asm, + ctxt -> _, // This is used for error reporting +}); - asm.hash_stable(hcx, hasher); - } -} - -impl<'gcx> HashStable> for hir::InlineAsm { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let hir::InlineAsm { - asm, - asm_str_style, - ref outputs, - ref inputs, - ref clobbers, - 
volatile, - alignstack, - dialect, - ctxt: _, // This is used for error reporting - } = *self; - - asm.hash_stable(hcx, hasher); - asm_str_style.hash_stable(hcx, hasher); - outputs.hash_stable(hcx, hasher); - inputs.hash_stable(hcx, hasher); - clobbers.hash_stable(hcx, hasher); - volatile.hash_stable(hcx, hasher); - alignstack.hash_stable(hcx, hasher); - dialect.hash_stable(hcx, hasher); - } -} +impl_stable_hash_for!(struct hir::InlineAsm { + asm, + asm_str_style, + outputs, + inputs, + clobbers, + volatile, + alignstack, + dialect, + ctxt -> _, // This is used for error reporting +}); impl_stable_hash_for!(enum hir::def::CtorKind { Fn, @@ -1001,16 +990,25 @@ impl_stable_hash_for!(enum hir::def::CtorKind { Fictive }); +impl_stable_hash_for!(enum hir::def::NonMacroAttrKind { + Builtin, + Tool, + DeriveHelper, + Custom, +}); + impl_stable_hash_for!(enum hir::def::Def { Mod(def_id), Struct(def_id), Union(def_id), Enum(def_id), + Existential(def_id), Variant(def_id), Trait(def_id), TyAlias(def_id), TraitAlias(def_id), AssociatedTy(def_id), + AssociatedExistential(def_id), PrimTy(prim_ty), TyParam(def_id), SelfTy(trait_def_id, impl_def_id), @@ -1026,7 +1024,8 @@ impl_stable_hash_for!(enum hir::def::Def { Upvar(def_id, index, expr_id), Label(node_id), Macro(def_id, macro_kind), - GlobalAsm(def_id), + ToolMod, + NonMacroAttr(attr_kind), Err }); @@ -1045,28 +1044,31 @@ impl_stable_hash_for!(enum hir::Unsafety { Normal }); +impl_stable_hash_for!(enum hir::IsAsync { + Async, + NotAsync +}); impl_stable_hash_for!(enum hir::Constness { Const, NotConst }); -impl<'gcx> HashStable> -for hir::def_id::DefIndex { +impl<'a> HashStable> for hir::def_id::DefIndex { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.local_def_path_hash(*self).hash_stable(hcx, hasher); } } -impl<'gcx> ToStableHashKey> +impl<'a> ToStableHashKey> for hir::def_id::DefIndex { type KeyType = DefPathHash; #[inline] - fn 
to_stable_hash_key(&self, hcx: &StableHashingContext<'gcx>) -> DefPathHash { + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> DefPathHash { hcx.local_def_path_hash(*self) } } @@ -1075,14 +1077,17 @@ impl_stable_hash_for!(struct hir::def::Export { ident, def, vis, - span, - is_import + span }); -impl<'gcx> HashStable> -for ::middle::lang_items::LangItem { +impl_stable_hash_for!(struct ::middle::lib_features::LibFeatures { + stable, + unstable +}); + +impl<'a> HashStable> for ::middle::lang_items::LangItem { fn hash_stable(&self, - _: &mut StableHashingContext<'gcx>, + _: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { ::std::hash::Hash::hash(self, hasher); } @@ -1093,10 +1098,9 @@ impl_stable_hash_for!(struct ::middle::lang_items::LanguageItems { missing }); -impl<'gcx> HashStable> -for hir::TraitCandidate { +impl<'a> HashStable> for hir::TraitCandidate { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { let hir::TraitCandidate { @@ -1110,11 +1114,11 @@ for hir::TraitCandidate { } } -impl<'gcx> ToStableHashKey> for hir::TraitCandidate { +impl<'a> ToStableHashKey> for hir::TraitCandidate { type KeyType = (DefPathHash, Option<(DefPathHash, hir::ItemLocalId)>); fn to_stable_hash_key(&self, - hcx: &StableHashingContext<'gcx>) + hcx: &StableHashingContext<'a>) -> Self::KeyType { let hir::TraitCandidate { def_id, @@ -1128,6 +1132,31 @@ impl<'gcx> ToStableHashKey> for hir::TraitCandidate { } } +impl_stable_hash_for!(struct hir::CodegenFnAttrs { + flags, + inline, + export_name, + target_features, + linkage, + link_section, +}); + +impl<'hir> HashStable> for hir::CodegenFnAttrFlags +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'hir>, + hasher: &mut StableHasher) { + self.bits().hash_stable(hcx, hasher); + } +} + +impl<'hir> HashStable> for attr::InlineAttr { + fn hash_stable(&self, + 
hcx: &mut StableHashingContext<'hir>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + } +} impl_stable_hash_for!(struct hir::Freevar { def, diff --git a/src/librustc/ich/impls_mir.rs b/src/librustc/ich/impls_mir.rs index f46b590d2dc5..38ea536b4ee7 100644 --- a/src/librustc/ich/impls_mir.rs +++ b/src/librustc/ich/impls_mir.rs @@ -20,27 +20,45 @@ use std::mem; impl_stable_hash_for!(struct mir::GeneratorLayout<'tcx> { fields }); impl_stable_hash_for!(struct mir::SourceInfo { span, scope }); impl_stable_hash_for!(enum mir::Mutability { Mut, Not }); -impl_stable_hash_for!(enum mir::BorrowKind { Shared, Unique, Mut }); impl_stable_hash_for!(enum mir::LocalKind { Var, Temp, Arg, ReturnPointer }); impl_stable_hash_for!(struct mir::LocalDecl<'tcx> { mutability, ty, name, source_info, + visibility_scope, internal, - syntactic_scope, is_user_variable }); -impl_stable_hash_for!(struct mir::UpvarDecl { debug_name, by_ref, mutability }); +impl_stable_hash_for!(struct mir::UpvarDecl { debug_name, var_hir_id, by_ref, mutability }); impl_stable_hash_for!(struct mir::BasicBlockData<'tcx> { statements, terminator, is_cleanup }); -impl_stable_hash_for!(struct mir::UnsafetyViolation { source_info, description, kind }); +impl_stable_hash_for!(struct mir::UnsafetyViolation { source_info, description, details, kind }); impl_stable_hash_for!(struct mir::UnsafetyCheckResult { violations, unsafe_blocks }); -impl<'gcx> HashStable> +impl<'a> HashStable> +for mir::BorrowKind { + #[inline] + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + mir::BorrowKind::Shared | + mir::BorrowKind::Unique => {} + mir::BorrowKind::Mut { allow_two_phase_borrow } => { + allow_two_phase_borrow.hash_stable(hcx, hasher); + } + } + } +} + + +impl<'a> HashStable> for mir::UnsafetyViolationKind { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + 
hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -61,12 +79,12 @@ impl_stable_hash_for!(struct mir::Terminator<'tcx> { source_info }); -impl<'gcx, T> HashStable> for mir::ClearCrossCrate - where T: HashStable> +impl<'a, 'gcx, T> HashStable> for mir::ClearCrossCrate + where T: HashStable> { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -78,61 +96,61 @@ impl<'gcx, T> HashStable> for mir::ClearCrossCrate } } -impl<'gcx> HashStable> for mir::Local { +impl<'a> HashStable> for mir::Local { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'gcx> HashStable> for mir::BasicBlock { +impl<'a> HashStable> for mir::BasicBlock { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'gcx> HashStable> for mir::Field { +impl<'a> HashStable> for mir::Field { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'gcx> HashStable> -for mir::VisibilityScope { +impl<'a> HashStable> +for mir::SourceScope { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'gcx> HashStable> for mir::Promoted { +impl<'a> HashStable> for mir::Promoted { #[inline] fn 
hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use rustc_data_structures::indexed_vec::Idx; self.index().hash_stable(hcx, hasher); } } -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for mir::TerminatorKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -201,37 +219,20 @@ for mir::TerminatorKind<'gcx> { target.hash_stable(hcx, hasher); } } - } - } -} - -impl<'gcx> HashStable> -for mir::AssertMessage<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - - match *self { - mir::AssertMessage::BoundsCheck { ref len, ref index } => { - len.hash_stable(hcx, hasher); - index.hash_stable(hcx, hasher); + mir::TerminatorKind::FalseUnwind { ref real_target, ref unwind } => { + real_target.hash_stable(hcx, hasher); + unwind.hash_stable(hcx, hasher); } - mir::AssertMessage::Math(ref const_math_err) => { - const_math_err.hash_stable(hcx, hasher); - } - mir::AssertMessage::GeneratorResumedAfterReturn => (), - mir::AssertMessage::GeneratorResumedAfterPanic => (), } } } impl_stable_hash_for!(struct mir::Statement<'tcx> { source_info, kind }); -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for mir::StatementKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -240,6 +241,9 @@ for mir::StatementKind<'gcx> { place.hash_stable(hcx, hasher); rvalue.hash_stable(hcx, hasher); } + mir::StatementKind::ReadForMatch(ref place) => { + place.hash_stable(hcx, hasher); + } mir::StatementKind::SetDiscriminant { ref place, variant_index } => { place.hash_stable(hcx, hasher); variant_index.hash_stable(hcx, hasher); @@ -255,6 +259,10 
@@ for mir::StatementKind<'gcx> { op.hash_stable(hcx, hasher); places.hash_stable(hcx, hasher); } + mir::StatementKind::UserAssertTy(ref c_ty, ref local) => { + c_ty.hash_stable(hcx, hasher); + local.hash_stable(hcx, hasher); + } mir::StatementKind::Nop => {} mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { asm.hash_stable(hcx, hasher); @@ -265,12 +273,12 @@ for mir::StatementKind<'gcx> { } } -impl<'gcx, T> HashStable> +impl<'a, 'gcx, T> HashStable> for mir::ValidationOperand<'gcx, T> - where T: HashStable> + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { self.place.hash_stable(hcx, hasher); @@ -282,9 +290,9 @@ impl<'gcx, T> HashStable> impl_stable_hash_for!(enum mir::ValidationOp { Acquire, Release, Suspend(region_scope) }); -impl<'gcx> HashStable> for mir::Place<'gcx> { +impl<'a, 'gcx> HashStable> for mir::Place<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -294,6 +302,9 @@ impl<'gcx> HashStable> for mir::Place<'gcx> { mir::Place::Static(ref statik) => { statik.hash_stable(hcx, hasher); } + mir::Place::Promoted(ref promoted) => { + promoted.hash_stable(hcx, hasher); + } mir::Place::Projection(ref place_projection) => { place_projection.hash_stable(hcx, hasher); } @@ -301,14 +312,14 @@ impl<'gcx> HashStable> for mir::Place<'gcx> { } } -impl<'gcx, B, V, T> HashStable> +impl<'a, 'gcx, B, V, T> HashStable> for mir::Projection<'gcx, B, V, T> - where B: HashStable>, - V: HashStable>, - T: HashStable> + where B: HashStable>, + V: HashStable>, + T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let mir::Projection { ref base, @@ -320,13 +331,13 @@ for mir::Projection<'gcx, B, V, T> } } 
-impl<'gcx, V, T> HashStable> +impl<'a, 'gcx, V, T> HashStable> for mir::ProjectionElem<'gcx, V, T> - where V: HashStable>, - T: HashStable> + where V: HashStable>, + T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -355,14 +366,14 @@ for mir::ProjectionElem<'gcx, V, T> } } -impl_stable_hash_for!(struct mir::VisibilityScopeData { span, parent_scope }); -impl_stable_hash_for!(struct mir::VisibilityScopeInfo { +impl_stable_hash_for!(struct mir::SourceScopeData { span, parent_scope }); +impl_stable_hash_for!(struct mir::SourceScopeLocalData { lint_root, safety }); -impl<'gcx> HashStable> for mir::Safety { +impl<'a> HashStable> for mir::Safety { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -377,9 +388,9 @@ impl<'gcx> HashStable> for mir::Safety { } } -impl<'gcx> HashStable> for mir::Operand<'gcx> { +impl<'a, 'gcx> HashStable> for mir::Operand<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -397,9 +408,9 @@ impl<'gcx> HashStable> for mir::Operand<'gcx> { } } -impl<'gcx> HashStable> for mir::Rvalue<'gcx> { +impl<'a, 'gcx> HashStable> for mir::Rvalue<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -457,10 +468,10 @@ impl_stable_hash_for!(enum mir::CastKind { Unsize }); -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for mir::AggregateKind<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { 
mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -478,10 +489,10 @@ for mir::AggregateKind<'gcx> { def_id.hash_stable(hcx, hasher); substs.hash_stable(hcx, hasher); } - mir::AggregateKind::Generator(def_id, ref substs, ref interior) => { + mir::AggregateKind::Generator(def_id, ref substs, movability) => { def_id.hash_stable(hcx, hasher); substs.hash_stable(hcx, hasher); - interior.hash_stable(hcx, hasher); + movability.hash_stable(hcx, hasher); } } } @@ -519,24 +530,13 @@ impl_stable_hash_for!(enum mir::NullOp { impl_stable_hash_for!(struct mir::Constant<'tcx> { span, ty, literal }); -impl<'gcx> HashStable> for mir::Literal<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - mem::discriminant(self).hash_stable(hcx, hasher); - match *self { - mir::Literal::Value { ref value } => { - value.hash_stable(hcx, hasher); - } - mir::Literal::Promoted { index } => { - index.hash_stable(hcx, hasher); - } - } - } -} - impl_stable_hash_for!(struct mir::Location { block, statement_index }); +impl_stable_hash_for!(struct mir::BorrowCheckResult<'tcx> { + closure_requirements, + used_mut_upvars +}); + impl_stable_hash_for!(struct mir::ClosureRegionRequirements<'tcx> { num_external_vids, outlives_requirements @@ -548,9 +548,9 @@ impl_stable_hash_for!(struct mir::ClosureOutlivesRequirement<'tcx> { blame_span }); -impl<'gcx> HashStable> for mir::ClosureOutlivesSubject<'gcx> { +impl<'a, 'gcx> HashStable> for mir::ClosureOutlivesSubject<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -563,3 +563,5 @@ impl<'gcx> HashStable> for mir::ClosureOutlivesSubjec } } } + +impl_stable_hash_for!(struct mir::interpret::GlobalId<'tcx> { instance, promoted }); diff --git a/src/librustc/ich/impls_misc.rs b/src/librustc/ich/impls_misc.rs index 951315fb4a84..db4041791754 100644 --- 
a/src/librustc/ich/impls_misc.rs +++ b/src/librustc/ich/impls_misc.rs @@ -20,7 +20,7 @@ impl_stable_hash_for!(enum ::session::search_paths::PathKind { All }); -impl_stable_hash_for!(enum ::rustc_back::PanicStrategy { +impl_stable_hash_for!(enum ::rustc_target::spec::PanicStrategy { Abort, Unwind }); diff --git a/src/librustc/ich/impls_syntax.rs b/src/librustc/ich/impls_syntax.rs index c31a5c9d86d7..d086d3bd28df 100644 --- a/src/librustc/ich/impls_syntax.rs +++ b/src/librustc/ich/impls_syntax.rs @@ -17,8 +17,9 @@ use std::hash as std_hash; use std::mem; use syntax::ast; +use syntax::feature_gate; use syntax::parse::token; -use syntax::symbol::InternedString; +use syntax::symbol::{InternedString, LocalInternedString}; use syntax::tokenstream; use syntax_pos::FileMap; @@ -28,44 +29,64 @@ use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, StableHasher, StableHasherResult}; use rustc_data_structures::accumulate_vec::AccumulateVec; -impl<'gcx> HashStable> for InternedString { +impl<'a> HashStable> for InternedString { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + self.with(|s| s.hash_stable(hcx, hasher)) + } +} + +impl<'a> ToStableHashKey> for InternedString { + type KeyType = InternedString; + + #[inline] + fn to_stable_hash_key(&self, + _: &StableHashingContext<'a>) + -> InternedString { + self.clone() + } +} + +impl<'a> HashStable> for LocalInternedString { + #[inline] + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let s: &str = &**self; s.hash_stable(hcx, hasher); } } -impl<'gcx> ToStableHashKey> for InternedString { - type KeyType = InternedString; +impl<'a> ToStableHashKey> for LocalInternedString { + type KeyType = LocalInternedString; #[inline] fn to_stable_hash_key(&self, - _: &StableHashingContext<'gcx>) - -> InternedString { + _: &StableHashingContext<'a>) + -> LocalInternedString { self.clone() } } 
-impl<'gcx> HashStable> for ast::Name { +impl<'a> HashStable> for ast::Name { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { self.as_str().hash_stable(hcx, hasher); } } -impl<'gcx> ToStableHashKey> for ast::Name { +impl<'a> ToStableHashKey> for ast::Name { type KeyType = InternedString; #[inline] fn to_stable_hash_key(&self, - _: &StableHashingContext<'gcx>) + _: &StableHashingContext<'a>) -> InternedString { - self.as_str() + self.as_interned_str() } } @@ -77,11 +98,12 @@ impl_stable_hash_for!(enum ::syntax::ast::AsmDialect { impl_stable_hash_for!(enum ::syntax::ext::base::MacroKind { Bang, Attr, - Derive + Derive, + ProcMacroStub, }); -impl_stable_hash_for!(enum ::syntax::abi::Abi { +impl_stable_hash_for!(enum ::rustc_target::spec::abi::Abi { Cdecl, Stdcall, Fastcall, @@ -93,6 +115,7 @@ impl_stable_hash_for!(enum ::syntax::abi::Abi { PtxKernel, Msp430Interrupt, X86Interrupt, + AmdGpuKernel, Rust, C, System, @@ -110,10 +133,19 @@ impl_stable_hash_for!(struct ::syntax::attr::Stability { rustc_const_unstable }); -impl<'gcx> HashStable> +impl<'a> HashStable> +for ::syntax::edition::Edition { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + } +} + +impl<'a> HashStable> for ::syntax::attr::StabilityLevel { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -161,13 +193,13 @@ impl_stable_hash_for!(enum ::syntax::ast::FloatTy { F32, F64 }); impl_stable_hash_for!(enum ::syntax::ast::Unsafety { Unsafe, Normal }); impl_stable_hash_for!(enum ::syntax::ast::Constness { Const, NotConst }); impl_stable_hash_for!(enum ::syntax::ast::Defaultness { Default, Final }); -impl_stable_hash_for!(struct ::syntax::ast::Lifetime { id, span, ident }); 
+impl_stable_hash_for!(struct ::syntax::ast::Lifetime { id, ident }); impl_stable_hash_for!(enum ::syntax::ast::StrStyle { Cooked, Raw(pounds) }); impl_stable_hash_for!(enum ::syntax::ast::AttrStyle { Outer, Inner }); -impl<'gcx> HashStable> for [ast::Attribute] { +impl<'a> HashStable> for [ast::Attribute] { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { if self.len() == 0 { self.len().hash_stable(hcx, hasher); @@ -178,8 +210,7 @@ impl<'gcx> HashStable> for [ast::Attribute] { let filtered: AccumulateVec<[&ast::Attribute; 8]> = self .iter() .filter(|attr| { - !attr.is_sugared_doc && - attr.name().map(|name| !hcx.is_ignored_attr(name)).unwrap_or(true) + !attr.is_sugared_doc && !hcx.is_ignored_attr(attr.name()) }) .collect(); @@ -190,12 +221,23 @@ impl<'gcx> HashStable> for [ast::Attribute] { } } -impl<'gcx> HashStable> for ast::Attribute { +impl<'a> HashStable> for ast::Path { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + self.segments.len().hash_stable(hcx, hasher); + for segment in &self.segments { + segment.ident.name.hash_stable(hcx, hasher); + } + } +} + +impl<'a> HashStable> for ast::Attribute { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { // Make sure that these have been filtered out. 
- debug_assert!(self.name().map(|name| !hcx.is_ignored_attr(name)).unwrap_or(true)); + debug_assert!(!hcx.is_ignored_attr(self.name())); debug_assert!(!self.is_sugared_doc); let ast::Attribute { @@ -208,10 +250,7 @@ impl<'gcx> HashStable> for ast::Attribute { } = *self; style.hash_stable(hcx, hasher); - path.segments.len().hash_stable(hcx, hasher); - for segment in &path.segments { - segment.identifier.name.hash_stable(hcx, hasher); - } + path.hash_stable(hcx, hasher); for tt in tokens.trees() { tt.hash_stable(hcx, hasher); } @@ -219,10 +258,10 @@ impl<'gcx> HashStable> for ast::Attribute { } } -impl<'gcx> HashStable> +impl<'a> HashStable> for tokenstream::TokenTree { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -241,10 +280,10 @@ for tokenstream::TokenTree { } } -impl<'gcx> HashStable> +impl<'a> HashStable> for tokenstream::TokenStream { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { for sub_tt in self.trees() { sub_tt.hash_stable(hcx, hasher); @@ -252,9 +291,11 @@ for tokenstream::TokenStream { } } -fn hash_token<'gcx, W: StableHasherResult>(token: &token::Token, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { +fn hash_token<'a, 'gcx, W: StableHasherResult>( + token: &token::Token, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher, +) { mem::discriminant(token).hash_stable(hcx, hasher); match *token { token::Token::Eq | @@ -284,7 +325,7 @@ fn hash_token<'gcx, W: StableHasherResult>(token: &token::Token, token::Token::Pound | token::Token::Dollar | token::Token::Question | - token::Token::Underscore | + token::Token::SingleQuote | token::Token::Whitespace | token::Token::Comment | token::Token::Eof => {} @@ -316,7 +357,10 @@ fn hash_token<'gcx, W: StableHasherResult>(token: &token::Token, 
opt_name.hash_stable(hcx, hasher); } - token::Token::Ident(ident) | + token::Token::Ident(ident, is_raw) => { + ident.name.hash_stable(hcx, hasher); + is_raw.hash_stable(hcx, hasher); + } token::Token::Lifetime(ident) => ident.name.hash_stable(hcx, hasher), token::Token::Interpolated(_) => { @@ -336,7 +380,7 @@ impl_stable_hash_for!(enum ::syntax::ast::NestedMetaItemKind { }); impl_stable_hash_for!(struct ::syntax::ast::MetaItem { - name, + ident, node, span }); @@ -349,14 +393,12 @@ impl_stable_hash_for!(enum ::syntax::ast::MetaItemKind { impl_stable_hash_for!(struct ::syntax_pos::hygiene::ExpnInfo { call_site, - callee -}); - -impl_stable_hash_for!(struct ::syntax_pos::hygiene::NameAndSpan { + def_site, format, allow_internal_unstable, allow_internal_unsafe, - span + local_inner_macros, + edition }); impl_stable_hash_for!(enum ::syntax_pos::hygiene::ExpnFormat { @@ -366,9 +408,11 @@ impl_stable_hash_for!(enum ::syntax_pos::hygiene::ExpnFormat { }); impl_stable_hash_for!(enum ::syntax_pos::hygiene::CompilerDesugaringKind { - BackArrow, - DotFill, - QuestionMark + Async, + QuestionMark, + ExistentialReturnType, + ForLoop, + Catch }); impl_stable_hash_for!(enum ::syntax_pos::FileName { @@ -378,13 +422,14 @@ impl_stable_hash_for!(enum ::syntax_pos::FileName { Anon, MacroExpansion, ProcMacroSourceCode, + CliCrateAttr, CfgSpec, Custom(s) }); -impl<'gcx> HashStable> for FileMap { +impl<'a> HashStable> for FileMap { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let FileMap { name: _, // We hash the smaller name_hash instead of this @@ -414,20 +459,17 @@ impl<'gcx> HashStable> for FileMap { src_hash.hash_stable(hcx, hasher); // We only hash the relative position within this filemap - let lines = lines.borrow(); lines.len().hash_stable(hcx, hasher); for &line in lines.iter() { stable_byte_pos(line, start_pos).hash_stable(hcx, hasher); } // We only hash the relative position within this 
filemap - let multibyte_chars = multibyte_chars.borrow(); multibyte_chars.len().hash_stable(hcx, hasher); for &char_pos in multibyte_chars.iter() { stable_multibyte_char(char_pos, start_pos).hash_stable(hcx, hasher); } - let non_narrow_chars = non_narrow_chars.borrow(); non_narrow_chars.len().hash_stable(hcx, hasher); for &char_pos in non_narrow_chars.iter() { stable_non_narrow_char(char_pos, start_pos).hash_stable(hcx, hasher); @@ -460,3 +502,21 @@ fn stable_non_narrow_char(swc: ::syntax_pos::NonNarrowChar, (pos.0 - filemap_start.0, width as u32) } + + + +impl<'gcx> HashStable> for feature_gate::Features { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + // Unfortunately we cannot exhaustively list fields here, since the + // struct is macro generated. + self.declared_lang_features.hash_stable(hcx, hasher); + self.declared_lib_features.hash_stable(hcx, hasher); + + self.walk_feature_fields(|feature_name, value| { + feature_name.hash_stable(hcx, hasher); + value.hash_stable(hcx, hasher); + }); + } +} diff --git a/src/librustc/ich/impls_ty.rs b/src/librustc/ich/impls_ty.rs index ea3a1074aa26..f13e26fee3ee 100644 --- a/src/librustc/ich/impls_ty.rs +++ b/src/librustc/ich/impls_ty.rs @@ -11,39 +11,87 @@ //! This module contains `HashStable` implementations for various data types //! from rustc::ty in no particular order. 
-use ich::{StableHashingContext, NodeIdHashingMode}; +use ich::{Fingerprint, StableHashingContext, NodeIdHashingMode}; +use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::{HashStable, ToStableHashKey, StableHasher, StableHasherResult}; +use std::cell::RefCell; use std::hash as std_hash; use std::mem; use middle::region; +use infer; use traits; use ty; +use mir; -impl<'gcx, T> HashStable> +impl<'a, 'gcx, T> HashStable> for &'gcx ty::Slice - where T: HashStable> { + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - (&self[..]).hash_stable(hcx, hasher); + thread_local! { + static CACHE: RefCell> = + RefCell::new(FxHashMap()); + } + + let hash = CACHE.with(|cache| { + let key = (self.as_ptr() as usize, self.len()); + if let Some(&hash) = cache.borrow().get(&key) { + return hash; + } + + let mut hasher = StableHasher::new(); + (&self[..]).hash_stable(hcx, &mut hasher); + + let hash: Fingerprint = hasher.finish(); + cache.borrow_mut().insert(key, hash); + hash + }); + + hash.hash_stable(hcx, hasher); } } -impl<'gcx> HashStable> -for ty::subst::Kind<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - self.as_type().hash_stable(hcx, hasher); - self.as_region().hash_stable(hcx, hasher); +impl<'a, 'gcx, T> ToStableHashKey> for &'gcx ty::Slice + where T: HashStable> +{ + type KeyType = Fingerprint; + + #[inline] + fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint { + let mut hasher = StableHasher::new(); + let mut hcx: StableHashingContext<'a> = hcx.clone(); + self.hash_stable(&mut hcx, &mut hasher); + hasher.finish() } } -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for ty::subst::Kind<'gcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + self.unpack().hash_stable(hcx, hasher); + } +} + +impl<'a, 'gcx> 
HashStable> +for ty::subst::UnpackedKind<'gcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + ty::subst::UnpackedKind::Lifetime(lt) => lt.hash_stable(hcx, hasher), + ty::subst::UnpackedKind::Type(ty) => ty.hash_stable(hcx, hasher), + } + } +} + +impl<'a> HashStable> for ty::RegionKind { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -52,17 +100,20 @@ for ty::RegionKind { ty::ReEmpty => { // No variant fields to hash for these ... } + ty::ReCanonical(c) => { + c.hash_stable(hcx, hasher); + } ty::ReLateBound(db, ty::BrAnon(i)) => { - db.depth.hash_stable(hcx, hasher); + db.hash_stable(hcx, hasher); i.hash_stable(hcx, hasher); } ty::ReLateBound(db, ty::BrNamed(def_id, name)) => { - db.depth.hash_stable(hcx, hasher); + db.hash_stable(hcx, hasher); def_id.hash_stable(hcx, hasher); name.hash_stable(hcx, hasher); } ty::ReLateBound(db, ty::BrEnv) => { - db.depth.hash_stable(hcx, hasher); + db.hash_stable(hcx, hasher); } ty::ReEarlyBound(ty::EarlyBoundRegion { def_id, index, name }) => { def_id.hash_stable(hcx, hasher); @@ -81,13 +132,23 @@ for ty::RegionKind { ty::ReLateBound(..) | ty::ReVar(..) | ty::ReSkolemized(..) 
=> { - bug!("TypeIdHasher: unexpected region {:?}", *self) + bug!("StableHasher: unexpected region {:?}", *self) } } } } -impl<'gcx> HashStable> for ty::RegionVid { +impl<'a> HashStable> for ty::RegionVid { + #[inline] + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use rustc_data_structures::indexed_vec::Idx; + self.index().hash_stable(hcx, hasher); + } +} + +impl<'gcx> HashStable> for ty::CanonicalVar { #[inline] fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, @@ -97,10 +158,10 @@ impl<'gcx> HashStable> for ty::RegionVid { } } -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for ty::adjustment::AutoBorrow<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -115,10 +176,10 @@ for ty::adjustment::AutoBorrow<'gcx> { } } -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for ty::adjustment::Adjust<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -141,6 +202,24 @@ for ty::adjustment::Adjust<'gcx> { impl_stable_hash_for!(struct ty::adjustment::Adjustment<'tcx> { kind, target }); impl_stable_hash_for!(struct ty::adjustment::OverloadedDeref<'tcx> { region, mutbl }); impl_stable_hash_for!(struct ty::UpvarBorrow<'tcx> { kind, region }); +impl_stable_hash_for!(enum ty::adjustment::AllowTwoPhase { + Yes, + No +}); + +impl<'gcx> HashStable> for ty::adjustment::AutoBorrowMutability { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + ty::adjustment::AutoBorrowMutability::Mutable { ref allow_two_phase_borrow } => { + allow_two_phase_borrow.hash_stable(hcx, hasher); + } + 
ty::adjustment::AutoBorrowMutability::Immutable => {} + } + } +} impl_stable_hash_for!(struct ty::UpvarId { var_id, closure_expr_id }); @@ -150,10 +229,10 @@ impl_stable_hash_for!(enum ty::BorrowKind { MutBorrow }); -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for ty::UpvarCapture<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -177,14 +256,13 @@ impl_stable_hash_for!(struct ty::FnSig<'tcx> { abi }); -impl<'gcx, T> HashStable> for ty::Binder - where T: HashStable> +impl<'a, 'gcx, T> HashStable> for ty::Binder + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - let ty::Binder(ref inner) = *self; - inner.hash_stable(hcx, hasher); + self.skip_binder().hash_stable(hcx, hasher); } } @@ -198,16 +276,15 @@ impl_stable_hash_for!(enum ty::Visibility { impl_stable_hash_for!(struct ty::TraitRef<'tcx> { def_id, substs }); impl_stable_hash_for!(struct ty::TraitPredicate<'tcx> { trait_ref }); -impl_stable_hash_for!(tuple_struct ty::EquatePredicate<'tcx> { t1, t2 }); impl_stable_hash_for!(struct ty::SubtypePredicate<'tcx> { a_is_expected, a, b }); -impl<'gcx, A, B> HashStable> +impl<'a, 'gcx, A, B> HashStable> for ty::OutlivesPredicate - where A: HashStable>, - B: HashStable>, + where A: HashStable>, + B: HashStable>, { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let ty::OutlivesPredicate(ref a, ref b) = *self; a.hash_stable(hcx, hasher); @@ -219,18 +296,15 @@ impl_stable_hash_for!(struct ty::ProjectionPredicate<'tcx> { projection_ty, ty } impl_stable_hash_for!(struct ty::ProjectionTy<'tcx> { substs, item_def_id }); -impl<'gcx> HashStable> for ty::Predicate<'gcx> { +impl<'a, 'gcx> HashStable> for ty::Predicate<'gcx> { fn 
hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { ty::Predicate::Trait(ref pred) => { pred.hash_stable(hcx, hasher); } - ty::Predicate::Equate(ref pred) => { - pred.hash_stable(hcx, hasher); - } ty::Predicate::Subtype(ref pred) => { pred.hash_stable(hcx, hasher); } @@ -262,9 +336,9 @@ impl<'gcx> HashStable> for ty::Predicate<'gcx> { } } -impl<'gcx> HashStable> for ty::AdtFlags { +impl<'a> HashStable> for ty::AdtFlags { fn hash_stable(&self, - _: &mut StableHashingContext<'gcx>, + _: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { std_hash::Hash::hash(self, hasher); } @@ -285,138 +359,318 @@ impl_stable_hash_for!(enum ty::VariantDiscr { impl_stable_hash_for!(struct ty::FieldDef { did, - name, - vis + ident -> (ident.name), + vis, }); -impl<'gcx> HashStable> -for ::middle::const_val::ConstVal<'gcx> { +impl<'a, 'gcx> HashStable> +for ::mir::interpret::ConstValue<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - use middle::const_val::ConstVal::*; - use middle::const_val::ConstAggregate::*; + use mir::interpret::ConstValue::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Integral(ref value) => { - value.hash_stable(hcx, hasher); - } - Float(ref value) => { - value.hash_stable(hcx, hasher); - } - Str(ref value) => { - value.hash_stable(hcx, hasher); - } - ByteStr(ref value) => { - value.hash_stable(hcx, hasher); - } - Bool(value) => { - value.hash_stable(hcx, hasher); - } - Char(value) => { - value.hash_stable(hcx, hasher); - } - Variant(def_id) => { - def_id.hash_stable(hcx, hasher); - } - Function(def_id, substs) => { - def_id.hash_stable(hcx, hasher); - hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { - substs.hash_stable(hcx, hasher); - }); - } - Aggregate(Struct(ref name_values)) => { - 
let mut values = name_values.to_vec(); - values.sort_unstable_by_key(|&(ref name, _)| name.clone()); - values.hash_stable(hcx, hasher); - } - Aggregate(Tuple(ref value)) => { - value.hash_stable(hcx, hasher); - } - Aggregate(Array(ref value)) => { - value.hash_stable(hcx, hasher); - } - Aggregate(Repeat(ref value, times)) => { - value.hash_stable(hcx, hasher); - times.hash_stable(hcx, hasher); - } Unevaluated(def_id, substs) => { def_id.hash_stable(hcx, hasher); substs.hash_stable(hcx, hasher); } + Scalar(val) => { + val.hash_stable(hcx, hasher); + } + ScalarPair(a, b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } + ByRef(alloc, offset) => { + alloc.hash_stable(hcx, hasher); + offset.hash_stable(hcx, hasher); + } } } } -impl_stable_hash_for!(struct ::middle::const_val::ByteArray<'tcx> { - data +impl_stable_hash_for!(enum mir::interpret::ScalarMaybeUndef { + Scalar(v), + Undef }); +impl_stable_hash_for!(enum mir::interpret::Value { + Scalar(v), + ScalarPair(a, b), + ByRef(ptr, align) +}); + +impl_stable_hash_for!(struct mir::interpret::Pointer { + alloc_id, + offset +}); + +impl<'a> HashStable> for mir::interpret::AllocId { + fn hash_stable( + &self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher, + ) { + ty::tls::with_opt(|tcx| { + trace!("hashing {:?}", *self); + let tcx = tcx.expect("can't hash AllocIds during hir lowering"); + let alloc_kind = tcx.alloc_map.lock().get(*self).expect("no value for AllocId"); + alloc_kind.hash_stable(hcx, hasher); + }); + } +} + +impl<'a, 'gcx, M: HashStable>> HashStable> +for mir::interpret::AllocType<'gcx, M> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use mir::interpret::AllocType::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Function(instance) => instance.hash_stable(hcx, hasher), + Static(def_id) => def_id.hash_stable(hcx, hasher), + Memory(ref mem) => mem.hash_stable(hcx, hasher), + } + } +} + 
+impl<'a> HashStable> for mir::interpret::Allocation { + fn hash_stable( + &self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher, + ) { + self.bytes.hash_stable(hcx, hasher); + for reloc in self.relocations.iter() { + reloc.hash_stable(hcx, hasher); + } + self.undef_mask.hash_stable(hcx, hasher); + self.align.hash_stable(hcx, hasher); + self.runtime_mutability.hash_stable(hcx, hasher); + } +} + +impl_stable_hash_for!(enum ::syntax::ast::Mutability { + Immutable, + Mutable +}); + + +impl<'a> HashStable> +for ::mir::interpret::Scalar { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use mir::interpret::Scalar::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + Bits { bits, size } => { + bits.hash_stable(hcx, hasher); + size.hash_stable(hcx, hasher); + }, + Ptr(ptr) => ptr.hash_stable(hcx, hasher), + } + } +} + impl_stable_hash_for!(struct ty::Const<'tcx> { ty, val }); -impl_stable_hash_for!(struct ::middle::const_val::ConstEvalErr<'tcx> { +impl_stable_hash_for!(struct ::mir::interpret::ConstEvalErr<'tcx> { span, - kind + stacktrace, + error }); -impl<'gcx> HashStable> -for ::middle::const_val::ErrKind<'gcx> { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - use middle::const_val::ErrKind::*; - - mem::discriminant(self).hash_stable(hcx, hasher); - - match *self { - CannotCast | - MissingStructField | - NonConstPath | - ExpectedConstTuple | - ExpectedConstStruct | - IndexedNonVec | - IndexNotUsize | - MiscBinaryOp | - MiscCatchAll | - IndexOpFeatureGated | - TypeckError | - CheckMatchError => { - // nothing to do - } - UnimplementedConstVal(s) => { - s.hash_stable(hcx, hasher); - } - IndexOutOfBounds { len, index } => { - len.hash_stable(hcx, hasher); - index.hash_stable(hcx, hasher); - } - Math(ref const_math_err) => { - const_math_err.hash_stable(hcx, hasher); - } - LayoutError(ref layout_error) => { - layout_error.hash_stable(hcx, 
hasher); - } - ErroneousReferencedConstant(ref const_val) => { - const_val.hash_stable(hcx, hasher); - } - } - } -} +impl_stable_hash_for!(struct ::mir::interpret::FrameInfo { + span, + lint_root, + location +}); impl_stable_hash_for!(struct ty::ClosureSubsts<'tcx> { substs }); - -impl_stable_hash_for!(struct ty::GeneratorInterior<'tcx> { witness }); +impl_stable_hash_for!(struct ty::GeneratorSubsts<'tcx> { substs }); impl_stable_hash_for!(struct ty::GenericPredicates<'tcx> { parent, predicates }); +impl_stable_hash_for!(struct ::mir::interpret::EvalError<'tcx> { kind }); + +impl<'a, 'gcx, O: HashStable>> HashStable> +for ::mir::interpret::EvalErrorKind<'gcx, O> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use mir::interpret::EvalErrorKind::*; + + mem::discriminant(&self).hash_stable(hcx, hasher); + + match *self { + DanglingPointerDeref | + DoubleFree | + InvalidMemoryAccess | + InvalidFunctionPointer | + InvalidBool | + InvalidDiscriminant | + InvalidNullPointerUsage | + ReadPointerAsBytes | + ReadBytesAsPointer | + ReadForeignStatic | + InvalidPointerMath | + ReadUndefBytes | + DeadLocal | + StackFrameLimitReached | + OutOfTls | + TlsOutOfBounds | + CalledClosureAsFunction | + VtableForArgumentlessMethod | + ModifiedConstantMemory | + AssumptionNotHeld | + InlineAsm | + ReallocateNonBasePtr | + DeallocateNonBasePtr | + HeapAllocZeroBytes | + Unreachable | + Panic | + ReadFromReturnPointer | + UnimplementedTraitSelection | + TypeckError | + TooGeneric | + CheckMatchError | + DerefFunctionPointer | + ExecuteMemory | + OverflowNeg | + RemainderByZero | + DivisionByZero | + GeneratorResumedAfterReturn | + GeneratorResumedAfterPanic | + InfiniteLoop => {} + ReferencedConstant(ref err) => err.hash_stable(hcx, hasher), + MachineError(ref err) => err.hash_stable(hcx, hasher), + FunctionPointerTyMismatch(a, b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher) + }, + NoMirFor(ref s) => s.hash_stable(hcx, 
hasher), + UnterminatedCString(ptr) => ptr.hash_stable(hcx, hasher), + PointerOutOfBounds { + ptr, + access, + allocation_size, + } => { + ptr.hash_stable(hcx, hasher); + access.hash_stable(hcx, hasher); + allocation_size.hash_stable(hcx, hasher) + }, + InvalidBoolOp(bop) => bop.hash_stable(hcx, hasher), + Unimplemented(ref s) => s.hash_stable(hcx, hasher), + BoundsCheck { ref len, ref index } => { + len.hash_stable(hcx, hasher); + index.hash_stable(hcx, hasher) + }, + Intrinsic(ref s) => s.hash_stable(hcx, hasher), + InvalidChar(c) => c.hash_stable(hcx, hasher), + AbiViolation(ref s) => s.hash_stable(hcx, hasher), + AlignmentCheckFailed { + required, + has, + } => { + required.hash_stable(hcx, hasher); + has.hash_stable(hcx, hasher) + }, + MemoryLockViolation { + ptr, + len, + frame, + access, + ref lock, + } => { + ptr.hash_stable(hcx, hasher); + len.hash_stable(hcx, hasher); + frame.hash_stable(hcx, hasher); + access.hash_stable(hcx, hasher); + lock.hash_stable(hcx, hasher) + }, + MemoryAcquireConflict { + ptr, + len, + kind, + ref lock, + } => { + ptr.hash_stable(hcx, hasher); + len.hash_stable(hcx, hasher); + kind.hash_stable(hcx, hasher); + lock.hash_stable(hcx, hasher) + }, + InvalidMemoryLockRelease { + ptr, + len, + frame, + ref lock, + } => { + ptr.hash_stable(hcx, hasher); + len.hash_stable(hcx, hasher); + frame.hash_stable(hcx, hasher); + lock.hash_stable(hcx, hasher) + }, + DeallocatedLockedMemory { + ptr, + ref lock, + } => { + ptr.hash_stable(hcx, hasher); + lock.hash_stable(hcx, hasher) + }, + ValidationFailure(ref s) => s.hash_stable(hcx, hasher), + TypeNotPrimitive(ty) => ty.hash_stable(hcx, hasher), + ReallocatedWrongMemoryKind(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher) + }, + DeallocatedWrongMemoryKind(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher) + }, + IncorrectAllocationInformation(a, b, c, d) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + 
c.hash_stable(hcx, hasher); + d.hash_stable(hcx, hasher) + }, + Layout(lay) => lay.hash_stable(hcx, hasher), + HeapAllocNonPowerOfTwoAlignment(n) => n.hash_stable(hcx, hasher), + PathNotFound(ref v) => v.hash_stable(hcx, hasher), + Overflow(op) => op.hash_stable(hcx, hasher), + } + } +} + +impl_stable_hash_for!(enum mir::interpret::Lock { + NoLock, + WriteLock(dl), + ReadLock(v) +}); + +impl_stable_hash_for!(struct mir::interpret::DynamicLifetime { + frame, + region +}); + +impl_stable_hash_for!(enum mir::interpret::AccessKind { + Read, + Write +}); + impl_stable_hash_for!(enum ty::Variance { Covariant, Invariant, @@ -428,69 +682,50 @@ impl_stable_hash_for!(enum ty::adjustment::CustomCoerceUnsized { Struct(index) }); -impl<'gcx> HashStable> for ty::Generics { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let ty::Generics { - parent, - parent_regions, - parent_types, - ref regions, - ref types, +impl_stable_hash_for!(struct ty::Generics { + parent, + parent_count, + params, + // Reverse map to each param's `index` field, from its `def_id`. + param_def_id_to_index -> _, // Don't hash this + has_self, + has_late_bound_regions, +}); - // Reverse map to each `TypeParameterDef`'s `index` field, from - // `def_id.index` (`def_id.krate` is the same as the item's). 
- type_param_to_index: _, // Don't hash this - has_self, - has_late_bound_regions, - } = *self; - - parent.hash_stable(hcx, hasher); - parent_regions.hash_stable(hcx, hasher); - parent_types.hash_stable(hcx, hasher); - regions.hash_stable(hcx, hasher); - types.hash_stable(hcx, hasher); - has_self.hash_stable(hcx, hasher); - has_late_bound_regions.hash_stable(hcx, hasher); - } -} - -impl<'gcx> HashStable> -for ty::RegionParameterDef { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let ty::RegionParameterDef { - name, - def_id, - index, - pure_wrt_drop - } = *self; - - name.hash_stable(hcx, hasher); - def_id.hash_stable(hcx, hasher); - index.hash_stable(hcx, hasher); - pure_wrt_drop.hash_stable(hcx, hasher); - } -} - -impl_stable_hash_for!(struct ty::TypeParameterDef { +impl_stable_hash_for!(struct ty::GenericParamDef { name, def_id, index, - has_default, - object_lifetime_default, pure_wrt_drop, - synthetic + kind }); -impl<'gcx, T> HashStable> +impl<'a> HashStable> for ty::GenericParamDefKind { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + ty::GenericParamDefKind::Lifetime => {} + ty::GenericParamDefKind::Type { + has_default, + ref object_lifetime_default, + ref synthetic, + } => { + has_default.hash_stable(hcx, hasher); + object_lifetime_default.hash_stable(hcx, hasher); + synthetic.hash_stable(hcx, hasher); + } + } + } +} + +impl<'a, 'gcx, T> HashStable> for ::middle::resolve_lifetime::Set1 - where T: HashStable> + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use middle::resolve_lifetime::Set1; @@ -520,10 +755,6 @@ impl_stable_hash_for!(enum ::middle::resolve_lifetime::Region { Free(call_site_scope_data, decl) }); -impl_stable_hash_for!(struct ty::DebruijnIndex { - depth -}); - 
impl_stable_hash_for!(enum ty::cast::CastKind { CoercionCast, PtrPtrCast, @@ -541,11 +772,11 @@ impl_stable_hash_for!(enum ty::cast::CastKind { impl_stable_hash_for!(tuple_struct ::middle::region::FirstStatementIndex { idx }); impl_stable_hash_for!(struct ::middle::region::Scope { id, code }); -impl<'gcx> ToStableHashKey> for region::Scope { +impl<'a> ToStableHashKey> for region::Scope { type KeyType = region::Scope; #[inline] - fn to_stable_hash_key(&self, _: &StableHashingContext<'gcx>) -> region::Scope { + fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> region::Scope { *self } } @@ -571,11 +802,11 @@ impl_stable_hash_for!(enum ty::BoundRegion { BrEnv }); -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for ty::TypeVariants<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use ty::TypeVariants::*; @@ -611,9 +842,10 @@ for ty::TypeVariants<'gcx> TyRawPtr(pointee_ty) => { pointee_ty.hash_stable(hcx, hasher); } - TyRef(region, pointee_ty) => { + TyRef(region, pointee_ty, mutbl) => { region.hash_stable(hcx, hasher); pointee_ty.hash_stable(hcx, hasher); + mutbl.hash_stable(hcx, hasher); } TyFnDef(def_id, substs) => { def_id.hash_stable(hcx, hasher); @@ -630,14 +862,16 @@ for ty::TypeVariants<'gcx> def_id.hash_stable(hcx, hasher); closure_substs.hash_stable(hcx, hasher); } - TyGenerator(def_id, closure_substs, interior) => { + TyGenerator(def_id, generator_substs, movability) => { def_id.hash_stable(hcx, hasher); - closure_substs.hash_stable(hcx, hasher); - interior.hash_stable(hcx, hasher); + generator_substs.hash_stable(hcx, hasher); + movability.hash_stable(hcx, hasher); } - TyTuple(inner_tys, from_diverging_type_var) => { + TyGeneratorWitness(types) => { + types.hash_stable(hcx, hasher) + } + TyTuple(inner_tys) => { inner_tys.hash_stable(hcx, hasher); - from_diverging_type_var.hash_stable(hcx, hasher); } TyProjection(ref projection_ty) => { 
projection_ty.hash_stable(hcx, hasher); @@ -652,13 +886,59 @@ for ty::TypeVariants<'gcx> TyForeign(def_id) => { def_id.hash_stable(hcx, hasher); } - TyInfer(..) => { - bug!("ty::TypeVariants::hash_stable() - Unexpected variant {:?}.", *self) + TyInfer(infer_ty) => { + infer_ty.hash_stable(hcx, hasher); } } } } +impl_stable_hash_for!(enum ty::InferTy { + TyVar(a), + IntVar(a), + FloatVar(a), + FreshTy(a), + FreshIntTy(a), + FreshFloatTy(a), + CanonicalTy(a), +}); + +impl<'a, 'gcx> HashStable> +for ty::TyVid +{ + fn hash_stable(&self, + _hcx: &mut StableHashingContext<'a>, + _hasher: &mut StableHasher) { + // TyVid values are confined to an inference context and hence + // should not be hashed. + bug!("ty::TypeVariants::hash_stable() - can't hash a TyVid {:?}.", *self) + } +} + +impl<'a, 'gcx> HashStable> +for ty::IntVid +{ + fn hash_stable(&self, + _hcx: &mut StableHashingContext<'a>, + _hasher: &mut StableHasher) { + // IntVid values are confined to an inference context and hence + // should not be hashed. + bug!("ty::TypeVariants::hash_stable() - can't hash an IntVid {:?}.", *self) + } +} + +impl<'a, 'gcx> HashStable> +for ty::FloatVid +{ + fn hash_stable(&self, + _hcx: &mut StableHashingContext<'a>, + _hasher: &mut StableHasher) { + // FloatVid values are confined to an inference context and hence + // should not be hashed. 
+ bug!("ty::TypeVariants::hash_stable() - can't hash a FloatVid {:?}.", *self) + } +} + impl_stable_hash_for!(struct ty::ParamTy { idx, name @@ -669,11 +949,11 @@ impl_stable_hash_for!(struct ty::TypeAndMut<'tcx> { mutbl }); -impl<'gcx> HashStable> +impl<'a, 'gcx> HashStable> for ty::ExistentialPredicate<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -706,9 +986,9 @@ impl_stable_hash_for!(struct ty::Instance<'tcx> { substs }); -impl<'gcx> HashStable> for ty::InstanceDef<'gcx> { +impl<'a, 'gcx> HashStable> for ty::InstanceDef<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); @@ -730,65 +1010,46 @@ impl<'gcx> HashStable> for ty::InstanceDef<'gcx> { ty::InstanceDef::ClosureOnceShim { call_once } => { call_once.hash_stable(hcx, hasher); } - ty::InstanceDef::DropGlue(def_id, t) => { + ty::InstanceDef::DropGlue(def_id, ty) => { def_id.hash_stable(hcx, hasher); - t.hash_stable(hcx, hasher); + ty.hash_stable(hcx, hasher); } - ty::InstanceDef::CloneShim(def_id, t) => { + ty::InstanceDef::CloneShim(def_id, ty) => { def_id.hash_stable(hcx, hasher); - t.hash_stable(hcx, hasher); + ty.hash_stable(hcx, hasher); } } } } -impl<'gcx> HashStable> for ty::TraitDef { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let ty::TraitDef { - // We already have the def_path_hash below, no need to hash it twice - def_id: _, - unsafety, - paren_sugar, - has_auto_impl, - def_path_hash, - } = *self; - - unsafety.hash_stable(hcx, hasher); - paren_sugar.hash_stable(hcx, hasher); - has_auto_impl.hash_stable(hcx, hasher); - def_path_hash.hash_stable(hcx, hasher); - } -} +impl_stable_hash_for!(struct ty::TraitDef { + // We already have the def_path_hash below, no 
need to hash it twice + def_id -> _, + unsafety, + paren_sugar, + has_auto_impl, + def_path_hash, +}); impl_stable_hash_for!(struct ty::Destructor { did }); -impl_stable_hash_for!(struct ty::DtorckConstraint<'tcx> { - outlives, - dtorck_types +impl_stable_hash_for!(struct ty::CrateVariancesMap { + variances, + // This is just an irrelevant helper value. + empty_variance -> _, }); - -impl<'gcx> HashStable> for ty::CrateVariancesMap { - fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, - hasher: &mut StableHasher) { - let ty::CrateVariancesMap { - ref variances, - // This is just an irrelevant helper value. - empty_variance: _, - } = *self; - - variances.hash_stable(hcx, hasher); - } -} +impl_stable_hash_for!(struct ty::CratePredicatesMap<'tcx> { + predicates, + // This is just an irrelevant helper value. + empty_predicate -> _, +}); impl_stable_hash_for!(struct ty::AssociatedItem { def_id, - name, + ident -> (ident.name), kind, vis, defaultness, @@ -799,6 +1060,7 @@ impl_stable_hash_for!(struct ty::AssociatedItem { impl_stable_hash_for!(enum ty::AssociatedKind { Const, Method, + Existential, Type }); @@ -808,12 +1070,12 @@ impl_stable_hash_for!(enum ty::AssociatedItemContainer { }); -impl<'gcx, T> HashStable> +impl<'a, 'gcx, T> HashStable> for ty::steal::Steal - where T: HashStable> + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { self.borrow().hash_stable(hcx, hasher); } @@ -835,10 +1097,10 @@ impl_stable_hash_for!(enum ::middle::privacy::AccessLevel { Public }); -impl<'gcx> HashStable> +impl<'a> HashStable> for ::middle::privacy::AccessLevels { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { let ::middle::privacy::AccessLevels { @@ -865,10 +1127,10 @@ impl_stable_hash_for!(tuple_struct 
::middle::reachable::ReachableSet { reachable_set }); -impl<'gcx, N> HashStable> -for traits::Vtable<'gcx, N> where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::Vtable<'gcx, N> where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use traits::Vtable::*; @@ -887,10 +1149,10 @@ for traits::Vtable<'gcx, N> where N: HashStable> { } } -impl<'gcx, N> HashStable> -for traits::VtableImplData<'gcx, N> where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableImplData<'gcx, N> where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let traits::VtableImplData { impl_def_id, @@ -903,10 +1165,10 @@ for traits::VtableImplData<'gcx, N> where N: HashStable HashStable> -for traits::VtableAutoImplData where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableAutoImplData where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let traits::VtableAutoImplData { trait_def_id, @@ -917,10 +1179,10 @@ for traits::VtableAutoImplData where N: HashStable } } -impl<'gcx, N> HashStable> -for traits::VtableObjectData<'gcx, N> where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableObjectData<'gcx, N> where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let traits::VtableObjectData { upcast_trait_ref, @@ -933,10 +1195,10 @@ for traits::VtableObjectData<'gcx, N> where N: HashStable HashStable> -for traits::VtableBuiltinData where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableBuiltinData where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) 
{ let traits::VtableBuiltinData { ref nested, @@ -945,10 +1207,10 @@ for traits::VtableBuiltinData where N: HashStable> } } -impl<'gcx, N> HashStable> -for traits::VtableClosureData<'gcx, N> where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableClosureData<'gcx, N> where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let traits::VtableClosureData { closure_def_id, @@ -961,10 +1223,10 @@ for traits::VtableClosureData<'gcx, N> where N: HashStable HashStable> -for traits::VtableFnPointerData<'gcx, N> where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableFnPointerData<'gcx, N> where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let traits::VtableFnPointerData { fn_ty, @@ -975,18 +1237,167 @@ for traits::VtableFnPointerData<'gcx, N> where N: HashStable HashStable> -for traits::VtableGeneratorData<'gcx, N> where N: HashStable> { +impl<'a, 'gcx, N> HashStable> +for traits::VtableGeneratorData<'gcx, N> where N: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let traits::VtableGeneratorData { - closure_def_id, + generator_def_id, substs, ref nested, } = *self; - closure_def_id.hash_stable(hcx, hasher); + generator_def_id.hash_stable(hcx, hasher); substs.hash_stable(hcx, hasher); nested.hash_stable(hcx, hasher); } } + +impl_stable_hash_for!( + impl<'tcx, V> for struct infer::canonical::Canonical<'tcx, V> { + variables, value + } +); + +impl_stable_hash_for!( + impl<'tcx> for struct infer::canonical::CanonicalVarValues<'tcx> { + var_values + } +); + +impl_stable_hash_for!(struct infer::canonical::CanonicalVarInfo { + kind +}); + +impl_stable_hash_for!(enum infer::canonical::CanonicalVarKind { + Ty(k), + Region +}); + +impl_stable_hash_for!(enum 
infer::canonical::CanonicalTyVarKind { + General, + Int, + Float +}); + +impl_stable_hash_for!( + impl<'tcx, R> for struct infer::canonical::QueryResult<'tcx, R> { + var_values, region_constraints, certainty, value + } +); + +impl_stable_hash_for!(enum infer::canonical::Certainty { + Proven, Ambiguous +}); + +impl<'a, 'tcx> HashStable> for traits::WhereClause<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use traits::WhereClause::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + Implemented(trait_ref) => trait_ref.hash_stable(hcx, hasher), + ProjectionEq(projection) => projection.hash_stable(hcx, hasher), + TypeOutlives(ty_outlives) => ty_outlives.hash_stable(hcx, hasher), + RegionOutlives(region_outlives) => region_outlives.hash_stable(hcx, hasher), + } + } +} + +impl<'a, 'tcx> HashStable> for traits::WellFormed<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use traits::WellFormed::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + Trait(trait_ref) => trait_ref.hash_stable(hcx, hasher), + Ty(ty) => ty.hash_stable(hcx, hasher), + } + } +} + +impl<'a, 'tcx> HashStable> for traits::FromEnv<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use traits::FromEnv::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + Trait(trait_ref) => trait_ref.hash_stable(hcx, hasher), + Ty(ty) => ty.hash_stable(hcx, hasher), + } + } +} + +impl<'a, 'tcx> HashStable> for traits::DomainGoal<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use traits::DomainGoal::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + Holds(wc) => wc.hash_stable(hcx, hasher), + WellFormed(wf) => wf.hash_stable(hcx, hasher), + FromEnv(from_env) => from_env.hash_stable(hcx, hasher), + Normalize(projection) => 
projection.hash_stable(hcx, hasher), + } + } +} + +impl<'a, 'tcx> HashStable> for traits::Goal<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use traits::Goal::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + Implies(hypotheses, goal) => { + hypotheses.hash_stable(hcx, hasher); + goal.hash_stable(hcx, hasher); + }, + And(goal1, goal2) => { + goal1.hash_stable(hcx, hasher); + goal2.hash_stable(hcx, hasher); + } + Not(goal) => goal.hash_stable(hcx, hasher), + DomainGoal(domain_goal) => domain_goal.hash_stable(hcx, hasher), + Quantified(quantifier, goal) => { + quantifier.hash_stable(hcx, hasher); + goal.hash_stable(hcx, hasher); + }, + CannotProve => { }, + } + } +} + +impl_stable_hash_for!( + impl<'tcx> for struct traits::ProgramClause<'tcx> { + goal, hypotheses + } +); + +impl<'a, 'tcx> HashStable> for traits::Clause<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use traits::Clause::*; + + mem::discriminant(self).hash_stable(hcx, hasher); + match self { + Implies(clause) => clause.hash_stable(hcx, hasher), + ForAll(clause) => clause.hash_stable(hcx, hasher), + } + } +} + +impl_stable_hash_for!(enum traits::QuantifierKind { + Universal, + Existential +}); diff --git a/src/librustc/ich/mod.rs b/src/librustc/ich/mod.rs index cbd76ee14db3..b00d8c565694 100644 --- a/src/librustc/ich/mod.rs +++ b/src/librustc/ich/mod.rs @@ -10,15 +10,13 @@ //! 
ICH - Incremental Compilation Hash -pub use self::fingerprint::Fingerprint; +crate use rustc_data_structures::fingerprint::Fingerprint; pub use self::caching_codemap_view::CachingCodemapView; -pub use self::hcx::{StableHashingContext, NodeIdHashingMode, +pub use self::hcx::{StableHashingContextProvider, StableHashingContext, NodeIdHashingMode, hash_stable_trait_impls}; -mod fingerprint; mod caching_codemap_view; mod hcx; -mod impls_const_math; mod impls_cstore; mod impls_hir; mod impls_mir; @@ -31,7 +29,7 @@ pub const ATTR_CLEAN: &'static str = "rustc_clean"; pub const ATTR_IF_THIS_CHANGED: &'static str = "rustc_if_this_changed"; pub const ATTR_THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need"; pub const ATTR_PARTITION_REUSED: &'static str = "rustc_partition_reused"; -pub const ATTR_PARTITION_TRANSLATED: &'static str = "rustc_partition_translated"; +pub const ATTR_PARTITION_CODEGENED: &'static str = "rustc_partition_codegened"; pub const DEP_GRAPH_ASSERT_ATTRS: &'static [&'static str] = &[ @@ -40,7 +38,7 @@ pub const DEP_GRAPH_ASSERT_ATTRS: &'static [&'static str] = &[ ATTR_DIRTY, ATTR_CLEAN, ATTR_PARTITION_REUSED, - ATTR_PARTITION_TRANSLATED, + ATTR_PARTITION_CODEGENED, ]; pub const IGNORED_ATTRIBUTES: &'static [&'static str] = &[ @@ -50,5 +48,5 @@ pub const IGNORED_ATTRIBUTES: &'static [&'static str] = &[ ATTR_DIRTY, ATTR_CLEAN, ATTR_PARTITION_REUSED, - ATTR_PARTITION_TRANSLATED, + ATTR_PARTITION_CODEGENED, ]; diff --git a/src/librustc/infer/README.md b/src/librustc/infer/README.md deleted file mode 100644 index 7f58d0390602..000000000000 --- a/src/librustc/infer/README.md +++ /dev/null @@ -1,227 +0,0 @@ -# Type inference engine - -The type inference is based on standard HM-type inference, but -extended in various way to accommodate subtyping, region inference, -and higher-ranked types. - -## A note on terminology - -We use the notation `?T` to refer to inference variables, also called -existential variables. 
- -We use the term "region" and "lifetime" interchangeably. Both refer to -the `'a` in `&'a T`. - -The term "bound region" refers to regions bound in a function -signature, such as the `'a` in `for<'a> fn(&'a u32)`. A region is -"free" if it is not bound. - -## Creating an inference context - -You create and "enter" an inference context by doing something like -the following: - -```rust -tcx.infer_ctxt().enter(|infcx| { - // use the inference context `infcx` in here -}) -``` - -Each inference context creates a short-lived type arena to store the -fresh types and things that it will create, as described in -[the README in the ty module][ty-readme]. This arena is created by the `enter` -function and disposed after it returns. - -[ty-readme]: src/librustc/ty/README.md - -Within the closure, the infcx will have the type `InferCtxt<'cx, 'gcx, -'tcx>` for some fresh `'cx` and `'tcx` -- the latter corresponds to -the lifetime of this temporary arena, and the `'cx` is the lifetime of -the `InferCtxt` itself. (Again, see [that ty README][ty-readme] for -more details on this setup.) - -The `tcx.infer_ctxt` method actually returns a build, which means -there are some kinds of configuration you can do before the `infcx` is -created. See `InferCtxtBuilder` for more information. - -## Inference variables - -The main purpose of the inference context is to house a bunch of -**inference variables** -- these represent types or regions whose precise -value is not yet known, but will be uncovered as we perform type-checking. - -If you're familiar with the basic ideas of unification from H-M type -systems, or logic languages like Prolog, this is the same concept. If -you're not, you might want to read a tutorial on how H-M type -inference works, or perhaps this blog post on -[unification in the Chalk project]. 
- -[Unification in the Chalk project]: http://smallcultfollowing.com/babysteps/blog/2017/03/25/unification-in-chalk-part-1/ - -All told, the inference context stores four kinds of inference variables as of this -writing: - -- Type variables, which come in three varieties: - - General type variables (the most common). These can be unified with any type. - - Integral type variables, which can only be unified with an integral type, and - arise from an integer literal expression like `22`. - - Float type variables, which can only be unified with a float type, and - arise from a float literal expression like `22.0`. -- Region variables, which represent lifetimes, and arise all over the dang place. - -All the type variables work in much the same way: you can create a new -type variable, and what you get is `Ty<'tcx>` representing an -unresolved type `?T`. Then later you can apply the various operations -that the inferencer supports, such as equality or subtyping, and it -will possibly **instantiate** (or **bind**) that `?T` to a specific -value as a result. - -The region variables work somewhat differently, and are described -below in a separate section. - -## Enforcing equality / subtyping - -The most basic operations you can perform in the type inferencer is -**equality**, which forces two types `T` and `U` to be the same. The -recommended way to add an equality constraint is using the `at` -method, roughly like so: - -``` -infcx.at(...).eq(t, u); -``` - -The first `at()` call provides a bit of context, i.e., why you are -doing this unification, and in what environment, and the `eq` method -performs the actual equality constraint. - -When you equate things, you force them to be precisely equal. Equating -returns a `InferResult` -- if it returns `Err(err)`, then equating -failed, and the enclosing `TypeError` will tell you what went wrong. - -The success case is perhaps more interesting. 
The "primary" return -type of `eq` is `()` -- that is, when it succeeds, it doesn't return a -value of any particular interest. Rather, it is executed for its -side-effects of constraining type variables and so forth. However, the -actual return type is not `()`, but rather `InferOk<()>`. The -`InferOk` type is used to carry extra trait obligations -- your job is -to ensure that these are fulfilled (typically by enrolling them in a -fulfillment context). See the [trait README] for more background here. - -[trait README]: ../traits/README.md - -You can also enforce subtyping through `infcx.at(..).sub(..)`. The same -basic concepts apply as above. - -## "Trying" equality - -Sometimes you would like to know if it is *possible* to equate two -types without error. You can test that with `infcx.can_eq` (or -`infcx.can_sub` for subtyping). If this returns `Ok`, then equality -is possible -- but in all cases, any side-effects are reversed. - -Be aware though that the success or failure of these methods is always -**modulo regions**. That is, two types `&'a u32` and `&'b u32` will -return `Ok` for `can_eq`, even if `'a != 'b`. This falls out from the -"two-phase" nature of how we solve region constraints. - -## Snapshots - -As described in the previous section on `can_eq`, often it is useful -to be able to do a series of operations and then roll back their -side-effects. This is done for various reasons: one of them is to be -able to backtrack, trying out multiple possibilities before settling -on which path to take. Another is in order to ensure that a series of -smaller changes take place atomically or not at all. - -To allow for this, the inference context supports a `snapshot` method. -When you call it, it will start recording changes that occur from the -operations you perform. When you are done, you can either invoke -`rollback_to`, which will undo those changes, or else `confirm`, which -will make the permanent. 
Snapshots can be nested as long as you follow -a stack-like discipline. - -Rather than use snapshots directly, it is often helpful to use the -methods like `commit_if_ok` or `probe` that encapsulate higher-level -patterns. - -## Subtyping obligations - -One thing worth discussing are subtyping obligations. When you force -two types to be a subtype, like `?T <: i32`, we can often convert those -into equality constraints. This follows from Rust's rather limited notion -of subtyping: so, in the above case, `?T <: i32` is equivalent to `?T = i32`. - -However, in some cases we have to be more careful. For example, when -regions are involved. So if you have `?T <: &'a i32`, what we would do -is to first "generalize" `&'a i32` into a type with a region variable: -`&'?b i32`, and then unify `?T` with that (`?T = &'?b i32`). We then -relate this new variable with the original bound: - - &'?b i32 <: &'a i32 - -This will result in a region constraint (see below) of `'?b: 'a`. - -One final interesting case is relating two unbound type variables, -like `?T <: ?U`. In that case, we can't make progress, so we enqueue -an obligation `Subtype(?T, ?U)` and return it via the `InferOk` -mechanism. You'll have to try again when more details about `?T` or -`?U` are known. - -## Region constraints - -Regions are inferred somewhat differently from types. Rather than -eagerly unifying things, we simply collect constraints as we go, but -make (almost) no attempt to solve regions. These constraints have the -form of an outlives constraint: - - 'a: 'b - -Actually the code tends to view them as a subregion relation, but it's the same -idea: - - 'b <= 'a - -(There are various other kinds of constriants, such as "verifys"; see -the `region_constraints` module for details.) - -There is one case where we do some amount of eager unification. If you have an equality constraint -between two regions - - 'a = 'b - -we will record that fact in a unification table. 
You can then use -`opportunistic_resolve_var` to convert `'b` to `'a` (or vice -versa). This is sometimes needed to ensure termination of fixed-point -algorithms. - -## Extracting region constraints - -Ultimately, region constraints are only solved at the very end of -type-checking, once all other constraints are known. There are two -ways to solve region constraints right now: lexical and -non-lexical. Eventually there will only be one. - -To solve **lexical** region constraints, you invoke -`resolve_regions_and_report_errors`. This will "close" the region -constraint process and invoke the `lexical_region_resolve` code. Once -this is done, any further attempt to equate or create a subtyping -relationship will yield an ICE. - -Non-lexical region constraints are not handled within the inference -context. Instead, the NLL solver (actually, the MIR type-checker) -invokes `take_and_reset_region_constraints` periodically. This -extracts all of the outlives constraints from the region solver, but -leaves the set of variables intact. This is used to get *just* the -region constraints that resulted from some particular point in the -program, since the NLL solver needs to know not just *what* regions -were subregions but *where*. Finally, the NLL solver invokes -`take_region_var_origins`, which "closes" the region constraint -process in the same way as normal solving. - -## Lexical region resolution - -Lexical region resolution is done by initially assigning each region -variable to an empty value. We then process each outlives constraint -repeatedly, growing region variables until a fixed-point is reached. -Region variables can be grown using a least-upper-bound relation on -the region lattice in a fairly straight-forward fashion. 
diff --git a/src/librustc/infer/anon_types/mod.rs b/src/librustc/infer/anon_types/mod.rs index f5b88dbc2a9c..205f8c5ad068 100644 --- a/src/librustc/infer/anon_types/mod.rs +++ b/src/librustc/infer/anon_types/mod.rs @@ -9,15 +9,16 @@ // except according to those terms. use hir::def_id::DefId; +use hir; use infer::{self, InferCtxt, InferOk, TypeVariableOrigin}; use infer::outlives::free_region_map::FreeRegionRelations; use rustc_data_structures::fx::FxHashMap; use syntax::ast; use traits::{self, PredicateObligation}; -use ty::{self, Ty}; -use ty::fold::{BottomUpFolder, TypeFoldable}; +use ty::{self, Ty, TyCtxt, GenericParamDefKind}; +use ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder}; use ty::outlives::Component; -use ty::subst::{Kind, Substs}; +use ty::subst::{Kind, Substs, UnpackedKind}; use util::nodemap::DefIdMap; pub type AnonTypeMap<'tcx> = DefIdMap>; @@ -113,10 +114,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ) -> InferOk<'tcx, (T, AnonTypeMap<'tcx>)> { debug!( "instantiate_anon_types(value={:?}, parent_def_id={:?}, body_id={:?}, param_env={:?})", - value, - parent_def_id, - body_id, - param_env, + value, parent_def_id, body_id, param_env, ); let mut instantiator = Instantiator { infcx: self, @@ -316,12 +314,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // `['a]` for the first impl trait and `'b` for the // second. let mut least_region = None; - for region_def in &abstract_type_generics.regions { - // Find the index of this region in the list of substitutions. - let index = region_def.index as usize; - + for param in &abstract_type_generics.params { + match param.kind { + GenericParamDefKind::Lifetime => {} + _ => continue + } // Get the value supplied for this region from the substs. - let subst_arg = anon_defn.substs[index].as_region().unwrap(); + let subst_arg = anon_defn.substs.region_at(param.index as usize); // Compute the least upper bound of it with the other regions. 
debug!("constrain_anon_types: least_region={:?}", least_region); @@ -435,8 +434,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { instantiated_ty: Ty<'gcx>, ) -> Ty<'gcx> { debug!( - "infer_anon_definition_from_instantiation(instantiated_ty={:?})", - instantiated_ty + "infer_anon_definition_from_instantiation(def_id={:?}, instantiated_ty={:?})", + def_id, instantiated_ty ); let gcx = self.tcx.global_tcx(); @@ -458,54 +457,187 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Convert the type from the function into a type valid outside // the function, by replacing invalid regions with 'static, // after producing an error for each of them. - let definition_ty = gcx.fold_regions(&instantiated_ty, &mut false, |r, _| { - match *r { - // 'static and early-bound regions are valid. - ty::ReStatic | ty::ReEmpty => r, - - // All other regions, we map them appropriately to their adjusted - // indices, erroring if we find any lifetimes that were not mapped - // into the new set. - _ => if let Some(r1) = map.get(&Kind::from(r)).and_then(|k| k.as_region()) { - r1 - } else { - // No mapping was found. This means that - // it is either a disallowed lifetime, - // which will be caught by regionck, or it - // is a region in a non-upvar closure - // generic, which is explicitly - // allowed. If that surprises you, read - // on. - // - // The case of closure is a somewhat - // subtle (read: hacky) consideration. The - // problem is that our closure types - // currently include all the lifetime - // parameters declared on the enclosing - // function, even if they are unused by - // the closure itself. We can't readily - // filter them out, so here we replace - // those values with `'empty`. This can't - // really make a difference to the rest of - // the compiler; those regions are ignored - // for the outlives relation, and hence - // don't affect trait selection or auto - // traits, and they are erased during - // trans. 
- gcx.types.re_empty - }, - } - }); - + let definition_ty = + instantiated_ty.fold_with(&mut ReverseMapper::new( + self.tcx, + self.is_tainted_by_errors(), + def_id, + map, + instantiated_ty, + )); debug!( "infer_anon_definition_from_instantiation: definition_ty={:?}", definition_ty ); + // We can unwrap here because our reverse mapper always + // produces things with 'gcx lifetime, though the type folder + // obscures that. + let definition_ty = gcx.lift(&definition_ty).unwrap(); + definition_ty } } +struct ReverseMapper<'cx, 'gcx: 'tcx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + + /// If errors have already been reported in this fn, we suppress + /// our own errors because they are sometimes derivative. + tainted_by_errors: bool, + + anon_type_def_id: DefId, + map: FxHashMap, Kind<'gcx>>, + map_missing_regions_to_empty: bool, + + /// initially `Some`, set to `None` once error has been reported + hidden_ty: Option>, +} + +impl<'cx, 'gcx, 'tcx> ReverseMapper<'cx, 'gcx, 'tcx> { + fn new( + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + tainted_by_errors: bool, + anon_type_def_id: DefId, + map: FxHashMap, Kind<'gcx>>, + hidden_ty: Ty<'tcx>, + ) -> Self { + Self { + tcx, + tainted_by_errors, + anon_type_def_id, + map, + map_missing_regions_to_empty: false, + hidden_ty: Some(hidden_ty), + } + } + + fn fold_kind_mapping_missing_regions_to_empty(&mut self, kind: Kind<'tcx>) -> Kind<'tcx> { + assert!(!self.map_missing_regions_to_empty); + self.map_missing_regions_to_empty = true; + let kind = kind.fold_with(self); + self.map_missing_regions_to_empty = false; + kind + } + + fn fold_kind_normally(&mut self, kind: Kind<'tcx>) -> Kind<'tcx> { + assert!(!self.map_missing_regions_to_empty); + kind.fold_with(self) + } +} + +impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for ReverseMapper<'cx, 'gcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx> { + self.tcx + } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + match r { + // ignore bound regions that appear in the 
type (e.g., this + // would ignore `'r` in a type like `for<'r> fn(&'r u32)`. + ty::ReLateBound(..) | + + // ignore `'static`, as that can appear anywhere + ty::ReStatic | + + // ignore `ReScope`, as that can appear anywhere + // See `src/test/run-pass/issue-49556.rs` for example. + ty::ReScope(..) => return r, + + _ => { } + } + + match self.map.get(&r.into()).map(|k| k.unpack()) { + Some(UnpackedKind::Lifetime(r1)) => r1, + Some(u) => panic!("region mapped to unexpected kind: {:?}", u), + None => { + if !self.map_missing_regions_to_empty && !self.tainted_by_errors { + if let Some(hidden_ty) = self.hidden_ty.take() { + let span = self.tcx.def_span(self.anon_type_def_id); + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0700, + "hidden type for `impl Trait` captures lifetime that \ + does not appear in bounds", + ); + + // Assuming regionck succeeded, then we must + // be capturing *some* region from the fn + // header, and hence it must be free, so it's + // ok to invoke this fn (which doesn't accept + // all regions, and would ICE if an + // inappropriate region is given). We check + // `is_tainted_by_errors` by errors above, so + // we don't get in here unless regionck + // succeeded. (Note also that if regionck + // failed, then the regions we are attempting + // to map here may well be giving errors + // *because* the constraints were not + // satisfiable.) + self.tcx.note_and_explain_free_region( + &mut err, + &format!("hidden type `{}` captures ", hidden_ty), + r, + "" + ); + + err.emit(); + } + } + self.tcx.types.re_empty + }, + } + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + match ty.sty { + ty::TyClosure(def_id, substs) => { + // I am a horrible monster and I pray for death. When + // we encounter a closure here, it is always a closure + // from within the function that we are currently + // type-checking -- one that is now being encapsulated + // in an existential abstract type. 
Ideally, we would + // go through the types/lifetimes that it references + // and treat them just like we would any other type, + // which means we would error out if we find any + // reference to a type/region that is not in the + // "reverse map". + // + // **However,** in the case of closures, there is a + // somewhat subtle (read: hacky) consideration. The + // problem is that our closure types currently include + // all the lifetime parameters declared on the + // enclosing function, even if they are unused by the + // closure itself. We can't readily filter them out, + // so here we replace those values with `'empty`. This + // can't really make a difference to the rest of the + // compiler; those regions are ignored for the + // outlives relation, and hence don't affect trait + // selection or auto traits, and they are erased + // during codegen. + + let generics = self.tcx.generics_of(def_id); + let substs = self.tcx.mk_substs(substs.substs.iter().enumerate().map( + |(index, &kind)| { + if index < generics.parent_count { + // Accommodate missing regions in the parent kinds... + self.fold_kind_mapping_missing_regions_to_empty(kind) + } else { + // ...but not elsewhere. 
+ self.fold_kind_normally(kind) + } + }, + )); + + self.tcx.mk_closure(def_id, ty::ClosureSubsts { substs }) + } + + _ => ty.super_fold_with(self), + } + } +} + struct Instantiator<'a, 'gcx: 'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, parent_def_id: DefId, @@ -521,6 +653,7 @@ impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { let tcx = self.infcx.tcx; value.fold_with(&mut BottomUpFolder { tcx, + reg_op: |reg| reg, fldop: |ty| { if let ty::TyAnon(def_id, substs) = ty.sty { // Check that this is `impl Trait` type is @@ -558,18 +691,52 @@ impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { // } // ``` if let Some(anon_node_id) = tcx.hir.as_local_node_id(def_id) { - let anon_parent_node_id = tcx.hir.get_parent(anon_node_id); - let anon_parent_def_id = tcx.hir.local_def_id(anon_parent_node_id); - if self.parent_def_id == anon_parent_def_id { + let parent_def_id = self.parent_def_id; + let def_scope_default = || { + let anon_parent_node_id = tcx.hir.get_parent(anon_node_id); + parent_def_id == tcx.hir.local_def_id(anon_parent_node_id) + }; + let in_definition_scope = match tcx.hir.find(anon_node_id) { + Some(hir::map::NodeItem(item)) => match item.node { + // impl trait + hir::ItemKind::Existential(hir::ExistTy { + impl_trait_fn: Some(parent), + .. + }) => parent == self.parent_def_id, + // named existential types + hir::ItemKind::Existential(hir::ExistTy { + impl_trait_fn: None, + .. 
+ }) => may_define_existential_type( + tcx, + self.parent_def_id, + anon_node_id, + ), + _ => def_scope_default(), + }, + Some(hir::map::NodeImplItem(item)) => match item.node { + hir::ImplItemKind::Existential(_) => may_define_existential_type( + tcx, + self.parent_def_id, + anon_node_id, + ), + _ => def_scope_default(), + }, + _ => bug!( + "expected (impl) item, found {}", + tcx.hir.node_to_string(anon_node_id), + ), + }; + if in_definition_scope { return self.fold_anon_ty(ty, def_id, substs); } - debug!("instantiate_anon_types_in_map: \ - encountered anon with wrong parent \ - def_id={:?} \ - anon_parent_def_id={:?}", - def_id, - anon_parent_def_id); + debug!( + "instantiate_anon_types_in_map: \ + encountered anon outside it's definition scope \ + def_id={:?}", + def_id, + ); } } @@ -589,8 +756,7 @@ impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { debug!( "instantiate_anon_types: TyAnon(def_id={:?}, substs={:?})", - def_id, - substs + def_id, substs ); // Use the same type variable if the exact same TyAnon appears more @@ -602,6 +768,10 @@ impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { let ty_var = infcx.next_ty_var(TypeVariableOrigin::TypeInference(span)); let predicates_of = tcx.predicates_of(def_id); + debug!( + "instantiate_anon_types: predicates: {:#?}", + predicates_of, + ); let bounds = predicates_of.instantiate(tcx, substs); debug!("instantiate_anon_types: bounds={:?}", bounds); @@ -611,6 +781,18 @@ impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { required_region_bounds ); + // make sure that we are in fact defining the *entire* type + // e.g. `existential type Foo: Bar;` needs to be + // defined by a function like `fn foo() -> Foo`. 
+ debug!( + "instantiate_anon_types: param_env: {:#?}", + self.param_env, + ); + debug!( + "instantiate_anon_types: generics: {:#?}", + tcx.generics_of(def_id), + ); + self.anon_types.insert( def_id, AnonTypeDecl { @@ -638,3 +820,42 @@ impl<'a, 'gcx, 'tcx> Instantiator<'a, 'gcx, 'tcx> { ty_var } } + +/// Whether `anon_node_id` is a sibling or a child of a sibling of `def_id` +/// +/// ```rust +/// pub mod foo { +/// pub mod bar { +/// pub existential type Baz; +/// +/// fn f1() -> Baz { .. } +/// } +/// +/// fn f2() -> bar::Baz { .. } +/// } +/// ``` +/// +/// Here, `def_id` will be the `DefId` of the existential type `Baz`. +/// `anon_node_id` is the `NodeId` of the reference to Baz -- so either the return type of f1 or f2. +/// We will return true if the reference is within the same module as the existential type +/// So true for f1, false for f2. +pub fn may_define_existential_type( + tcx: TyCtxt, + def_id: DefId, + anon_node_id: ast::NodeId, +) -> bool { + let mut node_id = tcx + .hir + .as_local_node_id(def_id) + .unwrap(); + // named existential types can be defined by any siblings or + // children of siblings + let mod_id = tcx.hir.get_parent(anon_node_id); + // so we walk up the node tree until we hit the root or the parent + // of the anon type + while node_id != mod_id && node_id != ast::CRATE_NODE_ID { + node_id = tcx.hir.get_parent(node_id); + } + // syntactically we are allowed to define the concrete type + node_id == mod_id +} diff --git a/src/librustc/infer/at.rs b/src/librustc/infer/at.rs index 3fd7ee276729..89dbc76c8a65 100644 --- a/src/librustc/infer/at.rs +++ b/src/librustc/infer/at.rs @@ -40,9 +40,9 @@ use super::*; use ty::relate::{Relate, TypeRelation}; pub struct At<'a, 'gcx: 'tcx, 'tcx: 'a> { - infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - cause: &'a ObligationCause<'tcx>, - param_env: ty::ParamEnv<'tcx>, + pub infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + pub cause: &'a ObligationCause<'tcx>, + pub param_env: ty::ParamEnv<'tcx>, } pub struct Trace<'a, 
'gcx: 'tcx, 'tcx: 'a> { @@ -281,6 +281,20 @@ impl<'tcx> ToTrace<'tcx> for Ty<'tcx> { } } +impl<'tcx> ToTrace<'tcx> for ty::Region<'tcx> { + fn to_trace(cause: &ObligationCause<'tcx>, + a_is_expected: bool, + a: Self, + b: Self) + -> TypeTrace<'tcx> + { + TypeTrace { + cause: cause.clone(), + values: Regions(ExpectedFound::new(a_is_expected, a, b)) + } + } +} + impl<'tcx> ToTrace<'tcx> for ty::TraitRef<'tcx> { fn to_trace(cause: &ObligationCause<'tcx>, a_is_expected: bool, diff --git a/src/librustc/infer/canonical/canonicalizer.rs b/src/librustc/infer/canonical/canonicalizer.rs new file mode 100644 index 000000000000..c4de95c60bff --- /dev/null +++ b/src/librustc/infer/canonical/canonicalizer.rs @@ -0,0 +1,439 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module contains the "canonicalizer" itself. +//! +//! For an overview of what canonicaliation is and how it fits into +//! rustc, check out the [chapter in the rustc guide][c]. +//! +//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html + +use infer::canonical::{ + Canonical, CanonicalTyVarKind, CanonicalVarInfo, CanonicalVarKind, Canonicalized, + SmallCanonicalVarValues, +}; +use infer::InferCtxt; +use std::sync::atomic::Ordering; +use ty::fold::{TypeFoldable, TypeFolder}; +use ty::subst::Kind; +use ty::{self, CanonicalVar, Lift, Slice, Ty, TyCtxt, TypeFlags}; + +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::small_vec::SmallVec; + +impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { + /// Canonicalizes a query value `V`. 
When we canonicalize a query, + /// we not only canonicalize unbound inference variables, but we + /// *also* replace all free regions whatsoever. So for example a + /// query like `T: Trait<'static>` would be canonicalized to + /// + /// ```text + /// T: Trait<'?0> + /// ``` + /// + /// with a mapping M that maps `'?0` to `'static`. + /// + /// To get a good understanding of what is happening here, check + /// out the [chapter in the rustc guide][c]. + /// + /// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html#canonicalizing-the-query + pub fn canonicalize_query( + &self, + value: &V, + var_values: &mut SmallCanonicalVarValues<'tcx> + ) -> Canonicalized<'gcx, V> + where + V: TypeFoldable<'tcx> + Lift<'gcx>, + { + self.tcx + .sess + .perf_stats + .queries_canonicalized + .fetch_add(1, Ordering::Relaxed); + + Canonicalizer::canonicalize( + value, + Some(self), + self.tcx, + CanonicalizeRegionMode { + static_region: true, + other_free_regions: true, + }, + var_values, + ) + } + + /// Canonicalizes a query *response* `V`. When we canonicalize a + /// query response, we only canonicalize unbound inference + /// variables, and we leave other free regions alone. So, + /// continuing with the example from `canonicalize_query`, if + /// there was an input query `T: Trait<'static>`, it would have + /// been canonicalized to + /// + /// ```text + /// T: Trait<'?0> + /// ``` + /// + /// with a mapping M that maps `'?0` to `'static`. But if we found that there + /// exists only one possible impl of `Trait`, and it looks like + /// + /// impl Trait<'static> for T { .. } + /// + /// then we would prepare a query result R that (among other + /// things) includes a mapping to `'?0 := 'static`. When + /// canonicalizing this query result R, we would leave this + /// reference to `'static` alone. + /// + /// To get a good understanding of what is happening here, check + /// out the [chapter in the rustc guide][c]. 
+ /// + /// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html#canonicalizing-the-query-result + pub fn canonicalize_response( + &self, + value: &V, + ) -> Canonicalized<'gcx, V> + where + V: TypeFoldable<'tcx> + Lift<'gcx>, + { + let mut var_values = SmallVec::new(); + Canonicalizer::canonicalize( + value, + Some(self), + self.tcx, + CanonicalizeRegionMode { + static_region: false, + other_free_regions: false, + }, + &mut var_values + ) + } + + /// A hacky variant of `canonicalize_query` that does not + /// canonicalize `'static`. Unfortunately, the existing leak + /// check treaks `'static` differently in some cases (see also + /// #33684), so if we are performing an operation that may need to + /// prove "leak-check" related things, we leave `'static` + /// alone. + /// + /// FIXME(#48536) -- once we have universes, we can remove this and just use + /// `canonicalize_query`. + pub fn canonicalize_hr_query_hack( + &self, + value: &V, + var_values: &mut SmallCanonicalVarValues<'tcx> + ) -> Canonicalized<'gcx, V> + where + V: TypeFoldable<'tcx> + Lift<'gcx>, + { + self.tcx + .sess + .perf_stats + .queries_canonicalized + .fetch_add(1, Ordering::Relaxed); + + Canonicalizer::canonicalize( + value, + Some(self), + self.tcx, + CanonicalizeRegionMode { + static_region: false, + other_free_regions: true, + }, + var_values + ) + } +} + +/// If this flag is true, then all free regions will be replaced with +/// a canonical var. This is used to make queries as generic as +/// possible. For example, the query `F: Foo<'static>` would be +/// canonicalized to `F: Foo<'0>`. 
+struct CanonicalizeRegionMode { + static_region: bool, + other_free_regions: bool, +} + +impl CanonicalizeRegionMode { + fn any(&self) -> bool { + self.static_region || self.other_free_regions + } +} + +struct Canonicalizer<'cx, 'gcx: 'tcx, 'tcx: 'cx> { + infcx: Option<&'cx InferCtxt<'cx, 'gcx, 'tcx>>, + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + variables: SmallVec<[CanonicalVarInfo; 8]>, + var_values: &'cx mut SmallCanonicalVarValues<'tcx>, + // Note that indices is only used once `var_values` is big enough to be + // heap-allocated. + indices: FxHashMap, CanonicalVar>, + canonicalize_region_mode: CanonicalizeRegionMode, + needs_canonical_flags: TypeFlags, +} + +impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Canonicalizer<'cx, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { + self.tcx + } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + match *r { + ty::ReLateBound(..) => { + // leave bound regions alone + r + } + + ty::ReVar(vid) => { + let r = self + .infcx + .unwrap() + .borrow_region_constraints() + .opportunistic_resolve_var(self.tcx, vid); + let info = CanonicalVarInfo { + kind: CanonicalVarKind::Region, + }; + debug!( + "canonical: region var found with vid {:?}, \ + opportunistically resolved to {:?}", + vid, r + ); + let cvar = self.canonical_var(info, r.into()); + self.tcx().mk_region(ty::ReCanonical(cvar)) + } + + ty::ReStatic => { + if self.canonicalize_region_mode.static_region { + let info = CanonicalVarInfo { + kind: CanonicalVarKind::Region, + }; + let cvar = self.canonical_var(info, r.into()); + self.tcx().mk_region(ty::ReCanonical(cvar)) + } else { + r + } + } + + ty::ReEarlyBound(..) + | ty::ReFree(_) + | ty::ReScope(_) + | ty::ReSkolemized(..) 
+ | ty::ReEmpty + | ty::ReErased => { + if self.canonicalize_region_mode.other_free_regions { + let info = CanonicalVarInfo { + kind: CanonicalVarKind::Region, + }; + let cvar = self.canonical_var(info, r.into()); + self.tcx().mk_region(ty::ReCanonical(cvar)) + } else { + r + } + } + + ty::ReClosureBound(..) | ty::ReCanonical(_) => { + bug!("canonical region encountered during canonicalization") + } + } + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + match t.sty { + ty::TyInfer(ty::TyVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::General, t), + + ty::TyInfer(ty::IntVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Int, t), + + ty::TyInfer(ty::FloatVar(_)) => self.canonicalize_ty_var(CanonicalTyVarKind::Float, t), + + ty::TyInfer(ty::FreshTy(_)) + | ty::TyInfer(ty::FreshIntTy(_)) + | ty::TyInfer(ty::FreshFloatTy(_)) => { + bug!("encountered a fresh type during canonicalization") + } + + ty::TyInfer(ty::CanonicalTy(_)) => { + bug!("encountered a canonical type during canonicalization") + } + + ty::TyClosure(..) + | ty::TyGenerator(..) + | ty::TyGeneratorWitness(..) + | ty::TyBool + | ty::TyChar + | ty::TyInt(..) + | ty::TyUint(..) + | ty::TyFloat(..) + | ty::TyAdt(..) + | ty::TyStr + | ty::TyError + | ty::TyArray(..) + | ty::TySlice(..) + | ty::TyRawPtr(..) + | ty::TyRef(..) + | ty::TyFnDef(..) + | ty::TyFnPtr(_) + | ty::TyDynamic(..) + | ty::TyNever + | ty::TyTuple(..) + | ty::TyProjection(..) + | ty::TyForeign(..) + | ty::TyParam(..) + | ty::TyAnon(..) => { + if t.flags.intersects(self.needs_canonical_flags) { + t.super_fold_with(self) + } else { + t + } + } + } + } +} + +impl<'cx, 'gcx, 'tcx> Canonicalizer<'cx, 'gcx, 'tcx> { + /// The main `canonicalize` method, shared impl of + /// `canonicalize_query` and `canonicalize_response`. 
+ fn canonicalize( + value: &V, + infcx: Option<&'cx InferCtxt<'cx, 'gcx, 'tcx>>, + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + canonicalize_region_mode: CanonicalizeRegionMode, + var_values: &'cx mut SmallCanonicalVarValues<'tcx> + ) -> Canonicalized<'gcx, V> + where + V: TypeFoldable<'tcx> + Lift<'gcx>, + { + debug_assert!( + !value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS), + "canonicalizing a canonical value: {:?}", + value, + ); + + let needs_canonical_flags = if canonicalize_region_mode.any() { + TypeFlags::HAS_FREE_REGIONS | TypeFlags::KEEP_IN_LOCAL_TCX + } else { + TypeFlags::KEEP_IN_LOCAL_TCX + }; + + let gcx = tcx.global_tcx(); + + // Fast path: nothing that needs to be canonicalized. + if !value.has_type_flags(needs_canonical_flags) { + let out_value = gcx.lift(value).unwrap(); + let canon_value = Canonical { + variables: Slice::empty(), + value: out_value, + }; + return canon_value; + } + + let mut canonicalizer = Canonicalizer { + infcx, + tcx, + canonicalize_region_mode, + needs_canonical_flags, + variables: SmallVec::new(), + var_values, + indices: FxHashMap::default(), + }; + let out_value = value.fold_with(&mut canonicalizer); + + // Once we have canonicalized `out_value`, it should not + // contain anything that ties it to this inference context + // anymore, so it should live in the global arena. + let out_value = gcx.lift(&out_value).unwrap_or_else(|| { + bug!( + "failed to lift `{:?}`, canonicalized from `{:?}`", + out_value, + value + ) + }); + + let canonical_variables = tcx.intern_canonical_var_infos(&canonicalizer.variables); + + Canonical { + variables: canonical_variables, + value: out_value, + } + } + + /// Creates a canonical variable replacing `kind` from the input, + /// or returns an existing variable if `kind` has already been + /// seen. `kind` is expected to be an unbound variable (or + /// potentially a free region). 
+ fn canonical_var(&mut self, info: CanonicalVarInfo, kind: Kind<'tcx>) -> CanonicalVar { + let Canonicalizer { + variables, + var_values, + indices, + .. + } = self; + + // This code is hot. `variables` and `var_values` are usually small + // (fewer than 8 elements ~95% of the time). They are SmallVec's to + // avoid allocations in those cases. We also don't use `indices` to + // determine if a kind has been seen before until the limit of 8 has + // been exceeded, to also avoid allocations for `indices`. + if var_values.is_array() { + // `var_values` is stack-allocated. `indices` isn't used yet. Do a + // direct linear search of `var_values`. + if let Some(idx) = var_values.iter().position(|&k| k == kind) { + // `kind` is already present in `var_values`. + CanonicalVar::new(idx) + } else { + // `kind` isn't present in `var_values`. Append it. Likewise + // for `info` and `variables`. + variables.push(info); + var_values.push(kind); + assert_eq!(variables.len(), var_values.len()); + + // If `var_values` has become big enough to be heap-allocated, + // fill up `indices` to facilitate subsequent lookups. + if !var_values.is_array() { + assert!(indices.is_empty()); + *indices = + var_values.iter() + .enumerate() + .map(|(i, &kind)| (kind, CanonicalVar::new(i))) + .collect(); + } + // The cv is the index of the appended element. + CanonicalVar::new(var_values.len() - 1) + } + } else { + // `var_values` is large. Do a hashmap search via `indices`. + *indices + .entry(kind) + .or_insert_with(|| { + variables.push(info); + var_values.push(kind); + assert_eq!(variables.len(), var_values.len()); + CanonicalVar::new(variables.len() - 1) + }) + } + } + + /// Given a type variable `ty_var` of the given kind, first check + /// if `ty_var` is bound to anything; if so, canonicalize + /// *that*. Otherwise, create a new canonical variable for + /// `ty_var`. 
+ fn canonicalize_ty_var(&mut self, ty_kind: CanonicalTyVarKind, ty_var: Ty<'tcx>) -> Ty<'tcx> { + let infcx = self.infcx.expect("encountered ty-var without infcx"); + let bound_to = infcx.shallow_resolve(ty_var); + if bound_to != ty_var { + self.fold_ty(bound_to) + } else { + let info = CanonicalVarInfo { + kind: CanonicalVarKind::Ty(ty_kind), + }; + let cvar = self.canonical_var(info, ty_var.into()); + self.tcx().mk_infer(ty::InferTy::CanonicalTy(cvar)) + } + } +} diff --git a/src/librustc/infer/canonical/mod.rs b/src/librustc/infer/canonical/mod.rs new file mode 100644 index 000000000000..958b33910605 --- /dev/null +++ b/src/librustc/infer/canonical/mod.rs @@ -0,0 +1,335 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! **Canonicalization** is the key to constructing a query in the +//! middle of type inference. Ordinarily, it is not possible to store +//! types from type inference in query keys, because they contain +//! references to inference variables whose lifetimes are too short +//! and so forth. Canonicalizing a value T1 using `canonicalize_query` +//! produces two things: +//! +//! - a value T2 where each unbound inference variable has been +//! replaced with a **canonical variable**; +//! - a map M (of type `CanonicalVarValues`) from those canonical +//! variables back to the original. +//! +//! We can then do queries using T2. These will give back constriants +//! on the canonical variables which can be translated, using the map +//! M, into constraints in our source context. This process of +//! translating the results back is done by the +//! `instantiate_query_result` method. +//! +//! 
For a more detailed look at what is happening here, check
+//! out the [chapter in the rustc guide][c].
+//!
+//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html
+
+use infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin};
+use rustc_data_structures::indexed_vec::IndexVec;
+use rustc_data_structures::small_vec::SmallVec;
+use rustc_data_structures::sync::Lrc;
+use serialize::UseSpecializedDecodable;
+use std::ops::Index;
+use syntax::codemap::Span;
+use ty::fold::TypeFoldable;
+use ty::subst::Kind;
+use ty::{self, CanonicalVar, Lift, Region, Slice, TyCtxt};
+
+mod canonicalizer;
+
+pub mod query_result;
+
+mod substitute;
+
+/// A "canonicalized" type `V` is one where all free inference
+/// variables have been rewritten to "canonical vars". These are
+/// numbered starting from 0 in order of first appearance.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
+pub struct Canonical<'gcx, V> {
+    pub variables: CanonicalVarInfos<'gcx>,
+    pub value: V,
+}
+
+pub type CanonicalVarInfos<'gcx> = &'gcx Slice<CanonicalVarInfo>;
+
+impl<'gcx> UseSpecializedDecodable for CanonicalVarInfos<'gcx> {}
+
+/// A set of values corresponding to the canonical variables from some
+/// `Canonical`. You can give these values to
+/// `canonical_value.substitute` to substitute them into the canonical
+/// value at the right places.
+///
+/// When you canonicalize a value `V`, you get back one of these
+/// vectors with the original values that were replaced by canonical
+/// variables. You will need to supply it later to instantiate the
+/// canonicalized query response.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)]
+pub struct CanonicalVarValues<'tcx> {
+    pub var_values: IndexVec<CanonicalVar, Kind<'tcx>>,
+}
+
+/// Like CanonicalVarValues, but for use in places where a SmallVec is
+/// appropriate.
+pub type SmallCanonicalVarValues<'tcx> = SmallVec<[Kind<'tcx>; 8]>; + +/// Information about a canonical variable that is included with the +/// canonical value. This is sufficient information for code to create +/// a copy of the canonical value in some other inference context, +/// with fresh inference variables replacing the canonical values. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] +pub struct CanonicalVarInfo { + pub kind: CanonicalVarKind, +} + +/// Describes the "kind" of the canonical variable. This is a "kind" +/// in the type-theory sense of the term -- i.e., a "meta" type system +/// that analyzes type-like values. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] +pub enum CanonicalVarKind { + /// Some kind of type inference variable. + Ty(CanonicalTyVarKind), + + /// Region variable `'?R`. + Region, +} + +/// Rust actually has more than one category of type variables; +/// notably, the type variables we create for literals (e.g., 22 or +/// 22.) can only be instantiated with integral/float types (e.g., +/// usize or f32). In order to faithfully reproduce a type, we need to +/// know what set of types a given type variable can be unified with. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcDecodable, RustcEncodable)] +pub enum CanonicalTyVarKind { + /// General type variable `?T` that can be unified with arbitrary types. + General, + + /// Integral type variable `?I` (that can only be unified with integral types). + Int, + + /// Floating-point type variable `?F` (that can only be unified with float types). + Float, +} + +/// After we execute a query with a canonicalized key, we get back a +/// `Canonical>`. You can use +/// `instantiate_query_result` to access the data in this result. 
+#[derive(Clone, Debug)]
+pub struct QueryResult<'tcx, R> {
+    pub var_values: CanonicalVarValues<'tcx>,
+    pub region_constraints: Vec<QueryRegionConstraint<'tcx>>,
+    pub certainty: Certainty,
+    pub value: R,
+}
+
+pub type Canonicalized<'gcx, V> = Canonical<'gcx, <V as Lift<'gcx>>::Lifted>;
+
+pub type CanonicalizedQueryResult<'gcx, T> =
+    Lrc<Canonical<'gcx, QueryResult<'gcx, <T as Lift<'gcx>>::Lifted>>>;
+
+/// Indicates whether or not we were able to prove the query to be
+/// true.
+#[derive(Copy, Clone, Debug)]
+pub enum Certainty {
+    /// The query is known to be true, presuming that you apply the
+    /// given `var_values` and the region-constraints are satisfied.
+    Proven,
+
+    /// The query is not known to be true, but also not known to be
+    /// false. The `var_values` represent *either* values that must
+    /// hold in order for the query to be true, or helpful tips that
+    /// *might* make it true. Currently rustc's trait solver cannot
+    /// distinguish the two (e.g., due to our preference for where
+    /// clauses over impls).
+    ///
+    /// After some unifications and things have been done, it makes
+    /// sense to try and prove again -- of course, at that point, the
+    /// canonical form will be different, making this a distinct
+    /// query.
+ Ambiguous, +} + +impl Certainty { + pub fn is_proven(&self) -> bool { + match self { + Certainty::Proven => true, + Certainty::Ambiguous => false, + } + } + + pub fn is_ambiguous(&self) -> bool { + !self.is_proven() + } +} + +impl<'tcx, R> QueryResult<'tcx, R> { + pub fn is_proven(&self) -> bool { + self.certainty.is_proven() + } + + pub fn is_ambiguous(&self) -> bool { + !self.is_proven() + } +} + +impl<'tcx, R> Canonical<'tcx, QueryResult<'tcx, R>> { + pub fn is_proven(&self) -> bool { + self.value.is_proven() + } + + pub fn is_ambiguous(&self) -> bool { + !self.is_proven() + } +} + +pub type QueryRegionConstraint<'tcx> = ty::Binder, Region<'tcx>>>; + +impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { + /// Creates a substitution S for the canonical value with fresh + /// inference variables and applies it to the canonical value. + /// Returns both the instantiated result *and* the substitution S. + /// + /// This is useful at the start of a query: it basically brings + /// the canonical value "into scope" within your new infcx. At the + /// end of processing, the substitution S (once canonicalized) + /// then represents the values that you computed for each of the + /// canonical inputs to your query. + pub fn instantiate_canonical_with_fresh_inference_vars( + &self, + span: Span, + canonical: &Canonical<'tcx, T>, + ) -> (T, CanonicalVarValues<'tcx>) + where + T: TypeFoldable<'tcx>, + { + let canonical_inference_vars = + self.fresh_inference_vars_for_canonical_vars(span, canonical.variables); + let result = canonical.substitute(self.tcx, &canonical_inference_vars); + (result, canonical_inference_vars) + } + + /// Given the "infos" about the canonical variables from some + /// canonical, creates fresh inference variables with the same + /// characteristics. You can then use `substitute` to instantiate + /// the canonical variable with these inference variables. 
+ fn fresh_inference_vars_for_canonical_vars( + &self, + span: Span, + variables: &Slice, + ) -> CanonicalVarValues<'tcx> { + let var_values: IndexVec> = variables + .iter() + .map(|info| self.fresh_inference_var_for_canonical_var(span, *info)) + .collect(); + + CanonicalVarValues { var_values } + } + + /// Given the "info" about a canonical variable, creates a fresh + /// inference variable with the same characteristics. + fn fresh_inference_var_for_canonical_var( + &self, + span: Span, + cv_info: CanonicalVarInfo, + ) -> Kind<'tcx> { + match cv_info.kind { + CanonicalVarKind::Ty(ty_kind) => { + let ty = match ty_kind { + CanonicalTyVarKind::General => { + self.next_ty_var(TypeVariableOrigin::MiscVariable(span)) + } + + CanonicalTyVarKind::Int => self.tcx.mk_int_var(self.next_int_var_id()), + + CanonicalTyVarKind::Float => self.tcx.mk_float_var(self.next_float_var_id()), + }; + ty.into() + } + + CanonicalVarKind::Region => self + .next_region_var(RegionVariableOrigin::MiscVariable(span)) + .into(), + } + } +} + +CloneTypeFoldableAndLiftImpls! { + ::infer::canonical::Certainty, + ::infer::canonical::CanonicalVarInfo, + ::infer::canonical::CanonicalVarKind, +} + +CloneTypeFoldableImpls! { + for <'tcx> { + ::infer::canonical::CanonicalVarInfos<'tcx>, + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, C> TypeFoldable<'tcx> for Canonical<'tcx, C> { + variables, + value, + } where C: TypeFoldable<'tcx> +} + +BraceStructLiftImpl! { + impl<'a, 'tcx, T> Lift<'tcx> for Canonical<'a, T> { + type Lifted = Canonical<'tcx, T::Lifted>; + variables, value + } where T: Lift<'tcx> +} + +impl<'tcx> CanonicalVarValues<'tcx> { + fn len(&self) -> usize { + self.var_values.len() + } +} + +impl<'a, 'tcx> IntoIterator for &'a CanonicalVarValues<'tcx> { + type Item = Kind<'tcx>; + type IntoIter = ::std::iter::Cloned<::std::slice::Iter<'a, Kind<'tcx>>>; + + fn into_iter(self) -> Self::IntoIter { + self.var_values.iter().cloned() + } +} + +BraceStructLiftImpl! 
{ + impl<'a, 'tcx> Lift<'tcx> for CanonicalVarValues<'a> { + type Lifted = CanonicalVarValues<'tcx>; + var_values, + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for CanonicalVarValues<'tcx> { + var_values, + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, R> TypeFoldable<'tcx> for QueryResult<'tcx, R> { + var_values, region_constraints, certainty, value + } where R: TypeFoldable<'tcx>, +} + +BraceStructLiftImpl! { + impl<'a, 'tcx, R> Lift<'tcx> for QueryResult<'a, R> { + type Lifted = QueryResult<'tcx, R::Lifted>; + var_values, region_constraints, certainty, value + } where R: Lift<'tcx> +} + +impl<'tcx> Index for CanonicalVarValues<'tcx> { + type Output = Kind<'tcx>; + + fn index(&self, value: CanonicalVar) -> &Kind<'tcx> { + &self.var_values[value] + } +} diff --git a/src/librustc/infer/canonical/query_result.rs b/src/librustc/infer/canonical/query_result.rs new file mode 100644 index 000000000000..f0b6d25e9dae --- /dev/null +++ b/src/librustc/infer/canonical/query_result.rs @@ -0,0 +1,610 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module contains the code to instantiate a "query result", and +//! in particular to extract out the resulting region obligations and +//! encode them therein. +//! +//! For an overview of what canonicaliation is and how it fits into +//! rustc, check out the [chapter in the rustc guide][c]. +//! +//! 
[c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html + +use infer::canonical::substitute::substitute_value; +use infer::canonical::{ + Canonical, CanonicalVarKind, CanonicalVarValues, CanonicalizedQueryResult, Certainty, + QueryRegionConstraint, QueryResult, SmallCanonicalVarValues, +}; +use infer::region_constraints::{Constraint, RegionConstraintData}; +use infer::InferCtxtBuilder; +use infer::{InferCtxt, InferOk, InferResult, RegionObligation}; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::sync::Lrc; +use std::fmt::Debug; +use syntax::ast; +use syntax_pos::DUMMY_SP; +use traits::query::{Fallible, NoSolution}; +use traits::{FulfillmentContext, TraitEngine}; +use traits::{Obligation, ObligationCause, PredicateObligation}; +use ty::fold::TypeFoldable; +use ty::subst::{Kind, UnpackedKind}; +use ty::{self, CanonicalVar, Lift, TyCtxt}; + +impl<'cx, 'gcx, 'tcx> InferCtxtBuilder<'cx, 'gcx, 'tcx> { + /// The "main method" for a canonicalized trait query. Given the + /// canonical key `canonical_key`, this method will create a new + /// inference context, instantiate the key, and run your operation + /// `op`. The operation should yield up a result (of type `R`) as + /// well as a set of trait obligations that must be fully + /// satisfied. These obligations will be processed and the + /// canonical result created. + /// + /// Returns `NoSolution` in the event of any error. + /// + /// (It might be mildly nicer to implement this on `TyCtxt`, and + /// not `InferCtxtBuilder`, but that is a bit tricky right now. + /// In part because we would need a `for<'gcx: 'tcx>` sort of + /// bound for the closure and in part because it is convenient to + /// have `'tcx` be free on this function so that we can talk about + /// `K: TypeFoldable<'tcx>`.) 
+ pub fn enter_canonical_trait_query( + &'tcx mut self, + canonical_key: &Canonical<'tcx, K>, + operation: impl FnOnce(&InferCtxt<'_, 'gcx, 'tcx>, &mut FulfillmentContext<'tcx>, K) + -> Fallible, + ) -> Fallible> + where + K: TypeFoldable<'tcx>, + R: Debug + Lift<'gcx> + TypeFoldable<'tcx>, + { + self.enter(|ref infcx| { + let (key, canonical_inference_vars) = + infcx.instantiate_canonical_with_fresh_inference_vars(DUMMY_SP, &canonical_key); + let fulfill_cx = &mut FulfillmentContext::new(); + let value = operation(infcx, fulfill_cx, key)?; + infcx.make_canonicalized_query_result(canonical_inference_vars, value, fulfill_cx) + }) + } +} + +impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { + /// This method is meant to be invoked as the final step of a canonical query + /// implementation. It is given: + /// + /// - the instantiated variables `inference_vars` created from the query key + /// - the result `answer` of the query + /// - a fulfillment context `fulfill_cx` that may contain various obligations which + /// have yet to be proven. + /// + /// Given this, the function will process the obligations pending + /// in `fulfill_cx`: + /// + /// - If all the obligations can be proven successfully, it will + /// package up any resulting region obligations (extracted from + /// `infcx`) along with the fully resolved value `answer` into a + /// query result (which is then itself canonicalized). + /// - If some obligations can be neither proven nor disproven, then + /// the same thing happens, but the resulting query is marked as ambiguous. + /// - Finally, if any of the obligations result in a hard error, + /// then `Err(NoSolution)` is returned. 
+ pub fn make_canonicalized_query_result( + &self, + inference_vars: CanonicalVarValues<'tcx>, + answer: T, + fulfill_cx: &mut FulfillmentContext<'tcx>, + ) -> Fallible> + where + T: Debug + Lift<'gcx> + TypeFoldable<'tcx>, + { + let query_result = self.make_query_result(inference_vars, answer, fulfill_cx)?; + let canonical_result = self.canonicalize_response(&query_result); + + debug!( + "make_canonicalized_query_result: canonical_result = {:#?}", + canonical_result + ); + + Ok(Lrc::new(canonical_result)) + } + + /// Helper for `make_canonicalized_query_result` that does + /// everything up until the final canonicalization. + fn make_query_result( + &self, + inference_vars: CanonicalVarValues<'tcx>, + answer: T, + fulfill_cx: &mut FulfillmentContext<'tcx>, + ) -> Result, NoSolution> + where + T: Debug + TypeFoldable<'tcx> + Lift<'gcx>, + { + let tcx = self.tcx; + + debug!( + "make_query_result(\ + inference_vars={:?}, \ + answer={:?})", + inference_vars, answer, + ); + + // Select everything, returning errors. + let true_errors = match fulfill_cx.select_where_possible(self) { + Ok(()) => vec![], + Err(errors) => errors, + }; + debug!("true_errors = {:#?}", true_errors); + + if !true_errors.is_empty() { + // FIXME -- we don't indicate *why* we failed to solve + debug!("make_query_result: true_errors={:#?}", true_errors); + return Err(NoSolution); + } + + // Anything left unselected *now* must be an ambiguity. 
+ let ambig_errors = match fulfill_cx.select_all_or_error(self) { + Ok(()) => vec![], + Err(errors) => errors, + }; + debug!("ambig_errors = {:#?}", ambig_errors); + + let region_obligations = self.take_registered_region_obligations(); + let region_constraints = self.with_region_constraints(|region_constraints| { + make_query_outlives(tcx, region_obligations, region_constraints) + }); + + let certainty = if ambig_errors.is_empty() { + Certainty::Proven + } else { + Certainty::Ambiguous + }; + + Ok(QueryResult { + var_values: inference_vars, + region_constraints, + certainty, + value: answer, + }) + } + + /// Given the (canonicalized) result to a canonical query, + /// instantiates the result so it can be used, plugging in the + /// values from the canonical query. (Note that the result may + /// have been ambiguous; you should check the certainty level of + /// the query before applying this function.) + /// + /// To get a good understanding of what is happening here, check + /// out the [chapter in the rustc guide][c]. 
+ /// + /// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html#processing-the-canonicalized-query-result + pub fn instantiate_query_result_and_region_obligations( + &self, + cause: &ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + original_values: &SmallCanonicalVarValues<'tcx>, + query_result: &Canonical<'tcx, QueryResult<'tcx, R>>, + ) -> InferResult<'tcx, R> + where + R: Debug + TypeFoldable<'tcx>, + { + let InferOk { + value: result_subst, + mut obligations, + } = self.query_result_substitution(cause, param_env, original_values, query_result)?; + + obligations.extend(self.query_region_constraints_into_obligations( + cause, + param_env, + &query_result.value.region_constraints, + &result_subst, + )); + + let user_result: R = + query_result.substitute_projected(self.tcx, &result_subst, |q_r| &q_r.value); + + Ok(InferOk { + value: user_result, + obligations, + }) + } + + /// An alternative to + /// `instantiate_query_result_and_region_obligations` that is more + /// efficient for NLL. NLL is a bit more advanced in the + /// "transition to chalk" than the rest of the compiler. During + /// the NLL type check, all of the "processing" of types and + /// things happens in queries -- the NLL checker itself is only + /// interested in the region obligations (`'a: 'b` or `T: 'b`) + /// that come out of these queries, which it wants to convert into + /// MIR-based constraints and solve. Therefore, it is most + /// convenient for the NLL Type Checker to **directly consume** + /// the `QueryRegionConstraint` values that arise from doing a + /// query. This is contrast to other parts of the compiler, which + /// would prefer for those `QueryRegionConstraint` to be converted + /// into the older infcx-style constraints (e.g., calls to + /// `sub_regions` or `register_region_obligation`). 
+ /// + /// Therefore, `instantiate_nll_query_result_and_region_obligations` performs the same + /// basic operations as `instantiate_query_result_and_region_obligations` but + /// it returns its result differently: + /// + /// - It creates a substitution `S` that maps from the original + /// query variables to the values computed in the query + /// result. If any errors arise, they are propagated back as an + /// `Err` result. + /// - In the case of a successful substitution, we will append + /// `QueryRegionConstraint` values onto the + /// `output_query_region_constraints` vector for the solver to + /// use (if an error arises, some values may also be pushed, but + /// they should be ignored). + /// - It **can happen** (though it rarely does currently) that + /// equating types and things will give rise to subobligations + /// that must be processed. In this case, those subobligations + /// are propagated back in the return value. + /// - Finally, the query result (of type `R`) is propagated back, + /// after applying the substitution `S`. + pub fn instantiate_nll_query_result_and_region_obligations( + &self, + cause: &ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + original_values: &SmallCanonicalVarValues<'tcx>, + query_result: &Canonical<'tcx, QueryResult<'tcx, R>>, + output_query_region_constraints: &mut Vec>, + ) -> InferResult<'tcx, R> + where + R: Debug + TypeFoldable<'tcx>, + { + // In an NLL query, there should be no type variables in the + // query, only region variables. + debug_assert!(query_result.variables.iter().all(|v| match v.kind { + CanonicalVarKind::Ty(_) => false, + CanonicalVarKind::Region => true, + })); + + let result_subst = + self.query_result_substitution_guess(cause, original_values, query_result); + + // Compute `QueryRegionConstraint` values that unify each of + // the original values `v_o` that was canonicalized into a + // variable... 
+ let mut obligations = vec![]; + + for (index, original_value) in original_values.iter().enumerate() { + // ...with the value `v_r` of that variable from the query. + let result_value = query_result.substitute_projected(self.tcx, &result_subst, |v| { + &v.var_values[CanonicalVar::new(index)] + }); + match (original_value.unpack(), result_value.unpack()) { + (UnpackedKind::Lifetime(ty::ReErased), UnpackedKind::Lifetime(ty::ReErased)) => { + // no action needed + } + + (UnpackedKind::Lifetime(v_o), UnpackedKind::Lifetime(v_r)) => { + // To make `v_o = v_r`, we emit `v_o: v_r` and `v_r: v_o`. + if v_o != v_r { + output_query_region_constraints + .push(ty::Binder::dummy(ty::OutlivesPredicate(v_o.into(), v_r))); + output_query_region_constraints + .push(ty::Binder::dummy(ty::OutlivesPredicate(v_r.into(), v_o))); + } + } + + (UnpackedKind::Type(v1), UnpackedKind::Type(v2)) => { + let ok = self.at(cause, param_env).eq(v1, v2)?; + obligations.extend(ok.into_obligations()); + } + + _ => { + bug!( + "kind mismatch, cannot unify {:?} and {:?}", + original_value, + result_value + ); + } + } + } + + // ...also include the other query region constraints from the query. 
+ output_query_region_constraints.reserve(query_result.value.region_constraints.len()); + for r_c in query_result.value.region_constraints.iter() { + let &ty::OutlivesPredicate(k1, r2) = r_c.skip_binder(); // reconstructed below + let k1 = substitute_value(self.tcx, &result_subst, &k1); + let r2 = substitute_value(self.tcx, &result_subst, &r2); + if k1 != r2.into() { + output_query_region_constraints + .push(ty::Binder::bind(ty::OutlivesPredicate(k1, r2))); + } + } + + let user_result: R = + query_result.substitute_projected(self.tcx, &result_subst, |q_r| &q_r.value); + + Ok(InferOk { + value: user_result, + obligations, + }) + } + + /// Given the original values and the (canonicalized) result from + /// computing a query, returns a substitution that can be applied + /// to the query result to convert the result back into the + /// original namespace. + /// + /// The substitution also comes accompanied with subobligations + /// that arose from unification; these might occur if (for + /// example) we are doing lazy normalization and the value + /// assigned to a type variable is unified with an unnormalized + /// projection. + fn query_result_substitution( + &self, + cause: &ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + original_values: &SmallCanonicalVarValues<'tcx>, + query_result: &Canonical<'tcx, QueryResult<'tcx, R>>, + ) -> InferResult<'tcx, CanonicalVarValues<'tcx>> + where + R: Debug + TypeFoldable<'tcx>, + { + debug!( + "query_result_substitution(original_values={:#?}, query_result={:#?})", + original_values, query_result, + ); + + let result_subst = + self.query_result_substitution_guess(cause, original_values, query_result); + + let obligations = self.unify_query_result_substitution_guess( + cause, + param_env, + original_values, + &result_subst, + query_result, + )? 
+ .into_obligations(); + + Ok(InferOk { + value: result_subst, + obligations, + }) + } + + /// Given the original values and the (canonicalized) result from + /// computing a query, returns a **guess** at a substitution that + /// can be applied to the query result to convert the result back + /// into the original namespace. This is called a **guess** + /// because it uses a quick heuristic to find the values for each + /// canonical variable; if that quick heuristic fails, then we + /// will instantiate fresh inference variables for each canonical + /// variable instead. Therefore, the result of this method must be + /// properly unified + fn query_result_substitution_guess( + &self, + cause: &ObligationCause<'tcx>, + original_values: &SmallCanonicalVarValues<'tcx>, + query_result: &Canonical<'tcx, QueryResult<'tcx, R>>, + ) -> CanonicalVarValues<'tcx> + where + R: Debug + TypeFoldable<'tcx>, + { + debug!( + "query_result_substitution_guess(original_values={:#?}, query_result={:#?})", + original_values, query_result, + ); + + // Every canonical query result includes values for each of + // the inputs to the query. Therefore, we begin by unifying + // these values with the original inputs that were + // canonicalized. + let result_values = &query_result.value.var_values; + assert_eq!(original_values.len(), result_values.len()); + + // Quickly try to find initial values for the canonical + // variables in the result in terms of the query. We do this + // by iterating down the values that the query gave to each of + // the canonical inputs. If we find that one of those values + // is directly equal to one of the canonical variables in the + // result, then we can type the corresponding value from the + // input. See the example above. 
+ let mut opt_values: IndexVec>> = + IndexVec::from_elem_n(None, query_result.variables.len()); + + // In terms of our example above, we are iterating over pairs like: + // [(?A, Vec), ('static, '?1), (?B, ?0)] + for (original_value, result_value) in original_values.iter().zip(result_values) { + match result_value.unpack() { + UnpackedKind::Type(result_value) => { + // e.g., here `result_value` might be `?0` in the example above... + if let ty::TyInfer(ty::InferTy::CanonicalTy(index)) = result_value.sty { + // in which case we would set `canonical_vars[0]` to `Some(?U)`. + opt_values[index] = Some(*original_value); + } + } + UnpackedKind::Lifetime(result_value) => { + // e.g., here `result_value` might be `'?1` in the example above... + if let &ty::RegionKind::ReCanonical(index) = result_value { + // in which case we would set `canonical_vars[0]` to `Some('static)`. + opt_values[index] = Some(*original_value); + } + } + } + } + + // Create a result substitution: if we found a value for a + // given variable in the loop above, use that. Otherwise, use + // a fresh inference variable. + let result_subst = CanonicalVarValues { + var_values: query_result + .variables + .iter() + .enumerate() + .map(|(index, info)| match opt_values[CanonicalVar::new(index)] { + Some(k) => k, + None => self.fresh_inference_var_for_canonical_var(cause.span, *info), + }) + .collect(), + }; + + result_subst + } + + /// Given a "guess" at the values for the canonical variables in + /// the input, try to unify with the *actual* values found in the + /// query result. Often, but not always, this is a no-op, because + /// we already found the mapping in the "guessing" step. 
+ /// + /// See also: `query_result_substitution_guess` + fn unify_query_result_substitution_guess( + &self, + cause: &ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + original_values: &SmallCanonicalVarValues<'tcx>, + result_subst: &CanonicalVarValues<'tcx>, + query_result: &Canonical<'tcx, QueryResult<'tcx, R>>, + ) -> InferResult<'tcx, ()> + where + R: Debug + TypeFoldable<'tcx>, + { + // A closure that yields the result value for the given + // canonical variable; this is taken from + // `query_result.var_values` after applying the substitution + // `result_subst`. + let substituted_query_result = |index: CanonicalVar| -> Kind<'tcx> { + query_result.substitute_projected(self.tcx, &result_subst, |v| &v.var_values[index]) + }; + + // Unify the original value for each variable with the value + // taken from `query_result` (after applying `result_subst`). + Ok(self.unify_canonical_vars(cause, param_env, original_values, substituted_query_result)?) + } + + /// Converts the region constraints resulting from a query into an + /// iterator of obligations. 
+    fn query_region_constraints_into_obligations<'a>(
+        &'a self,
+        cause: &'a ObligationCause<'tcx>,
+        param_env: ty::ParamEnv<'tcx>,
+        unsubstituted_region_constraints: &'a [QueryRegionConstraint<'tcx>],
+        result_subst: &'a CanonicalVarValues<'tcx>,
+    ) -> impl Iterator<Item = PredicateObligation<'tcx>> + 'a {
+        Box::new(
+            unsubstituted_region_constraints
+                .iter()
+                .map(move |constraint| {
+                    let ty::OutlivesPredicate(k1, r2) = constraint.skip_binder(); // restored below
+                    let k1 = substitute_value(self.tcx, result_subst, k1);
+                    let r2 = substitute_value(self.tcx, result_subst, r2);
+                    match k1.unpack() {
+                        UnpackedKind::Lifetime(r1) => Obligation::new(
+                            cause.clone(),
+                            param_env,
+                            ty::Predicate::RegionOutlives(ty::Binder::dummy(
+                                ty::OutlivesPredicate(r1, r2),
+                            )),
+                        ),
+
+                        UnpackedKind::Type(t1) => Obligation::new(
+                            cause.clone(),
+                            param_env,
+                            ty::Predicate::TypeOutlives(ty::Binder::dummy(ty::OutlivesPredicate(
+                                t1, r2,
+                            ))),
+                        ),
+                    }
+                }),
+        ) as Box<Iterator<Item = _>>
+    }
+
+    /// Given two sets of values for the same set of canonical variables, unify them.
+    /// The second set is produced lazily by supplying indices from the first set.
+ fn unify_canonical_vars( + &self, + cause: &ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + variables1: &SmallCanonicalVarValues<'tcx>, + variables2: impl Fn(CanonicalVar) -> Kind<'tcx>, + ) -> InferResult<'tcx, ()> { + self.commit_if_ok(|_| { + let mut obligations = vec![]; + for (index, value1) in variables1.iter().enumerate() { + let value2 = variables2(CanonicalVar::new(index)); + + match (value1.unpack(), value2.unpack()) { + (UnpackedKind::Type(v1), UnpackedKind::Type(v2)) => { + obligations + .extend(self.at(cause, param_env).eq(v1, v2)?.into_obligations()); + } + ( + UnpackedKind::Lifetime(ty::ReErased), + UnpackedKind::Lifetime(ty::ReErased), + ) => { + // no action needed + } + (UnpackedKind::Lifetime(v1), UnpackedKind::Lifetime(v2)) => { + obligations + .extend(self.at(cause, param_env).eq(v1, v2)?.into_obligations()); + } + _ => { + bug!("kind mismatch, cannot unify {:?} and {:?}", value1, value2,); + } + } + } + Ok(InferOk { + value: (), + obligations, + }) + }) + } +} + +/// Given the region obligations and constraints scraped from the infcx, +/// creates query region constraints. +pub fn make_query_outlives<'tcx>( + tcx: TyCtxt<'_, '_, 'tcx>, + region_obligations: Vec<(ast::NodeId, RegionObligation<'tcx>)>, + region_constraints: &RegionConstraintData<'tcx>, +) -> Vec> { + let RegionConstraintData { + constraints, + verifys, + givens, + } = region_constraints; + + assert!(verifys.is_empty()); + assert!(givens.is_empty()); + + let mut outlives: Vec<_> = constraints + .into_iter() + .map(|(k, _)| match *k { + // Swap regions because we are going from sub (<=) to outlives + // (>=). 
+ Constraint::VarSubVar(v1, v2) => ty::OutlivesPredicate( + tcx.mk_region(ty::ReVar(v2)).into(), + tcx.mk_region(ty::ReVar(v1)), + ), + Constraint::VarSubReg(v1, r2) => { + ty::OutlivesPredicate(r2.into(), tcx.mk_region(ty::ReVar(v1))) + } + Constraint::RegSubVar(r1, v2) => { + ty::OutlivesPredicate(tcx.mk_region(ty::ReVar(v2)).into(), r1) + } + Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1), + }) + .map(ty::Binder::dummy) // no bound regions in the code above + .collect(); + + outlives.extend( + region_obligations + .into_iter() + .map(|(_, r_o)| ty::OutlivesPredicate(r_o.sup_type.into(), r_o.sub_region)) + .map(ty::Binder::dummy), // no bound regions in the code above + ); + + outlives +} diff --git a/src/librustc/infer/canonical/substitute.rs b/src/librustc/infer/canonical/substitute.rs new file mode 100644 index 000000000000..679829f43c52 --- /dev/null +++ b/src/librustc/infer/canonical/substitute.rs @@ -0,0 +1,113 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module contains code to substitute new values into a +//! `Canonical<'tcx, T>`. +//! +//! For an overview of what canonicaliation is and how it fits into +//! rustc, check out the [chapter in the rustc guide][c]. +//! +//! [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html + +use infer::canonical::{Canonical, CanonicalVarValues}; +use ty::fold::{TypeFoldable, TypeFolder}; +use ty::subst::UnpackedKind; +use ty::{self, Ty, TyCtxt, TypeFlags}; + +impl<'tcx, V> Canonical<'tcx, V> { + /// Instantiate the wrapped value, replacing each canonical value + /// with the value given in `var_values`. 
+ pub fn substitute(&self, tcx: TyCtxt<'_, '_, 'tcx>, var_values: &CanonicalVarValues<'tcx>) -> V + where + V: TypeFoldable<'tcx>, + { + self.substitute_projected(tcx, var_values, |value| value) + } + + /// Allows one to apply a substitute to some subset of + /// `self.value`. Invoke `projection_fn` with `self.value` to get + /// a value V that is expressed in terms of the same canonical + /// variables bound in `self` (usually this extracts from subset + /// of `self`). Apply the substitution `var_values` to this value + /// V, replacing each of the canonical variables. + pub fn substitute_projected( + &self, + tcx: TyCtxt<'_, '_, 'tcx>, + var_values: &CanonicalVarValues<'tcx>, + projection_fn: impl FnOnce(&V) -> &T, + ) -> T + where + T: TypeFoldable<'tcx>, + { + assert_eq!(self.variables.len(), var_values.len()); + let value = projection_fn(&self.value); + substitute_value(tcx, var_values, value) + } +} + +/// Substitute the values from `var_values` into `value`. `var_values` +/// must be values for the set of canonical variables that appear in +/// `value`. 
+pub(super) fn substitute_value<'a, 'tcx, T>( + tcx: TyCtxt<'_, '_, 'tcx>, + var_values: &CanonicalVarValues<'tcx>, + value: &'a T, +) -> T +where + T: TypeFoldable<'tcx>, +{ + if var_values.var_values.is_empty() { + debug_assert!(!value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS)); + value.clone() + } else if !value.has_type_flags(TypeFlags::HAS_CANONICAL_VARS) { + value.clone() + } else { + value.fold_with(&mut CanonicalVarValuesSubst { tcx, var_values }) + } +} + +struct CanonicalVarValuesSubst<'cx, 'gcx: 'tcx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + var_values: &'cx CanonicalVarValues<'tcx>, +} + +impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for CanonicalVarValuesSubst<'cx, 'gcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx> { + self.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + match t.sty { + ty::TyInfer(ty::InferTy::CanonicalTy(c)) => { + match self.var_values.var_values[c].unpack() { + UnpackedKind::Type(ty) => ty, + r => bug!("{:?} is a type but value is {:?}", c, r), + } + } + _ => { + if !t.has_type_flags(TypeFlags::HAS_CANONICAL_VARS) { + t + } else { + t.super_fold_with(self) + } + } + } + } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + match r { + ty::RegionKind::ReCanonical(c) => match self.var_values.var_values[*c].unpack() { + UnpackedKind::Lifetime(l) => l, + r => bug!("{:?} is a region but value is {:?}", c, r), + }, + _ => r.super_fold_with(self), + } + } +} diff --git a/src/librustc/infer/combine.rs b/src/librustc/infer/combine.rs index f7bc092a3d7a..ccba5a09cf6c 100644 --- a/src/librustc/infer/combine.rs +++ b/src/librustc/infer/combine.rs @@ -34,10 +34,10 @@ use super::equate::Equate; use super::glb::Glb; +use super::{InferCtxt, MiscVariable, TypeTrace}; use super::lub::Lub; use super::sub::Sub; -use super::InferCtxt; -use super::{MiscVariable, TypeTrace}; +use super::type_variable::TypeVariableValue; use hir::def_id::DefId; use ty::{IntType, UintType}; @@ -132,7 +132,7 @@ 
impl<'infcx, 'gcx, 'tcx> InferCtxt<'infcx, 'gcx, 'tcx> { { self.int_unification_table .borrow_mut() - .unify_var_value(vid, val) + .unify_var_value(vid, Some(val)) .map_err(|e| int_unification_error(vid_is_expected, e))?; match val { IntType(v) => Ok(self.tcx.mk_mach_int(v)), @@ -148,7 +148,7 @@ impl<'infcx, 'gcx, 'tcx> InferCtxt<'infcx, 'gcx, 'tcx> { { self.float_unification_table .borrow_mut() - .unify_var_value(vid, val) + .unify_var_value(vid, Some(ty::FloatVarValue(val))) .map_err(|e| float_unification_error(vid_is_expected, e))?; Ok(self.tcx.mk_mach_float(val)) } @@ -194,7 +194,7 @@ impl<'infcx, 'gcx, 'tcx> CombineFields<'infcx, 'gcx, 'tcx> { use self::RelationDir::*; // Get the actual variable that b_vid has been inferred to - debug_assert!(self.infcx.type_variables.borrow_mut().probe(b_vid).is_none()); + debug_assert!(self.infcx.type_variables.borrow_mut().probe(b_vid).is_unknown()); debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", a_ty, dir, b_vid); @@ -302,7 +302,7 @@ struct Generalizer<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { /// Result from a generalization operation. This includes /// not only the generalized type, but also a bool flag -/// indicating whether further WF checks are needed.q +/// indicating whether further WF checks are needed. struct Generalization<'tcx> { ty: Ty<'tcx>, @@ -351,7 +351,7 @@ impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, ' -> RelateResult<'tcx, ty::Binder> where T: Relate<'tcx> { - Ok(ty::Binder(self.relate(a.skip_binder(), b.skip_binder())?)) + Ok(ty::Binder::bind(self.relate(a.skip_binder(), b.skip_binder())?)) } fn relate_item_substs(&mut self, @@ -402,12 +402,12 @@ impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, ' // `vid` are related via subtyping. 
return Err(TypeError::CyclicTy(self.root_ty)); } else { - match variables.probe_root(vid) { - Some(u) => { + match variables.probe(vid) { + TypeVariableValue::Known { value: u } => { drop(variables); self.relate(&u, &u) } - None => { + TypeVariableValue::Unknown { universe } => { match self.ambient_variance { // Invariant: no need to make a fresh type variable. ty::Invariant => return Ok(t), @@ -423,8 +423,8 @@ impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, ' ty::Covariant | ty::Contravariant => (), } - let origin = variables.origin(vid); - let new_var_id = variables.new_var(false, origin, None); + let origin = *variables.var_origin(vid); + let new_var_id = variables.new_var(universe, false, origin); let u = self.tcx().mk_var(new_var_id); debug!("generalize: replacing original vid={:?} with new={:?}", vid, u); @@ -476,6 +476,7 @@ impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, ' } } + ty::ReCanonical(..) | ty::ReClosureBound(..) => { span_bug!( self.span, @@ -518,9 +519,9 @@ fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::Int } fn float_unification_error<'tcx>(a_is_expected: bool, - v: (ast::FloatTy, ast::FloatTy)) + v: (ty::FloatVarValue, ty::FloatVarValue)) -> TypeError<'tcx> { - let (a, b) = v; + let (ty::FloatVarValue(a), ty::FloatVarValue(b)) = v; TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b)) } diff --git a/src/librustc/infer/error_reporting/mod.rs b/src/librustc/infer/error_reporting/mod.rs index c477a0d383e2..212821cac2e4 100644 --- a/src/librustc/infer/error_reporting/mod.rs +++ b/src/librustc/infer/error_reporting/mod.rs @@ -56,21 +56,21 @@ //! time of error detection. 
use infer; -use super::{InferCtxt, TypeTrace, SubregionOrigin, RegionVariableOrigin, ValuePairs}; +use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePairs}; use super::region_constraints::GenericKind; use super::lexical_region_resolve::RegionResolutionError; -use std::fmt; +use std::{cmp, fmt}; use hir; use hir::map as hir_map; use hir::def_id::DefId; use middle::region; use traits::{ObligationCause, ObligationCauseCode}; -use ty::{self, Region, Ty, TyCtxt, TypeFoldable, TypeVariants}; +use ty::{self, subst::Subst, Region, Ty, TyCtxt, TypeFoldable, TypeVariants}; use ty::error::TypeError; use syntax::ast::DUMMY_NODE_ID; use syntax_pos::{Pos, Span}; -use errors::{DiagnosticBuilder, DiagnosticStyledString}; +use errors::{Applicability, DiagnosticBuilder, DiagnosticStyledString}; use rustc_data_structures::indexed_vec::Idx; @@ -81,71 +81,39 @@ mod need_type_info; pub mod nice_region_error; impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub fn note_and_explain_region(self, - region_scope_tree: ®ion::ScopeTree, - err: &mut DiagnosticBuilder, - prefix: &str, - region: ty::Region<'tcx>, - suffix: &str) { - fn item_scope_tag(item: &hir::Item) -> &'static str { - match item.node { - hir::ItemImpl(..) => "impl", - hir::ItemStruct(..) => "struct", - hir::ItemUnion(..) => "union", - hir::ItemEnum(..) => "enum", - hir::ItemTrait(..) => "trait", - hir::ItemFn(..) => "function body", - _ => "item" - } - } - - fn trait_item_scope_tag(item: &hir::TraitItem) -> &'static str { - match item.node { - hir::TraitItemKind::Method(..) => "method body", - hir::TraitItemKind::Const(..) | - hir::TraitItemKind::Type(..) => "associated item" - } - } - - fn impl_item_scope_tag(item: &hir::ImplItem) -> &'static str { - match item.node { - hir::ImplItemKind::Method(..) => "method body", - hir::ImplItemKind::Const(..) 
| - hir::ImplItemKind::Type(_) => "associated item" - } - } - - fn explain_span<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - heading: &str, span: Span) - -> (String, Option) { - let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo()); - (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize() + 1), - Some(span)) - } - + pub fn note_and_explain_region( + self, + region_scope_tree: ®ion::ScopeTree, + err: &mut DiagnosticBuilder, + prefix: &str, + region: ty::Region<'tcx>, + suffix: &str, + ) { let (description, span) = match *region { ty::ReScope(scope) => { let new_string; let unknown_scope = || { - format!("{}unknown scope: {:?}{}. Please report a bug.", - prefix, scope, suffix) + format!( + "{}unknown scope: {:?}{}. Please report a bug.", + prefix, scope, suffix + ) }; let span = scope.span(self, region_scope_tree); let tag = match self.hir.find(scope.node_id(self, region_scope_tree)) { Some(hir_map::NodeBlock(_)) => "block", Some(hir_map::NodeExpr(expr)) => match expr.node { - hir::ExprCall(..) => "call", - hir::ExprMethodCall(..) => "method call", - hir::ExprMatch(.., hir::MatchSource::IfLetDesugar { .. }) => "if let", - hir::ExprMatch(.., hir::MatchSource::WhileLetDesugar) => "while let", - hir::ExprMatch(.., hir::MatchSource::ForLoopDesugar) => "for", - hir::ExprMatch(..) => "match", + hir::ExprKind::Call(..) => "call", + hir::ExprKind::MethodCall(..) => "method call", + hir::ExprKind::Match(.., hir::MatchSource::IfLetDesugar { .. }) => "if let", + hir::ExprKind::Match(.., hir::MatchSource::WhileLetDesugar) => "while let", + hir::ExprKind::Match(.., hir::MatchSource::ForLoopDesugar) => "for", + hir::ExprKind::Match(..) 
=> "match", _ => "expression", }, Some(hir_map::NodeStmt(_)) => "statement", - Some(hir_map::NodeItem(it)) => item_scope_tag(&it), - Some(hir_map::NodeTraitItem(it)) => trait_item_scope_tag(&it), - Some(hir_map::NodeImplItem(it)) => impl_item_scope_tag(&it), + Some(hir_map::NodeItem(it)) => Self::item_scope_tag(&it), + Some(hir_map::NodeTraitItem(it)) => Self::trait_item_scope_tag(&it), + Some(hir_map::NodeImplItem(it)) => Self::impl_item_scope_tag(&it), Some(_) | None => { err.span_note(span, &unknown_scope()); return; @@ -153,77 +121,27 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }; let scope_decorated_tag = match scope.data() { region::ScopeData::Node(_) => tag, - region::ScopeData::CallSite(_) => { - "scope of call-site for function" - } - region::ScopeData::Arguments(_) => { - "scope of function body" - } + region::ScopeData::CallSite(_) => "scope of call-site for function", + region::ScopeData::Arguments(_) => "scope of function body", region::ScopeData::Destruction(_) => { new_string = format!("destruction scope surrounding {}", tag); &new_string[..] } region::ScopeData::Remainder(r) => { - new_string = format!("block suffix following statement {}", - r.first_statement_index.index()); + new_string = format!( + "block suffix following statement {}", + r.first_statement_index.index() + ); &new_string[..] 
} }; - explain_span(self, scope_decorated_tag, span) + self.explain_span(scope_decorated_tag, span) } - ty::ReEarlyBound(_) | - ty::ReFree(_) => { - let scope = region.free_region_binding_scope(self); - let prefix = match *region { - ty::ReEarlyBound(ref br) => { - format!("the lifetime {} as defined on", br.name) - } - ty::ReFree(ref fr) => { - match fr.bound_region { - ty::BrAnon(idx) => { - format!("the anonymous lifetime #{} defined on", idx + 1) - } - ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(), - _ => { - format!("the lifetime {} as defined on", - fr.bound_region) - } - } - } - _ => bug!() - }; - - let node = self.hir.as_local_node_id(scope) - .unwrap_or(DUMMY_NODE_ID); - let unknown; - let tag = match self.hir.find(node) { - Some(hir_map::NodeBlock(_)) | - Some(hir_map::NodeExpr(_)) => "body", - Some(hir_map::NodeItem(it)) => item_scope_tag(&it), - Some(hir_map::NodeTraitItem(it)) => trait_item_scope_tag(&it), - Some(hir_map::NodeImplItem(it)) => impl_item_scope_tag(&it), - - // this really should not happen, but it does: - // FIXME(#27942) - Some(_) => { - unknown = format!("unexpected node ({}) for scope {:?}. \ - Please report a bug.", - self.hir.node_to_string(node), scope); - &unknown - } - None => { - unknown = format!("unknown node for scope {:?}. \ - Please report a bug.", scope); - &unknown - } - }; - let (msg, opt_span) = explain_span(self, tag, self.hir.span(node)); - (format!("{} {}", prefix, msg), opt_span) + ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic => { + self.msg_span_from_free_region(region) } - ty::ReStatic => ("the static lifetime".to_owned(), None), - ty::ReEmpty => ("the empty lifetime".to_owned(), None), // FIXME(#13998) ReSkolemized should probably print like @@ -231,59 +149,189 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // // We shouldn't really be having unification failures with ReVar // and ReLateBound though. - ty::ReSkolemized(..) | - ty::ReVar(_) | - ty::ReLateBound(..) 
| - ty::ReErased => { + ty::ReSkolemized(..) | ty::ReVar(_) | ty::ReLateBound(..) | ty::ReErased => { (format!("lifetime {:?}", region), None) } // We shouldn't encounter an error message with ReClosureBound. + ty::ReCanonical(..) | ty::ReClosureBound(..) => { - bug!( - "encountered unexpected ReClosureBound: {:?}", - region, - ); + bug!("encountered unexpected ReClosureBound: {:?}", region,); } }; + + TyCtxt::emit_msg_span(err, prefix, description, span, suffix); + } + + pub fn note_and_explain_free_region( + self, + err: &mut DiagnosticBuilder, + prefix: &str, + region: ty::Region<'tcx>, + suffix: &str, + ) { + let (description, span) = self.msg_span_from_free_region(region); + + TyCtxt::emit_msg_span(err, prefix, description, span, suffix); + } + + fn msg_span_from_free_region(self, region: ty::Region<'tcx>) -> (String, Option) { + match *region { + ty::ReEarlyBound(_) | ty::ReFree(_) => { + self.msg_span_from_early_bound_and_free_regions(region) + }, + ty::ReStatic => ("the static lifetime".to_owned(), None), + _ => bug!("{:?}", region), + } + } + + fn msg_span_from_early_bound_and_free_regions( + self, + region: ty::Region<'tcx>, + ) -> (String, Option) { + let cm = self.sess.codemap(); + + let scope = region.free_region_binding_scope(self); + let node = self.hir.as_local_node_id(scope).unwrap_or(DUMMY_NODE_ID); + let tag = match self.hir.find(node) { + Some(hir_map::NodeBlock(_)) | Some(hir_map::NodeExpr(_)) => "body", + Some(hir_map::NodeItem(it)) => Self::item_scope_tag(&it), + Some(hir_map::NodeTraitItem(it)) => Self::trait_item_scope_tag(&it), + Some(hir_map::NodeImplItem(it)) => Self::impl_item_scope_tag(&it), + _ => unreachable!() + }; + let (prefix, span) = match *region { + ty::ReEarlyBound(ref br) => { + let mut sp = cm.def_span(self.hir.span(node)); + if let Some(param) = self.hir.get_generics(scope).and_then(|generics| { + generics.get_named(&br.name) + }) { + sp = param.span; + } + (format!("the lifetime {} as defined on", br.name), sp) + } + 
ty::ReFree(ty::FreeRegion { + bound_region: ty::BoundRegion::BrNamed(_, ref name), .. + }) => { + let mut sp = cm.def_span(self.hir.span(node)); + if let Some(param) = self.hir.get_generics(scope).and_then(|generics| { + generics.get_named(&name) + }) { + sp = param.span; + } + (format!("the lifetime {} as defined on", name), sp) + } + ty::ReFree(ref fr) => match fr.bound_region { + ty::BrAnon(idx) => ( + format!("the anonymous lifetime #{} defined on", idx + 1), + self.hir.span(node), + ), + ty::BrFresh(_) => ( + "an anonymous lifetime defined on".to_owned(), + self.hir.span(node), + ), + _ => ( + format!("the lifetime {} as defined on", fr.bound_region), + cm.def_span(self.hir.span(node)), + ), + }, + _ => bug!(), + }; + let (msg, opt_span) = self.explain_span(tag, span); + (format!("{} {}", prefix, msg), opt_span) + } + + fn emit_msg_span( + err: &mut DiagnosticBuilder, + prefix: &str, + description: String, + span: Option, + suffix: &str, + ) { let message = format!("{}{}{}", prefix, description, suffix); + if let Some(span) = span { err.span_note(span, &message); } else { err.note(&message); } } + + fn item_scope_tag(item: &hir::Item) -> &'static str { + match item.node { + hir::ItemKind::Impl(..) => "impl", + hir::ItemKind::Struct(..) => "struct", + hir::ItemKind::Union(..) => "union", + hir::ItemKind::Enum(..) => "enum", + hir::ItemKind::Trait(..) => "trait", + hir::ItemKind::Fn(..) => "function body", + _ => "item", + } + } + + fn trait_item_scope_tag(item: &hir::TraitItem) -> &'static str { + match item.node { + hir::TraitItemKind::Method(..) => "method body", + hir::TraitItemKind::Const(..) | hir::TraitItemKind::Type(..) => "associated item", + } + } + + fn impl_item_scope_tag(item: &hir::ImplItem) -> &'static str { + match item.node { + hir::ImplItemKind::Method(..) => "method body", + hir::ImplItemKind::Const(..) | + hir::ImplItemKind::Existential(..) | + hir::ImplItemKind::Type(..) 
=> "associated item", + } + } + + fn explain_span(self, heading: &str, span: Span) -> (String, Option) { + let lo = self.sess.codemap().lookup_char_pos_adj(span.lo()); + ( + format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize() + 1), + Some(span), + ) + } } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { - pub fn report_region_errors(&self, - region_scope_tree: ®ion::ScopeTree, - errors: &Vec>, - will_later_be_reported_by_nll: bool) { + pub fn report_region_errors( + &self, + region_scope_tree: ®ion::ScopeTree, + errors: &Vec>, + will_later_be_reported_by_nll: bool, + ) { debug!("report_region_errors(): {} errors to start", errors.len()); - if will_later_be_reported_by_nll && self.tcx.sess.nll() { + if will_later_be_reported_by_nll && + // FIXME: `use_mir_borrowck` seems wrong here... + self.tcx.use_mir_borrowck() && + // ... this is a band-aid; may be better to explicitly + // match on every borrowck_mode variant to guide decision + // here. + !self.tcx.migrate_borrowck() { + // With `#![feature(nll)]`, we want to present a nice user // experience, so don't even mention the errors from the // AST checker. - if self.tcx.sess.features.borrow().nll { + if self.tcx.features().nll { return; } - // But with -Znll, it's nice to have some note for later. + // But with nll, it's nice to have some note for later. for error in errors { match *error { - RegionResolutionError::ConcreteFailure(ref origin, ..) | - RegionResolutionError::GenericBoundFailure(ref origin, ..) => { - self.tcx.sess.span_warn( - origin.span(), - "not reporting region error due to -Znll"); + RegionResolutionError::ConcreteFailure(ref origin, ..) + | RegionResolutionError::GenericBoundFailure(ref origin, ..) => { + self.tcx + .sess + .span_warn(origin.span(), "not reporting region error due to nll"); } RegionResolutionError::SubSupConflict(ref rvo, ..) 
=> { - self.tcx.sess.span_warn( - rvo.span(), - "not reporting region error due to -Znll"); + self.tcx + .sess + .span_warn(rvo.span(), "not reporting region error due to nll"); } } } @@ -295,7 +343,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // together into a `ProcessedErrors` group: let errors = self.process_errors(errors); - debug!("report_region_errors: {} errors after preprocessing", errors.len()); + debug!( + "report_region_errors: {} errors after preprocessing", + errors.len() + ); for error in errors { debug!("report_region_errors: error = {:?}", error); @@ -310,7 +361,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // the error. If all of these fails, we fall back to a rather // general bit of code that displays the error information RegionResolutionError::ConcreteFailure(origin, sub, sup) => { - self.report_concrete_failure(region_scope_tree, origin, sub, sup).emit(); + self.report_concrete_failure(region_scope_tree, origin, sub, sup) + .emit(); } RegionResolutionError::GenericBoundFailure(origin, param_ty, sub) => { @@ -323,17 +375,21 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ); } - RegionResolutionError::SubSupConflict(var_origin, - sub_origin, - sub_r, - sup_origin, - sup_r) => { - self.report_sub_sup_conflict(region_scope_tree, - var_origin, - sub_origin, - sub_r, - sup_origin, - sup_r); + RegionResolutionError::SubSupConflict( + var_origin, + sub_origin, + sub_r, + sup_origin, + sup_r, + ) => { + self.report_sub_sup_conflict( + region_scope_tree, + var_origin, + sub_origin, + sub_r, + sup_origin, + sup_r, + ); } } } @@ -350,8 +406,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // The method also attempts to weed out messages that seem like // duplicates that will be unhelpful to the end-user. But // obviously it never weeds out ALL errors. 
- fn process_errors(&self, errors: &Vec>) - -> Vec> { + fn process_errors( + &self, + errors: &Vec>, + ) -> Vec> { debug!("process_errors()"); // We want to avoid reporting generic-bound failures if we can @@ -368,15 +426,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let is_bound_failure = |e: &RegionResolutionError<'tcx>| match *e { RegionResolutionError::GenericBoundFailure(..) => true, - RegionResolutionError::ConcreteFailure(..) | - RegionResolutionError::SubSupConflict(..) => false, + RegionResolutionError::ConcreteFailure(..) + | RegionResolutionError::SubSupConflict(..) => false, }; - let mut errors = if errors.iter().all(|e| is_bound_failure(e)) { errors.clone() } else { - errors.iter().filter(|&e| !is_bound_failure(e)).cloned().collect() + errors + .iter() + .filter(|&e| !is_bound_failure(e)) + .cloned() + .collect() }; // sort the errors by span, for better error message stability. @@ -389,10 +450,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } /// Adds a note if the types come from similarly named crates - fn check_and_note_conflicting_crates(&self, - err: &mut DiagnosticBuilder, - terr: &TypeError<'tcx>, - sp: Span) { + fn check_and_note_conflicting_crates( + &self, + err: &mut DiagnosticBuilder, + terr: &TypeError<'tcx>, + sp: Span, + ) { let report_path_match = |err: &mut DiagnosticBuilder, did1: DefId, did2: DefId| { // Only external crates, if either is from a local // module we could have false positives @@ -403,12 +466,16 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let found_abs_path = self.tcx.absolute_item_path_str(did2); // We compare strings because DefPath can be different // for imported and non-imported crates - if exp_path == found_path - || exp_abs_path == found_abs_path { + if exp_path == found_path || exp_abs_path == found_abs_path { let crate_name = self.tcx.crate_name(did1.krate); - err.span_note(sp, &format!("Perhaps two different versions \ - of crate `{}` are being used?", - crate_name)); + err.span_note( 
+ sp, + &format!( + "Perhaps two different versions \ + of crate `{}` are being used?", + crate_name + ), + ); } } }; @@ -419,31 +486,38 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { match (&exp_found.expected.sty, &exp_found.found.sty) { (&ty::TyAdt(exp_adt, _), &ty::TyAdt(found_adt, _)) => { report_path_match(err, exp_adt.did, found_adt.did); - }, - _ => () + } + _ => (), } - }, + } TypeError::Traits(ref exp_found) => { report_path_match(err, exp_found.expected, exp_found.found); - }, - _ => () // FIXME(#22750) handle traits and stuff + } + _ => (), // FIXME(#22750) handle traits and stuff } } - fn note_error_origin(&self, - err: &mut DiagnosticBuilder<'tcx>, - cause: &ObligationCause<'tcx>) - { + fn note_error_origin(&self, err: &mut DiagnosticBuilder<'tcx>, cause: &ObligationCause<'tcx>) { match cause.code { ObligationCauseCode::MatchExpressionArm { arm_span, source } => match source { - hir::MatchSource::IfLetDesugar {..} => { + hir::MatchSource::IfLetDesugar { .. } => { let msg = "`if let` arm with an incompatible type"; if self.tcx.sess.codemap().is_multiline(arm_span) { err.span_note(arm_span, msg); } else { err.span_label(arm_span, msg); } - } + }, + hir::MatchSource::TryDesugar => { // Issue #51632 + if let Ok(try_snippet) = self.tcx.sess.codemap().span_to_snippet(arm_span) { + err.span_suggestion_with_applicability( + arm_span, + "try wrapping with a success variant", + format!("Ok({})", try_snippet), + Applicability::MachineApplicable + ); + } + }, _ => { let msg = "match arm with an incompatible type"; if self.tcx.sess.codemap().is_multiline(arm_span) { @@ -453,7 +527,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } }, - _ => () + _ => (), } } @@ -470,13 +544,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Bar /// -------- this type is the same as a type argument in the other type, not highlighted /// ``` - fn highlight_outer(&self, - value: &mut DiagnosticStyledString, - other_value: &mut DiagnosticStyledString, - name: 
String, - sub: &ty::subst::Substs<'tcx>, - pos: usize, - other_ty: &Ty<'tcx>) { + fn highlight_outer( + &self, + value: &mut DiagnosticStyledString, + other_value: &mut DiagnosticStyledString, + name: String, + sub: &ty::subst::Substs<'tcx>, + pos: usize, + other_ty: &Ty<'tcx>, + ) { // `value` and `other_value` hold two incomplete type representation for display. // `name` is the path of both types being compared. `sub` value.push_highlighted(name); @@ -486,14 +562,17 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } // Output the lifetimes fot the first type - let lifetimes = sub.regions().map(|lifetime| { - let s = format!("{}", lifetime); - if s.is_empty() { - "'_".to_string() - } else { - s - } - }).collect::>().join(", "); + let lifetimes = sub.regions() + .map(|lifetime| { + let s = lifetime.to_string(); + if s.is_empty() { + "'_".to_string() + } else { + s + } + }) + .collect::>() + .join(", "); if !lifetimes.is_empty() { if sub.regions().count() < len { value.push_normal(lifetimes + &", "); @@ -510,7 +589,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { value.0.extend((values.0).0); other_value.0.extend((values.1).0); } else { - value.push_highlighted(format!("{}", type_arg)); + value.push_highlighted(type_arg.to_string()); } if len > 0 && i != len - 1 { @@ -543,13 +622,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Bar /// -------- this type is the same as a type argument in the other type, not highlighted /// ``` - fn cmp_type_arg(&self, - mut t1_out: &mut DiagnosticStyledString, - mut t2_out: &mut DiagnosticStyledString, - path: String, - sub: &ty::subst::Substs<'tcx>, - other_path: String, - other_ty: &Ty<'tcx>) -> Option<()> { + fn cmp_type_arg( + &self, + mut t1_out: &mut DiagnosticStyledString, + mut t2_out: &mut DiagnosticStyledString, + path: String, + sub: &ty::subst::Substs<'tcx>, + other_path: String, + other_ty: &Ty<'tcx>, + ) -> Option<()> { for (i, ta) in sub.types().enumerate() { if &ta == other_ty { 
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, &other_ty); @@ -567,57 +648,99 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } /// Add a `,` to the type representation only if it is appropriate. - fn push_comma(&self, - value: &mut DiagnosticStyledString, - other_value: &mut DiagnosticStyledString, - len: usize, - pos: usize) { + fn push_comma( + &self, + value: &mut DiagnosticStyledString, + other_value: &mut DiagnosticStyledString, + len: usize, + pos: usize, + ) { if len > 0 && pos != len - 1 { value.push_normal(", "); other_value.push_normal(", "); } } + /// For generic types with parameters with defaults, remove the parameters corresponding to + /// the defaults. This repeats a lot of the logic found in `PrintContext::parameterized`. + fn strip_generic_default_params( + &self, + def_id: DefId, + substs: &ty::subst::Substs<'tcx> + ) -> &'tcx ty::subst::Substs<'tcx> { + let generics = self.tcx.generics_of(def_id); + let mut num_supplied_defaults = 0; + let mut type_params = generics.params.iter().rev().filter_map(|param| match param.kind { + ty::GenericParamDefKind::Lifetime => None, + ty::GenericParamDefKind::Type { has_default, .. 
} => { + Some((param.def_id, has_default)) + } + }).peekable(); + let has_default = { + let has_default = type_params.peek().map(|(_, has_default)| has_default); + *has_default.unwrap_or(&false) + }; + if has_default { + let types = substs.types().rev(); + for ((def_id, has_default), actual) in type_params.zip(types) { + if !has_default { + break; + } + if self.tcx.type_of(def_id).subst(self.tcx, substs) != actual { + break; + } + num_supplied_defaults += 1; + } + } + let len = generics.params.len(); + let mut generics = generics.clone(); + generics.params.truncate(len - num_supplied_defaults); + substs.truncate_to(self.tcx, &generics) + } + /// Compare two given types, eliding parts that are the same between them and highlighting /// relevant differences, and return two representation of those types for highlighted printing. - fn cmp(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) - -> (DiagnosticStyledString, DiagnosticStyledString) - { + fn cmp(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> (DiagnosticStyledString, DiagnosticStyledString) { fn equals<'tcx>(a: &Ty<'tcx>, b: &Ty<'tcx>) -> bool { match (&a.sty, &b.sty) { (a, b) if *a == *b => true, - (&ty::TyInt(_), &ty::TyInfer(ty::InferTy::IntVar(_))) | - (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInt(_)) | - (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInfer(ty::InferTy::IntVar(_))) | - (&ty::TyFloat(_), &ty::TyInfer(ty::InferTy::FloatVar(_))) | - (&ty::TyInfer(ty::InferTy::FloatVar(_)), &ty::TyFloat(_)) | - (&ty::TyInfer(ty::InferTy::FloatVar(_)), - &ty::TyInfer(ty::InferTy::FloatVar(_))) => true, + (&ty::TyInt(_), &ty::TyInfer(ty::InferTy::IntVar(_))) + | (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInt(_)) + | (&ty::TyInfer(ty::InferTy::IntVar(_)), &ty::TyInfer(ty::InferTy::IntVar(_))) + | (&ty::TyFloat(_), &ty::TyInfer(ty::InferTy::FloatVar(_))) + | (&ty::TyInfer(ty::InferTy::FloatVar(_)), &ty::TyFloat(_)) + | ( + &ty::TyInfer(ty::InferTy::FloatVar(_)), + &ty::TyInfer(ty::InferTy::FloatVar(_)), + ) => true, _ => false, } } - 
fn push_ty_ref<'tcx>(r: &ty::Region<'tcx>, - tnm: &ty::TypeAndMut<'tcx>, - s: &mut DiagnosticStyledString) { - let r = &format!("{}", r); - s.push_highlighted(format!("&{}{}{}", - r, - if r == "" { - "" - } else { - " " - }, - if tnm.mutbl == hir::MutMutable { - "mut " - } else { - "" - })); - s.push_normal(format!("{}", tnm.ty)); + fn push_ty_ref<'tcx>( + r: &ty::Region<'tcx>, + ty: Ty<'tcx>, + mutbl: hir::Mutability, + s: &mut DiagnosticStyledString, + ) { + let r = &r.to_string(); + s.push_highlighted(format!( + "&{}{}{}", + r, + if r == "" { "" } else { " " }, + if mutbl == hir::MutMutable { + "mut " + } else { + "" + } + )); + s.push_normal(ty.to_string()); } match (&t1.sty, &t2.sty) { (&ty::TyAdt(def1, sub1), &ty::TyAdt(def2, sub2)) => { + let sub_no_defaults_1 = self.strip_generic_default_params(def1.did, sub1); + let sub_no_defaults_2 = self.strip_generic_default_params(def2.did, sub2); let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); let path1 = self.tcx.item_path_str(def1.did.clone()); let path2 = self.tcx.item_path_str(def2.did.clone()); @@ -633,15 +756,26 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { values.0.push_normal(path1); values.1.push_normal(path2); + // Avoid printing out default generic parameters that are common to both + // types. + let len1 = sub_no_defaults_1.len(); + let len2 = sub_no_defaults_2.len(); + let common_len = cmp::min(len1, len2); + let remainder1: Vec<_> = sub1.types().skip(common_len).collect(); + let remainder2: Vec<_> = sub2.types().skip(common_len).collect(); + let common_default_params = + remainder1.iter().rev().zip(remainder2.iter().rev()) + .filter(|(a, b)| a == b).count(); + let len = sub1.len() - common_default_params; + // Only draw `<...>` if there're lifetime/type arguments. 
- let len = sub1.len(); if len > 0 { values.0.push_normal("<"); values.1.push_normal("<"); } fn lifetime_display(lifetime: Region) -> String { - let s = format!("{}", lifetime); + let s = lifetime.to_string(); if s.is_empty() { "'_".to_string() } else { @@ -678,8 +812,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Foo<_, Qux> // ^ elided type as this type argument was the same in both sides let type_arguments = sub1.types().zip(sub2.types()); - let regions_len = sub1.regions().collect::>().len(); - for (i, (ta1, ta2)) in type_arguments.enumerate() { + let regions_len = sub1.regions().count(); + for (i, (ta1, ta2)) in type_arguments.take(len).enumerate() { let i = i + regions_len; if ta1 == ta2 { values.0.push_normal("_"); @@ -705,12 +839,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Foo // ------- this type argument is exactly the same as the other type // Bar - if self.cmp_type_arg(&mut values.0, - &mut values.1, - path1.clone(), - sub1, - path2.clone(), - &t2).is_some() { + if self.cmp_type_arg( + &mut values.0, + &mut values.1, + path1.clone(), + sub_no_defaults_1, + path2.clone(), + &t2, + ).is_some() + { return values; } // Check for case: @@ -718,69 +855,83 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Bar // Foo> // ------- this type argument is exactly the same as the other type - if self.cmp_type_arg(&mut values.1, - &mut values.0, - path2, - sub2, - path1, - &t1).is_some() { + if self.cmp_type_arg( + &mut values.1, + &mut values.0, + path2, + sub_no_defaults_2, + path1, + &t1, + ).is_some() + { return values; } // We couldn't find anything in common, highlight everything. 
// let x: Bar = y::>(); - (DiagnosticStyledString::highlighted(format!("{}", t1)), - DiagnosticStyledString::highlighted(format!("{}", t2))) + ( + DiagnosticStyledString::highlighted(t1.to_string()), + DiagnosticStyledString::highlighted(t2.to_string()), + ) } } - // When finding T != &T, hightlight only the borrow - (&ty::TyRef(r1, ref tnm1), _) if equals(&tnm1.ty, &t2) => { + // When finding T != &T, highlight only the borrow + (&ty::TyRef(r1, ref_ty1, mutbl1), _) if equals(&ref_ty1, &t2) => { let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); - push_ty_ref(&r1, tnm1, &mut values.0); - values.1.push_normal(format!("{}", t2)); + push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0); + values.1.push_normal(t2.to_string()); values } - (_, &ty::TyRef(r2, ref tnm2)) if equals(&t1, &tnm2.ty) => { + (_, &ty::TyRef(r2, ref_ty2, mutbl2)) if equals(&t1, &ref_ty2) => { let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); - values.0.push_normal(format!("{}", t1)); - push_ty_ref(&r2, tnm2, &mut values.1); + values.0.push_normal(t1.to_string()); + push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1); values } // When encountering &T != &mut T, highlight only the borrow - (&ty::TyRef(r1, ref tnm1), &ty::TyRef(r2, ref tnm2)) if equals(&tnm1.ty, &tnm2.ty) => { + (&ty::TyRef(r1, ref_ty1, mutbl1), + &ty::TyRef(r2, ref_ty2, mutbl2)) if equals(&ref_ty1, &ref_ty2) => { let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new()); - push_ty_ref(&r1, tnm1, &mut values.0); - push_ty_ref(&r2, tnm2, &mut values.1); + push_ty_ref(&r1, ref_ty1, mutbl1, &mut values.0); + push_ty_ref(&r2, ref_ty2, mutbl2, &mut values.1); values } _ => { if t1 == t2 { // The two types are the same, elide and don't highlight. 
- (DiagnosticStyledString::normal("_"), DiagnosticStyledString::normal("_")) + ( + DiagnosticStyledString::normal("_"), + DiagnosticStyledString::normal("_"), + ) } else { // We couldn't find anything in common, highlight everything. - (DiagnosticStyledString::highlighted(format!("{}", t1)), - DiagnosticStyledString::highlighted(format!("{}", t2))) + ( + DiagnosticStyledString::highlighted(t1.to_string()), + DiagnosticStyledString::highlighted(t2.to_string()), + ) } } } } - pub fn note_type_err(&self, - diag: &mut DiagnosticBuilder<'tcx>, - cause: &ObligationCause<'tcx>, - secondary_span: Option<(Span, String)>, - mut values: Option>, - terr: &TypeError<'tcx>) - { + pub fn note_type_err( + &self, + diag: &mut DiagnosticBuilder<'tcx>, + cause: &ObligationCause<'tcx>, + secondary_span: Option<(Span, String)>, + mut values: Option>, + terr: &TypeError<'tcx>, + ) { // For some types of errors, expected-found does not make // sense, so just ignore the values we were given. match terr { - TypeError::CyclicTy(_) => { values = None; } - _ => { } + TypeError::CyclicTy(_) => { + values = None; + } + _ => {} } let (expected_found, exp_found, is_simple_error) = match values { @@ -788,8 +939,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { Some(values) => { let (is_simple_error, exp_found) = match values { ValuePairs::Types(exp_found) => { - let is_simple_err = exp_found.expected.is_primitive() - && exp_found.found.is_primitive(); + let is_simple_err = + exp_found.expected.is_primitive() && exp_found.found.is_primitive(); (is_simple_err, Some(exp_found)) } @@ -800,14 +951,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { None => { // Derived error. Cancel the emitter. 
self.tcx.sess.diagnostic().cancel(diag); - return + return; } }; (vals, exp_found, is_simple_error) } }; - let span = cause.span; + let span = cause.span(&self.tcx); diag.span_label(span, terr.to_string()); if let Some((sp, msg)) = secondary_span { @@ -818,9 +969,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { match (terr, is_simple_error, expected == found) { (&TypeError::Sorts(ref values), false, true) => { diag.note_expected_found_extra( - &"type", expected, found, + &"type", + expected, + found, &format!(" ({})", values.expected.sort_string(self.tcx)), - &format!(" ({})", values.found.sort_string(self.tcx))); + &format!(" ({})", values.found.sort_string(self.tcx)), + ); } (_, false, _) => { if let Some(exp_found) = exp_found { @@ -828,21 +982,21 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { TypeVariants::TyFnDef(def, _) => { (Some(def), Some(self.tcx.fn_sig(def).output())) } - _ => (None, None) + _ => (None, None), }; let exp_is_struct = match exp_found.expected.sty { TypeVariants::TyAdt(def, _) => def.is_struct(), - _ => false + _ => false, }; if let (Some(def_id), Some(ret_ty)) = (def_id, ret_ty) { - if exp_is_struct && exp_found.expected == ret_ty.0 { + if exp_is_struct && &exp_found.expected == ret_ty.skip_binder() { let message = format!( "did you mean `{}(/* fields */)`?", self.tcx.item_path_str(def_id) ); - diag.span_label(cause.span, message); + diag.span_label(span, message); } } } @@ -861,16 +1015,17 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.note_error_origin(diag, &cause); } - pub fn report_and_explain_type_error(&self, - trace: TypeTrace<'tcx>, - terr: &TypeError<'tcx>) - -> DiagnosticBuilder<'tcx> - { - debug!("report_and_explain_type_error(trace={:?}, terr={:?})", - trace, - terr); + pub fn report_and_explain_type_error( + &self, + trace: TypeTrace<'tcx>, + terr: &TypeError<'tcx>, + ) -> DiagnosticBuilder<'tcx> { + debug!( + "report_and_explain_type_error(trace={:?}, terr={:?})", + trace, terr + ); - let span = 
trace.cause.span; + let span = trace.cause.span(&self.tcx); let failure_code = trace.cause.as_failure_code(terr); let mut diag = match failure_code { FailureCode::Error0317(failure_str) => { @@ -890,19 +1045,22 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { diag } - fn values_str(&self, values: &ValuePairs<'tcx>) - -> Option<(DiagnosticStyledString, DiagnosticStyledString)> - { + fn values_str( + &self, + values: &ValuePairs<'tcx>, + ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { match *values { infer::Types(ref exp_found) => self.expected_found_str_ty(exp_found), + infer::Regions(ref exp_found) => self.expected_found_str(exp_found), infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found), infer::PolyTraitRefs(ref exp_found) => self.expected_found_str(exp_found), } } - fn expected_found_str_ty(&self, - exp_found: &ty::error::ExpectedFound>) - -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { + fn expected_found_str_ty( + &self, + exp_found: &ty::error::ExpectedFound>, + ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { let exp_found = self.resolve_type_vars_if_possible(exp_found); if exp_found.references_error() { return None; @@ -914,24 +1072,43 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Returns a string of the form "expected `{}`, found `{}`". 
fn expected_found_str>( &self, - exp_found: &ty::error::ExpectedFound) - -> Option<(DiagnosticStyledString, DiagnosticStyledString)> - { + exp_found: &ty::error::ExpectedFound, + ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> { let exp_found = self.resolve_type_vars_if_possible(exp_found); if exp_found.references_error() { return None; } - Some((DiagnosticStyledString::highlighted(format!("{}", exp_found.expected)), - DiagnosticStyledString::highlighted(format!("{}", exp_found.found)))) + Some(( + DiagnosticStyledString::highlighted(exp_found.expected.to_string()), + DiagnosticStyledString::highlighted(exp_found.found.to_string()), + )) } - pub fn report_generic_bound_failure(&self, - region_scope_tree: ®ion::ScopeTree, - span: Span, - origin: Option>, - bound_kind: GenericKind<'tcx>, - sub: Region<'tcx>) + pub fn report_generic_bound_failure( + &self, + region_scope_tree: ®ion::ScopeTree, + span: Span, + origin: Option>, + bound_kind: GenericKind<'tcx>, + sub: Region<'tcx>, + ) { + self.construct_generic_bound_failure(region_scope_tree, + span, + origin, + bound_kind, + sub) + .emit() + } + + pub fn construct_generic_bound_failure( + &self, + region_scope_tree: ®ion::ScopeTree, + span: Span, + origin: Option>, + bound_kind: GenericKind<'tcx>, + sub: Region<'tcx>, + ) -> DiagnosticBuilder<'a> { // Attempt to obtain the span of the parameter so we can // suggest adding an explicit lifetime bound to it. @@ -946,23 +1123,25 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let type_param = generics.type_param(param, self.tcx); let hir = &self.tcx.hir; hir.as_local_node_id(type_param.def_id).map(|id| { - // Get the `hir::TyParam` to verify wether it already has any bounds. + // Get the `hir::TyParam` to verify whether it already has any bounds. // We do this to avoid suggesting code that ends up as `T: 'a'b`, // instead we suggest `T: 'a + 'b` in that case. 
- let has_lifetimes = if let hir_map::NodeTyParam(ref p) = hir.get(id) { - p.bounds.len() > 0 - } else { - false - }; + let mut has_bounds = false; + if let hir_map::NodeGenericParam(ref param) = hir.get(id) { + has_bounds = !param.bounds.is_empty(); + } let sp = hir.span(id); // `sp` only covers `T`, change it so that it covers // `T:` when appropriate - let sp = if has_lifetimes { - sp.to(sp.next_point().next_point()) + let sp = if has_bounds { + sp.to(self.tcx + .sess + .codemap() + .next_point(self.tcx.sess.codemap().next_point(sp))) } else { sp }; - (sp, has_lifetimes) + (sp, has_bounds) }) } else { None @@ -973,83 +1152,99 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { }; let labeled_user_string = match bound_kind { - GenericKind::Param(ref p) => - format!("the parameter type `{}`", p), - GenericKind::Projection(ref p) => - format!("the associated type `{}`", p), + GenericKind::Param(ref p) => format!("the parameter type `{}`", p), + GenericKind::Projection(ref p) => format!("the associated type `{}`", p), }; if let Some(SubregionOrigin::CompareImplMethodObligation { - span, item_name, impl_item_def_id, trait_item_def_id, - }) = origin { - self.report_extra_impl_obligation(span, - item_name, - impl_item_def_id, - trait_item_def_id, - &format!("`{}: {}`", bound_kind, sub)) - .emit(); - return; + span, + item_name, + impl_item_def_id, + trait_item_def_id, + }) = origin + { + return self.report_extra_impl_obligation( + span, + item_name, + impl_item_def_id, + trait_item_def_id, + &format!("`{}: {}`", bound_kind, sub), + ); } - fn binding_suggestion<'tcx, S: fmt::Display>(err: &mut DiagnosticBuilder<'tcx>, - type_param_span: Option<(Span, bool)>, - bound_kind: GenericKind<'tcx>, - sub: S) { - let consider = &format!("consider adding an explicit lifetime bound `{}: {}`...", - bound_kind, - sub); + fn binding_suggestion<'tcx, S: fmt::Display>( + err: &mut DiagnosticBuilder<'tcx>, + type_param_span: Option<(Span, bool)>, + bound_kind: GenericKind<'tcx>, + sub: 
S, + ) { + let consider = &format!( + "consider adding an explicit lifetime bound `{}: {}`...", + bound_kind, sub + ); if let Some((sp, has_lifetimes)) = type_param_span { - let tail = if has_lifetimes { - " + " - } else { - "" - }; + let tail = if has_lifetimes { " + " } else { "" }; let suggestion = format!("{}: {}{}", bound_kind, sub, tail); - err.span_suggestion_short(sp, consider, suggestion); + err.span_suggestion_short_with_applicability( + sp, consider, suggestion, + Applicability::MaybeIncorrect // Issue #41966 + ); } else { err.help(consider); } } let mut err = match *sub { - ty::ReEarlyBound(_) | - ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => { + ty::ReEarlyBound(_) + | ty::ReFree(ty::FreeRegion { + bound_region: ty::BrNamed(..), + .. + }) => { // Does the required lifetime have a nice name we can print? - let mut err = struct_span_err!(self.tcx.sess, - span, - E0309, - "{} may not live long enough", - labeled_user_string); + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0309, + "{} may not live long enough", + labeled_user_string + ); binding_suggestion(&mut err, type_param_span, bound_kind, sub); err } ty::ReStatic => { // Does the required lifetime have a nice name we can print? - let mut err = struct_span_err!(self.tcx.sess, - span, - E0310, - "{} may not live long enough", - labeled_user_string); + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0310, + "{} may not live long enough", + labeled_user_string + ); binding_suggestion(&mut err, type_param_span, bound_kind, "'static"); err } _ => { // If not, be less specific. 
- let mut err = struct_span_err!(self.tcx.sess, - span, - E0311, - "{} may not live long enough", - labeled_user_string); - err.help(&format!("consider adding an explicit lifetime bound for `{}`", - bound_kind)); + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0311, + "{} may not live long enough", + labeled_user_string + ); + err.help(&format!( + "consider adding an explicit lifetime bound for `{}`", + bound_kind + )); self.tcx.note_and_explain_region( region_scope_tree, &mut err, &format!("{} must be valid for ", labeled_user_string), sub, - "..."); + "...", + ); err } }; @@ -1057,29 +1252,65 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { if let Some(origin) = origin { self.note_region_origin(&mut err, &origin); } - err.emit(); + err } - fn report_sub_sup_conflict(&self, - region_scope_tree: ®ion::ScopeTree, - var_origin: RegionVariableOrigin, - sub_origin: SubregionOrigin<'tcx>, - sub_region: Region<'tcx>, - sup_origin: SubregionOrigin<'tcx>, - sup_region: Region<'tcx>) { + fn report_sub_sup_conflict( + &self, + region_scope_tree: ®ion::ScopeTree, + var_origin: RegionVariableOrigin, + sub_origin: SubregionOrigin<'tcx>, + sub_region: Region<'tcx>, + sup_origin: SubregionOrigin<'tcx>, + sup_region: Region<'tcx>, + ) { let mut err = self.report_inference_failure(var_origin); - self.tcx.note_and_explain_region(region_scope_tree, &mut err, + self.tcx.note_and_explain_region( + region_scope_tree, + &mut err, "first, the lifetime cannot outlive ", sup_region, - "..."); + "...", + ); + + match (&sup_origin, &sub_origin) { + (&infer::Subtype(ref sup_trace), &infer::Subtype(ref sub_trace)) => { + if let (Some((sup_expected, sup_found)), Some((sub_expected, sub_found))) = ( + self.values_str(&sup_trace.values), + self.values_str(&sub_trace.values), + ) { + if sub_expected == sup_expected && sub_found == sup_found { + self.tcx.note_and_explain_region( + region_scope_tree, + &mut err, + "...but the lifetime must also be valid for ", + sub_region, + "...", 
+ ); + err.note(&format!( + "...so that the {}:\nexpected {}\n found {}", + sup_trace.cause.as_requirement_str(), + sup_expected.content(), + sup_found.content() + )); + err.emit(); + return; + } + } + } + _ => {} + } self.note_region_origin(&mut err, &sup_origin); - self.tcx.note_and_explain_region(region_scope_tree, &mut err, + self.tcx.note_and_explain_region( + region_scope_tree, + &mut err, "but, the lifetime must be valid for ", sub_region, - "..."); + "...", + ); self.note_region_origin(&mut err, &sub_origin); err.emit(); @@ -1087,9 +1318,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { - fn report_inference_failure(&self, - var_origin: RegionVariableOrigin) - -> DiagnosticBuilder<'tcx> { + fn report_inference_failure( + &self, + var_origin: RegionVariableOrigin, + ) -> DiagnosticBuilder<'tcx> { let br_string = |br: ty::BoundRegion| { let mut s = br.to_string(); if !s.is_empty() { @@ -1104,23 +1336,19 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { infer::Autoref(_) => " for autoref".to_string(), infer::Coercion(_) => " for automatic coercion".to_string(), infer::LateBoundRegion(_, br, infer::FnCall) => { - format!(" for lifetime parameter {}in function call", - br_string(br)) + format!(" for lifetime parameter {}in function call", br_string(br)) } infer::LateBoundRegion(_, br, infer::HigherRankedType) => { format!(" for lifetime parameter {}in generic type", br_string(br)) } - infer::LateBoundRegion(_, br, infer::AssocTypeProjection(def_id)) => { - format!(" for lifetime parameter {}in trait containing associated type `{}`", - br_string(br), self.tcx.associated_item(def_id).name) - } - infer::EarlyBoundRegion(_, name) => { - format!(" for lifetime parameter `{}`", - name) - } + infer::LateBoundRegion(_, br, infer::AssocTypeProjection(def_id)) => format!( + " for lifetime parameter {}in trait containing associated type `{}`", + br_string(br), + self.tcx.associated_item(def_id).ident + ), + 
infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{}`", name), infer::BoundRegionInCoherence(name) => { - format!(" for lifetime parameter `{}` in coherence check", - name) + format!(" for lifetime parameter `{}` in coherence check", name) } infer::UpvarRegion(ref upvar_id, _) => { let var_node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); @@ -1130,10 +1358,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { infer::NLL(..) => bug!("NLL variable found in lexical phase"), }; - struct_span_err!(self.tcx.sess, var_origin.span(), E0495, - "cannot infer an appropriate lifetime{} \ - due to conflicting requirements", - var_description) + struct_span_err!( + self.tcx.sess, + var_origin.span(), + E0495, + "cannot infer an appropriate lifetime{} \ + due to conflicting requirements", + var_description + ) } } @@ -1151,12 +1383,16 @@ impl<'tcx> ObligationCause<'tcx> { match self.code { CompareImplMethodObligation { .. } => Error0308("method not compatible with trait"), MatchExpressionArm { source, .. } => Error0308(match source { - hir::MatchSource::IfLetDesugar{..} => "`if let` arms have incompatible types", + hir::MatchSource::IfLetDesugar { .. } => { + "`if let` arms have incompatible types" + }, + hir::MatchSource::TryDesugar => { + "try expression alternatives have incompatible types" + }, _ => "match arms have incompatible types", }), IfExpression => Error0308("if and else have incompatible types"), IfExpressionWithNoElse => Error0317("if may be missing an else clause"), - EquatePredicate => Error0308("equality predicate not satisfied"), MainFunctionType => Error0580("main function has wrong type"), StartFunctionType => Error0308("start function has wrong type"), IntrinsicType => Error0308("intrinsic has wrong type"), @@ -1166,11 +1402,11 @@ impl<'tcx> ObligationCause<'tcx> { // say, also take a look at the error code, maybe we can // tailor to that. 
_ => match terr { - TypeError::CyclicTy(ty) if ty.is_closure() || ty.is_generator() => - Error0644("closure/generator type that references itself"), - _ => - Error0308("mismatched types"), - } + TypeError::CyclicTy(ty) if ty.is_closure() || ty.is_generator() => { + Error0644("closure/generator type that references itself") + } + _ => Error0308("mismatched types"), + }, } } @@ -1180,12 +1416,11 @@ impl<'tcx> ObligationCause<'tcx> { CompareImplMethodObligation { .. } => "method type is compatible with trait", ExprAssignable => "expression is assignable", MatchExpressionArm { source, .. } => match source { - hir::MatchSource::IfLetDesugar{..} => "`if let` arms have compatible types", + hir::MatchSource::IfLetDesugar { .. } => "`if let` arms have compatible types", _ => "match arms have compatible types", }, IfExpression => "if and else have compatible types", IfExpressionWithNoElse => "if missing an else returns ()", - EquatePredicate => "equality where clause is satisfied", MainFunctionType => "`main` function has the correct type", StartFunctionType => "`start` function has the correct type", IntrinsicType => "intrinsic has the correct type", diff --git a/src/librustc/infer/error_reporting/need_type_info.rs b/src/librustc/infer/error_reporting/need_type_info.rs index ea3c0a8ddb45..b71c886a8960 100644 --- a/src/librustc/infer/error_reporting/need_type_info.rs +++ b/src/librustc/infer/error_reporting/need_type_info.rs @@ -13,7 +13,9 @@ use hir::intravisit::{self, Visitor, NestedVisitorMap}; use infer::InferCtxt; use infer::type_variable::TypeVariableOrigin; use ty::{self, Ty, TyInfer, TyVar}; +use syntax::codemap::CompilerDesugaringKind; use syntax_pos::Span; +use errors::DiagnosticBuilder; struct FindLocalByTypeVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, @@ -72,7 +74,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindLocalByTypeVisitor<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { - fn extract_type_name(&self, ty: 
&'a Ty<'tcx>) -> String { + pub fn extract_type_name(&self, ty: &'a Ty<'tcx>) -> String { if let ty::TyInfer(ty::TyVar(ty_vid)) = (*ty).sty { let ty_vars = self.type_variables.borrow(); if let TypeVariableOrigin::TypeParameterDefinition(_, name) = @@ -86,12 +88,23 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - pub fn need_type_info(&self, body_id: Option, span: Span, ty: Ty<'tcx>) { + pub fn need_type_info_err(&self, + body_id: Option, + span: Span, + ty: Ty<'tcx>) + -> DiagnosticBuilder<'gcx> { let ty = self.resolve_type_vars_if_possible(&ty); let name = self.extract_type_name(&ty); let mut err_span = span; - let mut labels = vec![(span, format!("cannot infer type for `{}`", name))]; + let mut labels = vec![( + span, + if &name == "_" { + "cannot infer type".to_string() + } else { + format!("cannot infer type for `{}`", name) + }, + )]; let mut local_visitor = FindLocalByTypeVisitor { infcx: &self, @@ -124,12 +137,21 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // ^ consider giving this closure parameter a type // ``` labels.clear(); - labels.push((pattern.span, format!("consider giving this closure parameter a type"))); + labels.push( + (pattern.span, "consider giving this closure parameter a type".to_string())); } else if let Some(pattern) = local_visitor.found_local_pattern { - if let Some(simple_name) = pattern.simple_name() { - labels.push((pattern.span, format!("consider giving `{}` a type", simple_name))); + if let Some(simple_ident) = pattern.simple_ident() { + match pattern.span.compiler_desugaring_kind() { + None => labels.push((pattern.span, + format!("consider giving `{}` a type", simple_ident))), + Some(CompilerDesugaringKind::ForLoop) => labels.push(( + pattern.span, + "the element type for this iterator is not specified".to_string(), + )), + _ => {} + } } else { - labels.push((pattern.span, format!("consider giving the pattern a type"))); + labels.push((pattern.span, "consider giving the pattern a type".to_string())); } } @@ -142,6 
+164,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { err.span_label(target_span, label_message); } - err.emit(); + err } } diff --git a/src/librustc/infer/error_reporting/nice_region_error/different_lifetimes.rs b/src/librustc/infer/error_reporting/nice_region_error/different_lifetimes.rs index d4ea899dc747..168285b455f0 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/different_lifetimes.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/different_lifetimes.rs @@ -53,7 +53,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { /// /// It will later be extended to trait objects. pub(super) fn try_report_anon_anon_conflict(&self) -> Option { - let NiceRegionError { span, sub, sup, .. } = *self; + let (span, sub, sup) = self.get_regions(); // Determine whether the sub and sup consist of both anonymous (elided) regions. let anon_reg_sup = self.is_suitable_region(sup)?; @@ -96,33 +96,29 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { let sub_is_ret_type = self.is_return_type_anon(scope_def_id_sub, bregion_sub, ty_fndecl_sub); - let span_label_var1 = if let Some(simple_name) = anon_arg_sup.pat.simple_name() { - format!(" from `{}`", simple_name) + let span_label_var1 = if let Some(simple_ident) = anon_arg_sup.pat.simple_ident() { + format!(" from `{}`", simple_ident) } else { - format!("") + String::new() }; - let span_label_var2 = if let Some(simple_name) = anon_arg_sub.pat.simple_name() { - format!(" into `{}`", simple_name) + let span_label_var2 = if let Some(simple_ident) = anon_arg_sub.pat.simple_ident() { + format!(" into `{}`", simple_ident) } else { - format!("") + String::new() }; let (span_1, span_2, main_label, span_label) = match (sup_is_ret_type, sub_is_ret_type) { (None, None) => { - let (main_label_1, span_label_1) = if ty_sup == ty_sub { + let (main_label_1, span_label_1) = if ty_sup.id == ty_sub.id { ( - format!("this type is declared with multiple lifetimes..."), - format!( - "...but data{} flows{} 
here", - format!(" with one lifetime"), - format!(" into the other") - ), + "this type is declared with multiple lifetimes...".to_string(), + "...but data with one lifetime flows into the other here".to_string() ) } else { ( - format!("these two types are declared with different lifetimes..."), + "these two types are declared with different lifetimes...".to_string(), format!( "...but data{} flows{} here", span_label_var1, @@ -136,19 +132,17 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { (Some(ret_span), _) => ( ty_sub.span, ret_span, - format!( - "this parameter and the return type are declared \ - with different lifetimes...", - ), + "this parameter and the return type are declared \ + with different lifetimes...".to_string() + , format!("...but data{} is returned here", span_label_var1), ), (_, Some(ret_span)) => ( ty_sup.span, ret_span, - format!( - "this parameter and the return type are declared \ - with different lifetimes...", - ), + "this parameter and the return type are declared \ + with different lifetimes...".to_string() + , format!("...but data{} is returned here", span_label_var1), ), }; @@ -156,7 +150,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { struct_span_err!(self.tcx.sess, span, E0623, "lifetime mismatch") .span_label(span_1, main_label) - .span_label(span_2, format!("")) + .span_label(span_2, String::new()) .span_label(span, span_label) .emit(); return Some(ErrorReported); diff --git a/src/librustc/infer/error_reporting/nice_region_error/find_anon_type.rs b/src/librustc/infer/error_reporting/nice_region_error/find_anon_type.rs index dc53c1db06f1..21be09b0ba19 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/find_anon_type.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/find_anon_type.rs @@ -41,7 +41,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { let fndecl = match self.tcx.hir.get(node_id) { 
hir_map::NodeItem(&hir::Item { - node: hir::ItemFn(ref fndecl, ..), + node: hir::ItemKind::Fn(ref fndecl, ..), .. }) => &fndecl, hir_map::NodeTraitItem(&hir::TraitItem { @@ -77,7 +77,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { tcx: self.tcx, bound_region: *br, found_type: None, - depth: 1, + current_index: ty::INNERMOST, }; nested_visitor.visit_ty(arg); nested_visitor.found_type @@ -99,7 +99,7 @@ struct FindNestedTypeVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { // The type where the anonymous lifetime appears // for e.g. Vec<`&u8`> and <`&u8`> found_type: Option<&'gcx hir::Ty>, - depth: u32, + current_index: ty::DebruijnIndex, } impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { @@ -109,20 +109,20 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { fn visit_ty(&mut self, arg: &'gcx hir::Ty) { match arg.node { - hir::TyBareFn(_) => { - self.depth += 1; + hir::TyKind::BareFn(_) => { + self.current_index.shift_in(1); intravisit::walk_ty(self, arg); - self.depth -= 1; + self.current_index.shift_out(1); return; } - hir::TyTraitObject(ref bounds, _) => for bound in bounds { - self.depth += 1; + hir::TyKind::TraitObject(ref bounds, _) => for bound in bounds { + self.current_index.shift_in(1); self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None); - self.depth -= 1; + self.current_index.shift_out(1); }, - hir::TyRptr(ref lifetime, _) => { + hir::TyKind::Rptr(ref lifetime, _) => { // the lifetime of the TyRptr let hir_id = self.tcx.hir.node_to_hir_id(lifetime.id); match (self.tcx.named_region(hir_id), self.bound_region) { @@ -135,11 +135,11 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { ) => { debug!( "LateBoundAnon depth = {:?} anon_index = {:?} br_index={:?}", - debruijn_index.depth, + debruijn_index, anon_index, br_index ); - if debruijn_index.depth == self.depth && anon_index == br_index { + if debruijn_index == self.current_index && anon_index == br_index 
{ self.found_type = Some(arg); return; // we can stop visiting now } @@ -170,11 +170,11 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { ) => { debug!( "FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", - debruijn_index.depth + debruijn_index ); debug!("self.infcx.tcx.hir.local_def_id(id)={:?}", id); debug!("def_id={:?}", def_id); - if debruijn_index.depth == self.depth && id == def_id { + if debruijn_index == self.current_index && id == def_id { self.found_type = Some(arg); return; // we can stop visiting now } @@ -190,13 +190,13 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { } } } - // Checks if it is of type `hir::TyPath` which corresponds to a struct. - hir::TyPath(_) => { + // Checks if it is of type `hir::TyKind::Path` which corresponds to a struct. + hir::TyKind::Path(_) => { let subvisitor = &mut TyPathVisitor { tcx: self.tcx, found_it: false, bound_region: self.bound_region, - depth: self.depth, + current_index: self.current_index, }; intravisit::walk_ty(subvisitor, arg); // call walk_ty; as visit_ty is empty, // this will visit only outermost type @@ -213,7 +213,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for FindNestedTypeVisitor<'a, 'gcx, 'tcx> { } // The visitor captures the corresponding `hir::Ty` of the anonymous region -// in the case of structs ie. `hir::TyPath`. +// in the case of structs ie. `hir::TyKind::Path`. // This visitor would be invoked for each lifetime corresponding to a struct, // and would walk the types like Vec in the above example and Ref looking for the HIR // where that lifetime appears. 
This allows us to highlight the @@ -222,7 +222,7 @@ struct TyPathVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, found_it: bool, bound_region: ty::BoundRegion, - depth: u32, + current_index: ty::DebruijnIndex, } impl<'a, 'gcx, 'tcx> Visitor<'gcx> for TyPathVisitor<'a, 'gcx, 'tcx> { @@ -235,7 +235,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for TyPathVisitor<'a, 'gcx, 'tcx> { match (self.tcx.named_region(hir_id), self.bound_region) { // the lifetime of the TyPath! (Some(rl::Region::LateBoundAnon(debruijn_index, anon_index)), ty::BrAnon(br_index)) => { - if debruijn_index.depth == self.depth && anon_index == br_index { + if debruijn_index == self.current_index && anon_index == br_index { self.found_it = true; return; } @@ -257,11 +257,11 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for TyPathVisitor<'a, 'gcx, 'tcx> { (Some(rl::Region::LateBound(debruijn_index, id, _)), ty::BrNamed(def_id, _)) => { debug!( "FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", - debruijn_index.depth + debruijn_index, ); debug!("id={:?}", id); debug!("def_id={:?}", def_id); - if debruijn_index.depth == self.depth && id == def_id { + if debruijn_index == self.current_index && id == def_id { self.found_it = true; return; // we can stop visiting now } diff --git a/src/librustc/infer/error_reporting/nice_region_error/mod.rs b/src/librustc/infer/error_reporting/nice_region_error/mod.rs index edc38b6bb14e..ddeb291a13aa 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/mod.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/mod.rs @@ -18,46 +18,72 @@ use util::common::ErrorReported; mod different_lifetimes; mod find_anon_type; mod named_anon_conflict; +mod outlives_closure; +mod static_impl_trait; mod util; impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { pub fn try_report_nice_region_error(&self, error: &RegionResolutionError<'tcx>) -> bool { - let (span, sub, sup) = match *error { - ConcreteFailure(ref origin, sub, sup) => (origin.span(), sub, 
sup), - SubSupConflict(_, ref origin, sub, _, sup) => (origin.span(), sub, sup), - _ => return false, // inapplicable - }; + match *error { + ConcreteFailure(..) | SubSupConflict(..) => {} + _ => return false, // inapplicable + } if let Some(tables) = self.in_progress_tables { let tables = tables.borrow(); - NiceRegionError::new(self.tcx, span, sub, sup, Some(&tables)).try_report().is_some() + NiceRegionError::new(self.tcx, error.clone(), Some(&tables)).try_report().is_some() } else { - NiceRegionError::new(self.tcx, span, sub, sup, None).try_report().is_some() + NiceRegionError::new(self.tcx, error.clone(), None).try_report().is_some() } } } pub struct NiceRegionError<'cx, 'gcx: 'tcx, 'tcx: 'cx> { tcx: TyCtxt<'cx, 'gcx, 'tcx>, - span: Span, - sub: ty::Region<'tcx>, - sup: ty::Region<'tcx>, + error: Option>, + regions: Option<(Span, ty::Region<'tcx>, ty::Region<'tcx>)>, tables: Option<&'cx ty::TypeckTables<'tcx>>, } impl<'cx, 'gcx, 'tcx> NiceRegionError<'cx, 'gcx, 'tcx> { pub fn new( + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + error: RegionResolutionError<'tcx>, + tables: Option<&'cx ty::TypeckTables<'tcx>>, + ) -> Self { + Self { tcx, error: Some(error), regions: None, tables } + } + + pub fn new_from_span( tcx: TyCtxt<'cx, 'gcx, 'tcx>, span: Span, sub: ty::Region<'tcx>, sup: ty::Region<'tcx>, tables: Option<&'cx ty::TypeckTables<'tcx>>, ) -> Self { - Self { tcx, span, sub, sup, tables } + Self { tcx, error: None, regions: Some((span, sub, sup)), tables } + } + + pub fn try_report_from_nll(&self) -> Option { + // Due to the improved diagnostics returned by the MIR borrow checker, only a subset of + // the nice region errors are required when running under the MIR borrow checker. 
+ self.try_report_named_anon_conflict() } pub fn try_report(&self) -> Option { self.try_report_named_anon_conflict() .or_else(|| self.try_report_anon_anon_conflict()) + .or_else(|| self.try_report_outlives_closure()) + .or_else(|| self.try_report_static_impl_trait()) + } + + pub fn get_regions(&self) -> (Span, ty::Region<'tcx>, ty::Region<'tcx>) { + match (&self.error, self.regions) { + (&Some(ConcreteFailure(ref origin, sub, sup)), None) => (origin.span(), sub, sup), + (&Some(SubSupConflict(_, ref origin, sub, _, sup)), None) => (origin.span(), sub, sup), + (None, Some((span, sub, sup))) => (span, sub, sup), + (Some(_), Some(_)) => panic!("incorrectly built NiceRegionError"), + _ => panic!("trying to report on an incorrect lifetime failure"), + } } } diff --git a/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs b/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs index 5617c7723859..4e26a4178b95 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/named_anon_conflict.rs @@ -18,7 +18,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { /// When given a `ConcreteFailure` for a function with arguments containing a named region and /// an anonymous region, emit an descriptive diagnostic error. pub(super) fn try_report_named_anon_conflict(&self) -> Option { - let NiceRegionError { span, sub, sup, .. 
} = *self; + let (span, sub, sup) = self.get_regions(); debug!( "try_report_named_anon_conflict(sub={:?}, sup={:?})", @@ -65,9 +65,10 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { region_info ); - let (arg, new_ty, br, is_first, scope_def_id, is_impl_item) = ( + let (arg, new_ty, new_ty_span, br, is_first, scope_def_id, is_impl_item) = ( anon_arg_info.arg, anon_arg_info.arg_ty, + anon_arg_info.arg_ty_span, anon_arg_info.bound_region, anon_arg_info.is_first, region_info.def_id, @@ -95,10 +96,10 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { } } - let (error_var, span_label_var) = if let Some(simple_name) = arg.pat.simple_name() { + let (error_var, span_label_var) = if let Some(simple_ident) = arg.pat.simple_ident() { ( - format!("the type of `{}`", simple_name), - format!("the type of `{}`", simple_name), + format!("the type of `{}`", simple_ident), + format!("the type of `{}`", simple_ident), ) } else { ("parameter type".to_owned(), "type".to_owned()) @@ -110,9 +111,10 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { E0621, "explicit lifetime required in {}", error_var - ).span_label( - arg.pat.span, - format!("consider changing {} to `{}`", span_label_var, new_ty), + ).span_suggestion( + new_ty_span, + &format!("add explicit lifetime `{}` to {}", named, span_label_var), + new_ty.to_string() ) .span_label(span, format!("lifetime `{}` required", named)) .emit(); @@ -127,7 +129,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { ty::BrNamed(..) => true, _ => false, }, - ty::ReEarlyBound(_) => true, + ty::ReEarlyBound(ebr) => ebr.has_name(), _ => false, } } diff --git a/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs b/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs new file mode 100644 index 000000000000..f4ef197e5b42 --- /dev/null +++ b/src/librustc/infer/error_reporting/nice_region_error/outlives_closure.rs @@ -0,0 +1,124 @@ +// Copyright 2018 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Error Reporting for Anonymous Region Lifetime Errors +//! where both the regions are anonymous. + +use infer::error_reporting::nice_region_error::NiceRegionError; +use infer::SubregionOrigin; +use ty::RegionKind; +use hir::{Expr, ExprKind::Closure}; +use hir::map::NodeExpr; +use util::common::ErrorReported; +use infer::lexical_region_resolve::RegionResolutionError::SubSupConflict; + +impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { + /// Print the error message for lifetime errors when binding excapes a closure. + /// + /// Consider a case where we have + /// + /// ```no_run + /// fn with_int(f: F) where F: FnOnce(&isize) { + /// let x = 3; + /// f(&x); + /// } + /// fn main() { + /// let mut x = None; + /// with_int(|y| x = Some(y)); + /// } + /// ``` + /// + /// the output will be + /// + /// ```text + /// let mut x = None; + /// ----- borrowed data cannot be stored into here... + /// with_int(|y| x = Some(y)); + /// --- ^ cannot be stored outside of its closure + /// | + /// ...because it cannot outlive this closure + /// ``` + pub(super) fn try_report_outlives_closure(&self) -> Option { + if let Some(SubSupConflict(origin, + ref sub_origin, + _, + ref sup_origin, + sup_region)) = self.error { + + // #45983: when trying to assign the contents of an argument to a binding outside of a + // closure, provide a specific message pointing this out. 
+ if let (&SubregionOrigin::BindingTypeIsNotValidAtDecl(ref external_span), + &RegionKind::ReFree(ref free_region)) = (&sub_origin, sup_region) { + let hir = &self.tcx.hir; + if let Some(node_id) = hir.as_local_node_id(free_region.scope) { + match hir.get(node_id) { + NodeExpr(Expr { + node: Closure(_, _, _, closure_span, None), + .. + }) => { + let sup_sp = sup_origin.span(); + let origin_sp = origin.span(); + let mut err = self.tcx.sess.struct_span_err( + sup_sp, + "borrowed data cannot be stored outside of its closure"); + err.span_label(sup_sp, "cannot be stored outside of its closure"); + if origin_sp == sup_sp || origin_sp.contains(sup_sp) { +// // sup_sp == origin.span(): +// +// let mut x = None; +// ----- borrowed data cannot be stored into here... +// with_int(|y| x = Some(y)); +// --- ^ cannot be stored outside of its closure +// | +// ...because it cannot outlive this closure +// +// // origin.contains(&sup_sp): +// +// let mut f: Option<&u32> = None; +// ----- borrowed data cannot be stored into here... +// closure_expecting_bound(|x: &'x u32| { +// ------------ ... because it cannot outlive this closure +// f = Some(x); +// ^ cannot be stored outside of its closure + err.span_label(*external_span, + "borrowed data cannot be stored into here..."); + err.span_label(*closure_span, + "...because it cannot outlive this closure"); + } else { +// FIXME: the wording for this case could be much improved +// +// let mut lines_to_use: Vec<&CrateId> = Vec::new(); +// - cannot infer an appropriate lifetime... 
+// let push_id = |installed_id: &CrateId| { +// ------- ------------------------ borrowed data cannot outlive this closure +// | +// ...so that variable is valid at time of its declaration +// lines_to_use.push(installed_id); +// ^^^^^^^^^^^^ cannot be stored outside of its closure + err.span_label(origin_sp, + "cannot infer an appropriate lifetime..."); + err.span_label(*external_span, + "...so that variable is valid at time of its \ + declaration"); + err.span_label(*closure_span, + "borrowed data cannot outlive this closure"); + } + err.emit(); + return Some(ErrorReported); + } + _ => {} + } + } + } + } + None + } +} + diff --git a/src/librustc/infer/error_reporting/nice_region_error/static_impl_trait.rs b/src/librustc/infer/error_reporting/nice_region_error/static_impl_trait.rs new file mode 100644 index 000000000000..193f86a38279 --- /dev/null +++ b/src/librustc/infer/error_reporting/nice_region_error/static_impl_trait.rs @@ -0,0 +1,83 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Error Reporting for static impl Traits. + +use infer::error_reporting::nice_region_error::NiceRegionError; +use infer::lexical_region_resolve::RegionResolutionError; +use ty::{BoundRegion, FreeRegion, RegionKind}; +use util::common::ErrorReported; + +impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { + /// Print the error message for lifetime errors when the return type is a static impl Trait. 
+ pub(super) fn try_report_static_impl_trait(&self) -> Option { + if let Some(ref error) = self.error { + match error.clone() { + RegionResolutionError::SubSupConflict( + var_origin, + sub_origin, + sub_r, + sup_origin, + sup_r, + ) => { + let anon_reg_sup = self.is_suitable_region(sup_r)?; + if sub_r == &RegionKind::ReStatic && + self.is_return_type_impl_trait(anon_reg_sup.def_id) + { + let sp = var_origin.span(); + let return_sp = sub_origin.span(); + let mut err = self.tcx.sess.struct_span_err( + sp, + "cannot infer an appropriate lifetime", + ); + err.span_label( + return_sp, + "this return type evaluates to the `'static` lifetime...", + ); + err.span_label( + sup_origin.span(), + "...but this borrow...", + ); + + let (lifetime, lt_sp_opt) = self.tcx.msg_span_from_free_region(sup_r); + if let Some(lifetime_sp) = lt_sp_opt { + err.span_note( + lifetime_sp, + &format!("...can't outlive {}", lifetime), + ); + } + + let lifetime_name = match sup_r { + RegionKind::ReFree(FreeRegion { + bound_region: BoundRegion::BrNamed(_, ref name), .. 
+ }) => name.to_string(), + _ => "'_".to_owned(), + }; + if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(return_sp) { + err.span_suggestion( + return_sp, + &format!( + "you can add a constraint to the return type to make it last \ + less than `'static` and match {}", + lifetime, + ), + format!("{} + {}", snippet, lifetime_name), + ); + } + err.emit(); + return Some(ErrorReported); + } + } + _ => {} + } + } + None + } +} diff --git a/src/librustc/infer/error_reporting/nice_region_error/util.rs b/src/librustc/infer/error_reporting/nice_region_error/util.rs index 8aadec645541..28320ce3ad1b 100644 --- a/src/librustc/infer/error_reporting/nice_region_error/util.rs +++ b/src/librustc/infer/error_reporting/nice_region_error/util.rs @@ -27,6 +27,8 @@ pub(super) struct AnonymousArgInfo<'tcx> { pub arg_ty: Ty<'tcx>, // the ty::BoundRegion corresponding to the anonymous region pub bound_region: ty::BoundRegion, + // arg_ty_span contains span of argument type + pub arg_ty_span : Span, // corresponds to id the argument is the first parameter // in the declaration pub is_first: bool, @@ -74,12 +76,16 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { if let Some(node_id) = hir.as_local_node_id(id) { if let Some(body_id) = hir.maybe_body_owned_by(node_id) { let body = hir.body(body_id); + let owner_id = hir.body_owner(body_id); + let fn_decl = hir.fn_decl(owner_id).unwrap(); if let Some(tables) = self.tables { body.arguments .iter() .enumerate() .filter_map(|(index, arg)| { // May return None; sometimes the tables are not yet populated. 
+ let ty_hir_id = fn_decl.inputs[index].hir_id; + let arg_ty_span = hir.span(hir.hir_to_node_id(ty_hir_id)); let ty = tables.node_id_to_type_opt(arg.hir_id)?; let mut found_anon_region = false; let new_arg_ty = self.tcx.fold_regions(&ty, &mut false, |r, _| { @@ -95,6 +101,7 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { Some(AnonymousArgInfo { arg: arg, arg_ty: new_arg_ty, + arg_ty_span : arg_ty_span, bound_region: bound_region, is_first: is_first, }) @@ -167,6 +174,23 @@ impl<'a, 'gcx, 'tcx> NiceRegionError<'a, 'gcx, 'tcx> { } None } + + pub(super) fn is_return_type_impl_trait( + &self, + scope_def_id: DefId, + ) -> bool { + let ret_ty = self.tcx.type_of(scope_def_id); + match ret_ty.sty { + ty::TyFnDef(_, _) => { + let sig = ret_ty.fn_sig(self.tcx); + let output = self.tcx.erase_late_bound_regions(&sig.output()); + return output.is_impl_trait(); + } + _ => {} + } + false + } + // Here we check for the case where anonymous region // corresponds to self and if yes, we display E0312. // FIXME(#42700) - Need to format self properly to diff --git a/src/librustc/infer/error_reporting/note.rs b/src/librustc/infer/error_reporting/note.rs index e46613b3e4da..02ec9fe74c1f 100644 --- a/src/librustc/infer/error_reporting/note.rs +++ b/src/librustc/infer/error_reporting/note.rs @@ -23,12 +23,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { if let Some((expected, found)) = self.values_str(&trace.values) { let expected = expected.content(); let found = found.content(); - // FIXME: do we want a "the" here? - err.span_note(trace.cause.span, - &format!("...so that {} (expected {}, found {})", - trace.cause.as_requirement_str(), - expected, - found)); + err.note(&format!("...so that the {}:\nexpected {}\n found {}", + trace.cause.as_requirement_str(), + expected, + found)); } else { // FIXME: this really should be handled at some earlier stage. 
Our // handling of region checking when type errors are present is diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs index 1783d5abfc7c..6074bfd083d4 100644 --- a/src/librustc/infer/freshen.rs +++ b/src/librustc/infer/freshen.rs @@ -114,9 +114,10 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { self.tcx().types.re_erased } + ty::ReCanonical(..) | ty::ReClosureBound(..) => { bug!( - "encountered unexpected ReClosureBound: {:?}", + "encountered unexpected region: {:?}", r, ); } @@ -133,7 +134,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { match t.sty { ty::TyInfer(ty::TyVar(v)) => { - let opt_ty = self.infcx.type_variables.borrow_mut().probe(v); + let opt_ty = self.infcx.type_variables.borrow_mut().probe(v).known(); self.freshen( opt_ty, ty::TyVar(v), @@ -143,7 +144,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::TyInfer(ty::IntVar(v)) => { self.freshen( self.infcx.int_unification_table.borrow_mut() - .probe(v) + .probe_value(v) .map(|v| v.to_type(tcx)), ty::IntVar(v), ty::FreshIntTy) @@ -152,7 +153,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::TyInfer(ty::FloatVar(v)) => { self.freshen( self.infcx.float_unification_table.borrow_mut() - .probe(v) + .probe_value(v) .map(|v| v.to_type(tcx)), ty::FloatVar(v), ty::FreshFloatTy) @@ -170,6 +171,9 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { t } + ty::TyInfer(ty::CanonicalTy(..)) => + bug!("encountered canonical ty during freshening"), + ty::TyGenerator(..) | ty::TyBool | ty::TyChar | @@ -192,6 +196,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { ty::TyForeign(..) | ty::TyParam(..) | ty::TyClosure(..) | + ty::TyGeneratorWitness(..) | ty::TyAnon(..) 
=> { t.super_fold_with(self) } diff --git a/src/librustc/infer/fudge.rs b/src/librustc/infer/fudge.rs index 756a6947ee3f..961dd70a4685 100644 --- a/src/librustc/infer/fudge.rs +++ b/src/librustc/infer/fudge.rs @@ -131,7 +131,9 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFudger<'a, 'gcx, 'tcx> { // variables to their binding anyhow, we know // that it is unbound, so we can just return // it. - debug_assert!(self.infcx.type_variables.borrow_mut().probe(vid).is_none()); + debug_assert!(self.infcx.type_variables.borrow_mut() + .probe(vid) + .is_unknown()); ty } diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs index 57e237fb9137..cb4e1ab65e75 100644 --- a/src/librustc/infer/higher_ranked/mod.rs +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -19,10 +19,10 @@ use super::{CombinedSnapshot, use super::combine::CombineFields; use super::region_constraints::{TaintDirections}; -use std::collections::BTreeMap; use ty::{self, TyCtxt, Binder, TypeFoldable}; use ty::error::TypeError; use ty::relate::{Relate, RelateResult, TypeRelation}; +use std::collections::BTreeMap; use syntax_pos::Span; use util::nodemap::{FxHashMap, FxHashSet}; @@ -62,7 +62,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // Second, we instantiate each bound region in the supertype with a // fresh concrete region. let (b_prime, skol_map) = - self.infcx.skolemize_late_bound_regions(b, snapshot); + self.infcx.skolemize_late_bound_regions(b); debug!("a_prime={:?}", a_prime); debug!("b_prime={:?}", b_prime); @@ -80,7 +80,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { debug!("higher_ranked_sub: OK result={:?}", result); - Ok(ty::Binder(result)) + Ok(ty::Binder::bind(result)) }); } @@ -114,7 +114,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { // First, we instantiate each bound region in the matcher // with a skolemized region. 
let ((a_match, a_value), skol_map) = - self.infcx.skolemize_late_bound_regions(a_pair, snapshot); + self.infcx.skolemize_late_bound_regions(a_pair); debug!("higher_ranked_match: a_match={:?}", a_match); debug!("higher_ranked_match: skol_map={:?}", skol_map); @@ -239,12 +239,12 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { b, result1); - Ok(ty::Binder(result1)) + Ok(ty::Binder::bind(result1)) }); fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, span: Span, - snapshot: &CombinedSnapshot, + snapshot: &CombinedSnapshot<'a, 'tcx>, debruijn: ty::DebruijnIndex, new_vars: &[ty::RegionVid], a_map: &BTreeMap>, @@ -335,12 +335,12 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { b, result1); - Ok(ty::Binder(result1)) + Ok(ty::Binder::bind(result1)) }); fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, span: Span, - snapshot: &CombinedSnapshot, + snapshot: &CombinedSnapshot<'a, 'tcx>, debruijn: ty::DebruijnIndex, new_vars: &[ty::RegionVid], a_map: &BTreeMap>, @@ -417,7 +417,7 @@ impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { { for (a_br, a_r) in a_map { if *a_r == r { - return infcx.tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br)); + return infcx.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, *a_br)); } } span_bug!( @@ -473,13 +473,13 @@ fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, _ => true }); - fldr(region, ty::DebruijnIndex::new(current_depth)) + fldr(region, current_depth) }) } impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn tainted_regions(&self, - snapshot: &CombinedSnapshot, + snapshot: &CombinedSnapshot<'a, 'tcx>, r: ty::Region<'tcx>, directions: TaintDirections) -> FxHashSet> { @@ -491,7 +491,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn region_vars_confined_to_snapshot(&self, - snapshot: &CombinedSnapshot) + snapshot: &CombinedSnapshot<'a, 'tcx>) -> Vec { /*! 
@@ -580,16 +580,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// the pop occurs as part of the rollback, so an explicit call is not /// needed (but is also permitted). /// - /// See `README.md` for more details. + /// For more information about how skolemization for HRTBs works, see + /// the [rustc guide]. + /// + /// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/hrtb.html pub fn skolemize_late_bound_regions(&self, - binder: &ty::Binder, - snapshot: &CombinedSnapshot) + binder: &ty::Binder) -> (T, SkolemizationMap<'tcx>) where T : TypeFoldable<'tcx> { let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| { - self.borrow_region_constraints() - .push_skolemized(self.tcx, br, &snapshot.region_constraints_snapshot) + self.universe.set(self.universe().subuniverse()); + self.tcx.mk_region(ty::ReSkolemized(self.universe(), br)) }); debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})", @@ -609,12 +611,24 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { overly_polymorphic: bool, _span: Span, skol_map: &SkolemizationMap<'tcx>, - snapshot: &CombinedSnapshot) + snapshot: &CombinedSnapshot<'a, 'tcx>) -> RelateResult<'tcx, ()> { debug!("leak_check: skol_map={:?}", skol_map); + // If the user gave `-Zno-leak-check`, then skip the leak + // check completely. This is wildly unsound and also not + // unlikely to cause an ICE or two. It is intended for use + // only during a transition period, in which the MIR typeck + // uses the "universe-style" check, and the rest of typeck + // uses the more conservative leak check. Since the leak + // check is more conservative, we can't test the + // universe-style check without disabling it. 
+ if self.tcx.sess.opts.debugging_opts.no_leak_check { + return Ok(()); + } + let new_vars = self.region_vars_confined_to_snapshot(snapshot); for (&skol_br, &skol) in skol_map { // The inputs to a skolemized variable can only @@ -684,7 +698,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// predicate is `for<'a> &'a int : Clone`. pub fn plug_leaks(&self, skol_map: SkolemizationMap<'tcx>, - snapshot: &CombinedSnapshot, + snapshot: &CombinedSnapshot<'a, 'tcx>, value: T) -> T where T : TypeFoldable<'tcx> { @@ -732,7 +746,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // trait checking, and all of the skolemized regions // appear inside predicates, which always have // binders, so this assert is satisfied. - assert!(current_depth > 1); + assert!(current_depth > ty::INNERMOST); // since leak-check passed, this skolemized region // should only have incoming edges from variables @@ -748,7 +762,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { r, br); self.tcx.mk_region(ty::ReLateBound( - ty::DebruijnIndex::new(current_depth - 1), br.clone())) + current_depth.shifted_out(1), + br.clone(), + )) } } }); @@ -770,12 +786,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Note: popping also occurs implicitly as part of `leak_check`. 
pub fn pop_skolemized(&self, skol_map: SkolemizationMap<'tcx>, - snapshot: &CombinedSnapshot) - { + snapshot: &CombinedSnapshot<'a, 'tcx>) { debug!("pop_skolemized({:?})", skol_map); let skol_regions: FxHashSet<_> = skol_map.values().cloned().collect(); self.borrow_region_constraints() - .pop_skolemized(self.tcx, &skol_regions, &snapshot.region_constraints_snapshot); + .pop_skolemized(self.universe(), &skol_regions, &snapshot.region_constraints_snapshot); + self.universe.set(snapshot.universe); if !skol_map.is_empty() { self.projection_cache.borrow_mut().rollback_skolemized( &snapshot.projection_cache_snapshot); diff --git a/src/librustc/infer/lattice.rs b/src/librustc/infer/lattice.rs index d5c1163cfc1b..28aba51ab372 100644 --- a/src/librustc/infer/lattice.rs +++ b/src/librustc/infer/lattice.rs @@ -70,14 +70,6 @@ pub fn super_lattice_tys<'a, 'gcx, 'tcx, L>(this: &mut L, let a = infcx.type_variables.borrow_mut().replace_if_possible(a); let b = infcx.type_variables.borrow_mut().replace_if_possible(b); match (&a.sty, &b.sty) { - (&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..))) - if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => { - let v = infcx.next_diverging_ty_var( - TypeVariableOrigin::LatticeVariable(this.cause().span)); - this.relate_bound(v, a, b)?; - Ok(v) - } - // If one side is known to be a variable and one is not, // create a variable (`v`) to represent the LUB. Make sure to // relate `v` to the non-type-variable first (by passing it diff --git a/src/librustc/infer/lexical_region_resolve/README.md b/src/librustc/infer/lexical_region_resolve/README.md index a90230870a6c..6e1c4191173b 100644 --- a/src/librustc/infer/lexical_region_resolve/README.md +++ b/src/librustc/infer/lexical_region_resolve/README.md @@ -1,14 +1,16 @@ # Region inference +> WARNING: This README is obsolete and will be removed soon! For +> more info on how the current borrowck works, see the [rustc guide]. 
+ +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html + ## Terminology Note that we use the terms region and lifetime interchangeably. ## Introduction -See the [general inference README](../README.md) for an overview of -how lexical-region-solving fits into the bigger picture. - Region inference uses a somewhat more involved algorithm than type inference. It is not the most efficient thing ever written though it seems to work well enough in practice (famous last words). The reason diff --git a/src/librustc/infer/lexical_region_resolve/graphviz.rs b/src/librustc/infer/lexical_region_resolve/graphviz.rs index d9d08294334d..bdd3f78aff3e 100644 --- a/src/librustc/infer/lexical_region_resolve/graphviz.rs +++ b/src/librustc/infer/lexical_region_resolve/graphviz.rs @@ -39,14 +39,14 @@ use std::sync::atomic::{AtomicBool, Ordering}; fn print_help_message() { println!("\ -Z print-region-graph by default prints a region constraint graph for every \n\ -function body, to the path `/tmp/constraints.nodeXXX.dot`, where the XXX is \n\ +function body, to the path `constraints.nodeXXX.dot`, where the XXX is \n\ replaced with the node id of the function under analysis. \n\ \n\ To select one particular function body, set `RUST_REGION_GRAPH_NODE=XXX`, \n\ where XXX is the node id desired. \n\ \n\ To generate output to some path other than the default \n\ -`/tmp/constraints.nodeXXX.dot`, set `RUST_REGION_GRAPH=/path/desired.dot`; \n\ +`constraints.nodeXXX.dot`, set `RUST_REGION_GRAPH=/path/desired.dot`; \n\ occurrences of the character `%` in the requested path will be replaced with\n\ the node id of the function under analysis. 
\n\ \n\ @@ -90,7 +90,7 @@ pub fn maybe_print_constraints_for<'a, 'gcx, 'tcx>( } Ok(other_path) => other_path, - Err(_) => "/tmp/constraints.node%.dot".to_string(), + Err(_) => "constraints.node%.dot".to_string(), }; if output_template.is_empty() { @@ -204,7 +204,7 @@ impl<'a, 'gcx, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { match *e { Edge::Constraint(ref c) => dot::LabelText::label(format!("{:?}", self.map.get(c).unwrap())), - Edge::EnclScope(..) => dot::LabelText::label(format!("(enclosed)")), + Edge::EnclScope(..) => dot::LabelText::label("(enclosed)".to_string()), } } } @@ -273,7 +273,7 @@ fn dump_region_data_to<'a, 'gcx, 'tcx>(region_rels: &RegionRelations<'a, 'gcx, ' debug!("dump_region_data map (len: {}) path: {}", map.len(), path); - let g = ConstraintGraph::new(format!("region_data"), region_rels, map); + let g = ConstraintGraph::new("region_data".to_string(), region_rels, map); debug!("dump_region_data calling render"); let mut v = Vec::new(); dot::render(&g, &mut v).unwrap(); diff --git a/src/librustc/infer/lexical_region_resolve/mod.rs b/src/librustc/infer/lexical_region_resolve/mod.rs index 3ac4ec5bee41..120b45ec01e5 100644 --- a/src/librustc/infer/lexical_region_resolve/mod.rs +++ b/src/librustc/infer/lexical_region_resolve/mod.rs @@ -15,12 +15,12 @@ use infer::RegionVariableOrigin; use infer::region_constraints::Constraint; use infer::region_constraints::GenericKind; use infer::region_constraints::RegionConstraintData; -use infer::region_constraints::VarOrigins; +use infer::region_constraints::VarInfos; use infer::region_constraints::VerifyBound; use middle::free_region::RegionRelations; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::fx::FxHashSet; -use rustc_data_structures::graph::{self, Direction, NodeIndex, OUTGOING}; +use rustc_data_structures::graph::implementation::{Graph, Direction, NodeIndex, INCOMING, OUTGOING}; use std::fmt; use std::u32; use ty::{self, TyCtxt}; @@ -37,7 +37,7 @@ mod 
graphviz; /// all the variables as well as a set of errors that must be reported. pub fn resolve<'tcx>( region_rels: &RegionRelations<'_, '_, 'tcx>, - var_origins: VarOrigins, + var_infos: VarInfos, data: RegionConstraintData<'tcx>, ) -> ( LexicalRegionResolutions<'tcx>, @@ -47,7 +47,7 @@ pub fn resolve<'tcx>( let mut errors = vec![]; let mut resolver = LexicalResolver { region_rels, - var_origins, + var_infos, data, }; let values = resolver.infer_variable_values(&mut errors); @@ -99,11 +99,11 @@ struct RegionAndOrigin<'tcx> { origin: SubregionOrigin<'tcx>, } -type RegionGraph<'tcx> = graph::Graph<(), Constraint<'tcx>>; +type RegionGraph<'tcx> = Graph<(), Constraint<'tcx>>; struct LexicalResolver<'cx, 'gcx: 'tcx, 'tcx: 'cx> { region_rels: &'cx RegionRelations<'cx, 'gcx, 'tcx>, - var_origins: VarOrigins, + var_infos: VarInfos, data: RegionConstraintData<'tcx>, } @@ -132,7 +132,7 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { } fn num_vars(&self) -> usize { - self.var_origins.len() + self.var_infos.len() } /// Initially, the value for all variables is set to `'empty`, the @@ -258,6 +258,8 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> { let tcx = self.region_rels.tcx; match (a, b) { + (&ty::ReCanonical(..), _) | + (_, &ty::ReCanonical(..)) | (&ty::ReClosureBound(..), _) | (_, &ty::ReClosureBound(..)) | (&ReLateBound(..), _) | @@ -277,7 +279,7 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { (&ReVar(v_id), _) | (_, &ReVar(v_id)) => { span_bug!( - self.var_origins[v_id].span(), + self.var_infos[v_id].origin.span(), "lub_concrete_regions invoked with non-concrete \ regions: {:?}, {:?}", a, @@ -499,7 +501,7 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { fn construct_graph(&self) -> RegionGraph<'tcx> { let num_vars = self.num_vars(); - let mut graph = graph::Graph::new(); + let mut graph = Graph::new(); for _ in 0..num_vars { 
graph.add_node(()); @@ -548,9 +550,9 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { // Errors in expanding nodes result from a lower-bound that is // not contained by an upper-bound. let (mut lower_bounds, lower_dup) = - self.collect_concrete_regions(graph, node_idx, graph::INCOMING, dup_vec); + self.collect_concrete_regions(graph, node_idx, INCOMING, dup_vec); let (mut upper_bounds, upper_dup) = - self.collect_concrete_regions(graph, node_idx, graph::OUTGOING, dup_vec); + self.collect_concrete_regions(graph, node_idx, OUTGOING, dup_vec); if lower_dup || upper_dup { return; @@ -574,7 +576,7 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { if !self.region_rels .is_subregion_of(lower_bound.region, upper_bound.region) { - let origin = self.var_origins[node_idx].clone(); + let origin = self.var_infos[node_idx].origin.clone(); debug!( "region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \ sup: {:?}", @@ -596,7 +598,7 @@ impl<'cx, 'gcx, 'tcx> LexicalResolver<'cx, 'gcx, 'tcx> { } span_bug!( - self.var_origins[node_idx].span(), + self.var_infos[node_idx].origin.span(), "collect_error_for_expanding_node() could not find \ error for var {:?}, lower_bounds={:?}, \ upper_bounds={:?}", diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs index 07c5b319970f..eed6215150fd 100644 --- a/src/librustc/infer/mod.rs +++ b/src/librustc/infer/mod.rs @@ -21,28 +21,28 @@ use hir::def_id::DefId; use middle::free_region::RegionRelations; use middle::region; use middle::lang_items; -use mir::tcx::PlaceTy; -use ty::subst::{Kind, Subst, Substs}; +use ty::subst::{Kind, Substs}; use ty::{TyVid, IntVid, FloatVid}; -use ty::{self, Ty, TyCtxt}; +use ty::{self, Ty, TyCtxt, GenericParamDefKind}; use ty::error::{ExpectedFound, TypeError, UnconstrainedNumeric}; -use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use ty::fold::TypeFoldable; use ty::relate::RelateResult; -use traits::{self, ObligationCause, PredicateObligations, Reveal}; -use 
rustc_data_structures::unify::{self, UnificationTable}; +use traits::{self, ObligationCause, PredicateObligations, TraitEngine}; +use rustc_data_structures::unify as ut; use std::cell::{Cell, RefCell, Ref, RefMut}; use std::collections::BTreeMap; use std::fmt; use syntax::ast; use errors::DiagnosticBuilder; -use syntax_pos::{self, Span, DUMMY_SP}; +use syntax_pos::{self, Span}; +use syntax_pos::symbol::InternedString; use util::nodemap::FxHashMap; -use arena::DroplessArena; +use arena::SyncDroplessArena; use self::combine::CombineFields; use self::higher_ranked::HrMatchResult; use self::region_constraints::{RegionConstraintCollector, RegionSnapshot}; -use self::region_constraints::{GenericKind, VerifyBound, RegionConstraintData, VarOrigins}; +use self::region_constraints::{GenericKind, VerifyBound, RegionConstraintData, VarInfos}; use self::lexical_region_resolve::LexicalRegionResolutions; use self::outlives::env::OutlivesEnvironment; use self::type_variable::TypeVariableOrigin; @@ -50,6 +50,7 @@ use self::unify_key::ToType; pub mod anon_types; pub mod at; +pub mod canonical; mod combine; mod equate; pub mod error_reporting; @@ -68,6 +69,7 @@ pub mod type_variable; pub mod unify_key; #[must_use] +#[derive(Debug)] pub struct InferOk<'tcx, T> { pub value: T, pub obligations: PredicateObligations<'tcx>, @@ -99,10 +101,10 @@ pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { pub type_variables: RefCell>, // Map from integral variable to the kind of integer it represents - int_unification_table: RefCell>, + int_unification_table: RefCell>>, // Map from floating variable to the kind of float it represents - float_unification_table: RefCell>, + float_unification_table: RefCell>>, // Tracks the set of region variables and the constraints between // them. This is initially `Some(_)` but when @@ -180,7 +182,18 @@ pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { // for each body-id in this map, which will process the // obligations within. 
This is expected to be done 'late enough' // that all type inference variables have been bound and so forth. - region_obligations: RefCell)>>, + pub region_obligations: RefCell)>>, + + /// What is the innermost universe we have created? Starts out as + /// `UniverseIndex::root()` but grows from there as we enter + /// universal quantifiers. + /// + /// NB: At present, we exclude the universal quantifiers on the + /// item we are type-checking, and just consider those names as + /// part of the root universe. So this would only get incremented + /// when we enter into a higher-ranked (`for<..>`) type or trait + /// bound. + universe: Cell, } /// A map returned by `skolemize_late_bound_regions()` indicating the skolemized @@ -191,6 +204,7 @@ pub type SkolemizationMap<'tcx> = BTreeMap>; #[derive(Clone, Debug)] pub enum ValuePairs<'tcx> { Types(ExpectedFound>), + Regions(ExpectedFound>), TraitRefs(ExpectedFound>), PolyTraitRefs(ExpectedFound>), } @@ -341,7 +355,7 @@ pub enum RegionVariableOrigin { Coercion(Span), // Region variables created as the values for early-bound regions - EarlyBoundRegion(Span, ast::Name), + EarlyBoundRegion(Span, InternedString), // Region variables created for bound regions // in a function or method that is called @@ -363,7 +377,23 @@ pub enum NLLRegionVariableOrigin { // elsewhere. This origin indices we've got one of those. FreeRegion, - Inferred(::mir::visit::TyContext), + BoundRegion(ty::UniverseIndex), + + Existential, +} + +impl NLLRegionVariableOrigin { + pub fn is_universal(self) -> bool { + match self { + NLLRegionVariableOrigin::FreeRegion => true, + NLLRegionVariableOrigin::BoundRegion(..) => true, + NLLRegionVariableOrigin::Existential => false, + } + } + + pub fn is_existential(self) -> bool { + !self.is_universal() + } } #[derive(Copy, Clone, Debug)] @@ -404,7 +434,7 @@ impl fmt::Display for FixupError { /// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(InferCtxt<'b, 'gcx, 'tcx>). 
pub struct InferCtxtBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { global_tcx: TyCtxt<'a, 'gcx, 'gcx>, - arena: DroplessArena, + arena: SyncDroplessArena, fresh_tables: Option>>, } @@ -412,7 +442,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> { pub fn infer_ctxt(self) -> InferCtxtBuilder<'a, 'gcx, 'tcx> { InferCtxtBuilder { global_tcx: self, - arena: DroplessArena::new(), + arena: SyncDroplessArena::new(), fresh_tables: None, } @@ -441,8 +471,8 @@ impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> { in_progress_tables, projection_cache: RefCell::new(traits::ProjectionCache::new()), type_variables: RefCell::new(type_variable::TypeVariableTable::new()), - int_unification_table: RefCell::new(UnificationTable::new()), - float_unification_table: RefCell::new(UnificationTable::new()), + int_unification_table: RefCell::new(ut::UnificationTable::new()), + float_unification_table: RefCell::new(ut::UnificationTable::new()), region_constraints: RefCell::new(Some(RegionConstraintCollector::new())), lexical_region_resolutions: RefCell::new(None), selection_cache: traits::SelectionCache::new(), @@ -452,6 +482,7 @@ impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> { err_count_on_creation: tcx.sess.err_count(), in_snapshot: Cell::new(false), region_obligations: RefCell::new(vec![]), + universe: Cell::new(ty::UniverseIndex::ROOT), })) } } @@ -470,190 +501,41 @@ impl<'tcx, T> InferOk<'tcx, T> { pub fn unit(self) -> InferOk<'tcx, ()> { InferOk { value: (), obligations: self.obligations } } + + /// Extract `value`, registering any obligations into `fulfill_cx` + pub fn into_value_registering_obligations( + self, + infcx: &InferCtxt<'_, '_, 'tcx>, + fulfill_cx: &mut impl TraitEngine<'tcx>, + ) -> T { + let InferOk { value, obligations } = self; + for obligation in obligations { + fulfill_cx.register_predicate_obligation(infcx, obligation); + } + value + } +} + +impl<'tcx> InferOk<'tcx, ()> { + pub fn into_obligations(self) -> PredicateObligations<'tcx> { + self.obligations + } } 
#[must_use = "once you start a snapshot, you should always consume it"] pub struct CombinedSnapshot<'a, 'tcx:'a> { projection_cache_snapshot: traits::ProjectionCacheSnapshot, - type_snapshot: type_variable::Snapshot, - int_snapshot: unify::Snapshot, - float_snapshot: unify::Snapshot, + type_snapshot: type_variable::Snapshot<'tcx>, + int_snapshot: ut::Snapshot>, + float_snapshot: ut::Snapshot>, region_constraints_snapshot: RegionSnapshot, region_obligations_snapshot: usize, + universe: ty::UniverseIndex, was_in_snapshot: bool, _in_progress_tables: Option>>, } -/// Helper trait for shortening the lifetimes inside a -/// value for post-type-checking normalization. -pub trait TransNormalize<'gcx>: TypeFoldable<'gcx> { - fn trans_normalize<'a, 'tcx>(&self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) - -> Self; -} - -macro_rules! items { ($($item:item)+) => ($($item)+) } -macro_rules! impl_trans_normalize { - ($lt_gcx:tt, $($ty:ty),+) => { - items!($(impl<$lt_gcx> TransNormalize<$lt_gcx> for $ty { - fn trans_normalize<'a, 'tcx>(&self, - infcx: &InferCtxt<'a, $lt_gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) - -> Self { - infcx.normalize_projections_in(param_env, self) - } - })+); - } -} - -impl_trans_normalize!('gcx, - Ty<'gcx>, - &'gcx ty::Const<'gcx>, - &'gcx Substs<'gcx>, - ty::FnSig<'gcx>, - ty::PolyFnSig<'gcx>, - ty::ClosureSubsts<'gcx>, - ty::PolyTraitRef<'gcx>, - ty::ExistentialTraitRef<'gcx> -); - -impl<'gcx> TransNormalize<'gcx> for PlaceTy<'gcx> { - fn trans_normalize<'a, 'tcx>(&self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) - -> Self { - match *self { - PlaceTy::Ty { ty } => PlaceTy::Ty { ty: ty.trans_normalize(infcx, param_env) }, - PlaceTy::Downcast { adt_def, substs, variant_index } => { - PlaceTy::Downcast { - adt_def, - substs: substs.trans_normalize(infcx, param_env), - variant_index, - } - } - } - } -} - -// NOTE: Callable from trans only! 
-impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { - /// Currently, higher-ranked type bounds inhibit normalization. Therefore, - /// each time we erase them in translation, we need to normalize - /// the contents. - pub fn erase_late_bound_regions_and_normalize(self, value: &ty::Binder) - -> T - where T: TransNormalize<'tcx> - { - assert!(!value.needs_subst()); - let value = self.erase_late_bound_regions(value); - self.fully_normalize_associated_types_in(&value) - } - - /// Fully normalizes any associated types in `value`, using an - /// empty environment and `Reveal::All` mode (therefore, suitable - /// only for monomorphized code during trans, basically). - pub fn fully_normalize_associated_types_in(self, value: &T) -> T - where T: TransNormalize<'tcx> - { - debug!("fully_normalize_associated_types_in(t={:?})", value); - - let param_env = ty::ParamEnv::empty(Reveal::All); - let value = self.erase_regions(value); - - if !value.has_projections() { - return value; - } - - self.infer_ctxt().enter(|infcx| { - value.trans_normalize(&infcx, param_env) - }) - } - - /// Does a best-effort to normalize any associated types in - /// `value`; this includes revealing specializable types, so this - /// should be not be used during type-checking, but only during - /// optimization and code generation. 
- pub fn normalize_associated_type_in_env( - self, value: &T, env: ty::ParamEnv<'tcx> - ) -> T - where T: TransNormalize<'tcx> - { - debug!("normalize_associated_type_in_env(t={:?})", value); - - let value = self.erase_regions(value); - - if !value.has_projections() { - return value; - } - - self.infer_ctxt().enter(|infcx| { - value.trans_normalize(&infcx, env.reveal_all()) - }) - } -} - impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { - fn normalize_projections_in(&self, param_env: ty::ParamEnv<'tcx>, value: &T) -> T::Lifted - where T: TypeFoldable<'tcx> + ty::Lift<'gcx> - { - let mut selcx = traits::SelectionContext::new(self); - let cause = traits::ObligationCause::dummy(); - let traits::Normalized { value: result, obligations } = - traits::normalize(&mut selcx, param_env, cause, value); - - debug!("normalize_projections_in: result={:?} obligations={:?}", - result, obligations); - - let mut fulfill_cx = traits::FulfillmentContext::new(); - - for obligation in obligations { - fulfill_cx.register_predicate_obligation(self, obligation); - } - - self.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result) - } - - /// Finishes processes any obligations that remain in the - /// fulfillment context, and then returns the result with all type - /// variables removed and regions erased. Because this is intended - /// for use after type-check has completed, if any errors occur, - /// it will panic. It is used during normalization and other cases - /// where processing the obligations in `fulfill_cx` may cause - /// type inference variables that appear in `result` to be - /// unified, and hence we need to process those obligations to get - /// the complete picture of the type. 
- pub fn drain_fulfillment_cx_or_panic(&self, - span: Span, - fulfill_cx: &mut traits::FulfillmentContext<'tcx>, - result: &T) - -> T::Lifted - where T: TypeFoldable<'tcx> + ty::Lift<'gcx> - { - debug!("drain_fulfillment_cx_or_panic()"); - - // In principle, we only need to do this so long as `result` - // contains unbound type parameters. It could be a slight - // optimization to stop iterating early. - match fulfill_cx.select_all_or_error(self) { - Ok(()) => { } - Err(errors) => { - span_bug!(span, "Encountered errors `{:?}` resolving bounds after type-checking", - errors); - } - } - - let result = self.resolve_type_vars_if_possible(result); - let result = self.tcx.erase_regions(&result); - - match self.tcx.lift_to_global(&result) { - Some(result) => result, - None => { - span_bug!(span, "Uninferred types/regions in `{:?}`", result); - } - } - } - pub fn is_in_snapshot(&self) -> bool { self.in_snapshot.get() } @@ -678,14 +560,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { use ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; match ty.sty { ty::TyInfer(ty::IntVar(vid)) => { - if self.int_unification_table.borrow_mut().has_value(vid) { + if self.int_unification_table.borrow_mut().probe_value(vid).is_some() { Neither } else { UnconstrainedInt } }, ty::TyInfer(ty::FloatVar(vid)) => { - if self.float_unification_table.borrow_mut().has_value(vid) { + if self.float_unification_table.borrow_mut().probe_value(vid).is_some() { Neither } else { UnconstrainedFloat @@ -695,48 +577,26 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - /// Returns a type variable's default fallback if any exists. A default - /// must be attached to the variable when created, if it is created - /// without a default, this will return None. - /// - /// This code does not apply to integral or floating point variables, - /// only to use declared defaults. - /// - /// See `new_ty_var_with_default` to create a type variable with a default. 
- /// See `type_variable::Default` for details about what a default entails. - pub fn default(&self, ty: Ty<'tcx>) -> Option> { - match ty.sty { - ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().default(vid), - _ => None - } - } - pub fn unsolved_variables(&self) -> Vec> { - let mut variables = Vec::new(); + let mut type_variables = self.type_variables.borrow_mut(); + let mut int_unification_table = self.int_unification_table.borrow_mut(); + let mut float_unification_table = self.float_unification_table.borrow_mut(); - let unbound_ty_vars = self.type_variables - .borrow_mut() - .unsolved_variables() - .into_iter() - .map(|t| self.tcx.mk_var(t)); - - let unbound_int_vars = self.int_unification_table - .borrow_mut() - .unsolved_variables() - .into_iter() - .map(|v| self.tcx.mk_int_var(v)); - - let unbound_float_vars = self.float_unification_table - .borrow_mut() - .unsolved_variables() - .into_iter() - .map(|v| self.tcx.mk_float_var(v)); - - variables.extend(unbound_ty_vars); - variables.extend(unbound_int_vars); - variables.extend(unbound_float_vars); - - return variables; + type_variables + .unsolved_variables() + .into_iter() + .map(|t| self.tcx.mk_var(t)) + .chain( + (0..int_unification_table.len()) + .map(|i| ty::IntVid { index: i as u32 }) + .filter(|&vid| int_unification_table.probe_value(vid).is_none()) + .map(|v| self.tcx.mk_int_var(v)) + ).chain( + (0..float_unification_table.len()) + .map(|i| ty::FloatVid { index: i as u32 }) + .filter(|&vid| float_unification_table.probe_value(vid).is_none()) + .map(|v| self.tcx.mk_float_var(v)) + ).collect() } fn combine_fields(&'a self, trace: TypeTrace<'tcx>, param_env: ty::ParamEnv<'tcx>) @@ -776,7 +636,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { result } - fn start_snapshot<'b>(&'b self) -> CombinedSnapshot<'b, 'tcx> { + fn start_snapshot(&self) -> CombinedSnapshot<'a, 'tcx> { debug!("start_snapshot()"); let in_snapshot = self.in_snapshot.get(); @@ -789,6 +649,7 @@ impl<'a, 'gcx, 'tcx> 
InferCtxt<'a, 'gcx, 'tcx> { float_snapshot: self.float_unification_table.borrow_mut().snapshot(), region_constraints_snapshot: self.borrow_region_constraints().start_snapshot(), region_obligations_snapshot: self.region_obligations.borrow().len(), + universe: self.universe(), was_in_snapshot: in_snapshot, // Borrow tables "in progress" (i.e. during typeck) // to ban writes from within a snapshot to them. @@ -798,7 +659,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) { + fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot<'a, 'tcx>) { debug!("rollback_to(cause={})", cause); let CombinedSnapshot { projection_cache_snapshot, type_snapshot, @@ -806,10 +667,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { float_snapshot, region_constraints_snapshot, region_obligations_snapshot, + universe, was_in_snapshot, _in_progress_tables } = snapshot; self.in_snapshot.set(was_in_snapshot); + self.universe.set(universe); self.projection_cache .borrow_mut() @@ -830,7 +693,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { .rollback_to(region_constraints_snapshot); } - fn commit_from(&self, snapshot: CombinedSnapshot) { + fn commit_from(&self, snapshot: CombinedSnapshot<'a, 'tcx>) { debug!("commit_from()"); let CombinedSnapshot { projection_cache_snapshot, type_snapshot, @@ -838,6 +701,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { float_snapshot, region_constraints_snapshot, region_obligations_snapshot: _, + universe: _, was_in_snapshot, _in_progress_tables } = snapshot; @@ -845,7 +709,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.projection_cache .borrow_mut() - .commit(projection_cache_snapshot); + .commit(&projection_cache_snapshot); self.type_variables .borrow_mut() .commit(type_snapshot); @@ -872,7 +736,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)` pub fn commit_if_ok(&self, f: F) -> Result 
where - F: FnOnce(&CombinedSnapshot) -> Result + F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> Result { debug!("commit_if_ok()"); let snapshot = self.start_snapshot(); @@ -887,7 +751,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Execute `f` in a snapshot, and commit the bindings it creates pub fn in_snapshot(&self, f: F) -> T where - F: FnOnce(&CombinedSnapshot) -> T + F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> T { debug!("in_snapshot()"); let snapshot = self.start_snapshot(); @@ -898,7 +762,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Execute `f` then unroll any bindings it creates pub fn probe(&self, f: F) -> R where - F: FnOnce(&CombinedSnapshot) -> R, + F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R, { debug!("probe()"); let snapshot = self.start_snapshot(); @@ -954,23 +818,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.borrow_region_constraints().make_subregion(origin, a, b); } - pub fn equality_predicate(&self, - cause: &ObligationCause<'tcx>, - param_env: ty::ParamEnv<'tcx>, - predicate: &ty::PolyEquatePredicate<'tcx>) - -> InferResult<'tcx, ()> - { - self.commit_if_ok(|snapshot| { - let (ty::EquatePredicate(a, b), skol_map) = - self.skolemize_late_bound_regions(predicate, snapshot); - let cause_span = cause.span; - let eqty_ok = self.at(cause, param_env).eq(b, a)?; - self.leak_check(false, cause_span, &skol_map, snapshot)?; - self.pop_skolemized(skol_map, snapshot); - Ok(eqty_ok.unit()) - }) - } - pub fn subtype_predicate(&self, cause: &ObligationCause<'tcx>, param_env: ty::ParamEnv<'tcx>, @@ -999,7 +846,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { Some(self.commit_if_ok(|snapshot| { let (ty::SubtypePredicate { a_is_expected, a, b}, skol_map) = - self.skolemize_late_bound_regions(predicate, snapshot); + self.skolemize_late_bound_regions(predicate); let cause_span = cause.span; let ok = self.at(cause, param_env).sub_exp(a_is_expected, a, b)?; @@ -1016,7 +863,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { { 
self.commit_if_ok(|snapshot| { let (ty::OutlivesPredicate(r_a, r_b), skol_map) = - self.skolemize_late_bound_regions(predicate, snapshot); + self.skolemize_late_bound_regions(predicate); let origin = SubregionOrigin::from_obligation_cause(cause, || RelateRegionParamBound(cause.span)); @@ -1029,7 +876,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn next_ty_var_id(&self, diverging: bool, origin: TypeVariableOrigin) -> TyVid { self.type_variables .borrow_mut() - .new_var(diverging, origin, None) + .new_var(self.universe(), diverging, origin) } pub fn next_ty_var(&self, origin: TypeVariableOrigin) -> Ty<'tcx> { @@ -1060,12 +907,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// during diagnostics / error-reporting. pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region<'tcx> { - self.tcx.mk_region(ty::ReVar(self.borrow_region_constraints().new_region_var(origin))) + let region_var = self.borrow_region_constraints() + .new_region_var(self.universe(), origin); + self.tcx.mk_region(ty::ReVar(region_var)) } /// Number of region variables created so far. pub fn num_region_vars(&self) -> usize { - self.borrow_region_constraints().var_origins().len() + self.borrow_region_constraints().num_region_vars() } /// Just a convenient wrapper of `next_region_var` for using during NLL. @@ -1074,47 +923,35 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.next_region_var(RegionVariableOrigin::NLL(origin)) } - /// Create a region inference variable for the given - /// region parameter definition. - pub fn region_var_for_def(&self, - span: Span, - def: &ty::RegionParameterDef) - -> ty::Region<'tcx> { - self.next_region_var(EarlyBoundRegion(span, def.name)) - } + pub fn var_for_def(&self, + span: Span, + param: &ty::GenericParamDef) + -> Kind<'tcx> { + match param.kind { + GenericParamDefKind::Lifetime => { + // Create a region inference variable for the given + // region parameter definition. 
+ self.next_region_var(EarlyBoundRegion(span, param.name)).into() + } + GenericParamDefKind::Type {..} => { + // Create a type inference variable for the given + // type parameter definition. The substitutions are + // for actual parameters that may be referred to by + // the default of this type parameter, if it exists. + // E.g. `struct Foo(...);` when + // used in a path such as `Foo::::new()` will + // use an inference variable for `C` with `[T, U]` + // as the substitutions for the default, `(T, U)`. + let ty_var_id = + self.type_variables + .borrow_mut() + .new_var(self.universe(), + false, + TypeVariableOrigin::TypeParameterDefinition(span, param.name)); - /// Create a type inference variable for the given - /// type parameter definition. The substitutions are - /// for actual parameters that may be referred to by - /// the default of this type parameter, if it exists. - /// E.g. `struct Foo(...);` when - /// used in a path such as `Foo::::new()` will - /// use an inference variable for `C` with `[T, U]` - /// as the substitutions for the default, `(T, U)`. 
- pub fn type_var_for_def(&self, - span: Span, - def: &ty::TypeParameterDef, - substs: &[Kind<'tcx>]) - -> Ty<'tcx> { - let default = if def.has_default { - let default = self.tcx.type_of(def.def_id); - Some(type_variable::Default { - ty: default.subst_spanned(self.tcx, substs, Some(span)), - origin_span: span, - def_id: def.def_id - }) - } else { - None - }; - - - let ty_var_id = self.type_variables - .borrow_mut() - .new_var(false, - TypeVariableOrigin::TypeParameterDefinition(span, def.name), - default); - - self.tcx.mk_var(ty_var_id) + self.tcx.mk_var(ty_var_id).into() + } + } } /// Given a set of generics defined on a type or impl, returns a substitution mapping each @@ -1123,10 +960,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span: Span, def_id: DefId) -> &'tcx Substs<'tcx> { - Substs::for_item(self.tcx, def_id, |def, _| { - self.region_var_for_def(span, def) - }, |def, substs| { - self.type_var_for_def(span, def, substs) + Substs::for_item(self.tcx, def_id, |param, _| { + self.var_for_def(span, param) }) } @@ -1206,12 +1041,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { region_context, region_map, outlives_env.free_region_map()); - let (var_origins, data) = self.region_constraints.borrow_mut() + let (var_infos, data) = self.region_constraints.borrow_mut() .take() .expect("regions already resolved") - .into_origins_and_data(); + .into_infos_and_data(); let (lexical_region_resolutions, errors) = - lexical_region_resolve::resolve(region_rels, var_origins, data); + lexical_region_resolve::resolve(region_rels, var_infos, data); let old_value = self.lexical_region_resolutions.replace(Some(lexical_region_resolutions)); assert!(old_value.is_none()); @@ -1244,18 +1079,28 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.borrow_region_constraints().take_and_reset_data() } + /// Gives temporary access to the region constraint data. 
+ #[allow(non_camel_case_types)] // bug with impl trait + pub fn with_region_constraints( + &self, + op: impl FnOnce(&RegionConstraintData<'tcx>) -> R, + ) -> R { + let region_constraints = self.borrow_region_constraints(); + op(region_constraints.data()) + } + /// Takes ownership of the list of variable regions. This implies /// that all the region constriants have already been taken, and /// hence that `resolve_regions_and_report_errors` can never be /// called. This is used only during NLL processing to "hand off" ownership /// of the set of region vairables into the NLL region context. - pub fn take_region_var_origins(&self) -> VarOrigins { - let (var_origins, data) = self.region_constraints.borrow_mut() + pub fn take_region_var_origins(&self) -> VarInfos { + let (var_infos, data) = self.region_constraints.borrow_mut() .take() .expect("regions already resolved") - .into_origins_and_data(); + .into_infos_and_data(); assert!(data.is_empty()); - var_origins + var_infos } pub fn ty_to_string(&self, t: Ty<'tcx>) -> String { @@ -1278,21 +1123,22 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // it can be resolved to an int/float variable, which // can then be recursively resolved, hence the // recursion. Note though that we prevent type - // variables from unifying to other type variables + // variables from unifyxing to other type variables // directly (though they may be embedded // structurally), and we prevent cycles in any case, // so this recursion should always be of very limited // depth. 
self.type_variables.borrow_mut() - .probe(v) - .map(|t| self.shallow_resolve(t)) - .unwrap_or(typ) + .probe(v) + .known() + .map(|t| self.shallow_resolve(t)) + .unwrap_or(typ) } ty::TyInfer(ty::IntVar(v)) => { self.int_unification_table .borrow_mut() - .probe(v) + .probe_value(v) .map(|v| v.to_type(self.tcx)) .unwrap_or(typ) } @@ -1300,7 +1146,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::TyInfer(ty::FloatVar(v)) => { self.float_unification_table .borrow_mut() - .probe(v) + .probe_value(v) .map(|v| v.to_type(self.tcx)) .unwrap_or(typ) } @@ -1402,28 +1248,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.report_and_explain_type_error(trace, &err) } - pub fn report_conflicting_default_types(&self, - span: Span, - body_id: ast::NodeId, - expected: type_variable::Default<'tcx>, - actual: type_variable::Default<'tcx>) { - let trace = TypeTrace { - cause: ObligationCause::misc(span, body_id), - values: Types(ExpectedFound { - expected: expected.ty, - found: actual.ty - }) - }; - - self.report_and_explain_type_error( - trace, - &TypeError::TyParamDefaultMismatch(ExpectedFound { - expected, - found: actual - })) - .emit(); - } - pub fn replace_late_bound_regions_with_fresh_var( &self, span: Span, @@ -1555,11 +1379,35 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { InferOk { value, obligations } } - fn borrow_region_constraints(&self) -> RefMut<'_, RegionConstraintCollector<'tcx>> { + pub fn borrow_region_constraints(&self) -> RefMut<'_, RegionConstraintCollector<'tcx>> { RefMut::map( self.region_constraints.borrow_mut(), |c| c.as_mut().expect("region constraints already solved")) } + + /// Clears the selection, evaluation, and projection cachesThis is useful when + /// repeatedly attemping to select an Obligation while changing only + /// its ParamEnv, since FulfillmentContext doesn't use 'probe' + pub fn clear_caches(&self) { + self.selection_cache.clear(); + self.evaluation_cache.clear(); + self.projection_cache.borrow_mut().clear(); + } + + fn 
universe(&self) -> ty::UniverseIndex { + self.universe.get() + } + + /// Create and return a new subunivese of the current universe; + /// update `self.universe` to that new subuniverse. At present, + /// used only in the NLL subtyping code, which uses the new + /// universe-based scheme instead of the more limited leak-check + /// scheme. + pub fn create_subuniverse(&self) -> ty::UniverseIndex { + let u = self.universe.get().subuniverse(); + self.universe.set(u); + u + } } impl<'a, 'gcx, 'tcx> TypeTrace<'tcx> { @@ -1667,40 +1515,12 @@ impl RegionVariableOrigin { } } -impl<'tcx> TypeFoldable<'tcx> for ValuePairs<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - ValuePairs::Types(ref ef) => { - ValuePairs::Types(ef.fold_with(folder)) - } - ValuePairs::TraitRefs(ref ef) => { - ValuePairs::TraitRefs(ef.fold_with(folder)) - } - ValuePairs::PolyTraitRefs(ref ef) => { - ValuePairs::PolyTraitRefs(ef.fold_with(folder)) - } - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ValuePairs::Types(ref ef) => ef.visit_with(visitor), - ValuePairs::TraitRefs(ref ef) => ef.visit_with(visitor), - ValuePairs::PolyTraitRefs(ref ef) => ef.visit_with(visitor), - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for TypeTrace<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - TypeTrace { - cause: self.cause.fold_with(folder), - values: self.values.fold_with(folder) - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.cause.visit_with(visitor) || self.values.visit_with(visitor) +EnumTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ValuePairs<'tcx> { + (ValuePairs::Types)(a), + (ValuePairs::Regions)(a), + (ValuePairs::TraitRefs)(a), + (ValuePairs::PolyTraitRefs)(a), } } @@ -1711,4 +1531,3 @@ impl<'tcx> fmt::Debug for RegionObligation<'tcx> { self.sup_type) } } - diff --git a/src/librustc/infer/outlives/bounds.rs b/src/librustc/infer/outlives/bounds.rs deleted file mode 100644 index 8a562471ac5d..000000000000 --- a/src/librustc/infer/outlives/bounds.rs +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::InferCtxt; -use syntax::ast; -use syntax::codemap::Span; -use traits::FulfillmentContext; -use ty::{self, Ty, TypeFoldable}; -use ty::outlives::Component; -use ty::wf; - -/// Outlives bounds are relationships between generic parameters, -/// whether they both be regions (`'a: 'b`) or whether types are -/// involved (`T: 'a`). These relationships can be extracted from the -/// full set of predicates we understand or also from types (in which -/// case they are called implied bounds). They are fed to the -/// `OutlivesEnv` which in turn is supplied to the region checker and -/// other parts of the inference system. -#[derive(Debug)] -pub enum OutlivesBound<'tcx> { - RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>), - RegionSubParam(ty::Region<'tcx>, ty::ParamTy), - RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>), -} - -impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { - /// Implied bounds are region relationships that we deduce - /// automatically. The idea is that (e.g.) 
a caller must check that a - /// function's argument types are well-formed immediately before - /// calling that fn, and hence the *callee* can assume that its - /// argument types are well-formed. This may imply certain relationships - /// between generic parameters. For example: - /// - /// fn foo<'a,T>(x: &'a T) - /// - /// can only be called with a `'a` and `T` such that `&'a T` is WF. - /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`. - /// - /// # Parameters - /// - /// - `param_env`, the where-clauses in scope - /// - `body_id`, the body-id to use when normalizing assoc types. - /// Note that this may cause outlives obligations to be injected - /// into the inference context with this body-id. - /// - `ty`, the type that we are supposed to assume is WF. - /// - `span`, a span to use when normalizing, hopefully not important, - /// might be useful if a `bug!` occurs. - pub fn implied_outlives_bounds( - &self, - param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId, - ty: Ty<'tcx>, - span: Span, - ) -> Vec> { - let tcx = self.tcx; - - // Sometimes when we ask what it takes for T: WF, we get back that - // U: WF is required; in that case, we push U onto this stack and - // process it next. Currently (at least) these resulting - // predicates are always guaranteed to be a subset of the original - // type, so we need not fear non-termination. - let mut wf_types = vec![ty]; - - let mut implied_bounds = vec![]; - - let mut fulfill_cx = FulfillmentContext::new(); - - while let Some(ty) = wf_types.pop() { - // Compute the obligations for `ty` to be well-formed. If `ty` is - // an unresolved inference variable, just substituted an empty set - // -- because the return type here is going to be things we *add* - // to the environment, it's always ok for this set to be smaller - // than the ultimate set. (Note: normally there won't be - // unresolved inference variables here anyway, but there might be - // during typeck under some circumstances.) 
- let obligations = wf::obligations(self, param_env, body_id, ty, span).unwrap_or(vec![]); - - // NB: All of these predicates *ought* to be easily proven - // true. In fact, their correctness is (mostly) implied by - // other parts of the program. However, in #42552, we had - // an annoying scenario where: - // - // - Some `T::Foo` gets normalized, resulting in a - // variable `_1` and a `T: Trait` constraint - // (not sure why it couldn't immediately get - // solved). This result of `_1` got cached. - // - These obligations were dropped on the floor here, - // rather than being registered. - // - Then later we would get a request to normalize - // `T::Foo` which would result in `_1` being used from - // the cache, but hence without the `T: Trait` - // constraint. As a result, `_1` never gets resolved, - // and we get an ICE (in dropck). - // - // Therefore, we register any predicates involving - // inference variables. We restrict ourselves to those - // involving inference variables both for efficiency and - // to avoids duplicate errors that otherwise show up. - fulfill_cx.register_predicate_obligations( - self, - obligations - .iter() - .filter(|o| o.predicate.has_infer_types()) - .cloned(), - ); - - // From the full set of obligations, just filter down to the - // region relationships. - implied_bounds.extend(obligations.into_iter().flat_map(|obligation| { - assert!(!obligation.has_escaping_regions()); - match obligation.predicate { - ty::Predicate::Trait(..) | - ty::Predicate::Equate(..) | - ty::Predicate::Subtype(..) | - ty::Predicate::Projection(..) | - ty::Predicate::ClosureKind(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::ConstEvaluatable(..) 
=> vec![], - - ty::Predicate::WellFormed(subty) => { - wf_types.push(subty); - vec![] - } - - ty::Predicate::RegionOutlives(ref data) => match data.no_late_bound_regions() { - None => vec![], - Some(ty::OutlivesPredicate(r_a, r_b)) => { - vec![OutlivesBound::RegionSubRegion(r_b, r_a)] - } - }, - - ty::Predicate::TypeOutlives(ref data) => match data.no_late_bound_regions() { - None => vec![], - Some(ty::OutlivesPredicate(ty_a, r_b)) => { - let ty_a = self.resolve_type_vars_if_possible(&ty_a); - let components = tcx.outlives_components(ty_a); - Self::implied_bounds_from_components(r_b, components) - } - }, - } - })); - } - - // Ensure that those obligations that we had to solve - // get solved *here*. - match fulfill_cx.select_all_or_error(self) { - Ok(()) => (), - Err(errors) => self.report_fulfillment_errors(&errors, None), - } - - implied_bounds - } - - /// When we have an implied bound that `T: 'a`, we can further break - /// this down to determine what relationships would have to hold for - /// `T: 'a` to hold. We get to assume that the caller has validated - /// those relationships. - fn implied_bounds_from_components( - sub_region: ty::Region<'tcx>, - sup_components: Vec>, - ) -> Vec> { - sup_components - .into_iter() - .flat_map(|component| { - match component { - Component::Region(r) => - vec![OutlivesBound::RegionSubRegion(sub_region, r)], - Component::Param(p) => - vec![OutlivesBound::RegionSubParam(sub_region, p)], - Component::Projection(p) => - vec![OutlivesBound::RegionSubProjection(sub_region, p)], - Component::EscapingProjection(_) => - // If the projection has escaping regions, don't - // try to infer any implied bounds even for its - // free components. This is conservative, because - // the caller will still have to prove that those - // free components outlive `sub_region`. But the - // idea is that the WAY that the caller proves - // that may change in the future and we want to - // give ourselves room to get smarter here. 
- vec![], - Component::UnresolvedInferenceVariable(..) => - vec![], - } - }) - .collect() - } -} - -pub fn explicit_outlives_bounds<'tcx>( - param_env: ty::ParamEnv<'tcx>, -) -> impl Iterator> + 'tcx { - debug!("explicit_outlives_bounds()"); - param_env - .caller_bounds - .into_iter() - .filter_map(move |predicate| match predicate { - ty::Predicate::Projection(..) | - ty::Predicate::Trait(..) | - ty::Predicate::Equate(..) | - ty::Predicate::Subtype(..) | - ty::Predicate::WellFormed(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::ClosureKind(..) | - ty::Predicate::TypeOutlives(..) | - ty::Predicate::ConstEvaluatable(..) => None, - ty::Predicate::RegionOutlives(ref data) => data.no_late_bound_regions().map( - |ty::OutlivesPredicate(r_a, r_b)| OutlivesBound::RegionSubRegion(r_b, r_a), - ), - }) -} diff --git a/src/librustc/infer/outlives/env.rs b/src/librustc/infer/outlives/env.rs index d47507592f80..7f59a6794efb 100644 --- a/src/librustc/infer/outlives/env.rs +++ b/src/librustc/infer/outlives/env.rs @@ -10,7 +10,7 @@ use infer::{GenericKind, InferCtxt}; use infer::outlives::free_region_map::FreeRegionMap; -use infer::outlives::bounds::{self, OutlivesBound}; +use traits::query::outlives_bounds::{self, OutlivesBound}; use ty::{self, Ty}; use syntax::ast; @@ -50,7 +50,7 @@ impl<'a, 'gcx: 'tcx, 'tcx: 'a> OutlivesEnvironment<'tcx> { region_bound_pairs: vec![], }; - env.add_outlives_bounds(None, bounds::explicit_outlives_bounds(param_env)); + env.add_outlives_bounds(None, outlives_bounds::explicit_outlives_bounds(param_env)); env } diff --git a/src/librustc/infer/outlives/mod.rs b/src/librustc/infer/outlives/mod.rs index 6aafebe79c67..bb3703b21573 100644 --- a/src/librustc/infer/outlives/mod.rs +++ b/src/librustc/infer/outlives/mod.rs @@ -12,5 +12,4 @@ pub mod env; pub mod free_region_map; -pub mod bounds; -mod obligations; +pub mod obligations; diff --git a/src/librustc/infer/outlives/obligations.rs b/src/librustc/infer/outlives/obligations.rs index 
eda2e1f7b4ef..3598d66060bf 100644 --- a/src/librustc/infer/outlives/obligations.rs +++ b/src/librustc/infer/outlives/obligations.rs @@ -71,11 +71,11 @@ use hir::def_id::DefId; use infer::{self, GenericKind, InferCtxt, RegionObligation, SubregionOrigin, VerifyBound}; -use traits; -use ty::{self, Ty, TyCtxt, TypeFoldable}; -use ty::subst::{Subst, Substs}; -use ty::outlives::Component; use syntax::ast; +use traits; +use ty::outlives::Component; +use ty::subst::{Subst, Substs}; +use ty::{self, Ty, TyCtxt, TypeFoldable}; impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// Registers that the given region obligation must be resolved @@ -90,8 +90,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { ) { debug!( "register_region_obligation(body_id={:?}, obligation={:?})", - body_id, - obligation + body_id, obligation ); self.region_obligations @@ -99,6 +98,11 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { .push((body_id, obligation)); } + /// Trait queries just want to pass back type obligations "as is" + pub fn take_registered_region_obligations(&self) -> Vec<(ast::NodeId, RegionObligation<'tcx>)> { + ::std::mem::replace(&mut *self.region_obligations.borrow_mut(), vec![]) + } + /// Process the region obligations that must be proven (during /// `regionck`) for the given `body_id`, given information about /// the region bounds in scope and so forth. This function must be @@ -106,7 +110,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// done (or else an assert will fire). /// /// See the `region_obligations` field of `InferCtxt` for some - /// comments about how this funtion fits into the overall expected + /// comments about how this function fits into the overall expected /// flow of the the inferencer. The key point is that it is /// invoked after all type-inference variables have been bound -- /// towards the end of regionck. 
This also ensures that the @@ -150,13 +154,19 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { let mut my_region_obligations = Vec::with_capacity(self.region_obligations.borrow().len()); { let mut r_o = self.region_obligations.borrow_mut(); - for (_, obligation) in r_o.drain_filter(|(ro_body_id, _)| *ro_body_id == body_id) { - my_region_obligations.push(obligation); - } + my_region_obligations.extend( + r_o.drain_filter(|(ro_body_id, _)| *ro_body_id == body_id) + .map(|(_, obligation)| obligation) + ); } - let outlives = - TypeOutlives::new(self, region_bound_pairs, implicit_region_bound, param_env); + let outlives = &mut TypeOutlives::new( + self, + self.tcx, + region_bound_pairs, + implicit_region_bound, + param_env, + ); for RegionObligation { sup_type, @@ -166,16 +176,14 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { { debug!( "process_registered_region_obligations: sup_type={:?} sub_region={:?} cause={:?}", - sup_type, - sub_region, - cause + sup_type, sub_region, cause ); - let origin = SubregionOrigin::from_obligation_cause( - &cause, - || infer::RelateParamBound(cause.span, sup_type), - ); + let origin = SubregionOrigin::from_obligation_cause(&cause, || { + infer::RelateParamBound(cause.span, sup_type) + }); + let sup_type = self.resolve_type_vars_if_possible(&sup_type); outlives.type_must_outlive(origin, sup_type, sub_region); } } @@ -191,31 +199,68 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { ty: Ty<'tcx>, region: ty::Region<'tcx>, ) { - let outlives = - TypeOutlives::new(self, region_bound_pairs, implicit_region_bound, param_env); + let outlives = &mut TypeOutlives::new( + self, + self.tcx, + region_bound_pairs, + implicit_region_bound, + param_env, + ); + let ty = self.resolve_type_vars_if_possible(&ty); outlives.type_must_outlive(origin, ty, region); } } -#[must_use] // you ought to invoke `into_accrued_obligations` when you are done =) -struct TypeOutlives<'cx, 'gcx: 'tcx, 'tcx: 'cx> { +/// The `TypeOutlives` struct has the job of 
"lowering" a `T: 'a` +/// obligation into a series of `'a: 'b` constraints and "verifys", as +/// described on the module comment. The final constraints are emitted +/// via a "delegate" of type `D` -- this is usually the `infcx`, which +/// accrues them into the `region_obligations` code, but for NLL we +/// use something else. +pub struct TypeOutlives<'cx, 'gcx: 'tcx, 'tcx: 'cx, D> +where + D: TypeOutlivesDelegate<'tcx>, +{ // See the comments on `process_registered_region_obligations` for the meaning // of these fields. - infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, + delegate: D, + tcx: TyCtxt<'cx, 'gcx, 'tcx>, region_bound_pairs: &'cx [(ty::Region<'tcx>, GenericKind<'tcx>)], implicit_region_bound: Option>, param_env: ty::ParamEnv<'tcx>, } -impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { - fn new( - infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, +pub trait TypeOutlivesDelegate<'tcx> { + fn push_sub_region_constraint( + &mut self, + origin: SubregionOrigin<'tcx>, + a: ty::Region<'tcx>, + b: ty::Region<'tcx>, + ); + + fn push_verify( + &mut self, + origin: SubregionOrigin<'tcx>, + kind: GenericKind<'tcx>, + a: ty::Region<'tcx>, + bound: VerifyBound<'tcx>, + ); +} + +impl<'cx, 'gcx, 'tcx, D> TypeOutlives<'cx, 'gcx, 'tcx, D> +where + D: TypeOutlivesDelegate<'tcx>, +{ + pub fn new( + delegate: D, + tcx: TyCtxt<'cx, 'gcx, 'tcx>, region_bound_pairs: &'cx [(ty::Region<'tcx>, GenericKind<'tcx>)], implicit_region_bound: Option>, param_env: ty::ParamEnv<'tcx>, ) -> Self { Self { - infcx, + delegate, + tcx, region_bound_pairs, implicit_region_bound, param_env, @@ -230,33 +275,25 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { /// - `origin`, the reason we need this constraint /// - `ty`, the type `T` /// - `region`, the region `'a` - fn type_must_outlive( - &self, + pub fn type_must_outlive( + &mut self, origin: infer::SubregionOrigin<'tcx>, ty: Ty<'tcx>, region: ty::Region<'tcx>, ) { - let ty = self.infcx.resolve_type_vars_if_possible(&ty); - debug!( 
"type_must_outlive(ty={:?}, region={:?}, origin={:?})", - ty, - region, - origin + ty, region, origin ); assert!(!ty.has_escaping_regions()); - let components = self.tcx().outlives_components(ty); + let components = self.tcx.outlives_components(ty); self.components_must_outlive(origin, components, region); } - fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> { - self.infcx.tcx - } - fn components_must_outlive( - &self, + &mut self, origin: infer::SubregionOrigin<'tcx>, components: Vec>, region: ty::Region<'tcx>, @@ -265,7 +302,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { let origin = origin.clone(); match component { Component::Region(region1) => { - self.infcx.sub_regions(origin, region, region1); + self.delegate.push_sub_region_constraint(origin, region, region1); } Component::Param(param_ty) => { self.param_ty_must_outlive(origin, region, param_ty); @@ -280,7 +317,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { // ignore this, we presume it will yield an error // later, since if a type variable is not resolved by // this point it never will be - self.infcx.tcx.sess.delay_span_bug( + self.tcx.sess.delay_span_bug( origin.span(), &format!("unresolved inference variable in outlives: {:?}", v), ); @@ -290,35 +327,31 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { } fn param_ty_must_outlive( - &self, + &mut self, origin: infer::SubregionOrigin<'tcx>, region: ty::Region<'tcx>, param_ty: ty::ParamTy, ) { debug!( "param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})", - region, - param_ty, - origin + region, param_ty, origin ); let verify_bound = self.param_bound(param_ty); let generic = GenericKind::Param(param_ty); - self.infcx - .verify_generic_bound(origin, generic, region, verify_bound); + self.delegate + .push_verify(origin, generic, region, verify_bound); } fn projection_must_outlive( - &self, + &mut self, origin: infer::SubregionOrigin<'tcx>, region: ty::Region<'tcx>, projection_ty: ty::ProjectionTy<'tcx>, ) { debug!( 
"projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})", - region, - projection_ty, - origin + region, projection_ty, origin ); // This case is thorny for inference. The fundamental problem is @@ -372,7 +405,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { } for r in projection_ty.substs.regions() { - self.infcx.sub_regions(origin.clone(), region, r); + self.delegate.push_sub_region_constraint(origin.clone(), region, r); } return; @@ -398,7 +431,8 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { .any(|r| env_bounds.contains(&r)) { debug!("projection_must_outlive: unique declared bound appears in trait ref"); - self.infcx.sub_regions(origin.clone(), region, unique_bound); + self.delegate + .push_sub_region_constraint(origin.clone(), region, unique_bound); return; } } @@ -410,8 +444,8 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { // even though a satisfactory solution exists. let verify_bound = self.projection_bound(env_bounds, projection_ty); let generic = GenericKind::Projection(projection_ty); - self.infcx - .verify_generic_bound(origin, generic.clone(), region, verify_bound); + self.delegate + .push_verify(origin, generic.clone(), region, verify_bound); } fn type_bound(&self, ty: Ty<'tcx>) -> VerifyBound<'tcx> { @@ -459,12 +493,11 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { ) -> VerifyBound<'tcx> { debug!( "projection_bound(declared_bounds={:?}, projection_ty={:?})", - declared_bounds, - projection_ty + declared_bounds, projection_ty ); // see the extensive comment in projection_must_outlive - let ty = self.infcx + let ty = self .tcx .mk_projection(projection_ty.item_def_id, projection_ty.substs); let recursive_bound = self.recursive_type_bound(ty); @@ -473,11 +506,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { } fn recursive_type_bound(&self, ty: Ty<'tcx>) -> VerifyBound<'tcx> { - let mut bounds = vec![]; - - for subty in ty.walk_shallow() { - bounds.push(self.type_bound(subty)); - } + let mut 
bounds = ty.walk_shallow().map(|subty| self.type_bound(subty)).collect::>(); let mut regions = ty.regions(); regions.retain(|r| !r.is_late_bound()); // ignore late-bound regions @@ -497,7 +526,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { &self, generic: GenericKind<'tcx>, ) -> Vec> { - let tcx = self.tcx(); + let tcx = self.tcx; // To start, collect bounds from user environment. Note that // parameter environments are already elaborated, so we don't @@ -549,7 +578,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { debug!("projection_bounds(projection_ty={:?})", projection_ty); let mut bounds = self.region_bounds_declared_on_associated_item(projection_ty.item_def_id); for r in &mut bounds { - *r = r.subst(self.tcx(), projection_ty.substs); + *r = r.subst(self.tcx, projection_ty.substs); } bounds } @@ -588,7 +617,7 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { &self, assoc_item_def_id: DefId, ) -> Vec> { - let tcx = self.tcx(); + let tcx = self.tcx; let assoc_item = tcx.associated_item(assoc_item_def_id); let trait_def_id = assoc_item.container.assert_trait(); let trait_predicates = tcx.predicates_of(trait_def_id); @@ -624,3 +653,25 @@ impl<'cx, 'gcx, 'tcx> TypeOutlives<'cx, 'gcx, 'tcx> { .collect() } } + +impl<'cx, 'gcx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'gcx, 'tcx> { + fn push_sub_region_constraint( + &mut self, + origin: SubregionOrigin<'tcx>, + a: ty::Region<'tcx>, + b: ty::Region<'tcx>, + ) { + self.sub_regions(origin, a, b) + } + + fn push_verify( + &mut self, + origin: SubregionOrigin<'tcx>, + kind: GenericKind<'tcx>, + a: ty::Region<'tcx>, + bound: VerifyBound<'tcx>, + ) { + self.verify_generic_bound(origin, kind, a, bound) + } +} + diff --git a/src/librustc/infer/region_constraints/README.md b/src/librustc/infer/region_constraints/README.md index 67ad08c75303..61603e6dee68 100644 --- a/src/librustc/infer/region_constraints/README.md +++ b/src/librustc/infer/region_constraints/README.md @@ -1,25 +1,32 @@ 
# Region constraint collection +> WARNING: This README is obsolete and will be removed soon! For +> more info on how the current borrowck works, see the [rustc guide]. + +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html + ## Terminology Note that we use the terms region and lifetime interchangeably. ## Introduction -As described in the [inference README](../README.md), and unlike +As described in the rustc guide [chapter on type inference][ti], and unlike normal type inference, which is similar in spirit to H-M and thus works progressively, the region type inference works by accumulating constraints over the course of a function. Finally, at the end of processing a function, we process and solve the constraints all at once. +[ti]: https://rust-lang-nursery.github.io/rustc-guide/type-inference.html + The constraints are always of one of three possible forms: - `ConstrainVarSubVar(Ri, Rj)` states that region variable Ri must be a subregion of Rj - `ConstrainRegSubVar(R, Ri)` states that the concrete region R (which must not be a variable) must be a subregion of the variable Ri -- `ConstrainVarSubReg(Ri, R)` states the variable Ri shoudl be less +- `ConstrainVarSubReg(Ri, R)` states the variable Ri should be less than the concrete region R. This is kind of deprecated and ought to be replaced with a verify (they essentially play the same role). 
diff --git a/src/librustc/infer/region_constraints/mod.rs b/src/librustc/infer/region_constraints/mod.rs index 72740dd40be2..296808cea2bd 100644 --- a/src/librustc/infer/region_constraints/mod.rs +++ b/src/librustc/infer/region_constraints/mod.rs @@ -18,22 +18,20 @@ use super::unify_key; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_data_structures::unify::{self, UnificationTable}; +use rustc_data_structures::unify as ut; use ty::{self, Ty, TyCtxt}; use ty::{Region, RegionVid}; use ty::ReStatic; -use ty::{BrFresh, ReLateBound, ReSkolemized, ReVar}; +use ty::{BrFresh, ReLateBound, ReVar}; use std::collections::BTreeMap; -use std::fmt; -use std::mem; -use std::u32; +use std::{cmp, fmt, mem, u32}; mod taint; pub struct RegionConstraintCollector<'tcx> { /// For each `RegionVid`, the corresponding `RegionVariableOrigin`. - var_origins: IndexVec, + var_infos: IndexVec, data: RegionConstraintData<'tcx>, @@ -47,9 +45,6 @@ pub struct RegionConstraintCollector<'tcx> { /// exist). This prevents us from making many such regions. glbs: CombineMap<'tcx>, - /// Number of skolemized variables currently active. - skolemization_count: u32, - /// Global counter used during the GLB algorithm to create unique /// names for fresh bound regions bound_count: u32, @@ -73,16 +68,20 @@ pub struct RegionConstraintCollector<'tcx> { /// is iterating to a fixed point, because otherwise we sometimes /// would wind up with a fresh stream of region variables that /// have been equated but appear distinct. - unification_table: UnificationTable, + unification_table: ut::UnificationTable>, + + /// a flag set to true when we perform any unifications; this is used + /// to micro-optimize `take_and_reset_data` + any_unifications: bool, } -pub type VarOrigins = IndexVec; +pub type VarInfos = IndexVec; /// The full set of region constraints gathered up by the collector. 
/// Describes constraints between the region variables and other /// regions, as well as other conditions that must be verified, or /// assumptions that can be made. -#[derive(Default)] +#[derive(Debug, Default, Clone)] pub struct RegionConstraintData<'tcx> { /// Constraints of the form `A <= B`, where either `A` or `B` can /// be a region variable (or neither, as it happens). @@ -142,7 +141,7 @@ pub enum Constraint<'tcx> { /// outlive `RS`. Therefore verify that `R <= RS[i]` for some /// `i`. Inference variables may be involved (but this verification /// step doesn't influence inference). -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Verify<'tcx> { pub kind: GenericKind<'tcx>, pub origin: SubregionOrigin<'tcx>, @@ -159,7 +158,7 @@ pub enum GenericKind<'tcx> { /// When we introduce a verification step, we wish to test that a /// particular region (let's call it `'min`) meets some bound. /// The bound is described the by the following grammar: -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum VerifyBound<'tcx> { /// B = exists {R} --> some 'r in {R} must outlive 'min /// @@ -230,10 +229,16 @@ enum CombineMapType { type CombineMap<'tcx> = FxHashMap, RegionVid>; +#[derive(Debug, Clone, Copy)] +pub struct RegionVariableInfo { + pub origin: RegionVariableOrigin, + pub universe: ty::UniverseIndex, +} + pub struct RegionSnapshot { length: usize, - region_snapshot: unify::Snapshot, - skolemization_count: u32, + region_snapshot: ut::Snapshot>, + any_unifications: bool, } /// When working with skolemized regions, we often wish to find all of @@ -273,27 +278,31 @@ impl TaintDirections { impl<'tcx> RegionConstraintCollector<'tcx> { pub fn new() -> RegionConstraintCollector<'tcx> { RegionConstraintCollector { - var_origins: VarOrigins::default(), + var_infos: VarInfos::default(), data: RegionConstraintData::default(), lubs: FxHashMap(), glbs: FxHashMap(), - skolemization_count: 0, bound_count: 0, undo_log: Vec::new(), - unification_table: UnificationTable::new(), + 
unification_table: ut::UnificationTable::new(), + any_unifications: false, } } - pub fn var_origins(&self) -> &VarOrigins { - &self.var_origins + pub fn num_region_vars(&self) -> usize { + self.var_infos.len() + } + + pub fn region_constraint_data(&self) -> &RegionConstraintData<'tcx> { + &self.data } /// Once all the constraints have been gathered, extract out the final data. /// /// Not legal during a snapshot. - pub fn into_origins_and_data(self) -> (VarOrigins, RegionConstraintData<'tcx>) { + pub fn into_infos_and_data(self) -> (VarInfos, RegionConstraintData<'tcx>) { assert!(!self.in_snapshot()); - (self.var_origins, self.data) + (self.var_infos, self.data) } /// Takes (and clears) the current set of constraints. Note that @@ -315,18 +324,16 @@ impl<'tcx> RegionConstraintCollector<'tcx> { // should think carefully about whether it needs to be cleared // or updated in some way. let RegionConstraintCollector { - var_origins, + var_infos: _, data, lubs, glbs, - skolemization_count, bound_count: _, undo_log: _, unification_table, + any_unifications, } = self; - assert_eq!(*skolemization_count, 0); - // Clear the tables of (lubs, glbs), so that we will create // fresh regions if we do a LUB operation. As it happens, // LUB/GLB are not performed by the MIR type-checker, which is @@ -338,14 +345,18 @@ impl<'tcx> RegionConstraintCollector<'tcx> { // un-unified" state. Note that when we unify `a` and `b`, we // also insert `a <= b` and a `b <= a` edges, so the // `RegionConstraintData` contains the relationship here. 
- *unification_table = UnificationTable::new(); - for vid in var_origins.indices() { - unification_table.new_key(unify_key::RegionVidKey { min_vid: vid }); + if *any_unifications { + unification_table.reset_unifications(|vid| unify_key::RegionVidKey { min_vid: vid }); + *any_unifications = false; } mem::replace(data, RegionConstraintData::default()) } + pub fn data(&self) -> &RegionConstraintData<'tcx> { + &self.data + } + fn in_snapshot(&self) -> bool { !self.undo_log.is_empty() } @@ -357,7 +368,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { RegionSnapshot { length, region_snapshot: self.unification_table.snapshot(), - skolemization_count: self.skolemization_count, + any_unifications: self.any_unifications, } } @@ -365,12 +376,6 @@ impl<'tcx> RegionConstraintCollector<'tcx> { debug!("RegionConstraintCollector: commit({})", snapshot.length); assert!(self.undo_log.len() > snapshot.length); assert!(self.undo_log[snapshot.length] == OpenSnapshot); - assert!( - self.skolemization_count == snapshot.skolemization_count, - "failed to pop skolemized regions: {} now vs {} at start", - self.skolemization_count, - snapshot.skolemization_count - ); if snapshot.length == 0 { self.undo_log.truncate(0); @@ -390,8 +395,8 @@ impl<'tcx> RegionConstraintCollector<'tcx> { } let c = self.undo_log.pop().unwrap(); assert!(c == OpenSnapshot); - self.skolemization_count = snapshot.skolemization_count; self.unification_table.rollback_to(snapshot.region_snapshot); + self.any_unifications = snapshot.any_unifications; } fn rollback_undo_entry(&mut self, undo_entry: UndoLogEntry<'tcx>) { @@ -403,8 +408,8 @@ impl<'tcx> RegionConstraintCollector<'tcx> { // nothing to do here } AddVar(vid) => { - self.var_origins.pop().unwrap(); - assert_eq!(self.var_origins.len(), vid.index() as usize); + self.var_infos.pop().unwrap(); + assert_eq!(self.var_infos.len(), vid.index() as usize); } AddConstraint(ref constraint) => { self.data.constraints.remove(constraint); @@ -425,8 +430,13 @@ impl<'tcx> 
RegionConstraintCollector<'tcx> { } } - pub fn new_region_var(&mut self, origin: RegionVariableOrigin) -> RegionVid { - let vid = self.var_origins.push(origin.clone()); + pub fn new_region_var(&mut self, + universe: ty::UniverseIndex, + origin: RegionVariableOrigin) -> RegionVid { + let vid = self.var_infos.push(RegionVariableInfo { + origin, + universe, + }); let u_vid = self.unification_table .new_key(unify_key::RegionVidKey { min_vid: vid }); @@ -442,42 +452,14 @@ impl<'tcx> RegionConstraintCollector<'tcx> { return vid; } - /// Returns the origin for the given variable. - pub fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin { - self.var_origins[vid].clone() + /// Returns the universe for the given variable. + pub fn var_universe(&self, vid: RegionVid) -> ty::UniverseIndex { + self.var_infos[vid].universe } - /// Creates a new skolemized region. Skolemized regions are fresh - /// regions used when performing higher-ranked computations. They - /// must be used in a very particular way and are never supposed - /// to "escape" out into error messages or the code at large. - /// - /// The idea is to always create a snapshot. Skolemized regions - /// can be created in the context of this snapshot, but before the - /// snapshot is committed or rolled back, they must be popped - /// (using `pop_skolemized_regions`), so that their numbers can be - /// recycled. Normally you don't have to think about this: you use - /// the APIs in `higher_ranked/mod.rs`, such as - /// `skolemize_late_bound_regions` and `plug_leaks`, which will - /// guide you on this path (ensure that the `SkolemizationMap` is - /// consumed and you are good). There are also somewhat extensive - /// comments in `higher_ranked/README.md`. - /// - /// The `snapshot` argument to this function is not really used; - /// it's just there to make it explicit which snapshot bounds the - /// skolemized region that results. It should always be the top-most snapshot. 
- pub fn push_skolemized( - &mut self, - tcx: TyCtxt<'_, '_, 'tcx>, - br: ty::BoundRegion, - snapshot: &RegionSnapshot, - ) -> Region<'tcx> { - assert!(self.in_snapshot()); - assert!(self.undo_log[snapshot.length] == OpenSnapshot); - - let sc = self.skolemization_count; - self.skolemization_count = sc + 1; - tcx.mk_region(ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)) + /// Returns the origin for the given variable. + pub fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin { + self.var_infos[vid].origin } /// Removes all the edges to/from the skolemized regions that are @@ -486,7 +468,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { /// created in that time. pub fn pop_skolemized( &mut self, - _tcx: TyCtxt<'_, '_, 'tcx>, + skolemization_count: ty::UniverseIndex, skols: &FxHashSet>, snapshot: &RegionSnapshot, ) { @@ -495,36 +477,28 @@ impl<'tcx> RegionConstraintCollector<'tcx> { assert!(self.in_snapshot()); assert!(self.undo_log[snapshot.length] == OpenSnapshot); assert!( - self.skolemization_count as usize >= skols.len(), + skolemization_count.as_usize() >= skols.len(), "popping more skolemized variables than actually exist, \ - sc now = {}, skols.len = {}", - self.skolemization_count, + sc now = {:?}, skols.len = {:?}", + skolemization_count, skols.len() ); - let last_to_pop = self.skolemization_count; - let first_to_pop = last_to_pop - (skols.len() as u32); + let last_to_pop = skolemization_count.subuniverse(); + let first_to_pop = ty::UniverseIndex::from(last_to_pop.as_u32() - skols.len() as u32); - assert!( - first_to_pop >= snapshot.skolemization_count, - "popping more regions than snapshot contains, \ - sc now = {}, sc then = {}, skols.len = {}", - self.skolemization_count, - snapshot.skolemization_count, - skols.len() - ); debug_assert! 
{ skols.iter() .all(|&k| match *k { - ty::ReSkolemized(index, _) => - index.index >= first_to_pop && - index.index < last_to_pop, + ty::ReSkolemized(universe, _) => + universe >= first_to_pop && + universe < last_to_pop, _ => false }), - "invalid skolemization keys or keys out of range ({}..{}): {:?}", - snapshot.skolemization_count, - self.skolemization_count, + "invalid skolemization keys or keys out of range ({:?}..{:?}): {:?}", + first_to_pop, + last_to_pop, skols } @@ -541,7 +515,6 @@ impl<'tcx> RegionConstraintCollector<'tcx> { self.rollback_undo_entry(undo_entry); } - self.skolemization_count = snapshot.skolemization_count; return; fn kill_constraint<'tcx>( @@ -662,6 +635,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { if let (ty::ReVar(sub), ty::ReVar(sup)) = (*sub, *sup) { self.unification_table.union(sub, sup); + self.any_unifications = true; } } } @@ -772,7 +746,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { tcx: TyCtxt<'_, '_, 'tcx>, rid: RegionVid, ) -> ty::Region<'tcx> { - let vid = self.unification_table.find_value(rid).min_vid; + let vid = self.unification_table.probe_value(rid).min_vid; tcx.mk_region(ty::ReVar(vid)) } @@ -795,7 +769,10 @@ impl<'tcx> RegionConstraintCollector<'tcx> { if let Some(&c) = self.combine_map(t).get(&vars) { return tcx.mk_region(ReVar(c)); } - let c = self.new_region_var(MiscVariable(origin.span())); + let a_universe = self.universe(a); + let b_universe = self.universe(b); + let c_universe = cmp::max(a_universe, b_universe); + let c = self.new_region_var(c_universe, MiscVariable(origin.span())); self.combine_map(t).insert(vars, c); if self.in_snapshot() { self.undo_log.push(AddCombination(t, vars)); @@ -811,6 +788,24 @@ impl<'tcx> RegionConstraintCollector<'tcx> { new_r } + fn universe(&self, region: Region<'tcx>) -> ty::UniverseIndex { + match *region { + ty::ReScope(..) | + ty::ReStatic | + ty::ReEmpty | + ty::ReErased | + ty::ReFree(..) | + ty::ReEarlyBound(..) 
=> ty::UniverseIndex::ROOT, + ty::ReSkolemized(universe, _) => universe, + ty::ReClosureBound(vid) | + ty::ReVar(vid) => self.var_universe(vid), + ty::ReLateBound(..) => + bug!("universe(): encountered bound region {:?}", region), + ty::ReCanonical(..) => + bug!("region_universe(): encountered canonical region {:?}", region), + } + } + pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec { self.undo_log[mark.length..] .iter() @@ -855,12 +850,7 @@ impl<'tcx> RegionConstraintCollector<'tcx> { impl fmt::Debug for RegionSnapshot { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "RegionSnapshot(length={},skolemization={})", - self.length, - self.skolemization_count - ) + write!(f, "RegionSnapshot(length={})", self.length) } } @@ -892,7 +882,7 @@ impl<'a, 'gcx, 'tcx> GenericKind<'tcx> { } impl<'a, 'gcx, 'tcx> VerifyBound<'tcx> { - fn for_each_region(&self, f: &mut FnMut(ty::Region<'tcx>)) { + fn for_each_region(&self, f: &mut dyn FnMut(ty::Region<'tcx>)) { match self { &VerifyBound::AnyRegion(ref rs) | &VerifyBound::AllRegions(ref rs) => for &r in rs { f(r); diff --git a/src/librustc/infer/resolve.rs b/src/librustc/infer/resolve.rs index 5e70c0ce368f..77b722c5695f 100644 --- a/src/librustc/infer/resolve.rs +++ b/src/librustc/infer/resolve.rs @@ -173,12 +173,6 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for FullTypeResolver<'a, 'gcx, 'tcx> ty::TyInfer(_) => { bug!("Unexpected type in full type resolver: {:?}", t); } - ty::TyTuple(tys, true) => { - // Un-default defaulted tuples - we are going to a - // different infcx, and the default will just cause - // pollution. 
- self.tcx().intern_tup(tys, false) - } _ => { t.super_fold_with(self) } diff --git a/src/librustc/infer/sub.rs b/src/librustc/infer/sub.rs index f891f692c7d8..58eae5e6a5b9 100644 --- a/src/librustc/infer/sub.rs +++ b/src/librustc/infer/sub.rs @@ -98,7 +98,7 @@ impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> self.fields.trace.cause.clone(), self.fields.param_env, ty::Predicate::Subtype( - ty::Binder(ty::SubtypePredicate { + ty::Binder::dummy(ty::SubtypePredicate { a_is_expected: self.a_is_expected, a, b, diff --git a/src/librustc/infer/type_variable.rs b/src/librustc/infer/type_variable.rs index 6aa094d2cd6d..d40e1b3760f0 100644 --- a/src/librustc/infer/type_variable.rs +++ b/src/librustc/infer/type_variable.rs @@ -8,26 +8,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use self::TypeVariableValue::*; -use hir::def_id::{DefId}; -use syntax::ast; +use syntax::symbol::InternedString; use syntax_pos::Span; use ty::{self, Ty}; -use std::cmp::min; +use std::cmp; use std::marker::PhantomData; -use std::mem; use std::u32; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::snapshot_vec as sv; use rustc_data_structures::unify as ut; pub struct TypeVariableTable<'tcx> { - values: sv::SnapshotVec>, + values: sv::SnapshotVec, /// Two variables are unified in `eq_relations` when we have a - /// constraint `?X == ?Y`. - eq_relations: ut::UnificationTable, + /// constraint `?X == ?Y`. This table also stores, for each key, + /// the known value. + eq_relations: ut::UnificationTable>>, /// Two variables are unified in `eq_relations` when we have a /// constraint `?X <: ?Y` *or* a constraint `?Y <: ?X`. This second @@ -46,7 +44,7 @@ pub struct TypeVariableTable<'tcx> { /// This is reasonable because, in Rust, subtypes have the same /// "skeleton" and hence there is no possible type such that /// (e.g.) `Box <: ?3` for any `?3`. 
- sub_relations: ut::UnificationTable, + sub_relations: ut::UnificationTable>, } /// Reasons to create a type inference variable @@ -55,7 +53,7 @@ pub enum TypeVariableOrigin { MiscVariable(Span), NormalizeProjectionType(Span), TypeInference(Span), - TypeParameterDefinition(Span, ast::Name), + TypeParameterDefinition(Span, InternedString), /// one of the upvars or closure kind parameters in a `ClosureSubsts` /// (before it has been determined) @@ -72,42 +70,46 @@ pub enum TypeVariableOrigin { pub type TypeVariableMap = FxHashMap; -struct TypeVariableData<'tcx> { - value: TypeVariableValue<'tcx>, +struct TypeVariableData { origin: TypeVariableOrigin, diverging: bool } -enum TypeVariableValue<'tcx> { - Known(Ty<'tcx>), - Bounded { - default: Option> +#[derive(Copy, Clone, Debug)] +pub enum TypeVariableValue<'tcx> { + Known { value: Ty<'tcx> }, + Unknown { universe: ty::UniverseIndex }, +} + +impl<'tcx> TypeVariableValue<'tcx> { + /// If this value is known, returns the type it is known to be. + /// Otherwise, `None`. + pub fn known(&self) -> Option> { + match *self { + TypeVariableValue::Unknown { .. } => None, + TypeVariableValue::Known { value } => Some(value), + } + } + + pub fn is_unknown(&self) -> bool { + match *self { + TypeVariableValue::Unknown { .. } => true, + TypeVariableValue::Known { .. } => false, + } } } -// We will use this to store the required information to recapitulate what happened when -// an error occurs. 
-#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct Default<'tcx> { - pub ty: Ty<'tcx>, - /// The span where the default was incurred - pub origin_span: Span, - /// The definition that the default originates from - pub def_id: DefId -} - -pub struct Snapshot { +pub struct Snapshot<'tcx> { snapshot: sv::Snapshot, - eq_snapshot: ut::Snapshot, - sub_snapshot: ut::Snapshot, + eq_snapshot: ut::Snapshot>>, + sub_snapshot: ut::Snapshot>, } -struct Instantiate<'tcx> { +struct Instantiate { vid: ty::TyVid, - default: Option>, } -struct Delegate<'tcx>(PhantomData<&'tcx ()>); +struct Delegate; impl<'tcx> TypeVariableTable<'tcx> { pub fn new() -> TypeVariableTable<'tcx> { @@ -118,17 +120,18 @@ impl<'tcx> TypeVariableTable<'tcx> { } } - pub fn default(&self, vid: ty::TyVid) -> Option> { - match &self.values.get(vid.index as usize).value { - &Known(_) => None, - &Bounded { ref default, .. } => default.clone() - } - } - + /// Returns the diverges flag given when `vid` was created. + /// + /// Note that this function does not return care whether + /// `vid` has been unified with something else or not. pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool { self.values.get(vid.index as usize).diverging } + /// Returns the origin that was given when `vid` was created. + /// + /// Note that this function does not return care whether + /// `vid` has been unified with something else or not. pub fn var_origin(&self, vid: ty::TyVid) -> &TypeVariableOrigin { &self.values.get(vid.index as usize).origin } @@ -137,8 +140,8 @@ impl<'tcx> TypeVariableTable<'tcx> { /// /// Precondition: neither `a` nor `b` are known. 
pub fn equate(&mut self, a: ty::TyVid, b: ty::TyVid) { - debug_assert!(self.probe(a).is_none()); - debug_assert!(self.probe(b).is_none()); + debug_assert!(self.probe(a).is_unknown()); + debug_assert!(self.probe(b).is_unknown()); self.eq_relations.union(a, b); self.sub_relations.union(a, b); } @@ -147,8 +150,8 @@ impl<'tcx> TypeVariableTable<'tcx> { /// /// Precondition: neither `a` nor `b` are known. pub fn sub(&mut self, a: ty::TyVid, b: ty::TyVid) { - debug_assert!(self.probe(a).is_none()); - debug_assert!(self.probe(b).is_none()); + debug_assert!(self.probe(a).is_unknown()); + debug_assert!(self.probe(b).is_unknown()); self.sub_relations.union(a, b); } @@ -157,41 +160,50 @@ impl<'tcx> TypeVariableTable<'tcx> { /// Precondition: `vid` must not have been previously instantiated. pub fn instantiate(&mut self, vid: ty::TyVid, ty: Ty<'tcx>) { let vid = self.root_var(vid); - debug_assert!(self.probe_root(vid).is_none()); + debug_assert!(self.probe(vid).is_unknown()); + debug_assert!(self.eq_relations.probe_value(vid).is_unknown(), + "instantiating type variable `{:?}` twice: new-value = {:?}, old-value={:?}", + vid, ty, self.eq_relations.probe_value(vid)); + self.eq_relations.union_value(vid, TypeVariableValue::Known { value: ty }); - let old_value = { - let vid_data = &mut self.values[vid.index as usize]; - mem::replace(&mut vid_data.value, TypeVariableValue::Known(ty)) - }; - - match old_value { - TypeVariableValue::Bounded { default } => { - self.values.record(Instantiate { vid: vid, default: default }); - } - TypeVariableValue::Known(old_ty) => { - bug!("instantiating type variable `{:?}` twice: new-value = {:?}, old-value={:?}", - vid, ty, old_ty) - } - } + // Hack: we only need this so that `types_escaping_snapshot` + // can see what has been unified; see the Delegate impl for + // more details. + self.values.record(Instantiate { vid: vid }); } + /// Creates a new type variable. 
+ /// + /// - `diverging`: indicates if this is a "diverging" type + /// variable, e.g. one created as the type of a `return` + /// expression. The code in this module doesn't care if a + /// variable is diverging, but the main Rust type-checker will + /// sometimes "unify" such variables with the `!` or `()` types. + /// - `origin`: indicates *why* the type variable was created. + /// The code in this module doesn't care, but it can be useful + /// for improving error messages. pub fn new_var(&mut self, + universe: ty::UniverseIndex, diverging: bool, - origin: TypeVariableOrigin, - default: Option>,) -> ty::TyVid { - debug!("new_var(diverging={:?}, origin={:?})", diverging, origin); - self.eq_relations.new_key(()); - self.sub_relations.new_key(()); + origin: TypeVariableOrigin) + -> ty::TyVid { + let eq_key = self.eq_relations.new_key(TypeVariableValue::Unknown { universe }); + + let sub_key = self.sub_relations.new_key(()); + assert_eq!(eq_key.vid, sub_key); + let index = self.values.push(TypeVariableData { - value: Bounded { default: default }, origin, diverging, }); - let v = ty::TyVid { index: index as u32 }; - debug!("new_var: diverging={:?} index={:?}", diverging, v); - v + assert_eq!(eq_key.vid.index, index as u32); + + debug!("new_var(index={:?}, diverging={:?}, origin={:?}", eq_key.vid, diverging, origin); + + eq_key.vid } + /// Returns the number of type variables created thus far. pub fn num_vars(&self) -> usize { self.values.len() } @@ -202,7 +214,7 @@ impl<'tcx> TypeVariableTable<'tcx> { /// algorithm), so `root_var(a) == root_var(b)` implies that `a == /// b` (transitively). 
pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid { - self.eq_relations.find(vid) + self.eq_relations.find(vid).vid } /// Returns the "root" variable of `vid` in the `sub_relations` @@ -222,37 +234,32 @@ impl<'tcx> TypeVariableTable<'tcx> { self.sub_root_var(a) == self.sub_root_var(b) } - pub fn probe(&mut self, vid: ty::TyVid) -> Option> { - let vid = self.root_var(vid); - self.probe_root(vid) - } - - pub fn origin(&self, vid: ty::TyVid) -> TypeVariableOrigin { - self.values.get(vid.index as usize).origin.clone() - } - - /// Retrieves the type of `vid` given that it is currently a root in the unification table - pub fn probe_root(&mut self, vid: ty::TyVid) -> Option> { - debug_assert!(self.root_var(vid) == vid); - match self.values.get(vid.index as usize).value { - Bounded { .. } => None, - Known(t) => Some(t) - } + /// Retrieves the type to which `vid` has been instantiated, if + /// any. + pub fn probe(&mut self, vid: ty::TyVid) -> TypeVariableValue<'tcx> { + self.eq_relations.probe_value(vid) } + /// If `t` is a type-inference variable, and it has been + /// instantiated, then return the with which it was + /// instantiated. Otherwise, returns `t`. pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { match t.sty { ty::TyInfer(ty::TyVar(v)) => { match self.probe(v) { - None => t, - Some(u) => u + TypeVariableValue::Unknown { .. } => t, + TypeVariableValue::Known { value } => value, } } _ => t, } } - pub fn snapshot(&mut self) -> Snapshot { + /// Creates a snapshot of the type variable state. This snapshot + /// must later be committed (`commit()`) or rolled back + /// (`rollback_to()`). Nested snapshots are permitted, but must + /// be processed in a stack-like fashion. 
+ pub fn snapshot(&mut self) -> Snapshot<'tcx> { Snapshot { snapshot: self.values.start_snapshot(), eq_snapshot: self.eq_relations.snapshot(), @@ -260,7 +267,10 @@ impl<'tcx> TypeVariableTable<'tcx> { } } - pub fn rollback_to(&mut self, s: Snapshot) { + /// Undoes all changes since the snapshot was created. Any + /// snapshots created since that point must already have been + /// committed or rolled back. + pub fn rollback_to(&mut self, s: Snapshot<'tcx>) { debug!("rollback_to{:?}", { for action in self.values.actions_since_snapshot(&s.snapshot) { match *action { @@ -278,7 +288,11 @@ impl<'tcx> TypeVariableTable<'tcx> { self.sub_relations.rollback_to(sub_snapshot); } - pub fn commit(&mut self, s: Snapshot) { + /// Commits all changes since the snapshot was created, making + /// them permanent (unless this snapshot was created within + /// another snapshot). Any snapshots created since that point + /// must already have been committed or rolled back. + pub fn commit(&mut self, s: Snapshot<'tcx>) { let Snapshot { snapshot, eq_snapshot, sub_snapshot } = s; self.values.commit(snapshot); self.eq_relations.commit(eq_snapshot); @@ -289,7 +303,7 @@ impl<'tcx> TypeVariableTable<'tcx> { /// ty-variables created during the snapshot, and the values /// `{V2}` are the root variables that they were unified with, /// along with their origin. - pub fn types_created_since_snapshot(&mut self, s: &Snapshot) -> TypeVariableMap { + pub fn types_created_since_snapshot(&mut self, s: &Snapshot<'tcx>) -> TypeVariableMap { let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot); actions_since_snapshot @@ -305,16 +319,13 @@ impl<'tcx> TypeVariableTable<'tcx> { .collect() } - pub fn types_escaping_snapshot(&mut self, s: &Snapshot) -> Vec> { - /*! - * Find the set of type variables that existed *before* `s` - * but which have only been unified since `s` started, and - * return the types with which they were unified. 
So if we had - * a type variable `V0`, then we started the snapshot, then we - * created a type variable `V1`, unifed `V0` with `T0`, and - * unified `V1` with `T1`, this function would return `{T0}`. - */ - + /// Find the set of type variables that existed *before* `s` + /// but which have only been unified since `s` started, and + /// return the types with which they were unified. So if we had + /// a type variable `V0`, then we started the snapshot, then we + /// created a type variable `V1`, unifed `V0` with `T0`, and + /// unified `V1` with `T1`, this function would return `{T0}`. + pub fn types_escaping_snapshot(&mut self, s: &Snapshot<'tcx>) -> Vec> { let mut new_elem_threshold = u32::MAX; let mut escaping_types = Vec::new(); let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot); @@ -327,7 +338,7 @@ impl<'tcx> TypeVariableTable<'tcx> { // always be the first one we see). Note that this // action must precede those variables being // specified. - new_elem_threshold = min(new_elem_threshold, index as u32); + new_elem_threshold = cmp::min(new_elem_threshold, index as u32); debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold); } @@ -335,9 +346,9 @@ impl<'tcx> TypeVariableTable<'tcx> { if vid.index < new_elem_threshold { // quick check to see if this variable was // created since the snapshot started or not. - let escaping_type = match self.values.get(vid.index as usize).value { - Bounded { .. } => bug!(), - Known(ty) => ty, + let escaping_type = match self.eq_relations.probe_value(vid) { + TypeVariableValue::Unknown { .. } => bug!(), + TypeVariableValue::Known { value } => value, }; escaping_types.push(escaping_type); } @@ -351,28 +362,104 @@ impl<'tcx> TypeVariableTable<'tcx> { escaping_types } + /// Returns indices of all variables that are not yet + /// instantiated. 
pub fn unsolved_variables(&mut self) -> Vec { (0..self.values.len()) .filter_map(|i| { let vid = ty::TyVid { index: i as u32 }; - if self.probe(vid).is_some() { - None - } else { - Some(vid) + match self.probe(vid) { + TypeVariableValue::Unknown { .. } => Some(vid), + TypeVariableValue::Known { .. } => None, } }) .collect() } } -impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> { - type Value = TypeVariableData<'tcx>; - type Undo = Instantiate<'tcx>; +impl sv::SnapshotVecDelegate for Delegate { + type Value = TypeVariableData; + type Undo = Instantiate; - fn reverse(values: &mut Vec>, action: Instantiate<'tcx>) { - let Instantiate { vid, default } = action; - values[vid.index as usize].value = Bounded { - default, - }; + fn reverse(_values: &mut Vec, _action: Instantiate) { + // We don't actually have to *do* anything to reverse an + // instanation; the value for a variable is stored in the + // `eq_relations` and hence its rollback code will handle + // it. In fact, we could *almost* just remove the + // `SnapshotVec` entirely, except that we would have to + // reproduce *some* of its logic, since we want to know which + // type variables have been instantiated since the snapshot + // was started, so we can implement `types_escaping_snapshot`. + // + // (If we extended the `UnificationTable` to let us see which + // values have been unified and so forth, that might also + // suffice.) } } + +/////////////////////////////////////////////////////////////////////////// + +/// These structs (a newtyped TyVid) are used as the unification key +/// for the `eq_relations`; they carry a `TypeVariableValue` along +/// with them. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)] +struct TyVidEqKey<'tcx> { + vid: ty::TyVid, + + // in the table, we map each ty-vid to one of these: + phantom: PhantomData>, +} + +impl<'tcx> From for TyVidEqKey<'tcx> { + fn from(vid: ty::TyVid) -> Self { + TyVidEqKey { vid, phantom: PhantomData } + } +} + +impl<'tcx> ut::UnifyKey for TyVidEqKey<'tcx> { + type Value = TypeVariableValue<'tcx>; + fn index(&self) -> u32 { self.vid.index } + fn from_index(i: u32) -> Self { TyVidEqKey::from(ty::TyVid { index: i }) } + fn tag() -> &'static str { "TyVidEqKey" } +} + +impl<'tcx> ut::UnifyValue for TypeVariableValue<'tcx> { + type Error = ut::NoError; + + fn unify_values(value1: &Self, value2: &Self) -> Result { + match (value1, value2) { + // We never equate two type variables, both of which + // have known types. Instead, we recursively equate + // those types. + (&TypeVariableValue::Known { .. }, &TypeVariableValue::Known { .. }) => { + bug!("equating two type variables, both of which have known types") + } + + // If one side is known, prefer that one. + (&TypeVariableValue::Known { .. }, &TypeVariableValue::Unknown { .. }) => Ok(*value1), + (&TypeVariableValue::Unknown { .. }, &TypeVariableValue::Known { .. }) => Ok(*value2), + + // If both sides are *unknown*, it hardly matters, does it? + (&TypeVariableValue::Unknown { universe: universe1 }, + &TypeVariableValue::Unknown { universe: universe2 }) => { + // If we unify two unbound variables, ?T and ?U, then whatever + // value they wind up taking (which must be the same value) must + // be nameable by both universes. Therefore, the resulting + // universe is the minimum of the two universes, because that is + // the one which contains the fewest names in scope. + let universe = cmp::min(universe1, universe2); + Ok(TypeVariableValue::Unknown { universe }) + } + } + } +} + +/// Raw `TyVid` are used as the unification key for `sub_relations`; +/// they carry no values. 
+impl ut::UnifyKey for ty::TyVid { + type Value = (); + fn index(&self) -> u32 { self.index } + fn from_index(i: u32) -> ty::TyVid { ty::TyVid { index: i } } + fn tag() -> &'static str { "TyVid" } +} + diff --git a/src/librustc/infer/unify_key.rs b/src/librustc/infer/unify_key.rs index 99b11794cc5b..a1145572b79d 100644 --- a/src/librustc/infer/unify_key.rs +++ b/src/librustc/infer/unify_key.rs @@ -8,9 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use syntax::ast; -use ty::{self, IntVarValue, Ty, TyCtxt}; -use rustc_data_structures::unify::{Combine, UnifyKey}; +use ty::{self, FloatVarValue, IntVarValue, Ty, TyCtxt}; +use rustc_data_structures::unify::{NoError, EqUnifyValue, UnifyKey, UnifyValue}; pub trait ToType { fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>; @@ -20,7 +19,10 @@ impl UnifyKey for ty::IntVid { type Value = Option; fn index(&self) -> u32 { self.index } fn from_index(i: u32) -> ty::IntVid { ty::IntVid { index: i } } - fn tag(_: Option) -> &'static str { "IntVid" } + fn tag() -> &'static str { "IntVid" } +} + +impl EqUnifyValue for IntVarValue { } #[derive(PartialEq, Copy, Clone, Debug)] @@ -31,15 +33,17 @@ pub struct RegionVidKey { pub min_vid: ty::RegionVid } -impl Combine for RegionVidKey { - fn combine(&self, other: &RegionVidKey) -> RegionVidKey { - let min_vid = if self.min_vid.index() < other.min_vid.index() { - self.min_vid +impl UnifyValue for RegionVidKey { + type Error = NoError; + + fn unify_values(value1: &Self, value2: &Self) -> Result { + let min_vid = if value1.min_vid.index() < value2.min_vid.index() { + value1.min_vid } else { - other.min_vid + value2.min_vid }; - RegionVidKey { min_vid: min_vid } + Ok(RegionVidKey { min_vid: min_vid }) } } @@ -47,7 +51,7 @@ impl UnifyKey for ty::RegionVid { type Value = RegionVidKey; fn index(&self) -> u32 { self.0 } fn from_index(i: u32) -> ty::RegionVid { ty::RegionVid(i) } - fn tag(_: Option) -> &'static 
str { "RegionVid" } + fn tag() -> &'static str { "RegionVid" } } impl ToType for IntVarValue { @@ -62,21 +66,17 @@ impl ToType for IntVarValue { // Floating point type keys impl UnifyKey for ty::FloatVid { - type Value = Option; + type Value = Option; fn index(&self) -> u32 { self.index } fn from_index(i: u32) -> ty::FloatVid { ty::FloatVid { index: i } } - fn tag(_: Option) -> &'static str { "FloatVid" } + fn tag() -> &'static str { "FloatVid" } } -impl ToType for ast::FloatTy { +impl EqUnifyValue for FloatVarValue { +} + +impl ToType for FloatVarValue { fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { - tcx.mk_mach_float(*self) + tcx.mk_mach_float(self.0) } } - -impl UnifyKey for ty::TyVid { - type Value = (); - fn index(&self) -> u32 { self.index } - fn from_index(i: u32) -> ty::TyVid { ty::TyVid { index: i } } - fn tag(_: Option) -> &'static str { "TyVid" } -} diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 075ee0b8c7c2..b6f4bd6dc408 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -28,8 +28,9 @@ //! this code handles low-level equality and subtyping operations. The //! type check pass in the compiler is found in the `librustc_typeck` crate. //! -//! For a deeper explanation of how the compiler works and is -//! organized, see the README.md file in this directory. +//! For more information about how rustc works, see the [rustc guide]. +//! +//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ //! //! # Note //! 
@@ -38,36 +39,41 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] #![feature(box_patterns)] #![feature(box_syntax)] -#![feature(conservative_impl_trait)] #![feature(const_fn)] #![feature(core_intrinsics)] #![feature(drain_filter)] -#![feature(dyn_trait)] -#![feature(from_ref)] -#![feature(fs_read_write)] -#![feature(i128)] -#![feature(i128_type)] -#![feature(inclusive_range)] -#![feature(inclusive_range_syntax)] +#![feature(iterator_find_map)] #![cfg_attr(windows, feature(libc))] #![feature(macro_vis_matcher)] -#![feature(match_default_bindings)] #![feature(never_type)] -#![feature(nonzero)] +#![feature(exhaustive_patterns)] +#![feature(extern_types)] +#![cfg_attr(not(stage0), feature(nll))] +#![feature(non_exhaustive)] +#![feature(proc_macro_internals)] #![feature(quote)] +#![feature(optin_builtin_traits)] #![feature(refcell_replace_swap)] #![feature(rustc_diagnostic_macros)] #![feature(slice_patterns)] +#![feature(slice_sort_by_cached_key)] #![feature(specialization)] #![feature(unboxed_closures)] -#![feature(underscore_lifetimes)] #![feature(trace_macros)] +#![feature(trusted_len)] +#![feature(vec_remove_item)] #![feature(catch_expr)] +#![feature(step_trait)] +#![feature(integer_atomics)] #![feature(test)] +#![cfg_attr(not(stage0), feature(impl_header_lifetime_elision))] +#![feature(in_band_lifetimes)] +#![feature(macro_at_most_once_rep)] +#![feature(crate_in_paths)] +#![feature(crate_visibility_modifier)] #![recursion_limit="512"] @@ -77,17 +83,25 @@ extern crate core; extern crate fmt_macros; extern crate getopts; extern crate graphviz; +#[macro_use] extern crate lazy_static; +#[macro_use] extern crate scoped_tls; #[cfg(windows)] extern crate libc; -extern crate rustc_back; +extern crate polonius_engine; +extern crate rustc_target; #[macro_use] extern crate rustc_data_structures; extern 
crate serialize; -extern crate rustc_const_math; +extern crate parking_lot; extern crate rustc_errors as errors; +extern crate rustc_rayon as rayon; +extern crate rustc_rayon_core as rayon_core; #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; extern crate jobserver; +extern crate proc_macro; +extern crate chalk_engine; +extern crate rustc_fs_util; extern crate serialize as rustc_serialize; // used by deriving @@ -120,15 +134,14 @@ pub mod middle { pub mod allocator; pub mod borrowck; pub mod expr_use_visitor; - pub mod const_val; pub mod cstore; - pub mod dataflow; pub mod dead; pub mod dependency_format; pub mod entry; pub mod exported_symbols; pub mod free_region; pub mod intrinsicck; + pub mod lib_features; pub mod lang_items; pub mod liveness; pub mod mem_categorization; @@ -147,10 +160,13 @@ pub mod traits; pub mod ty; pub mod util { + pub mod captures; pub mod common; pub mod ppaux; pub mod nodemap; - pub mod fs; + pub mod time_graph; + pub mod profiling; + pub mod bug; } // A private module so that macro-expanded idents like @@ -176,5 +192,4 @@ fn noop() { // Build the diagnostics array at the end so that the metadata includes error use sites. -#[cfg(not(stage0))] // remove after the next snapshot __build_diagnostic_array! { librustc, DIAGNOSTICS } diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 143d2c2ea28b..590b59568c4a 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -14,11 +14,21 @@ //! compiler code, rather than using their own custom pass. Those //! lints are all available in `rustc_lint::builtin`. +use errors::{Applicability, DiagnosticBuilder}; use lint::{LintPass, LateLintPass, LintArray}; +use session::Session; +use syntax::ast; +use syntax::codemap::Span; + +declare_lint! { + pub EXCEEDING_BITSHIFTS, + Deny, + "shift exceeds the type's number of bits" +} declare_lint! 
{ pub CONST_ERR, - Warn, + Deny, "constant evaluation detected erroneous expression" } @@ -67,7 +77,8 @@ declare_lint! { declare_lint! { pub UNREACHABLE_CODE, Warn, - "detects unreachable code paths" + "detects unreachable code paths", + report_in_external_macro: true } declare_lint! { @@ -91,7 +102,7 @@ declare_lint! { declare_lint! { pub UNUSED_FEATURES, Warn, - "unused or unknown features found in crate-level #[feature] directives" + "unused features found in crate-level #[feature] directives" } declare_lint! { @@ -142,13 +153,6 @@ declare_lint! { "lints that have been renamed or removed" } -declare_lint! { - pub RESOLVE_TRAIT_ON_DEFAULTED_UNIT, - Deny, - "attempt to resolve a trait on an expression whose type cannot be inferred but which \ - currently defaults to ()" -} - declare_lint! { pub SAFE_EXTERN_STATICS, Deny, @@ -174,12 +178,6 @@ declare_lint! { not named `mod.rs`" } -declare_lint! { - pub LEGACY_IMPORTS, - Deny, - "detects names that resolve to ambiguous glob imports with RFC 1560" -} - declare_lint! { pub LEGACY_CONSTRUCTOR_VISIBILITY, Deny, @@ -206,14 +204,21 @@ declare_lint! { declare_lint! { pub INCOHERENT_FUNDAMENTAL_IMPLS, - Warn, + Deny, "potentially-conflicting impls were erroneously allowed" } +declare_lint! { + pub BAD_REPR, + Warn, + "detects incorrect use of `repr` attribute" +} + declare_lint! { pub DEPRECATED, Warn, - "detects use of deprecated items" + "detects use of deprecated items", + report_in_external_macro: true } declare_lint! { @@ -229,15 +234,15 @@ declare_lint! { } declare_lint! { - pub COERCE_NEVER, - Deny, - "detect coercion to !" + pub SINGLE_USE_LIFETIMES, + Allow, + "detects lifetime parameters that are only used once" } declare_lint! { - pub SINGLE_USE_LIFETIME, + pub UNUSED_LIFETIMES, Allow, - "detects single use lifetimes" + "detects lifetime parameters that are never used" } declare_lint! { @@ -246,6 +251,95 @@ declare_lint! { "raw pointer to an inference variable" } +declare_lint! 
{ + pub ELIDED_LIFETIMES_IN_PATHS, + Allow, + "hidden lifetime parameters in types are deprecated" +} + +declare_lint! { + pub BARE_TRAIT_OBJECTS, + Allow, + "suggest using `dyn Trait` for trait objects" +} + +declare_lint! { + pub ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, + Allow, + "fully qualified paths that start with a module name \ + instead of `crate`, `self`, or an extern crate name" +} + +declare_lint! { + pub ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, + Warn, + "floating-point literals cannot be used in patterns" +} + +declare_lint! { + pub UNSTABLE_NAME_COLLISIONS, + Warn, + "detects name collision with an existing but unstable method" +} + +declare_lint! { + pub IRREFUTABLE_LET_PATTERNS, + Deny, + "detects irrefutable patterns in if-let and while-let statements" +} + +declare_lint! { + pub UNUSED_LABELS, + Allow, + "detects labels that are never used" +} + +declare_lint! { + pub DUPLICATE_ASSOCIATED_TYPE_BINDINGS, + Warn, + "warns about duplicate associated type bindings in generics" +} + +declare_lint! { + pub DUPLICATE_MACRO_EXPORTS, + Deny, + "detects duplicate macro exports" +} + +declare_lint! { + pub INTRA_DOC_LINK_RESOLUTION_FAILURE, + Warn, + "warn about documentation intra links resolution failure" +} + +declare_lint! { + pub WHERE_CLAUSES_OBJECT_SAFETY, + Warn, + "checks the object safety of where clauses" +} + +declare_lint! { + pub PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, + Warn, + "detects proc macro derives using inaccessible names from parent modules" +} + +declare_lint! { + pub MACRO_USE_EXTERN_CRATE, + Allow, + "the `#[macro_use]` attribute is now deprecated in favor of using macros \ + via the module system" +} + +/// Some lints that are buffered from `libsyntax`. See `syntax::early_buffered_lints`. +pub mod parser { + declare_lint! { + pub QUESTION_MARK_MACRO_SEP, + Allow, + "detects the use of `?` as a macro separator" + } +} + /// Does nothing as a lint pass, but registers some `Lint`s /// which are used by other parts of the compiler. 
#[derive(Copy, Clone)] @@ -254,6 +348,8 @@ pub struct HardwiredLints; impl LintPass for HardwiredLints { fn get_lints(&self) -> LintArray { lint_array!( + ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, + EXCEEDING_BITSHIFTS, UNUSED_IMPORTS, UNUSED_EXTERN_CRATES, UNUSED_QUALIFICATIONS, @@ -275,12 +371,10 @@ impl LintPass for HardwiredLints { INVALID_TYPE_PARAM_DEFAULT, CONST_ERR, RENAMED_AND_REMOVED_LINTS, - RESOLVE_TRAIT_ON_DEFAULTED_UNIT, SAFE_EXTERN_STATICS, SAFE_PACKED_BORROWS, PATTERNS_IN_FNS_WITHOUT_BODY, LEGACY_DIRECTORY_OWNERSHIP, - LEGACY_IMPORTS, LEGACY_CONSTRUCTOR_VISIBILITY, MISSING_FRAGMENT_SPECIFIER, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, @@ -289,11 +383,113 @@ impl LintPass for HardwiredLints { DEPRECATED, UNUSED_UNSAFE, UNUSED_MUT, - COERCE_NEVER, - SINGLE_USE_LIFETIME, - TYVAR_BEHIND_RAW_POINTER + SINGLE_USE_LIFETIMES, + UNUSED_LIFETIMES, + UNUSED_LABELS, + TYVAR_BEHIND_RAW_POINTER, + ELIDED_LIFETIMES_IN_PATHS, + BARE_TRAIT_OBJECTS, + ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, + UNSTABLE_NAME_COLLISIONS, + IRREFUTABLE_LET_PATTERNS, + DUPLICATE_ASSOCIATED_TYPE_BINDINGS, + DUPLICATE_MACRO_EXPORTS, + INTRA_DOC_LINK_RESOLUTION_FAILURE, + WHERE_CLAUSES_OBJECT_SAFETY, + PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, + MACRO_USE_EXTERN_CRATE, + parser::QUESTION_MARK_MACRO_SEP, ) } } +// this could be a closure, but then implementing derive traits +// becomes hacky (and it gets allocated) +#[derive(PartialEq, RustcEncodable, RustcDecodable, Debug)] +pub enum BuiltinLintDiagnostics { + Normal, + BareTraitObject(Span, /* is_global */ bool), + AbsPathWithModule(Span), + DuplicatedMacroExports(ast::Ident, Span, Span), + ProcMacroDeriveResolutionFallback(Span), + ElidedLifetimesInPaths(usize, Span, bool, Span, String), +} + +impl BuiltinLintDiagnostics { + pub fn run(self, sess: &Session, db: &mut DiagnosticBuilder) { + match self { + BuiltinLintDiagnostics::Normal => (), + BuiltinLintDiagnostics::BareTraitObject(span, is_global) => { + let (sugg, app) = match 
sess.codemap().span_to_snippet(span) { + Ok(ref s) if is_global => (format!("dyn ({})", s), + Applicability::MachineApplicable), + Ok(s) => (format!("dyn {}", s), Applicability::MachineApplicable), + Err(_) => ("dyn ".to_string(), Applicability::HasPlaceholders) + }; + db.span_suggestion_with_applicability(span, "use `dyn`", sugg, app); + } + BuiltinLintDiagnostics::AbsPathWithModule(span) => { + let (sugg, app) = match sess.codemap().span_to_snippet(span) { + Ok(ref s) => { + // FIXME(Manishearth) ideally the emitting code + // can tell us whether or not this is global + let opt_colon = if s.trim_left().starts_with("::") { + "" + } else { + "::" + }; + + (format!("crate{}{}", opt_colon, s), Applicability::MachineApplicable) + } + Err(_) => ("crate::".to_string(), Applicability::HasPlaceholders) + }; + db.span_suggestion_with_applicability(span, "use `crate`", sugg, app); + } + BuiltinLintDiagnostics::DuplicatedMacroExports(ident, earlier_span, later_span) => { + db.span_label(later_span, format!("`{}` already exported", ident)); + db.span_note(earlier_span, "previous macro export is now shadowed"); + } + BuiltinLintDiagnostics::ProcMacroDeriveResolutionFallback(span) => { + db.span_label(span, "names from parent modules are not \ + accessible without an explicit import"); + } + BuiltinLintDiagnostics::ElidedLifetimesInPaths( + n, path_span, incl_angl_brckt, insertion_span, anon_lts + ) => { + let (replace_span, suggestion) = if incl_angl_brckt { + (insertion_span, anon_lts) + } else { + // When possible, prefer a suggestion that replaces the whole + // `Path` expression with `Path<'_, T>`, rather than inserting `'_, ` + // at a point (which makes for an ugly/confusing label) + if let Ok(snippet) = sess.codemap().span_to_snippet(path_span) { + // But our spans can get out of whack due to macros; if the place we think + // we want to insert `'_` isn't even within the path expression's span, we + // should bail out of making any suggestion rather than panicking on a 
+ // subtract-with-overflow or string-slice-out-out-bounds (!) + // FIXME: can we do better? + if insertion_span.lo().0 < path_span.lo().0 { + return; + } + let insertion_index = (insertion_span.lo().0 - path_span.lo().0) as usize; + if insertion_index > snippet.len() { + return; + } + let (before, after) = snippet.split_at(insertion_index); + (path_span, format!("{}{}{}", before, anon_lts, after)) + } else { + (insertion_span, anon_lts) + } + }; + db.span_suggestion_with_applicability( + replace_span, + &format!("indicate the anonymous lifetime{}", if n >= 2 { "s" } else { "" }), + suggestion, + Applicability::MachineApplicable + ); + } + } + } +} + impl<'a, 'tcx> LateLintPass<'a, 'tcx> for HardwiredLints {} diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index 5336c1944e8c..315ed38ad077 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -10,15 +10,15 @@ //! Implementation of lint checking. //! -//! The lint checking is mostly consolidated into one pass which runs just -//! before translation to LLVM bytecode. Throughout compilation, lint warnings +//! The lint checking is mostly consolidated into one pass which runs +//! after all other analyses. Throughout compilation, lint warnings //! can be added via the `add_lint` method on the Session structure. This //! requires a span and an id of the node that the lint is being added to. The //! lint isn't actually emitted at that time because it is unknown what the //! actual lint level at that location is. //! -//! To actually emit lint warnings/errors, a separate pass is used just before -//! translation. A context keeps track of the current state of all lint levels. +//! To actually emit lint warnings/errors, a separate pass is used. +//! A context keeps track of the current state of all lint levels. //! Upon entering a node of the ast which can modify the lint settings, the //! previous lint state is pushed onto a stack and the ast is then recursed //! upon. 
As the ast is traversed, this keeps track of the current lint level @@ -27,21 +27,22 @@ use self::TargetLint::*; use std::slice; +use rustc_data_structures::sync::ReadGuard; use lint::{EarlyLintPassObject, LateLintPassObject}; use lint::{Level, Lint, LintId, LintPass, LintBuffer}; +use lint::builtin::BuiltinLintDiagnostics; use lint::levels::{LintLevelSets, LintLevelsBuilder}; use middle::privacy::AccessLevels; use rustc_serialize::{Decoder, Decodable, Encoder, Encodable}; use session::{config, early_error, Session}; -use traits::Reveal; use ty::{self, TyCtxt, Ty}; use ty::layout::{LayoutError, LayoutOf, TyLayout}; use util::nodemap::FxHashMap; use std::default::Default as StdDefault; -use std::cell::{Ref, RefCell}; use syntax::ast; -use syntax_pos::{MultiSpan, Span}; +use syntax::edition; +use syntax_pos::{MultiSpan, Span, symbol::LocalInternedString}; use errors::DiagnosticBuilder; use hir; use hir::def_id::LOCAL_CRATE; @@ -58,8 +59,8 @@ pub struct LintStore { lints: Vec<(&'static Lint, bool)>, /// Trait objects for each lint pass. - /// This is only `None` while performing a lint pass. See the definition - /// of `LintSession::new`. + /// This is only `None` while performing a lint pass. + pre_expansion_passes: Option>, early_passes: Option>, late_passes: Option>, @@ -77,7 +78,7 @@ pub struct LintStore { pub struct LintSession<'a, PassObject> { /// Reference to the store of registered lints. - lints: Ref<'a, LintStore>, + lints: ReadGuard<'a, LintStore>, /// Trait objects for each lint pass. passes: Option>, @@ -92,6 +93,7 @@ pub struct BufferedEarlyLint { pub ast_id: ast::NodeId, pub span: MultiSpan, pub msg: String, + pub diagnostic: BuiltinLintDiagnostics, } /// Extra information for a future incompatibility lint. See the call @@ -99,7 +101,11 @@ pub struct BufferedEarlyLint { /// guidelines. 
pub struct FutureIncompatibleInfo { pub id: LintId, - pub reference: &'static str // e.g., a URL for an issue/PR/RFC or error code + /// e.g., a URL for an issue/PR/RFC or error code + pub reference: &'static str, + /// If this is an edition fixing lint, the edition in which + /// this lint becomes obsolete + pub edition: Option, } /// The target of the `by_name` map, which accounts for renaming/deprecation. @@ -125,14 +131,21 @@ pub enum CheckLintNameResult<'a> { /// Lint doesn't exist NoLint, /// The lint is either renamed or removed. This is the warning - /// message. - Warning(String), + /// message, and an optional new name (`None` if removed). + Warning(String, Option), + /// The lint is from a tool. If the Option is None, then either + /// the lint does not exist in the tool or the code was not + /// compiled with the tool and therefore the lint was never + /// added to the `LintStore`. Otherwise the `LintId` will be + /// returned as if it where a rustc lint. + Tool(Option<&'a [LintId]>), } impl LintStore { pub fn new() -> LintStore { LintStore { lints: vec![], + pre_expansion_passes: Some(vec![]), early_passes: Some(vec![]), late_passes: Some(vec![]), by_name: FxHashMap(), @@ -159,6 +172,15 @@ impl LintStore { self.early_passes.as_mut().unwrap().push(pass); } + pub fn register_pre_expansion_pass( + &mut self, + sess: Option<&Session>, + pass: EarlyLintPassObject, + ) { + self.push_pass(sess, false, &pass); + self.pre_expansion_passes.as_mut().unwrap().push(pass); + } + pub fn register_late_pass(&mut self, sess: Option<&Session>, from_plugin: bool, @@ -172,10 +194,10 @@ impl LintStore { sess: Option<&Session>, from_plugin: bool, pass: &Box

) { - for &lint in pass.get_lints() { - self.lints.push((*lint, from_plugin)); + for lint in pass.get_lints() { + self.lints.push((lint, from_plugin)); - let id = LintId::of(*lint); + let id = LintId::of(lint); if self.by_name.insert(lint.name_lower(), Id(id)).is_some() { let msg = format!("duplicate specification of lint {}", lint.name_lower()); match (sess, from_plugin) { @@ -194,11 +216,24 @@ impl LintStore { pub fn register_future_incompatible(&mut self, sess: Option<&Session>, lints: Vec) { - let ids = lints.iter().map(|f| f.id).collect(); - self.register_group(sess, false, "future_incompatible", ids); - for info in lints { - self.future_incompatible.insert(info.id, info); + + for edition in edition::ALL_EDITIONS { + let lints = lints.iter().filter(|f| f.edition == Some(*edition)).map(|f| f.id) + .collect::>(); + if !lints.is_empty() { + self.register_group(sess, false, edition.lint_name(), lints) + } } + + let mut future_incompatible = Vec::with_capacity(lints.len()); + for lint in lints { + future_incompatible.push(lint.id); + self.future_incompatible.insert(lint.id, lint); + } + + self.register_group(sess, false, "future_incompatible", future_incompatible); + + } pub fn future_incompatible(&self, id: LintId) -> Option<&FutureIncompatibleInfo> { @@ -259,14 +294,15 @@ impl LintStore { sess: &Session, lint_name: &str, level: Level) { - let db = match self.check_lint_name(lint_name) { + let db = match self.check_lint_name(lint_name, None) { CheckLintNameResult::Ok(_) => None, - CheckLintNameResult::Warning(ref msg) => { + CheckLintNameResult::Warning(ref msg, _) => { Some(sess.struct_warn(msg)) }, CheckLintNameResult::NoLint => { Some(struct_err!(sess, E0602, "unknown lint: `{}`", lint_name)) } + CheckLintNameResult::Tool(_) => unreachable!(), }; if let Some(mut db) = db { @@ -290,51 +326,46 @@ impl LintStore { /// it emits non-fatal warnings and there are *two* lint passes that /// inspect attributes, this is only run from the late pass to avoid /// printing 
duplicate warnings. - pub fn check_lint_name(&self, lint_name: &str) -> CheckLintNameResult { - match self.by_name.get(lint_name) { - Some(&Renamed(ref new_name, _)) => { - CheckLintNameResult::Warning( - format!("lint {} has been renamed to {}", lint_name, new_name) - ) - }, - Some(&Removed(ref reason)) => { - CheckLintNameResult::Warning( - format!("lint {} has been removed: {}", lint_name, reason) - ) - }, - None => { - match self.lint_groups.get(lint_name) { - None => CheckLintNameResult::NoLint, - Some(ids) => CheckLintNameResult::Ok(&ids.0), - } + pub fn check_lint_name( + &self, + lint_name: &str, + tool_name: Option, + ) -> CheckLintNameResult { + let complete_name = if let Some(tool_name) = tool_name { + format!("{}::{}", tool_name, lint_name) + } else { + lint_name.to_string() + }; + if let Some(_) = tool_name { + match self.by_name.get(&complete_name) { + None => match self.lint_groups.get(&*complete_name) { + None => return CheckLintNameResult::Tool(None), + Some(ids) => return CheckLintNameResult::Tool(Some(&ids.0)), + }, + Some(&Id(ref id)) => return CheckLintNameResult::Tool(Some(slice::from_ref(id))), + // If the lint was registered as removed or renamed by the lint tool, we don't need + // to treat tool_lints and rustc lints different and can use the code below. 
+ _ => {} } + } + match self.by_name.get(&complete_name) { + Some(&Renamed(ref new_name, _)) => CheckLintNameResult::Warning( + format!("lint `{}` has been renamed to `{}`", lint_name, new_name), + Some(new_name.to_owned()), + ), + Some(&Removed(ref reason)) => CheckLintNameResult::Warning( + format!("lint `{}` has been removed: `{}`", lint_name, reason), + None, + ), + None => match self.lint_groups.get(&*complete_name) { + None => CheckLintNameResult::NoLint, + Some(ids) => CheckLintNameResult::Ok(&ids.0), + }, Some(&Id(ref id)) => CheckLintNameResult::Ok(slice::from_ref(id)), } } } -impl<'a, PassObject: LintPassObject> LintSession<'a, PassObject> { - /// Creates a new `LintSession`, by moving out the `LintStore`'s initial - /// lint levels and pass objects. These can be restored using the `restore` - /// method. - fn new(store: &'a RefCell) -> LintSession<'a, PassObject> { - let mut s = store.borrow_mut(); - let passes = PassObject::take_passes(&mut *s); - drop(s); - LintSession { - lints: store.borrow(), - passes, - } - } - - /// Restores the levels back to the original lint store. - fn restore(self, store: &RefCell) { - drop(self.lints); - let mut s = store.borrow_mut(); - PassObject::restore_passes(&mut *s, self.passes); - } -} - /// Context for lint checking after type checking. pub struct LateContext<'a, 'tcx: 'a> { /// Type context we're checking in. @@ -376,7 +407,7 @@ pub struct EarlyContext<'a> { } /// Convenience macro for calling a `LintPass` method on every pass in the context. -macro_rules! run_lints { ($cx:expr, $f:ident, $ps:ident, $($args:expr),*) => ({ +macro_rules! run_lints { ($cx:expr, $f:ident, $($args:expr),*) => ({ // Move the vector of passes out of `$cx` so that we can // iterate over it mutably while passing `$cx` to the methods. let mut passes = $cx.lint_sess_mut().passes.take().unwrap(); @@ -386,30 +417,11 @@ macro_rules! 
run_lints { ($cx:expr, $f:ident, $ps:ident, $($args:expr),*) => ({ $cx.lint_sess_mut().passes = Some(passes); }) } -pub trait LintPassObject: Sized { - fn take_passes(store: &mut LintStore) -> Option>; - fn restore_passes(store: &mut LintStore, passes: Option>); -} +pub trait LintPassObject: Sized {} -impl LintPassObject for EarlyLintPassObject { - fn take_passes(store: &mut LintStore) -> Option> { - store.early_passes.take() - } +impl LintPassObject for EarlyLintPassObject {} - fn restore_passes(store: &mut LintStore, passes: Option>) { - store.early_passes = passes; - } -} - -impl LintPassObject for LateLintPassObject { - fn take_passes(store: &mut LintStore) -> Option> { - store.late_passes.take() - } - - fn restore_passes(store: &mut LintStore, passes: Option>) { - store.late_passes = passes; - } -} +impl LintPassObject for LateLintPassObject {} pub trait LintContext<'tcx>: Sized { @@ -429,6 +441,16 @@ pub trait LintContext<'tcx>: Sized { self.lookup(lint, span, msg).emit(); } + fn lookup_and_emit_with_diagnostics>(&self, + lint: &'static Lint, + span: Option, + msg: &str, + diagnostic: BuiltinLintDiagnostics) { + let mut db = self.lookup(lint, span, msg); + diagnostic.run(self.sess(), &mut db); + db.emit(); + } + fn lookup>(&self, lint: &'static Lint, span: Option, @@ -486,22 +508,30 @@ pub trait LintContext<'tcx>: Sized { impl<'a> EarlyContext<'a> { - fn new(sess: &'a Session, - krate: &'a ast::Crate) -> EarlyContext<'a> { + fn new( + sess: &'a Session, + krate: &'a ast::Crate, + passes: Option>, + buffered: LintBuffer, + ) -> EarlyContext<'a> { EarlyContext { sess, krate, - lint_sess: LintSession::new(&sess.lint_store), + lint_sess: LintSession { + lints: sess.lint_store.borrow(), + passes, + }, builder: LintLevelSets::builder(sess), - buffered: sess.buffered_lints.borrow_mut().take().unwrap(), + buffered, } } fn check_id(&mut self, id: ast::NodeId) { for early_lint in self.buffered.take(id) { - self.lookup_and_emit(early_lint.lint_id.lint, - 
Some(early_lint.span.clone()), - &early_lint.msg); + self.lookup_and_emit_with_diagnostics(early_lint.lint_id.lint, + Some(early_lint.span.clone()), + &early_lint.msg, + early_lint.diagnostic); } } } @@ -528,12 +558,12 @@ impl<'a, 'tcx> LintContext<'tcx> for LateContext<'a, 'tcx> { fn enter_attrs(&mut self, attrs: &'tcx [ast::Attribute]) { debug!("late context: enter_attrs({:?})", attrs); - run_lints!(self, enter_lint_attrs, late_passes, attrs); + run_lints!(self, enter_lint_attrs, attrs); } fn exit_attrs(&mut self, attrs: &'tcx [ast::Attribute]) { debug!("late context: exit_attrs({:?})", attrs); - run_lints!(self, exit_lint_attrs, late_passes, attrs); + run_lints!(self, exit_lint_attrs, attrs); } fn lookup>(&self, @@ -585,12 +615,12 @@ impl<'a> LintContext<'a> for EarlyContext<'a> { fn enter_attrs(&mut self, attrs: &'a [ast::Attribute]) { debug!("early context: enter_attrs({:?})", attrs); - run_lints!(self, enter_lint_attrs, early_passes, attrs); + run_lints!(self, enter_lint_attrs, attrs); } fn exit_attrs(&mut self, attrs: &'a [ast::Attribute]) { debug!("early context: exit_attrs({:?})", attrs); - run_lints!(self, exit_lint_attrs, early_passes, attrs); + run_lints!(self, exit_lint_attrs, attrs); } fn lookup>(&self, @@ -625,13 +655,17 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> { f(self); self.param_env = old_param_env; } + pub fn current_lint_root(&self) -> ast::NodeId { + self.last_ast_node_with_lint_attrs + } } -impl<'a, 'tcx> LayoutOf> for &'a LateContext<'a, 'tcx> { +impl<'a, 'tcx> LayoutOf for &'a LateContext<'a, 'tcx> { + type Ty = Ty<'tcx>; type TyLayout = Result, LayoutError<'tcx>>; fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - (self.tcx, self.param_env.reveal_all()).layout_of(ty) + self.tcx.layout_of(self.param_env.and(ty)) } } @@ -652,9 +686,9 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { } fn visit_body(&mut self, body: &'tcx hir::Body) { - run_lints!(self, check_body, late_passes, body); + run_lints!(self, check_body, 
body); hir_visit::walk_body(self, body); - run_lints!(self, check_body_post, late_passes, body); + run_lints!(self, check_body_post, body); } fn visit_item(&mut self, it: &'tcx hir::Item) { @@ -662,9 +696,9 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { self.generics = it.node.generics(); self.with_lint_attrs(it.id, &it.attrs, |cx| { cx.with_param_env(it.id, |cx| { - run_lints!(cx, check_item, late_passes, it); + run_lints!(cx, check_item, it); hir_visit::walk_item(cx, it); - run_lints!(cx, check_item_post, late_passes, it); + run_lints!(cx, check_item_post, it); }); }); self.generics = generics; @@ -673,23 +707,23 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem) { self.with_lint_attrs(it.id, &it.attrs, |cx| { cx.with_param_env(it.id, |cx| { - run_lints!(cx, check_foreign_item, late_passes, it); + run_lints!(cx, check_foreign_item, it); hir_visit::walk_foreign_item(cx, it); - run_lints!(cx, check_foreign_item_post, late_passes, it); + run_lints!(cx, check_foreign_item_post, it); }); }) } fn visit_pat(&mut self, p: &'tcx hir::Pat) { - run_lints!(self, check_pat, late_passes, p); + run_lints!(self, check_pat, p); hir_visit::walk_pat(self, p); } fn visit_expr(&mut self, e: &'tcx hir::Expr) { self.with_lint_attrs(e.id, &e.attrs, |cx| { - run_lints!(cx, check_expr, late_passes, e); + run_lints!(cx, check_expr, e); hir_visit::walk_expr(cx, e); - run_lints!(cx, check_expr_post, late_passes, e); + run_lints!(cx, check_expr_post, e); }) } @@ -699,7 +733,7 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { // - local // - expression // so we keep track of lint levels there - run_lints!(self, check_stmt, late_passes, s); + run_lints!(self, check_stmt, s); hir_visit::walk_stmt(self, s); } @@ -710,9 +744,9 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { let old_tables = self.tables; self.tables = self.tcx.body_tables(body_id); let 
body = self.tcx.hir.body(body_id); - run_lints!(self, check_fn, late_passes, fk, decl, body, span, id); + run_lints!(self, check_fn, fk, decl, body, span, id); hir_visit::walk_fn(self, fk, decl, body_id, span, id); - run_lints!(self, check_fn_post, late_passes, fk, decl, body, span, id); + run_lints!(self, check_fn_post, fk, decl, body, span, id); self.tables = old_tables; } @@ -722,14 +756,14 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { g: &'tcx hir::Generics, item_id: ast::NodeId, _: Span) { - run_lints!(self, check_struct_def, late_passes, s, name, g, item_id); + run_lints!(self, check_struct_def, s, name, g, item_id); hir_visit::walk_struct_def(self, s); - run_lints!(self, check_struct_def_post, late_passes, s, name, g, item_id); + run_lints!(self, check_struct_def_post, s, name, g, item_id); } fn visit_struct_field(&mut self, s: &'tcx hir::StructField) { self.with_lint_attrs(s.id, &s.attrs, |cx| { - run_lints!(cx, check_struct_field, late_passes, s); + run_lints!(cx, check_struct_field, s); hir_visit::walk_struct_field(cx, s); }) } @@ -739,68 +773,79 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { g: &'tcx hir::Generics, item_id: ast::NodeId) { self.with_lint_attrs(v.node.data.id(), &v.node.attrs, |cx| { - run_lints!(cx, check_variant, late_passes, v, g); + run_lints!(cx, check_variant, v, g); hir_visit::walk_variant(cx, v, g, item_id); - run_lints!(cx, check_variant_post, late_passes, v, g); + run_lints!(cx, check_variant_post, v, g); }) } fn visit_ty(&mut self, t: &'tcx hir::Ty) { - run_lints!(self, check_ty, late_passes, t); + run_lints!(self, check_ty, t); hir_visit::walk_ty(self, t); } fn visit_name(&mut self, sp: Span, name: ast::Name) { - run_lints!(self, check_name, late_passes, sp, name); + run_lints!(self, check_name, sp, name); } fn visit_mod(&mut self, m: &'tcx hir::Mod, s: Span, n: ast::NodeId) { - run_lints!(self, check_mod, late_passes, m, s, n); + run_lints!(self, check_mod, m, s, n); 
hir_visit::walk_mod(self, m, n); - run_lints!(self, check_mod_post, late_passes, m, s, n); + run_lints!(self, check_mod_post, m, s, n); } fn visit_local(&mut self, l: &'tcx hir::Local) { self.with_lint_attrs(l.id, &l.attrs, |cx| { - run_lints!(cx, check_local, late_passes, l); + run_lints!(cx, check_local, l); hir_visit::walk_local(cx, l); }) } fn visit_block(&mut self, b: &'tcx hir::Block) { - run_lints!(self, check_block, late_passes, b); + run_lints!(self, check_block, b); hir_visit::walk_block(self, b); - run_lints!(self, check_block_post, late_passes, b); + run_lints!(self, check_block_post, b); } fn visit_arm(&mut self, a: &'tcx hir::Arm) { - run_lints!(self, check_arm, late_passes, a); + run_lints!(self, check_arm, a); hir_visit::walk_arm(self, a); } fn visit_decl(&mut self, d: &'tcx hir::Decl) { - run_lints!(self, check_decl, late_passes, d); + run_lints!(self, check_decl, d); hir_visit::walk_decl(self, d); } fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam) { - run_lints!(self, check_generic_param, late_passes, p); + run_lints!(self, check_generic_param, p); hir_visit::walk_generic_param(self, p); } fn visit_generics(&mut self, g: &'tcx hir::Generics) { - run_lints!(self, check_generics, late_passes, g); + run_lints!(self, check_generics, g); hir_visit::walk_generics(self, g); } + fn visit_where_predicate(&mut self, p: &'tcx hir::WherePredicate) { + run_lints!(self, check_where_predicate, p); + hir_visit::walk_where_predicate(self, p); + } + + fn visit_poly_trait_ref(&mut self, t: &'tcx hir::PolyTraitRef, + m: hir::TraitBoundModifier) { + run_lints!(self, check_poly_trait_ref, t, m); + hir_visit::walk_poly_trait_ref(self, t, m); + } + fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { let generics = self.generics.take(); self.generics = Some(&trait_item.generics); self.with_lint_attrs(trait_item.id, &trait_item.attrs, |cx| { cx.with_param_env(trait_item.id, |cx| { - run_lints!(cx, check_trait_item, late_passes, trait_item); + 
run_lints!(cx, check_trait_item, trait_item); hir_visit::walk_trait_item(cx, trait_item); - run_lints!(cx, check_trait_item_post, late_passes, trait_item); + run_lints!(cx, check_trait_item_post, trait_item); }); }); self.generics = generics; @@ -811,71 +856,71 @@ impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { self.generics = Some(&impl_item.generics); self.with_lint_attrs(impl_item.id, &impl_item.attrs, |cx| { cx.with_param_env(impl_item.id, |cx| { - run_lints!(cx, check_impl_item, late_passes, impl_item); + run_lints!(cx, check_impl_item, impl_item); hir_visit::walk_impl_item(cx, impl_item); - run_lints!(cx, check_impl_item_post, late_passes, impl_item); + run_lints!(cx, check_impl_item_post, impl_item); }); }); self.generics = generics; } fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) { - run_lints!(self, check_lifetime, late_passes, lt); + run_lints!(self, check_lifetime, lt); hir_visit::walk_lifetime(self, lt); } - fn visit_path(&mut self, p: &'tcx hir::Path, id: ast::NodeId) { - run_lints!(self, check_path, late_passes, p, id); + fn visit_path(&mut self, p: &'tcx hir::Path, id: hir::HirId) { + run_lints!(self, check_path, p, id); hir_visit::walk_path(self, p); } fn visit_attribute(&mut self, attr: &'tcx ast::Attribute) { - run_lints!(self, check_attribute, late_passes, attr); + run_lints!(self, check_attribute, attr); } } impl<'a> ast_visit::Visitor<'a> for EarlyContext<'a> { fn visit_item(&mut self, it: &'a ast::Item) { self.with_lint_attrs(it.id, &it.attrs, |cx| { - run_lints!(cx, check_item, early_passes, it); + run_lints!(cx, check_item, it); ast_visit::walk_item(cx, it); - run_lints!(cx, check_item_post, early_passes, it); + run_lints!(cx, check_item_post, it); }) } fn visit_foreign_item(&mut self, it: &'a ast::ForeignItem) { self.with_lint_attrs(it.id, &it.attrs, |cx| { - run_lints!(cx, check_foreign_item, early_passes, it); + run_lints!(cx, check_foreign_item, it); ast_visit::walk_foreign_item(cx, it); - run_lints!(cx, 
check_foreign_item_post, early_passes, it); + run_lints!(cx, check_foreign_item_post, it); }) } fn visit_pat(&mut self, p: &'a ast::Pat) { - run_lints!(self, check_pat, early_passes, p); + run_lints!(self, check_pat, p); self.check_id(p.id); ast_visit::walk_pat(self, p); } fn visit_expr(&mut self, e: &'a ast::Expr) { self.with_lint_attrs(e.id, &e.attrs, |cx| { - run_lints!(cx, check_expr, early_passes, e); + run_lints!(cx, check_expr, e); ast_visit::walk_expr(cx, e); }) } fn visit_stmt(&mut self, s: &'a ast::Stmt) { - run_lints!(self, check_stmt, early_passes, s); + run_lints!(self, check_stmt, s); self.check_id(s.id); ast_visit::walk_stmt(self, s); } fn visit_fn(&mut self, fk: ast_visit::FnKind<'a>, decl: &'a ast::FnDecl, span: Span, id: ast::NodeId) { - run_lints!(self, check_fn, early_passes, fk, decl, span, id); + run_lints!(self, check_fn, fk, decl, span, id); self.check_id(id); ast_visit::walk_fn(self, fk, decl, span); - run_lints!(self, check_fn_post, early_passes, fk, decl, span, id); + run_lints!(self, check_fn_post, fk, decl, span, id); } fn visit_variant_data(&mut self, @@ -884,111 +929,126 @@ impl<'a> ast_visit::Visitor<'a> for EarlyContext<'a> { g: &'a ast::Generics, item_id: ast::NodeId, _: Span) { - run_lints!(self, check_struct_def, early_passes, s, ident, g, item_id); + run_lints!(self, check_struct_def, s, ident, g, item_id); self.check_id(s.id()); ast_visit::walk_struct_def(self, s); - run_lints!(self, check_struct_def_post, early_passes, s, ident, g, item_id); + run_lints!(self, check_struct_def_post, s, ident, g, item_id); } fn visit_struct_field(&mut self, s: &'a ast::StructField) { self.with_lint_attrs(s.id, &s.attrs, |cx| { - run_lints!(cx, check_struct_field, early_passes, s); + run_lints!(cx, check_struct_field, s); ast_visit::walk_struct_field(cx, s); }) } fn visit_variant(&mut self, v: &'a ast::Variant, g: &'a ast::Generics, item_id: ast::NodeId) { self.with_lint_attrs(item_id, &v.node.attrs, |cx| { - run_lints!(cx, check_variant, 
early_passes, v, g); + run_lints!(cx, check_variant, v, g); ast_visit::walk_variant(cx, v, g, item_id); - run_lints!(cx, check_variant_post, early_passes, v, g); + run_lints!(cx, check_variant_post, v, g); }) } fn visit_ty(&mut self, t: &'a ast::Ty) { - run_lints!(self, check_ty, early_passes, t); + run_lints!(self, check_ty, t); self.check_id(t.id); ast_visit::walk_ty(self, t); } - fn visit_ident(&mut self, sp: Span, id: ast::Ident) { - run_lints!(self, check_ident, early_passes, sp, id); + fn visit_ident(&mut self, ident: ast::Ident) { + run_lints!(self, check_ident, ident); } fn visit_mod(&mut self, m: &'a ast::Mod, s: Span, _a: &[ast::Attribute], n: ast::NodeId) { - run_lints!(self, check_mod, early_passes, m, s, n); + run_lints!(self, check_mod, m, s, n); self.check_id(n); ast_visit::walk_mod(self, m); - run_lints!(self, check_mod_post, early_passes, m, s, n); + run_lints!(self, check_mod_post, m, s, n); } fn visit_local(&mut self, l: &'a ast::Local) { self.with_lint_attrs(l.id, &l.attrs, |cx| { - run_lints!(cx, check_local, early_passes, l); + run_lints!(cx, check_local, l); ast_visit::walk_local(cx, l); }) } fn visit_block(&mut self, b: &'a ast::Block) { - run_lints!(self, check_block, early_passes, b); + run_lints!(self, check_block, b); self.check_id(b.id); ast_visit::walk_block(self, b); - run_lints!(self, check_block_post, early_passes, b); + run_lints!(self, check_block_post, b); } fn visit_arm(&mut self, a: &'a ast::Arm) { - run_lints!(self, check_arm, early_passes, a); + run_lints!(self, check_arm, a); ast_visit::walk_arm(self, a); } fn visit_expr_post(&mut self, e: &'a ast::Expr) { - run_lints!(self, check_expr_post, early_passes, e); + run_lints!(self, check_expr_post, e); } fn visit_generic_param(&mut self, param: &'a ast::GenericParam) { - run_lints!(self, check_generic_param, early_passes, param); + run_lints!(self, check_generic_param, param); ast_visit::walk_generic_param(self, param); } fn visit_generics(&mut self, g: &'a ast::Generics) { - 
run_lints!(self, check_generics, early_passes, g); + run_lints!(self, check_generics, g); ast_visit::walk_generics(self, g); } + fn visit_where_predicate(&mut self, p: &'a ast::WherePredicate) { + run_lints!(self, check_where_predicate, p); + ast_visit::walk_where_predicate(self, p); + } + + fn visit_poly_trait_ref(&mut self, t: &'a ast::PolyTraitRef, m: &'a ast::TraitBoundModifier) { + run_lints!(self, check_poly_trait_ref, t, m); + ast_visit::walk_poly_trait_ref(self, t, m); + } + fn visit_trait_item(&mut self, trait_item: &'a ast::TraitItem) { self.with_lint_attrs(trait_item.id, &trait_item.attrs, |cx| { - run_lints!(cx, check_trait_item, early_passes, trait_item); + run_lints!(cx, check_trait_item, trait_item); ast_visit::walk_trait_item(cx, trait_item); - run_lints!(cx, check_trait_item_post, early_passes, trait_item); + run_lints!(cx, check_trait_item_post, trait_item); }); } fn visit_impl_item(&mut self, impl_item: &'a ast::ImplItem) { self.with_lint_attrs(impl_item.id, &impl_item.attrs, |cx| { - run_lints!(cx, check_impl_item, early_passes, impl_item); + run_lints!(cx, check_impl_item, impl_item); ast_visit::walk_impl_item(cx, impl_item); - run_lints!(cx, check_impl_item_post, early_passes, impl_item); + run_lints!(cx, check_impl_item_post, impl_item); }); } fn visit_lifetime(&mut self, lt: &'a ast::Lifetime) { - run_lints!(self, check_lifetime, early_passes, lt); + run_lints!(self, check_lifetime, lt); self.check_id(lt.id); } fn visit_path(&mut self, p: &'a ast::Path, id: ast::NodeId) { - run_lints!(self, check_path, early_passes, p, id); + run_lints!(self, check_path, p, id); self.check_id(id); ast_visit::walk_path(self, p); } fn visit_attribute(&mut self, attr: &'a ast::Attribute) { - run_lints!(self, check_attribute, early_passes, attr); + run_lints!(self, check_attribute, attr); } - fn visit_mac_def(&mut self, _mac: &'a ast::MacroDef, id: ast::NodeId) { + fn visit_mac_def(&mut self, mac: &'a ast::MacroDef, id: ast::NodeId) { + run_lints!(self, 
check_mac_def, mac, id); self.check_id(id); } + + fn visit_mac(&mut self, mac: &'ast ast::Mac) { + run_lints!(self, check_mac, mac); + } } @@ -999,54 +1059,92 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); let krate = tcx.hir.krate(); + let passes = tcx.sess.lint_store.borrow_mut().late_passes.take(); - let mut cx = LateContext { - tcx, - tables: &ty::TypeckTables::empty(None), - param_env: ty::ParamEnv::empty(Reveal::UserFacing), - access_levels, - lint_sess: LintSession::new(&tcx.sess.lint_store), - last_ast_node_with_lint_attrs: ast::CRATE_NODE_ID, - generics: None, + let passes = { + let mut cx = LateContext { + tcx, + tables: &ty::TypeckTables::empty(None), + param_env: ty::ParamEnv::empty(), + access_levels, + lint_sess: LintSession { + passes, + lints: tcx.sess.lint_store.borrow(), + }, + last_ast_node_with_lint_attrs: ast::CRATE_NODE_ID, + generics: None, + }; + + // Visit the whole crate. + cx.with_lint_attrs(ast::CRATE_NODE_ID, &krate.attrs, |cx| { + // since the root module isn't visited as an item (because it isn't an + // item), warn for it here. + run_lints!(cx, check_crate, krate); + + hir_visit::walk_crate(cx, krate); + + run_lints!(cx, check_crate_post, krate); + }); + cx.lint_sess.passes }; - // Visit the whole crate. - cx.with_lint_attrs(ast::CRATE_NODE_ID, &krate.attrs, |cx| { - // since the root module isn't visited as an item (because it isn't an - // item), warn for it here. - run_lints!(cx, check_crate, late_passes, krate); - - hir_visit::walk_crate(cx, krate); - - run_lints!(cx, check_crate_post, late_passes, krate); - }); - // Put the lint store levels and passes back in the session. 
- cx.lint_sess.restore(&tcx.sess.lint_store); + tcx.sess.lint_store.borrow_mut().late_passes = passes; } -pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) { - let mut cx = EarlyContext::new(sess, krate); +pub fn check_ast_crate( + sess: &Session, + krate: &ast::Crate, + pre_expansion: bool, +) { + let (passes, buffered) = if pre_expansion { + ( + sess.lint_store.borrow_mut().pre_expansion_passes.take(), + LintBuffer::new(), + ) + } else { + ( + sess.lint_store.borrow_mut().early_passes.take(), + sess.buffered_lints.borrow_mut().take().unwrap(), + ) + }; + let (passes, buffered) = { + let mut cx = EarlyContext::new(sess, krate, passes, buffered); - // Visit the whole crate. - cx.with_lint_attrs(ast::CRATE_NODE_ID, &krate.attrs, |cx| { - // since the root module isn't visited as an item (because it isn't an - // item), warn for it here. - run_lints!(cx, check_crate, early_passes, krate); + // Visit the whole crate. + cx.with_lint_attrs(ast::CRATE_NODE_ID, &krate.attrs, |cx| { + // since the root module isn't visited as an item (because it isn't an + // item), warn for it here. + run_lints!(cx, check_crate, krate); - ast_visit::walk_crate(cx, krate); + ast_visit::walk_crate(cx, krate); - run_lints!(cx, check_crate_post, early_passes, krate); - }); + run_lints!(cx, check_crate_post, krate); + }); + (cx.lint_sess.passes, cx.buffered) + }; // Put the lint store levels and passes back in the session. - cx.lint_sess.restore(&sess.lint_store); + if pre_expansion { + sess.lint_store.borrow_mut().pre_expansion_passes = passes; + } else { + sess.lint_store.borrow_mut().early_passes = passes; + } - // Emit all buffered lints from early on in the session now that we've - // calculated the lint levels for all AST nodes. - for (_id, lints) in cx.buffered.map { - for early_lint in lints { - span_bug!(early_lint.span, "failed to process buffered lint here"); + // All of the buffered lints should have been emitted at this point. 
+ // If not, that means that we somehow buffered a lint for a node id + // that was not lint-checked (perhaps it doesn't exist?). This is a bug. + // + // Rustdoc runs everybody-loops before the early lints and removes + // function bodies, so it's totally possible for linted + // node ids to not exist (e.g. macros defined within functions for the + // unused_macro lint) anymore. So we only run this check + // when we're not in rustdoc mode. (see issue #47639) + if !sess.opts.actually_rustdoc { + for (_id, lints) in buffered.map { + for early_lint in lints { + sess.delay_span_bug(early_lint.span, "failed to process buffered lint here"); + } } } } diff --git a/src/librustc/lint/levels.rs b/src/librustc/lint/levels.rs index 4bc37747f2a7..483e2ea8a96c 100644 --- a/src/librustc/lint/levels.rs +++ b/src/librustc/lint/levels.rs @@ -10,7 +10,7 @@ use std::cmp; -use errors::DiagnosticBuilder; +use errors::{Applicability, DiagnosticBuilder}; use hir::HirId; use ich::StableHashingContext; use lint::builtin; @@ -22,6 +22,7 @@ use session::Session; use syntax::ast; use syntax::attr; use syntax::codemap::MultiSpan; +use syntax::feature_gate; use syntax::symbol::Symbol; use util::nodemap::FxHashMap; @@ -89,14 +90,15 @@ impl LintLevelSets { fn get_lint_level(&self, lint: &'static Lint, idx: u32, - aux: Option<&FxHashMap>) + aux: Option<&FxHashMap>, + sess: &Session) -> (Level, LintSource) { let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux); // If `level` is none then we actually assume the default level for this // lint. - let mut level = level.unwrap_or(lint.default_level); + let mut level = level.unwrap_or(lint.default_level(sess)); // If we're about to issue a warning, check at the last minute for any // directives against the warnings "lint". If, for example, there's an @@ -117,6 +119,11 @@ impl LintLevelSets { // Ensure that we never exceed the `--cap-lints` argument. 
level = cmp::min(level, self.lint_cap); + if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) { + // Ensure that we never exceed driver level. + level = cmp::min(*driver_level, level); + } + return (level, src) } @@ -197,7 +204,7 @@ impl<'a> LintLevelsBuilder<'a> { "malformed lint attribute"); }; for attr in attrs { - let level = match attr.name().and_then(|name| Level::from_str(&name.as_str())) { + let level = match Level::from_str(&attr.name().as_str()) { None => continue, Some(lvl) => lvl, }; @@ -220,8 +227,37 @@ impl<'a> LintLevelsBuilder<'a> { continue } }; + let tool_name = if let Some(lint_tool) = word.is_scoped() { + let gate_feature = !self.sess.features_untracked().tool_lints; + let known_tool = attr::is_known_lint_tool(lint_tool); + if gate_feature { + feature_gate::emit_feature_err(&sess.parse_sess, + "tool_lints", + word.span, + feature_gate::GateIssue::Language, + &format!("scoped lint `{}` is experimental", + word.ident)); + } + if !known_tool { + span_err!( + sess, + lint_tool.span, + E0710, + "an unknown tool name found in scoped lint: `{}`", + word.ident + ); + } + + if gate_feature || !known_tool { + continue + } + + Some(lint_tool.as_str()) + } else { + None + }; let name = word.name(); - match store.check_lint_name(&name.as_str()) { + match store.check_lint_name(&name.as_str(), tool_name) { CheckLintNameResult::Ok(ids) => { let src = LintSource::Node(name, li.span); for id in ids { @@ -229,26 +265,50 @@ impl<'a> LintLevelsBuilder<'a> { } } + CheckLintNameResult::Tool(result) => { + if let Some(ids) = result { + let complete_name = &format!("{}::{}", tool_name.unwrap(), name); + let src = LintSource::Node(Symbol::intern(complete_name), li.span); + for id in ids { + specs.insert(*id, (level, src)); + } + } + // If Tool(None) is returned, then either the lint does not exist in the + // tool or the code was not compiled with the tool and therefore the lint + // was never added to the `LintStore`. 
To detect this is the responsibility + // of the lint tool. + } + _ if !self.warn_about_weird_lints => {} - CheckLintNameResult::Warning(ref msg) => { + CheckLintNameResult::Warning(msg, renamed) => { let lint = builtin::RENAMED_AND_REMOVED_LINTS; let (level, src) = self.sets.get_lint_level(lint, self.cur, - Some(&specs)); - lint::struct_lint_level(self.sess, - lint, - level, - src, - Some(li.span.into()), - msg) - .emit(); + Some(&specs), + &sess); + let mut err = lint::struct_lint_level(self.sess, + lint, + level, + src, + Some(li.span.into()), + &msg); + if let Some(new_name) = renamed { + err.span_suggestion_with_applicability( + li.span, + "use the new name", + new_name, + Applicability::MachineApplicable + ); + } + err.emit(); } CheckLintNameResult::NoLint => { let lint = builtin::UNKNOWN_LINTS; let (level, src) = self.sets.get_lint_level(lint, self.cur, - Some(&specs)); + Some(&specs), + self.sess); let msg = format!("unknown lint: `{}`", name); let mut db = lint::struct_lint_level(self.sess, lint, @@ -257,15 +317,16 @@ impl<'a> LintLevelsBuilder<'a> { Some(li.span.into()), &msg); if name.as_str().chars().any(|c| c.is_uppercase()) { - let name_lower = name.as_str().to_lowercase(); + let name_lower = name.as_str().to_lowercase().to_string(); if let CheckLintNameResult::NoLint = - store.check_lint_name(&name_lower) { + store.check_lint_name(&name_lower, tool_name) { db.emit(); } else { - db.span_suggestion( + db.span_suggestion_with_applicability( li.span, "lowercase the lint name", - name_lower + name_lower, + Applicability::MachineApplicable ).emit(); } } else { @@ -342,7 +403,7 @@ impl<'a> LintLevelsBuilder<'a> { msg: &str) -> DiagnosticBuilder<'a> { - let (level, src) = self.sets.get_lint_level(lint, self.cur, None); + let (level, src) = self.sets.get_lint_level(lint, self.cur, None, self.sess); lint::struct_lint_level(self.sess, lint, level, src, span, msg) } @@ -377,11 +438,11 @@ impl LintLevelMap { /// If the `id` was not previously registered, returns 
`None`. If `None` is /// returned then the parent of `id` should be acquired and this function /// should be called again. - pub fn level_and_source(&self, lint: &'static Lint, id: HirId) + pub fn level_and_source(&self, lint: &'static Lint, id: HirId, session: &Session) -> Option<(Level, LintSource)> { self.id_to_set.get(&id).map(|idx| { - self.sets.get_lint_level(lint, *idx, None) + self.sets.get_lint_level(lint, *idx, None, session) }) } @@ -391,10 +452,10 @@ impl LintLevelMap { } } -impl<'gcx> HashStable> for LintLevelMap { +impl<'a> HashStable> for LintLevelMap { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let LintLevelMap { ref sets, diff --git a/src/librustc/lint/mod.rs b/src/librustc/lint/mod.rs index b2a9859f68a3..c36d674566ad 100644 --- a/src/librustc/lint/mod.rs +++ b/src/librustc/lint/mod.rs @@ -16,36 +16,40 @@ //! other phases of the compiler, which are generally required to hold in order //! to compile the program at all. //! -//! Most lints can be written as `LintPass` instances. These run just before -//! translation to LLVM bytecode. The `LintPass`es built into rustc are defined +//! Most lints can be written as `LintPass` instances. These run after +//! all other analyses. The `LintPass`es built into rustc are defined //! within `builtin.rs`, which has further comments on how to add such a lint. //! rustc can also load user-defined lint plugins via the plugin mechanism. //! //! Some of rustc's lints are defined elsewhere in the compiler and work by //! calling `add_lint()` on the overall `Session` object. This works when //! it happens before the main lint pass, which emits the lints stored by -//! `add_lint()`. To emit lints after the main lint pass (from trans, for +//! `add_lint()`. To emit lints after the main lint pass (from codegen, for //! example) requires more effort. See `emit_lint` and `GatherNodeLevels` //! in `context.rs`. 
pub use self::Level::*; pub use self::LintSource::*; -use std::rc::Rc; +use rustc_data_structures::sync::{self, Lrc}; use errors::{DiagnosticBuilder, DiagnosticId}; use hir::def_id::{CrateNum, LOCAL_CRATE}; -use hir::intravisit::{self, FnKind}; +use hir::intravisit; use hir; +use lint::builtin::BuiltinLintDiagnostics; +use lint::builtin::parser::QUESTION_MARK_MACRO_SEP; use session::{Session, DiagnosticMessageId}; -use std::hash; +use std::{hash, ptr}; use syntax::ast; -use syntax::codemap::MultiSpan; +use syntax::codemap::{MultiSpan, ExpnFormat}; +use syntax::early_buffered_lints::BufferedEarlyLintId; +use syntax::edition::Edition; use syntax::symbol::Symbol; use syntax::visit as ast_visit; use syntax_pos::Span; use ty::TyCtxt; -use ty::maps::Providers; +use ty::query::Providers; use util::nodemap::NodeMap; pub use lint::context::{LateContext, EarlyContext, LintContext, LintStore, @@ -74,38 +78,97 @@ pub struct Lint { /// /// e.g. "imports that are never used" pub desc: &'static str, + + /// Starting at the given edition, default to the given lint level. If this is `None`, then use + /// `default_level`. + pub edition_lint_opts: Option<(Edition, Level)>, + + /// Whether this lint is reported even inside expansions of external macros + pub report_in_external_macro: bool, } impl Lint { + /// Returns the `rust::lint::Lint` for a `syntax::early_buffered_lints::BufferedEarlyLintId`. + pub fn from_parser_lint_id(lint_id: BufferedEarlyLintId) -> &'static Self { + match lint_id { + BufferedEarlyLintId::QuestionMarkMacroSep => QUESTION_MARK_MACRO_SEP, + } + } + /// Get the lint's name, with ASCII letters converted to lowercase. pub fn name_lower(&self) -> String { self.name.to_ascii_lowercase() } + + pub fn default_level(&self, session: &Session) -> Level { + self.edition_lint_opts + .filter(|(e, _)| *e <= session.edition()) + .map(|(_, l)| l) + .unwrap_or(self.default_level) + } } /// Declare a static item of type `&'static Lint`. #[macro_export] macro_rules! 
declare_lint { ($vis: vis $NAME: ident, $Level: ident, $desc: expr) => ( + declare_lint!{$vis $NAME, $Level, $desc, false} + ); + ($vis: vis $NAME: ident, $Level: ident, $desc: expr, report_in_external_macro: $rep: expr) => ( + declare_lint!{$vis $NAME, $Level, $desc, $rep} + ); + ($vis: vis $NAME: ident, $Level: ident, $desc: expr, $external: expr) => ( $vis static $NAME: &$crate::lint::Lint = &$crate::lint::Lint { name: stringify!($NAME), default_level: $crate::lint::$Level, - desc: $desc + desc: $desc, + edition_lint_opts: None, + report_in_external_macro: $external, }; - ) + ); + ($vis: vis $NAME: ident, $Level: ident, $desc: expr, + $lint_edition: expr => $edition_level: ident + ) => ( + $vis static $NAME: &$crate::lint::Lint = &$crate::lint::Lint { + name: stringify!($NAME), + default_level: $crate::lint::$Level, + desc: $desc, + edition_lint_opts: Some(($lint_edition, $crate::lint::Level::$edition_level)), + report_in_external_macro: false, + }; + ); +} + +#[macro_export] +macro_rules! declare_tool_lint { + ($vis: vis $tool: ident ::$NAME: ident, $Level: ident, $desc: expr) => ( + declare_tool_lint!{$vis $tool::$NAME, $Level, $desc, false} + ); + ($vis: vis $tool: ident ::$NAME: ident, $Level: ident, $desc: expr, + report_in_external_macro: $rep: expr) => ( + declare_tool_lint!{$vis $tool::$NAME, $Level, $desc, $rep} + ); + ($vis: vis $tool: ident ::$NAME: ident, $Level: ident, $desc: expr, $external: expr) => ( + $vis static $NAME: &$crate::lint::Lint = &$crate::lint::Lint { + name: &concat!(stringify!($tool), "::", stringify!($NAME)), + default_level: $crate::lint::$Level, + desc: $desc, + edition_lint_opts: None, + report_in_external_macro: $external, + }; + ); } /// Declare a static `LintArray` and return it as an expression. #[macro_export] macro_rules! 
lint_array { - ($( $lint:expr ),*,) => { lint_array!( $( $lint ),* ) }; + ($( $lint:expr ),* ,) => { lint_array!( $($lint),* ) }; ($( $lint:expr ),*) => {{ - static ARRAY: LintArray = &[ $( &$lint ),* ]; - ARRAY + vec![$($lint),*] }} } -pub type LintArray = &'static [&'static &'static Lint]; +pub type LintArray = Vec<&'static Lint>; pub trait LintPass { /// Get descriptions of the lints this `LintPass` object can emit. @@ -117,6 +180,80 @@ pub trait LintPass { fn get_lints(&self) -> LintArray; } +#[macro_export] +macro_rules! late_lint_methods { + ($macro:path, $args:tt, [$hir:tt]) => ( + $macro!($args, [$hir], [ + fn check_body(a: &$hir hir::Body); + fn check_body_post(a: &$hir hir::Body); + fn check_name(a: Span, b: ast::Name); + fn check_crate(a: &$hir hir::Crate); + fn check_crate_post(a: &$hir hir::Crate); + fn check_mod(a: &$hir hir::Mod, b: Span, c: ast::NodeId); + fn check_mod_post(a: &$hir hir::Mod, b: Span, c: ast::NodeId); + fn check_foreign_item(a: &$hir hir::ForeignItem); + fn check_foreign_item_post(a: &$hir hir::ForeignItem); + fn check_item(a: &$hir hir::Item); + fn check_item_post(a: &$hir hir::Item); + fn check_local(a: &$hir hir::Local); + fn check_block(a: &$hir hir::Block); + fn check_block_post(a: &$hir hir::Block); + fn check_stmt(a: &$hir hir::Stmt); + fn check_arm(a: &$hir hir::Arm); + fn check_pat(a: &$hir hir::Pat); + fn check_decl(a: &$hir hir::Decl); + fn check_expr(a: &$hir hir::Expr); + fn check_expr_post(a: &$hir hir::Expr); + fn check_ty(a: &$hir hir::Ty); + fn check_generic_param(a: &$hir hir::GenericParam); + fn check_generics(a: &$hir hir::Generics); + fn check_where_predicate(a: &$hir hir::WherePredicate); + fn check_poly_trait_ref(a: &$hir hir::PolyTraitRef, b: hir::TraitBoundModifier); + fn check_fn( + a: hir::intravisit::FnKind<$hir>, + b: &$hir hir::FnDecl, + c: &$hir hir::Body, + d: Span, + e: ast::NodeId); + fn check_fn_post( + a: hir::intravisit::FnKind<$hir>, + b: &$hir hir::FnDecl, + c: &$hir hir::Body, + d: Span, + e: 
ast::NodeId + ); + fn check_trait_item(a: &$hir hir::TraitItem); + fn check_trait_item_post(a: &$hir hir::TraitItem); + fn check_impl_item(a: &$hir hir::ImplItem); + fn check_impl_item_post(a: &$hir hir::ImplItem); + fn check_struct_def( + a: &$hir hir::VariantData, + b: ast::Name, + c: &$hir hir::Generics, + d: ast::NodeId + ); + fn check_struct_def_post( + a: &$hir hir::VariantData, + b: ast::Name, + c: &$hir hir::Generics, + d: ast::NodeId + ); + fn check_struct_field(a: &$hir hir::StructField); + fn check_variant(a: &$hir hir::Variant, b: &$hir hir::Generics); + fn check_variant_post(a: &$hir hir::Variant, b: &$hir hir::Generics); + fn check_lifetime(a: &$hir hir::Lifetime); + fn check_path(a: &$hir hir::Path, b: hir::HirId); + fn check_attribute(a: &$hir ast::Attribute); + + /// Called when entering a syntax node that can have lint attributes such + /// as `#[allow(...)]`. Called with *all* the attributes of that node. + fn enter_lint_attrs(a: &$hir [ast::Attribute]); + + /// Counterpart to `enter_lint_attrs`. + fn exit_lint_attrs(a: &$hir [ast::Attribute]); + ]); + ) +} /// Trait for types providing lint checks. /// @@ -126,91 +263,71 @@ pub trait LintPass { // // FIXME: eliminate the duplication with `Visitor`. But this also // contains a few lint-specific methods with no equivalent in `Visitor`. 
-pub trait LateLintPass<'a, 'tcx>: LintPass { - fn check_body(&mut self, _: &LateContext, _: &'tcx hir::Body) { } - fn check_body_post(&mut self, _: &LateContext, _: &'tcx hir::Body) { } - fn check_name(&mut self, _: &LateContext, _: Span, _: ast::Name) { } - fn check_crate(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Crate) { } - fn check_crate_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Crate) { } - fn check_mod(&mut self, - _: &LateContext<'a, 'tcx>, - _: &'tcx hir::Mod, - _: Span, - _: ast::NodeId) { } - fn check_mod_post(&mut self, - _: &LateContext<'a, 'tcx>, - _: &'tcx hir::Mod, - _: Span, - _: ast::NodeId) { } - fn check_foreign_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ForeignItem) { } - fn check_foreign_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ForeignItem) { } - fn check_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Item) { } - fn check_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Item) { } - fn check_local(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Local) { } - fn check_block(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Block) { } - fn check_block_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Block) { } - fn check_stmt(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Stmt) { } - fn check_arm(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Arm) { } - fn check_pat(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Pat) { } - fn check_decl(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Decl) { } - fn check_expr(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Expr) { } - fn check_expr_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Expr) { } - fn check_ty(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Ty) { } - fn check_generic_param(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::GenericParam) { } - fn check_generics(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Generics) { } - fn check_fn(&mut self, - _: 
&LateContext<'a, 'tcx>, - _: FnKind<'tcx>, - _: &'tcx hir::FnDecl, - _: &'tcx hir::Body, - _: Span, - _: ast::NodeId) { } - fn check_fn_post(&mut self, - _: &LateContext<'a, 'tcx>, - _: FnKind<'tcx>, - _: &'tcx hir::FnDecl, - _: &'tcx hir::Body, - _: Span, - _: ast::NodeId) { } - fn check_trait_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::TraitItem) { } - fn check_trait_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::TraitItem) { } - fn check_impl_item(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ImplItem) { } - fn check_impl_item_post(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::ImplItem) { } - fn check_struct_def(&mut self, - _: &LateContext<'a, 'tcx>, - _: &'tcx hir::VariantData, - _: ast::Name, - _: &'tcx hir::Generics, - _: ast::NodeId) { } - fn check_struct_def_post(&mut self, - _: &LateContext<'a, 'tcx>, - _: &'tcx hir::VariantData, - _: ast::Name, - _: &'tcx hir::Generics, - _: ast::NodeId) { } - fn check_struct_field(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::StructField) { } - fn check_variant(&mut self, - _: &LateContext<'a, 'tcx>, - _: &'tcx hir::Variant, - _: &'tcx hir::Generics) { } - fn check_variant_post(&mut self, - _: &LateContext<'a, 'tcx>, - _: &'tcx hir::Variant, - _: &'tcx hir::Generics) { } - fn check_lifetime(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Lifetime) { } - fn check_path(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx hir::Path, _: ast::NodeId) { } - fn check_attribute(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx ast::Attribute) { } - /// Called when entering a syntax node that can have lint attributes such - /// as `#[allow(...)]`. Called with *all* the attributes of that node. - fn enter_lint_attrs(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx [ast::Attribute]) { } +macro_rules! 
expand_lint_pass_methods { + ($context:ty, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => ( + $(#[inline(always)] fn $name(&mut self, $context, $(_: $arg),*) {})* + ) +} - /// Counterpart to `enter_lint_attrs`. - fn exit_lint_attrs(&mut self, _: &LateContext<'a, 'tcx>, _: &'tcx [ast::Attribute]) { } +macro_rules! declare_late_lint_pass { + ([], [$hir:tt], [$($methods:tt)*]) => ( + pub trait LateLintPass<'a, $hir>: LintPass { + expand_lint_pass_methods!(&LateContext<'a, $hir>, [$($methods)*]); + } + ) +} + +late_lint_methods!(declare_late_lint_pass, [], ['tcx]); + +#[macro_export] +macro_rules! expand_combined_late_lint_pass_method { + ([$($passes:ident),*], $self: ident, $name: ident, $params:tt) => ({ + $($self.$passes.$name $params;)* + }) +} + +#[macro_export] +macro_rules! expand_combined_late_lint_pass_methods { + ($passes:tt, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => ( + $(fn $name(&mut self, context: &LateContext<'a, 'tcx>, $($param: $arg),*) { + expand_combined_late_lint_pass_method!($passes, self, $name, (context, $($param),*)); + })* + ) +} + +#[macro_export] +macro_rules! 
declare_combined_late_lint_pass { + ([$name:ident, [$($passes:ident: $constructor:expr,)*]], [$hir:tt], $methods:tt) => ( + #[allow(non_snake_case)] + struct $name { + $($passes: $passes,)* + } + + impl $name { + fn new() -> Self { + Self { + $($passes: $constructor,)* + } + } + } + + impl<'a, 'tcx> LateLintPass<'a, 'tcx> for $name { + expand_combined_late_lint_pass_methods!([$($passes),*], $methods); + } + + impl LintPass for $name { + fn get_lints(&self) -> LintArray { + let mut lints = Vec::new(); + $(lints.extend_from_slice(&self.$passes.get_lints());)* + lints + } + } + ) } pub trait EarlyLintPass: LintPass { - fn check_ident(&mut self, _: &EarlyContext, _: Span, _: ast::Ident) { } + fn check_ident(&mut self, _: &EarlyContext, _: ast::Ident) { } fn check_crate(&mut self, _: &EarlyContext, _: &ast::Crate) { } fn check_crate_post(&mut self, _: &EarlyContext, _: &ast::Crate) { } fn check_mod(&mut self, _: &EarlyContext, _: &ast::Mod, _: Span, _: ast::NodeId) { } @@ -230,6 +347,9 @@ pub trait EarlyLintPass: LintPass { fn check_ty(&mut self, _: &EarlyContext, _: &ast::Ty) { } fn check_generic_param(&mut self, _: &EarlyContext, _: &ast::GenericParam) { } fn check_generics(&mut self, _: &EarlyContext, _: &ast::Generics) { } + fn check_where_predicate(&mut self, _: &EarlyContext, _: &ast::WherePredicate) { } + fn check_poly_trait_ref(&mut self, _: &EarlyContext, _: &ast::PolyTraitRef, + _: &ast::TraitBoundModifier) { } fn check_fn(&mut self, _: &EarlyContext, _: ast_visit::FnKind, _: &ast::FnDecl, _: Span, _: ast::NodeId) { } fn check_fn_post(&mut self, _: &EarlyContext, @@ -248,6 +368,8 @@ pub trait EarlyLintPass: LintPass { fn check_lifetime(&mut self, _: &EarlyContext, _: &ast::Lifetime) { } fn check_path(&mut self, _: &EarlyContext, _: &ast::Path, _: ast::NodeId) { } fn check_attribute(&mut self, _: &EarlyContext, _: &ast::Attribute) { } + fn check_mac_def(&mut self, _: &EarlyContext, _: &ast::MacroDef, _id: ast::NodeId) { } + fn check_mac(&mut self, _: 
&EarlyContext, _: &ast::Mac) { } /// Called when entering a syntax node that can have lint attributes such /// as `#[allow(...)]`. Called with *all* the attributes of that node. @@ -258,8 +380,11 @@ pub trait EarlyLintPass: LintPass { } /// A lint pass boxed up as a trait object. -pub type EarlyLintPassObject = Box; -pub type LateLintPassObject = Box LateLintPass<'a, 'tcx> + 'static>; +pub type EarlyLintPassObject = Box; +pub type LateLintPassObject = Box LateLintPass<'a, 'tcx> + sync::Send + + sync::Sync + 'static>; + + /// Identifies a lint known to the compiler. #[derive(Clone, Copy, Debug)] @@ -270,7 +395,7 @@ pub struct LintId { impl PartialEq for LintId { fn eq(&self, other: &LintId) -> bool { - (self.lint as *const Lint) == (other.lint as *const Lint) + ptr::eq(self.lint, other.lint) } } @@ -304,7 +429,7 @@ impl LintId { /// Setting for how to handle a lint. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] pub enum Level { - Allow, Warn, Deny, Forbid + Allow, Warn, Deny, Forbid, } impl_stable_hash_for!(enum self::Level { @@ -378,14 +503,16 @@ impl LintBuffer { lint: &'static Lint, id: ast::NodeId, sp: MultiSpan, - msg: &str) { + msg: &str, + diagnostic: BuiltinLintDiagnostics) { let early_lint = BufferedEarlyLint { lint_id: LintId::of(lint), ast_id: id, span: sp, msg: msg.to_string(), + diagnostic }; - let arr = self.map.entry(id).or_insert(Vec::new()); + let arr = self.map.entry(id).or_default(); if !arr.contains(&early_lint) { arr.push(early_lint); } @@ -467,21 +594,49 @@ pub fn struct_lint_level<'a>(sess: &'a Session, // Check for future incompatibility lints and issue a stronger warning. 
let lints = sess.lint_store.borrow(); - if let Some(future_incompatible) = lints.future_incompatible(LintId::of(lint)) { - let explanation = format!("this was previously accepted by the compiler \ - but is being phased out; \ - it will become a hard error in a future release!"); + let lint_id = LintId::of(lint); + let future_incompatible = lints.future_incompatible(lint_id); + if let Some(future_incompatible) = future_incompatible { + const STANDARD_MESSAGE: &str = + "this was previously accepted by the compiler but is being phased out; \ + it will become a hard error"; + + let explanation = if lint_id == LintId::of(::lint::builtin::UNSTABLE_NAME_COLLISIONS) { + "once this method is added to the standard library, \ + the ambiguity may cause an error or change in behavior!" + .to_owned() + } else if let Some(edition) = future_incompatible.edition { + format!("{} in the {} edition!", STANDARD_MESSAGE, edition) + } else { + format!("{} in a future release!", STANDARD_MESSAGE) + }; let citation = format!("for more information, see {}", future_incompatible.reference); err.warn(&explanation); err.note(&citation); } + // If this code originates in a foreign macro, aka something that this crate + // did not itself author, then it's likely that there's nothing this crate + // can do about it. We probably want to skip the lint entirely. + if err.span.primary_spans().iter().any(|s| in_external_macro(sess, *s)) { + // Any suggestions made here are likely to be incorrect, so anything we + // emit shouldn't be automatically fixed by rustfix. + err.allow_suggestions(false); + + // If this is a future incompatible lint it'll become a hard error, so + // we have to emit *something*. Also allow lints to whitelist themselves + // on a case-by-case basis for emission in a foreign macro. 
+ if future_incompatible.is_none() && !lint.report_in_external_macro { + err.cancel() + } + } + return err } fn lint_levels<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, cnum: CrateNum) - -> Rc + -> Lrc { assert_eq!(cnum, LOCAL_CRATE); let mut builder = LintLevelMapBuilder { @@ -494,7 +649,7 @@ fn lint_levels<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, cnum: CrateNum) intravisit::walk_crate(builder, krate); }); - Rc::new(builder.levels.build_map()) + Lrc::new(builder.levels.build_map()) } struct LintLevelMapBuilder<'a, 'tcx: 'a> { @@ -576,3 +731,32 @@ impl<'a, 'tcx> intravisit::Visitor<'tcx> for LintLevelMapBuilder<'a, 'tcx> { pub fn provide(providers: &mut Providers) { providers.lint_levels = lint_levels; } + +/// Returns whether `span` originates in a foreign crate's external macro. +/// +/// This is used to test whether a lint should be entirely aborted above. +pub fn in_external_macro(sess: &Session, span: Span) -> bool { + let info = match span.ctxt().outer().expn_info() { + Some(info) => info, + // no ExpnInfo means this span doesn't come from a macro + None => return false, + }; + + match info.format { + ExpnFormat::MacroAttribute(..) => return true, // definitely a plugin + ExpnFormat::CompilerDesugaring(_) => return true, // well, it's "external" + ExpnFormat::MacroBang(..) => {} // check below + } + + let def_site = match info.def_site { + Some(span) => span, + // no span for the def_site means it's an external macro + None => return true, + }; + + match sess.codemap().span_to_snippet(def_site) { + Ok(code) => !code.starts_with("macro_rules"), + // no snippet = external macro or compiler-builtin expansion + Err(_) => true, + } +} diff --git a/src/librustc/macros.rs b/src/librustc/macros.rs index f0285d6a9378..e599b0704f95 100644 --- a/src/librustc/macros.rs +++ b/src/librustc/macros.rs @@ -51,70 +51,92 @@ macro_rules! enum_from_u32 { macro_rules! 
bug { () => ( bug!("impossible case reached") ); ($($message:tt)*) => ({ - $crate::session::bug_fmt(file!(), line!(), format_args!($($message)*)) + $crate::util::bug::bug_fmt(file!(), line!(), format_args!($($message)*)) }) } #[macro_export] macro_rules! span_bug { ($span:expr, $($message:tt)*) => ({ - $crate::session::span_bug_fmt(file!(), line!(), $span, format_args!($($message)*)) + $crate::util::bug::span_bug_fmt(file!(), line!(), $span, format_args!($($message)*)) }) } #[macro_export] macro_rules! __impl_stable_hash_field { - (DECL IGNORED) => (_); - (DECL $name:ident) => (ref $name); - (USE IGNORED $ctx:expr, $hasher:expr) => ({}); - (USE $name:ident, $ctx:expr, $hasher:expr) => ($name.hash_stable($ctx, $hasher)); + ($field:ident, $ctx:expr, $hasher:expr) => ($field.hash_stable($ctx, $hasher)); + ($field:ident, $ctx:expr, $hasher:expr, _) => ({ let _ = $field; }); + ($field:ident, $ctx:expr, $hasher:expr, $delegate:expr) => ($delegate.hash_stable($ctx, $hasher)); } #[macro_export] macro_rules! impl_stable_hash_for { - (enum $enum_name:path { $( $variant:ident $( ( $($arg:ident),* ) )* ),* }) => { - impl<'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'tcx>> for $enum_name { + // FIXME(mark-i-m): Some of these should be `?` rather than `*`. See the git blame and change + // them back when `?` is supported again. 
+ (enum $enum_name:path { $( $variant:ident $( ( $($field:ident $(-> $delegate:tt)*),* ) )* ),* $(,)* }) => { + impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $enum_name { #[inline] fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'tcx>, + __ctx: &mut $crate::ich::StableHashingContext<'a>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { use $enum_name::*; ::std::mem::discriminant(self).hash_stable(__ctx, __hasher); match *self { $( - $variant $( ( $( __impl_stable_hash_field!(DECL $arg) ),* ) )* => { - $($( __impl_stable_hash_field!(USE $arg, __ctx, __hasher) );*)* + $variant $( ( $(ref $field),* ) )* => { + $($( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );*)* } )* } } } }; - (struct $struct_name:path { $($field:ident),* }) => { - impl<'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'tcx>> for $struct_name { + // FIXME(mark-i-m): same here. + (struct $struct_name:path { $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { + impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name { #[inline] fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'tcx>, + __ctx: &mut $crate::ich::StableHashingContext<'a>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { let $struct_name { $(ref $field),* } = *self; - $( $field.hash_stable(__ctx, __hasher));* + $( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );* } } }; - (tuple_struct $struct_name:path { $($field:ident),* }) => { - impl<'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'tcx>> for $struct_name { + // FIXME(mark-i-m): same here. 
+ (tuple_struct $struct_name:path { $($field:ident $(-> $delegate:tt)*),* $(,)* }) => { + impl<'a, 'tcx> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name { #[inline] fn hash_stable(&self, - __ctx: &mut $crate::ich::StableHashingContext<'tcx>, + __ctx: &mut $crate::ich::StableHashingContext<'a>, __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { let $struct_name ( $(ref $field),* ) = *self; + $( __impl_stable_hash_field!($field, __ctx, __hasher $(, $delegate)*) );* + } + } + }; + + (impl<$tcx:lifetime $(, $T:ident)*> for struct $struct_name:path { + $($field:ident),* $(,)* + }) => { + impl<'a, $tcx, $($T,)*> ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>> for $struct_name + where $($T: ::rustc_data_structures::stable_hasher::HashStable<$crate::ich::StableHashingContext<'a>>),* + { + #[inline] + fn hash_stable(&self, + __ctx: &mut $crate::ich::StableHashingContext<'a>, + __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) { + let $struct_name { + $(ref $field),* + } = *self; + $( $field.hash_stable(__ctx, __hasher));* } } @@ -125,11 +147,11 @@ macro_rules! impl_stable_hash_for { macro_rules! impl_stable_hash_for_spanned { ($T:path) => ( - impl<'tcx> HashStable> for ::syntax::codemap::Spanned<$T> + impl<'a, 'tcx> HashStable> for ::syntax::codemap::Spanned<$T> { #[inline] fn hash_stable(&self, - hcx: &mut StableHashingContext<'tcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { self.node.hash_stable(hcx, hasher); self.span.hash_stable(hcx, hasher); @@ -138,3 +160,330 @@ macro_rules! impl_stable_hash_for_spanned { ); } +/////////////////////////////////////////////////////////////////////////// +// Lift and TypeFoldable macros +// +// When possible, use one of these (relatively) convenient macros to write +// the impls for you. + +#[macro_export] +macro_rules! 
CloneLiftImpls { + (for <$tcx:lifetime> { $($ty:ty,)+ }) => { + $( + impl<$tcx> $crate::ty::Lift<$tcx> for $ty { + type Lifted = Self; + fn lift_to_tcx<'a, 'gcx>(&self, _: $crate::ty::TyCtxt<'a, 'gcx, $tcx>) -> Option { + Some(Clone::clone(self)) + } + } + )+ + }; + + ($($ty:ty,)+) => { + CloneLiftImpls! { + for <'tcx> { + $($ty,)+ + } + } + }; +} + +/// Used for types that are `Copy` and which **do not care arena +/// allocated data** (i.e., don't need to be folded). +#[macro_export] +macro_rules! CloneTypeFoldableImpls { + (for <$tcx:lifetime> { $($ty:ty,)+ }) => { + $( + impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty { + fn super_fold_with<'gcx: $tcx, F: $crate::ty::fold::TypeFolder<'gcx, $tcx>>( + &self, + _: &mut F + ) -> $ty { + Clone::clone(self) + } + + fn super_visit_with>( + &self, + _: &mut F) + -> bool + { + false + } + } + )+ + }; + + ($($ty:ty,)+) => { + CloneTypeFoldableImpls! { + for <'tcx> { + $($ty,)+ + } + } + }; +} + +#[macro_export] +macro_rules! CloneTypeFoldableAndLiftImpls { + ($($t:tt)*) => { + CloneTypeFoldableImpls! { $($t)* } + CloneLiftImpls! { $($t)* } + } +} + +#[macro_export] +macro_rules! BraceStructLiftImpl { + (impl<$($p:tt),*> Lift<$tcx:tt> for $s:path { + type Lifted = $lifted:ty; + $($field:ident),* $(,)* + } $(where $($wc:tt)*)*) => { + impl<$($p),*> $crate::ty::Lift<$tcx> for $s + $(where $($wc)*)* + { + type Lifted = $lifted; + + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<$lifted> { + $(let $field = tcx.lift(&self.$field)?;)* + Some(Self::Lifted { $($field),* }) + } + } + }; +} + +#[macro_export] +macro_rules! 
EnumLiftImpl { + (impl<$($p:tt),*> Lift<$tcx:tt> for $s:path { + type Lifted = $lifted:ty; + $($variants:tt)* + } $(where $($wc:tt)*)*) => { + impl<$($p),*> $crate::ty::Lift<$tcx> for $s + $(where $($wc)*)* + { + type Lifted = $lifted; + + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<$lifted> { + EnumLiftImpl!(@Variants(self, tcx) input($($variants)*) output()) + } + } + }; + + (@Variants($this:expr, $tcx:expr) input() output($($output:tt)*)) => { + match $this { + $($output)* + } + }; + + (@Variants($this:expr, $tcx:expr) + input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*) + output( $($output:tt)*) ) => { + EnumLiftImpl!( + @Variants($this, $tcx) + input($($input)*) + output( + $variant ( $($variant_arg),* ) => { + Some($variant ( $($tcx.lift($variant_arg)?),* )) + } + $($output)* + ) + ) + }; + + (@Variants($this:expr, $tcx:expr) + input( ($variant:path), $($input:tt)*) + output( $($output:tt)*) ) => { + EnumLiftImpl!( + @Variants($this, $tcx) + input($($input)*) + output( + $variant => { Some($variant) } + $($output)* + ) + ) + }; +} + +#[macro_export] +macro_rules! BraceStructTypeFoldableImpl { + (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path { + $($field:ident),* $(,)* + } $(where $($wc:tt)*)*) => { + impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s + $(where $($wc)*)* + { + fn super_fold_with<'gcx: $tcx, V: $crate::ty::fold::TypeFolder<'gcx, $tcx>>( + &self, + folder: &mut V, + ) -> Self { + let $s { $($field,)* } = self; + $s { $($field: $crate::ty::fold::TypeFoldable::fold_with($field, folder),)* } + } + + fn super_visit_with>( + &self, + visitor: &mut V, + ) -> bool { + let $s { $($field,)* } = self; + false $(|| $crate::ty::fold::TypeFoldable::visit_with($field, visitor))* + } + } + }; +} + +#[macro_export] +macro_rules! 
TupleStructTypeFoldableImpl { + (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path { + $($field:ident),* $(,)* + } $(where $($wc:tt)*)*) => { + impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s + $(where $($wc)*)* + { + fn super_fold_with<'gcx: $tcx, V: $crate::ty::fold::TypeFolder<'gcx, $tcx>>( + &self, + folder: &mut V, + ) -> Self { + let $s($($field,)*)= self; + $s($($crate::ty::fold::TypeFoldable::fold_with($field, folder),)*) + } + + fn super_visit_with>( + &self, + visitor: &mut V, + ) -> bool { + let $s($($field,)*) = self; + false $(|| $crate::ty::fold::TypeFoldable::visit_with($field, visitor))* + } + } + }; +} + +#[macro_export] +macro_rules! EnumTypeFoldableImpl { + (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path { + $($variants:tt)* + } $(where $($wc:tt)*)*) => { + impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s + $(where $($wc)*)* + { + fn super_fold_with<'gcx: $tcx, V: $crate::ty::fold::TypeFolder<'gcx, $tcx>>( + &self, + folder: &mut V, + ) -> Self { + EnumTypeFoldableImpl!(@FoldVariants(self, folder) input($($variants)*) output()) + } + + fn super_visit_with>( + &self, + visitor: &mut V, + ) -> bool { + EnumTypeFoldableImpl!(@VisitVariants(self, visitor) input($($variants)*) output()) + } + } + }; + + (@FoldVariants($this:expr, $folder:expr) input() output($($output:tt)*)) => { + match $this { + $($output)* + } + }; + + (@FoldVariants($this:expr, $folder:expr) + input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*) + output( $($output:tt)*) ) => { + EnumTypeFoldableImpl!( + @FoldVariants($this, $folder) + input($($input)*) + output( + $variant ( $($variant_arg),* ) => { + $variant ( + $($crate::ty::fold::TypeFoldable::fold_with($variant_arg, $folder)),* + ) + } + $($output)* + ) + ) + }; + + (@FoldVariants($this:expr, $folder:expr) + input( ($variant:path) { $($variant_arg:ident),* $(,)* } , $($input:tt)*) + output( $($output:tt)*) ) => { + EnumTypeFoldableImpl!( + @FoldVariants($this, $folder) + 
input($($input)*) + output( + $variant { $($variant_arg),* } => { + $variant { + $($variant_arg: $crate::ty::fold::TypeFoldable::fold_with( + $variant_arg, $folder + )),* } + } + $($output)* + ) + ) + }; + + (@FoldVariants($this:expr, $folder:expr) + input( ($variant:path), $($input:tt)*) + output( $($output:tt)*) ) => { + EnumTypeFoldableImpl!( + @FoldVariants($this, $folder) + input($($input)*) + output( + $variant => { $variant } + $($output)* + ) + ) + }; + + (@VisitVariants($this:expr, $visitor:expr) input() output($($output:tt)*)) => { + match $this { + $($output)* + } + }; + + (@VisitVariants($this:expr, $visitor:expr) + input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*) + output( $($output:tt)*) ) => { + EnumTypeFoldableImpl!( + @VisitVariants($this, $visitor) + input($($input)*) + output( + $variant ( $($variant_arg),* ) => { + false $(|| $crate::ty::fold::TypeFoldable::visit_with( + $variant_arg, $visitor + ))* + } + $($output)* + ) + ) + }; + + (@VisitVariants($this:expr, $visitor:expr) + input( ($variant:path) { $($variant_arg:ident),* $(,)* } , $($input:tt)*) + output( $($output:tt)*) ) => { + EnumTypeFoldableImpl!( + @VisitVariants($this, $visitor) + input($($input)*) + output( + $variant { $($variant_arg),* } => { + false $(|| $crate::ty::fold::TypeFoldable::visit_with( + $variant_arg, $visitor + ))* + } + $($output)* + ) + ) + }; + + (@VisitVariants($this:expr, $visitor:expr) + input( ($variant:path), $($input:tt)*) + output( $($output:tt)*) ) => { + EnumTypeFoldableImpl!( + @VisitVariants($this, $visitor) + input($($input)*) + output( + $variant => { false } + $($output)* + ) + ) + }; +} + diff --git a/src/librustc/middle/borrowck.rs b/src/librustc/middle/borrowck.rs index 380f79361e27..c8d513a59f00 100644 --- a/src/librustc/middle/borrowck.rs +++ b/src/librustc/middle/borrowck.rs @@ -15,18 +15,26 @@ use util::nodemap::FxHashSet; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; 
+#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +pub enum SignalledError { SawSomeError, NoErrorsSeen } + +impl_stable_hash_for!(enum self::SignalledError { SawSomeError, NoErrorsSeen }); + #[derive(Debug, RustcEncodable, RustcDecodable)] pub struct BorrowCheckResult { pub used_mut_nodes: FxHashSet, + pub signalled_any_error: SignalledError, } -impl<'gcx> HashStable> for BorrowCheckResult { +impl<'a> HashStable> for BorrowCheckResult { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let BorrowCheckResult { ref used_mut_nodes, + ref signalled_any_error, } = *self; used_mut_nodes.hash_stable(hcx, hasher); + signalled_any_error.hash_stable(hcx, hasher); } } diff --git a/src/librustc/middle/const_val.rs b/src/librustc/middle/const_val.rs deleted file mode 100644 index 440af39a0d46..000000000000 --- a/src/librustc/middle/const_val.rs +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use rustc_const_math::ConstInt; - -use hir::def_id::DefId; -use ty::{self, TyCtxt, layout}; -use ty::subst::Substs; -use rustc_const_math::*; - -use graphviz::IntoCow; -use errors::DiagnosticBuilder; -use serialize::{self, Encodable, Encoder, Decodable, Decoder}; -use syntax::symbol::InternedString; -use syntax::ast; -use syntax_pos::Span; - -use std::borrow::Cow; - -pub type EvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, ConstEvalErr<'tcx>>; - -#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq)] -pub enum ConstVal<'tcx> { - Integral(ConstInt), - Float(ConstFloat), - Str(InternedString), - ByteStr(ByteArray<'tcx>), - Bool(bool), - Char(char), - Variant(DefId), - Function(DefId, &'tcx Substs<'tcx>), - Aggregate(ConstAggregate<'tcx>), - Unevaluated(DefId, &'tcx Substs<'tcx>), -} - -#[derive(Copy, Clone, Debug, Hash, RustcEncodable, Eq, PartialEq)] -pub struct ByteArray<'tcx> { - pub data: &'tcx [u8], -} - -impl<'tcx> serialize::UseSpecializedDecodable for ByteArray<'tcx> {} - -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] -pub enum ConstAggregate<'tcx> { - Struct(&'tcx [(ast::Name, &'tcx ty::Const<'tcx>)]), - Tuple(&'tcx [&'tcx ty::Const<'tcx>]), - Array(&'tcx [&'tcx ty::Const<'tcx>]), - Repeat(&'tcx ty::Const<'tcx>, u64), -} - -impl<'tcx> Encodable for ConstAggregate<'tcx> { - fn encode(&self, _: &mut S) -> Result<(), S::Error> { - bug!("should never encode ConstAggregate::{:?}", self) - } -} - -impl<'tcx> Decodable for ConstAggregate<'tcx> { - fn decode(_: &mut D) -> Result { - bug!("should never decode ConstAggregate") - } -} - -impl<'tcx> ConstVal<'tcx> { - pub fn to_const_int(&self) -> Option { - match *self { - ConstVal::Integral(i) => Some(i), - ConstVal::Bool(b) => Some(ConstInt::U8(b as u8)), - ConstVal::Char(ch) => Some(ConstInt::U32(ch as u32)), - _ => None - } - } -} - -#[derive(Clone, Debug)] -pub struct ConstEvalErr<'tcx> { - pub span: Span, - pub kind: ErrKind<'tcx>, -} - -#[derive(Clone, Debug)] -pub enum 
ErrKind<'tcx> { - CannotCast, - MissingStructField, - - NonConstPath, - UnimplementedConstVal(&'static str), - ExpectedConstTuple, - ExpectedConstStruct, - IndexedNonVec, - IndexNotUsize, - IndexOutOfBounds { len: u64, index: u64 }, - - MiscBinaryOp, - MiscCatchAll, - - IndexOpFeatureGated, - Math(ConstMathErr), - LayoutError(layout::LayoutError<'tcx>), - - ErroneousReferencedConstant(Box>), - - TypeckError, - CheckMatchError, -} - -impl<'tcx> From for ErrKind<'tcx> { - fn from(err: ConstMathErr) -> ErrKind<'tcx> { - match err { - ConstMathErr::UnsignedNegation => ErrKind::TypeckError, - _ => ErrKind::Math(err) - } - } -} - -#[derive(Clone, Debug)] -pub enum ConstEvalErrDescription<'a> { - Simple(Cow<'a, str>), -} - -impl<'a> ConstEvalErrDescription<'a> { - /// Return a one-line description of the error, for lints and such - pub fn into_oneline(self) -> Cow<'a, str> { - match self { - ConstEvalErrDescription::Simple(simple) => simple, - } - } -} - -impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { - pub fn description(&self) -> ConstEvalErrDescription { - use self::ErrKind::*; - use self::ConstEvalErrDescription::*; - - macro_rules! 
simple { - ($msg:expr) => ({ Simple($msg.into_cow()) }); - ($fmt:expr, $($arg:tt)+) => ({ - Simple(format!($fmt, $($arg)+).into_cow()) - }) - } - - match self.kind { - CannotCast => simple!("can't cast this type"), - MissingStructField => simple!("nonexistent struct field"), - NonConstPath => simple!("non-constant path in constant expression"), - UnimplementedConstVal(what) => - simple!("unimplemented constant expression: {}", what), - ExpectedConstTuple => simple!("expected constant tuple"), - ExpectedConstStruct => simple!("expected constant struct"), - IndexedNonVec => simple!("indexing is only supported for arrays"), - IndexNotUsize => simple!("indices must be of type `usize`"), - IndexOutOfBounds { len, index } => { - simple!("index out of bounds: the len is {} but the index is {}", - len, index) - } - - MiscBinaryOp => simple!("bad operands for binary"), - MiscCatchAll => simple!("unsupported constant expr"), - IndexOpFeatureGated => simple!("the index operation on const values is unstable"), - Math(ref err) => Simple(err.description().into_cow()), - LayoutError(ref err) => Simple(err.to_string().into_cow()), - - ErroneousReferencedConstant(_) => simple!("could not evaluate referenced constant"), - - TypeckError => simple!("type-checking failed"), - CheckMatchError => simple!("match-checking failed"), - } - } - - pub fn struct_error(&self, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - primary_span: Span, - primary_kind: &str) - -> DiagnosticBuilder<'gcx> - { - let mut err = self; - while let &ConstEvalErr { - kind: ErrKind::ErroneousReferencedConstant(box ref i_err), .. 
- } = err { - err = i_err; - } - - let mut diag = struct_span_err!(tcx.sess, err.span, E0080, "constant evaluation error"); - err.note(tcx, primary_span, primary_kind, &mut diag); - diag - } - - pub fn note(&self, - _tcx: TyCtxt<'a, 'gcx, 'tcx>, - primary_span: Span, - primary_kind: &str, - diag: &mut DiagnosticBuilder) - { - match self.description() { - ConstEvalErrDescription::Simple(message) => { - diag.span_label(self.span, message); - } - } - - if !primary_span.contains(self.span) { - diag.span_note(primary_span, - &format!("for {} here", primary_kind)); - } - } - - pub fn report(&self, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - primary_span: Span, - primary_kind: &str) - { - match self.kind { - ErrKind::TypeckError | ErrKind::CheckMatchError => return, - _ => {} - } - self.struct_error(tcx, primary_span, primary_kind).emit(); - } -} diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 9708afd20459..b91a9644b211 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -22,28 +22,21 @@ //! are *mostly* used as a part of that interface, but these should //! probably get a better home if someone can find one. 
-use hir; -use hir::def; use hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use hir::map as hir_map; -use hir::map::definitions::{Definitions, DefKey, DefPathTable}; -use hir::svh::Svh; -use ich; +use hir::map::definitions::{DefKey, DefPathTable}; +use rustc_data_structures::svh::Svh; use ty::{self, TyCtxt}; use session::{Session, CrateDisambiguator}; use session::search_paths::PathKind; -use util::nodemap::NodeSet; use std::any::Any; -use std::collections::BTreeMap; use std::path::{Path, PathBuf}; -use std::rc::Rc; -use rustc_data_structures::owning_ref::ErasedBoxRef; use syntax::ast; -use syntax::ext::base::SyntaxExtension; use syntax::symbol::Symbol; use syntax_pos::Span; -use rustc_back::target::Target; +use rustc_target::spec::Target; +use rustc_data_structures::sync::{self, MetadataRef, Lrc}; pub use self::NativeLibraryKind::*; @@ -129,38 +122,51 @@ pub enum NativeLibraryKind { NativeUnknown, } -#[derive(Clone, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, RustcEncodable, RustcDecodable)] pub struct NativeLibrary { pub kind: NativeLibraryKind, - pub name: Symbol, + pub name: Option, pub cfg: Option, - pub foreign_items: Vec, + pub foreign_module: Option, + pub wasm_import_module: Option, } -pub enum LoadedMacro { - MacroDef(ast::Item), - ProcMacro(Rc), +#[derive(Clone, Hash, RustcEncodable, RustcDecodable)] +pub struct ForeignModule { + pub foreign_items: Vec, + pub def_id: DefId, } #[derive(Copy, Clone, Debug)] pub struct ExternCrate { - /// def_id of an `extern crate` in the current crate that caused - /// this crate to be loaded; note that there could be multiple - /// such ids - pub def_id: DefId, + pub src: ExternCrateSource, /// span of the extern crate that caused this to be loaded pub span: Span, + /// Number of links to reach the extern; + /// used to select the extern with the shortest path + pub path_len: usize, + /// If true, then this crate is the crate named by the extern /// crate referenced above. 
If false, then this crate is a dep /// of the crate. pub direct: bool, +} - /// Number of links to reach the extern crate `def_id` - /// declaration; used to select the extern crate with the shortest - /// path - pub path_len: usize, +#[derive(Copy, Clone, Debug)] +pub enum ExternCrateSource { + /// Crate is loaded by `extern crate`. + Extern( + /// def_id of the item in the current crate that caused + /// this crate to be loaded; note that there could be multiple + /// such ids + DefId, + ), + // Crate is loaded by `use`. + Use, + /// Crate is implicitly loaded by an absolute or an `extern::` path. + Path, } pub struct EncodedMetadata { @@ -187,31 +193,11 @@ pub trait MetadataLoader { fn get_rlib_metadata(&self, target: &Target, filename: &Path) - -> Result, String>; + -> Result; fn get_dylib_metadata(&self, target: &Target, filename: &Path) - -> Result, String>; -} - -#[derive(Clone)] -pub struct ExternConstBody<'tcx> { - pub body: &'tcx hir::Body, - - // It would require a lot of infrastructure to enable stable-hashing Bodies - // from other crates, so we hash on export and just store the fingerprint - // with them. - pub fingerprint: ich::Fingerprint, -} - -#[derive(Clone)] -pub struct ExternBodyNestedBodies { - pub nested_bodies: Rc>, - - // It would require a lot of infrastructure to enable stable-hashing Bodies - // from other crates, so we hash on export and just store the fingerprint - // with them. - pub fingerprint: ich::Fingerprint, + -> Result; } /// A store of Rust crates, through with their metadata @@ -225,30 +211,20 @@ pub struct ExternBodyNestedBodies { /// (it'd break incremental compilation) and should only be called pre-HIR (e.g. 
/// during resolve) pub trait CrateStore { - fn crate_data_as_rc_any(&self, krate: CrateNum) -> Rc; - - // access to the metadata loader - fn metadata_loader(&self) -> &MetadataLoader; + fn crate_data_as_rc_any(&self, krate: CrateNum) -> Lrc; // resolve fn def_key(&self, def: DefId) -> DefKey; fn def_path(&self, def: DefId) -> hir_map::DefPath; fn def_path_hash(&self, def: DefId) -> hir_map::DefPathHash; - fn def_path_table(&self, cnum: CrateNum) -> Rc; + fn def_path_table(&self, cnum: CrateNum) -> Lrc; // "queries" used in resolve that aren't tracked for incremental compilation - fn visibility_untracked(&self, def: DefId) -> ty::Visibility; - fn export_macros_untracked(&self, cnum: CrateNum); - fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind; fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol; fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> CrateDisambiguator; fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh; - fn struct_field_names_untracked(&self, def: DefId) -> Vec; - fn item_children_untracked(&self, did: DefId, sess: &Session) -> Vec; - fn load_macro_untracked(&self, did: DefId, sess: &Session) -> LoadedMacro; fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option; fn item_generics_cloned_untracked(&self, def: DefId, sess: &Session) -> ty::Generics; - fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem; fn postorder_cnums_untracked(&self) -> Vec; // This is basically a 1-based range of ints, which is a little @@ -258,107 +234,12 @@ pub trait CrateStore { // utility functions fn encode_metadata<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - link_meta: &LinkMeta, - reachable: &NodeSet) + link_meta: &LinkMeta) -> EncodedMetadata; fn metadata_encoding_version(&self) -> &[u8]; } -// FIXME: find a better place for this? 
-pub fn validate_crate_name(sess: Option<&Session>, s: &str, sp: Option) { - let mut err_count = 0; - { - let mut say = |s: &str| { - match (sp, sess) { - (_, None) => bug!("{}", s), - (Some(sp), Some(sess)) => sess.span_err(sp, s), - (None, Some(sess)) => sess.err(s), - } - err_count += 1; - }; - if s.is_empty() { - say("crate name must not be empty"); - } - for c in s.chars() { - if c.is_alphanumeric() { continue } - if c == '_' { continue } - say(&format!("invalid character `{}` in crate name: `{}`", c, s)); - } - } - - if err_count > 0 { - sess.unwrap().abort_if_errors(); - } -} - -/// A dummy crate store that does not support any non-local crates, -/// for test purposes. -pub struct DummyCrateStore; - -#[allow(unused_variables)] -impl CrateStore for DummyCrateStore { - fn crate_data_as_rc_any(&self, krate: CrateNum) -> Rc - { bug!("crate_data_as_rc_any") } - // item info - fn visibility_untracked(&self, def: DefId) -> ty::Visibility { bug!("visibility") } - fn item_generics_cloned_untracked(&self, def: DefId, sess: &Session) -> ty::Generics - { bug!("item_generics_cloned") } - - // trait/impl-item info - fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem - { bug!("associated_item_cloned") } - - // crate metadata - fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind { bug!("is_explicitly_linked") } - fn export_macros_untracked(&self, cnum: CrateNum) { bug!("export_macros") } - fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol { bug!("crate_name") } - fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> CrateDisambiguator { - bug!("crate_disambiguator") - } - fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh { bug!("crate_hash") } - - // resolve - fn def_key(&self, def: DefId) -> DefKey { bug!("def_key") } - fn def_path(&self, def: DefId) -> hir_map::DefPath { - bug!("relative_def_path") - } - fn def_path_hash(&self, def: DefId) -> hir_map::DefPathHash { - bug!("def_path_hash") - } - fn def_path_table(&self, cnum: 
CrateNum) -> Rc { - bug!("def_path_table") - } - fn struct_field_names_untracked(&self, def: DefId) -> Vec { - bug!("struct_field_names") - } - fn item_children_untracked(&self, did: DefId, sess: &Session) -> Vec { - bug!("item_children") - } - fn load_macro_untracked(&self, did: DefId, sess: &Session) -> LoadedMacro { bug!("load_macro") } - - fn crates_untracked(&self) -> Vec { vec![] } - - // utility functions - fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option { None } - fn encode_metadata<'a, 'tcx>(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - link_meta: &LinkMeta, - reachable: &NodeSet) - -> EncodedMetadata { - bug!("encode_metadata") - } - fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") } - fn postorder_cnums_untracked(&self) -> Vec { bug!("postorder_cnums_untracked") } - - // access to the metadata loader - fn metadata_loader(&self) -> &MetadataLoader { bug!("metadata_loader") } -} - -pub trait CrateLoader { - fn process_item(&mut self, item: &ast::Item, defs: &Definitions); - fn postprocess(&mut self, krate: &ast::Crate); - fn resolve_crate_from_path(&mut self, name: Symbol, span: Span) -> CrateNum; -} +pub type CrateStoreDyn = dyn CrateStore + sync::Sync; // This method is used when generating the command line to pass through to // system linker. The linker expects undefined symbols on the left of the @@ -398,8 +279,8 @@ pub fn used_crates(tcx: TyCtxt, prefer: LinkagePreference) }) .collect::>(); let mut ordering = tcx.postorder_cnums(LOCAL_CRATE); - Rc::make_mut(&mut ordering).reverse(); - libs.sort_by_key(|&(a, _)| { + Lrc::make_mut(&mut ordering).reverse(); + libs.sort_by_cached_key(|&(a, _)| { ordering.iter().position(|x| *x == a) }); libs diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 1ff9c7a86291..d0e3ae2b9fc4 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -13,7 +13,7 @@ // from live codes are live, and everything else is dead. 
use hir::map as hir_map; -use hir::{self, Item_, PatKind}; +use hir::{self, PatKind}; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use hir::itemlikevisit::ItemLikeVisitor; @@ -96,25 +96,21 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } fn lookup_and_handle_method(&mut self, id: hir::HirId) { - self.check_def_id(self.tables.type_dependent_defs()[id].def_id()); - } - - fn handle_field_access(&mut self, lhs: &hir::Expr, name: ast::Name) { - match self.tables.expr_ty_adjusted(lhs).sty { - ty::TyAdt(def, _) => { - self.insert_def_id(def.non_enum_variant().field_named(name).did); - } - _ => span_bug!(lhs.span, "named field access on non-ADT"), + if let Some(def) = self.tables.type_dependent_defs().get(id) { + self.check_def_id(def.def_id()); + } else { + bug!("no type-dependent def for method"); } } - fn handle_tup_field_access(&mut self, lhs: &hir::Expr, idx: usize) { + fn handle_field_access(&mut self, lhs: &hir::Expr, node_id: ast::NodeId) { match self.tables.expr_ty_adjusted(lhs).sty { ty::TyAdt(def, _) => { - self.insert_def_id(def.non_enum_variant().fields[idx].did); + let index = self.tcx.field_index(node_id, self.tables); + self.insert_def_id(def.non_enum_variant().fields[index].did); } ty::TyTuple(..) => {} - _ => span_bug!(lhs.span, "numeric field access on non-ADT"), + _ => span_bug!(lhs.span, "named field access on non-ADT"), } } @@ -128,7 +124,8 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { if let PatKind::Wild = pat.node.pat.node { continue; } - self.insert_def_id(variant.field_named(pat.node.name).did); + let index = self.tcx.field_index(pat.node.id, self.tables); + self.insert_def_id(variant.fields[index].did); } } @@ -156,21 +153,21 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { match *node { hir_map::NodeItem(item) => { match item.node { - hir::ItemStruct(..) | hir::ItemUnion(..) => { + hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) 
=> { let def_id = self.tcx.hir.local_def_id(item.id); let def = self.tcx.adt_def(def_id); self.repr_has_repr_c = def.repr.c(); intravisit::walk_item(self, &item); } - hir::ItemEnum(..) => { - self.inherited_pub_visibility = item.vis == hir::Public; + hir::ItemKind::Enum(..) => { + self.inherited_pub_visibility = item.vis.node.is_pub(); intravisit::walk_item(self, &item); } - hir::ItemFn(..) - | hir::ItemTy(..) - | hir::ItemStatic(..) - | hir::ItemConst(..) => { + hir::ItemKind::Fn(..) + | hir::ItemKind::Ty(..) + | hir::ItemKind::Static(..) + | hir::ItemKind::Const(..) => { intravisit::walk_item(self, &item); } _ => () @@ -191,18 +188,11 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { self.inherited_pub_visibility = had_inherited_pub_visibility; } - fn mark_as_used_if_union(&mut self, did: DefId, fields: &hir::HirVec) { - if let Some(node_id) = self.tcx.hir.as_local_node_id(did) { - if let Some(hir_map::NodeItem(item)) = self.tcx.hir.find(node_id) { - if let Item_::ItemUnion(ref variant, _) = item.node { - if variant.fields().len() > 1 { - for field in variant.fields() { - if fields.iter().find(|x| x.name.node == field.name).is_some() { - self.live_symbols.insert(field.id); - } - } - } - } + fn mark_as_used_if_union(&mut self, adt: &ty::AdtDef, fields: &hir::HirVec) { + if adt.is_union() && adt.non_enum_variant().fields.len() > 1 && adt.did.is_local() { + for field in fields { + let index = self.tcx.field_index(field.id, self.tables); + self.insert_def_id(adt.non_enum_variant().fields[index].did); } } } @@ -226,7 +216,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { let has_repr_c = self.repr_has_repr_c; let inherited_pub_visibility = self.inherited_pub_visibility; let live_fields = def.fields().iter().filter(|f| { - has_repr_c || inherited_pub_visibility || f.vis == hir::Public + has_repr_c || inherited_pub_visibility || f.vis.node.is_pub() }); self.live_symbols.extend(live_fields.map(|f| f.id)); @@ -235,24 +225,19 @@ impl<'a, 'tcx> Visitor<'tcx> 
for MarkSymbolVisitor<'a, 'tcx> { fn visit_expr(&mut self, expr: &'tcx hir::Expr) { match expr.node { - hir::ExprPath(ref qpath @ hir::QPath::TypeRelative(..)) => { + hir::ExprKind::Path(ref qpath @ hir::QPath::TypeRelative(..)) => { let def = self.tables.qpath_def(qpath, expr.hir_id); self.handle_definition(def); } - hir::ExprMethodCall(..) => { + hir::ExprKind::MethodCall(..) => { self.lookup_and_handle_method(expr.hir_id); } - hir::ExprField(ref lhs, ref name) => { - self.handle_field_access(&lhs, name.node); + hir::ExprKind::Field(ref lhs, ..) => { + self.handle_field_access(&lhs, expr.id); } - hir::ExprTupField(ref lhs, idx) => { - self.handle_tup_field_access(&lhs, idx.node); - } - hir::ExprStruct(_, ref fields, _) => { - if let ty::TypeVariants::TyAdt(ref def, _) = self.tables.expr_ty(expr).sty { - if def.is_union() { - self.mark_as_used_if_union(def.did, fields); - } + hir::ExprKind::Struct(_, ref fields, _) => { + if let ty::TypeVariants::TyAdt(ref adt, _) = self.tables.expr_ty(expr).sty { + self.mark_as_used_if_union(adt, fields); } } _ => () @@ -294,7 +279,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { self.in_pat = false; } - fn visit_path(&mut self, path: &'tcx hir::Path, _: ast::NodeId) { + fn visit_path(&mut self, path: &'tcx hir::Path, _: hir::HirId) { self.handle_definition(path.def); intravisit::walk_path(self, path); } @@ -307,6 +292,16 @@ fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt, return true; } + // (To be) stable attribute for #[lang = "panic_impl"] + if attr::contains_name(attrs, "panic_implementation") { + return true; + } + + // (To be) stable attribute for #[lang = "oom"] + if attr::contains_name(attrs, "alloc_error_handler") { + return true; + } + // #[used] also keeps the item alive forcefully, // e.g. for placing it in a specific section. 
if attr::contains_name(attrs, "used") { @@ -349,11 +344,11 @@ impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> { self.worklist.push(item.id); } match item.node { - hir::ItemEnum(ref enum_def, _) if allow_dead_code => { + hir::ItemKind::Enum(ref enum_def, _) if allow_dead_code => { self.worklist.extend(enum_def.variants.iter() .map(|variant| variant.node.data.id())); } - hir::ItemTrait(.., ref trait_item_refs) => { + hir::ItemKind::Trait(.., ref trait_item_refs) => { for trait_item_ref in trait_item_refs { let trait_item = self.krate.trait_item(trait_item_ref.id); match trait_item.node { @@ -369,7 +364,7 @@ impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> { } } } - hir::ItemImpl(.., ref opt_trait, _, ref impl_item_refs) => { + hir::ItemKind::Impl(.., ref opt_trait, _, ref impl_item_refs) => { for impl_item_ref in impl_item_refs { let impl_item = self.krate.impl_item(impl_item_ref.id); if opt_trait.is_some() || @@ -396,16 +391,12 @@ impl<'v, 'k, 'tcx> ItemLikeVisitor<'v> for LifeSeeder<'k, 'tcx> { fn create_and_seed_worklist<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &privacy::AccessLevels, krate: &hir::Crate) - -> Vec { - let mut worklist = Vec::new(); - for (id, _) in &access_levels.map { - worklist.push(*id); - } - - // Seed entry point - if let Some((id, _)) = *tcx.sess.entry_fn.borrow() { - worklist.push(id); - } + -> Vec +{ + let worklist = access_levels.map.iter().map(|(&id, _)| id).chain( + // Seed entry point + tcx.sess.entry_fn.borrow().map(|(id, _, _)| id) + ).collect::>(); // Seed implemented trait items let mut life_seeder = LifeSeeder { @@ -439,7 +430,7 @@ fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn get_struct_ctor_id(item: &hir::Item) -> Option { match item.node { - hir::ItemStruct(ref struct_def, _) if !struct_def.is_struct() => { + hir::ItemKind::Struct(ref struct_def, _) if !struct_def.is_struct() => { Some(struct_def.id()) } _ => None @@ -454,13 +445,13 @@ struct DeadVisitor<'a, 'tcx: 'a> { 
impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { fn should_warn_about_item(&mut self, item: &hir::Item) -> bool { let should_warn = match item.node { - hir::ItemStatic(..) - | hir::ItemConst(..) - | hir::ItemFn(..) - | hir::ItemTy(..) - | hir::ItemEnum(..) - | hir::ItemStruct(..) - | hir::ItemUnion(..) => true, + hir::ItemKind::Static(..) + | hir::ItemKind::Const(..) + | hir::ItemKind::Fn(..) + | hir::ItemKind::Ty(..) + | hir::ItemKind::Enum(..) + | hir::ItemKind::Struct(..) + | hir::ItemKind::Union(..) => true, _ => false }; let ctor_id = get_struct_ctor_id(item); @@ -469,17 +460,13 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { fn should_warn_about_field(&mut self, field: &hir::StructField) -> bool { let field_type = self.tcx.type_of(self.tcx.hir.local_def_id(field.id)); - let is_marker_field = match field_type.ty_to_def_id() { - Some(def_id) => self.tcx.lang_items().items().iter().any(|item| *item == Some(def_id)), - _ => false - }; !field.is_positional() && !self.symbol_is_live(field.id, None) - && !is_marker_field + && !field_type.is_phantom_data() && !has_allow_dead_code_or_lang_attr(self.tcx, field.id, &field.attrs) } - fn should_warn_about_variant(&mut self, variant: &hir::Variant_) -> bool { + fn should_warn_about_variant(&mut self, variant: &hir::VariantKind) -> bool { !self.symbol_is_live(variant.data.id(), None) && !has_allow_dead_code_or_lang_attr(self.tcx, variant.data.id(), @@ -496,7 +483,7 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { // `None` otherwise. // If the item is a struct_ctor, then either its `id` or // `ctor_id` (unwrapped) is in the live_symbols set. More specifically, - // DefMap maps the ExprPath of a struct_ctor to the node referred by + // DefMap maps the ExprKind::Path of a struct_ctor to the node referred by // `ctor_id`. On the other hand, in a statement like // `type = ;` where refers to a struct_ctor, // DefMap maps to `id` instead. 
@@ -558,21 +545,25 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { // For items that have a definition with a signature followed by a // block, point only at the signature. let span = match item.node { - hir::ItemFn(..) | - hir::ItemMod(..) | - hir::ItemEnum(..) | - hir::ItemStruct(..) | - hir::ItemUnion(..) | - hir::ItemTrait(..) | - hir::ItemImpl(..) => self.tcx.sess.codemap().def_span(item.span), + hir::ItemKind::Fn(..) | + hir::ItemKind::Mod(..) | + hir::ItemKind::Enum(..) | + hir::ItemKind::Struct(..) | + hir::ItemKind::Union(..) | + hir::ItemKind::Trait(..) | + hir::ItemKind::Impl(..) => self.tcx.sess.codemap().def_span(item.span), _ => item.span, }; + let participle = match item.node { + hir::ItemKind::Struct(..) => "constructed", // Issue #52325 + _ => "used" + }; self.warn_dead_code( item.id, span, item.name, item.node.descriptive_variant(), - "used", + participle, ); } else { // Only continue if we didn't warn @@ -602,7 +593,7 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { fn visit_struct_field(&mut self, field: &'tcx hir::StructField) { if self.should_warn_about_field(&field) { - self.warn_dead_code(field.id, field.span, field.name, "field", "used"); + self.warn_dead_code(field.id, field.span, field.ident.name, "field", "used"); } intravisit::walk_struct_field(self, field); } @@ -613,7 +604,7 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { if !self.symbol_is_live(impl_item.id, None) { self.warn_dead_code(impl_item.id, impl_item.span, - impl_item.name, + impl_item.ident.name, "associated const", "used"); } @@ -622,10 +613,11 @@ impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { hir::ImplItemKind::Method(_, body_id) => { if !self.symbol_is_live(impl_item.id, None) { let span = self.tcx.sess.codemap().def_span(impl_item.span); - self.warn_dead_code(impl_item.id, span, impl_item.name, "method", "used"); + self.warn_dead_code(impl_item.id, span, impl_item.ident.name, "method", "used"); } 
self.visit_nested_body(body_id) } + hir::ImplItemKind::Existential(..) | hir::ImplItemKind::Type(..) => {} } } diff --git a/src/librustc/middle/dependency_format.rs b/src/librustc/middle/dependency_format.rs index db0ecb6aa5eb..a9c118d606b2 100644 --- a/src/librustc/middle/dependency_format.rs +++ b/src/librustc/middle/dependency_format.rs @@ -69,7 +69,7 @@ use ty::TyCtxt; use middle::cstore::{self, DepKind}; use middle::cstore::LinkagePreference::{self, RequireStatic, RequireDynamic}; use util::nodemap::FxHashMap; -use rustc_back::PanicStrategy; +use rustc_target::spec::PanicStrategy; /// A list of dependencies for a certain crate type. /// @@ -94,13 +94,14 @@ pub enum Linkage { pub fn calculate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let sess = &tcx.sess; - let mut fmts = sess.dependency_formats.borrow_mut(); + let mut fmts = FxHashMap(); for &ty in sess.crate_types.borrow().iter() { let linkage = calculate_type(tcx, ty); verify_ok(tcx, &linkage); fmts.insert(ty, linkage); } sess.abort_if_errors(); + sess.dependency_formats.set(fmts); } fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -108,36 +109,36 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let sess = &tcx.sess; - if !sess.opts.output_types.should_trans() { + if !sess.opts.output_types.should_codegen() { return Vec::new(); } let preferred_linkage = match ty { // cdylibs must have all static dependencies. - config::CrateTypeCdylib => Linkage::Static, + config::CrateType::Cdylib => Linkage::Static, // Generating a dylib without `-C prefer-dynamic` means that we're going // to try to eagerly statically link all dependencies. This is normally // done for end-product dylibs, not intermediate products. 
- config::CrateTypeDylib if !sess.opts.cg.prefer_dynamic => Linkage::Static, - config::CrateTypeDylib => Linkage::Dynamic, + config::CrateType::Dylib if !sess.opts.cg.prefer_dynamic => Linkage::Static, + config::CrateType::Dylib => Linkage::Dynamic, // If the global prefer_dynamic switch is turned off, or the final // executable will be statically linked, prefer static crate linkage. - config::CrateTypeExecutable if !sess.opts.cg.prefer_dynamic || + config::CrateType::Executable if !sess.opts.cg.prefer_dynamic || sess.crt_static() => Linkage::Static, - config::CrateTypeExecutable => Linkage::Dynamic, + config::CrateType::Executable => Linkage::Dynamic, // proc-macro crates are required to be dylibs, and they're currently // required to link to libsyntax as well. - config::CrateTypeProcMacro => Linkage::Dynamic, + config::CrateType::ProcMacro => Linkage::Dynamic, // No linkage happens with rlibs, we just needed the metadata (which we // got long ago), so don't bother with anything. - config::CrateTypeRlib => Linkage::NotLinked, + config::CrateType::Rlib => Linkage::NotLinked, // staticlibs must have all static dependencies. - config::CrateTypeStaticlib => Linkage::Static, + config::CrateType::Staticlib => Linkage::Static, }; if preferred_linkage == Linkage::NotLinked { @@ -154,8 +155,8 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Staticlibs, cdylibs, and static executables must have all static // dependencies. If any are not found, generate some nice pretty errors. 
- if ty == config::CrateTypeCdylib || ty == config::CrateTypeStaticlib || - (ty == config::CrateTypeExecutable && sess.crt_static() && + if ty == config::CrateType::Cdylib || ty == config::CrateType::Staticlib || + (ty == config::CrateType::Executable && sess.crt_static() && !sess.target.target.options.crt_static_allows_dylibs) { for &cnum in tcx.crates().iter() { if tcx.dep_kind(cnum).macros_only() { continue } @@ -222,7 +223,7 @@ fn calculate_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // // Things like allocators and panic runtimes may not have been activated // quite yet, so do so here. - activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret, + activate_injected_dep(*sess.injected_panic_runtime.get(), &mut ret, &|cnum| tcx.is_panic_runtime(cnum)); activate_injected_allocator(sess, &mut ret); @@ -301,7 +302,7 @@ fn attempt_static<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option, list: &mut DependencyList, - replaces_injected: &Fn(CrateNum) -> bool) { + replaces_injected: &dyn Fn(CrateNum) -> bool) { for (i, slot) in list.iter().enumerate() { let cnum = CrateNum::new(i + 1); if !replaces_injected(cnum) { diff --git a/src/librustc/middle/entry.rs b/src/librustc/middle/entry.rs index 31e054ec1cb9..b0acc6f20e69 100644 --- a/src/librustc/middle/entry.rs +++ b/src/librustc/middle/entry.rs @@ -12,11 +12,12 @@ use hir::map as hir_map; use hir::def_id::{CRATE_DEF_INDEX}; use session::{config, Session}; +use session::config::EntryFnType; use syntax::ast::NodeId; use syntax::attr; use syntax::entry::EntryPointType; use syntax_pos::Span; -use hir::{Item, ItemFn, ImplItem, TraitItem}; +use hir::{Item, ItemKind, ImplItem, TraitItem}; use hir::itemlikevisit::ItemLikeVisitor; struct EntryContext<'a, 'tcx: 'a> { @@ -55,18 +56,21 @@ impl<'a, 'tcx> ItemLikeVisitor<'tcx> for EntryContext<'a, 'tcx> { } } -pub fn find_entry_point(session: &Session, hir_map: &hir_map::Map) { +pub fn find_entry_point(session: &Session, + hir_map: 
&hir_map::Map, + crate_name: &str) { let any_exe = session.crate_types.borrow().iter().any(|ty| { - *ty == config::CrateTypeExecutable + *ty == config::CrateType::Executable }); if !any_exe { // No need to find a main function + session.entry_fn.set(None); return } // If the user wants no main function at all, then stop here. if attr::contains_name(&hir_map.krate().attrs, "no_main") { - session.entry_type.set(Some(config::EntryNone)); + session.entry_fn.set(None); return } @@ -81,14 +85,14 @@ pub fn find_entry_point(session: &Session, hir_map: &hir_map::Map) { hir_map.krate().visit_all_item_likes(&mut ctxt); - configure_main(&mut ctxt); + configure_main(&mut ctxt, crate_name); } // Beware, this is duplicated in libsyntax/entry.rs, make sure to keep // them in sync. fn entry_point_type(item: &Item, at_root: bool) -> EntryPointType { match item.node { - ItemFn(..) => { + ItemKind::Fn(..) => { if attr::contains_name(&item.attrs, "start") { EntryPointType::Start } else if attr::contains_name(&item.attrs, "main") { @@ -150,19 +154,18 @@ fn find_item(item: &Item, ctxt: &mut EntryContext, at_root: bool) { } } -fn configure_main(this: &mut EntryContext) { - if this.start_fn.is_some() { - *this.session.entry_fn.borrow_mut() = this.start_fn; - this.session.entry_type.set(Some(config::EntryStart)); - } else if this.attr_main_fn.is_some() { - *this.session.entry_fn.borrow_mut() = this.attr_main_fn; - this.session.entry_type.set(Some(config::EntryMain)); - } else if this.main_fn.is_some() { - *this.session.entry_fn.borrow_mut() = this.main_fn; - this.session.entry_type.set(Some(config::EntryMain)); +fn configure_main(this: &mut EntryContext, crate_name: &str) { + if let Some((node_id, span)) = this.start_fn { + this.session.entry_fn.set(Some((node_id, span, EntryFnType::Start))); + } else if let Some((node_id, span)) = this.attr_main_fn { + this.session.entry_fn.set(Some((node_id, span, EntryFnType::Main))); + } else if let Some((node_id, span)) = this.main_fn { + 
this.session.entry_fn.set(Some((node_id, span, EntryFnType::Main))); } else { // No main function - let mut err = struct_err!(this.session, E0601, "main function not found"); + this.session.entry_fn.set(None); + let mut err = struct_err!(this.session, E0601, + "`main` function not found in crate `{}`", crate_name); if !this.non_main_fns.is_empty() { // There were some functions named 'main' though. Try to give the user a hint. err.note("the main function must be defined at the crate level \ @@ -175,6 +178,13 @@ fn configure_main(this: &mut EntryContext) { err.emit(); this.session.abort_if_errors(); } else { + if let Some(ref filename) = this.session.local_crate_source_file { + err.note(&format!("consider adding a `main` function to `{}`", filename.display())); + } + if this.session.teach(&err.get_code().unwrap()) { + err.note("If you don't know the basics of Rust, you can go look to the Rust Book \ + to get started: https://doc.rust-lang.org/book/"); + } err.emit(); } } diff --git a/src/librustc/middle/exported_symbols.rs b/src/librustc/middle/exported_symbols.rs index d650dbe88b5c..01783eb0ff65 100644 --- a/src/librustc/middle/exported_symbols.rs +++ b/src/librustc/middle/exported_symbols.rs @@ -8,12 +8,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use hir::def_id::{DefId, LOCAL_CRATE}; +use ich::StableHashingContext; +use rustc_data_structures::stable_hasher::{StableHasher, HashStable, + StableHasherResult}; +use std::cmp; +use std::mem; +use ty; +use ty::subst::Substs; + /// The SymbolExportLevel of a symbols specifies from which kinds of crates /// the symbol will be exported. `C` symbols will be exported from any /// kind of crate, including cdylibs which export very few things. /// `Rust` will only be exported if the crate produced is a Rust /// dylib. 
-#[derive(Eq, PartialEq, Debug, Copy, Clone)] +#[derive(Eq, PartialEq, Debug, Copy, Clone, RustcEncodable, RustcDecodable)] pub enum SymbolExportLevel { C, Rust, @@ -34,3 +43,91 @@ impl SymbolExportLevel { } } } + +#[derive(Eq, PartialEq, Debug, Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum ExportedSymbol<'tcx> { + NonGeneric(DefId), + Generic(DefId, &'tcx Substs<'tcx>), + NoDefId(ty::SymbolName), +} + +impl<'tcx> ExportedSymbol<'tcx> { + pub fn symbol_name(&self, + tcx: ty::TyCtxt<'_, 'tcx, '_>) + -> ty::SymbolName { + match *self { + ExportedSymbol::NonGeneric(def_id) => { + tcx.symbol_name(ty::Instance::mono(tcx, def_id)) + } + ExportedSymbol::Generic(def_id, substs) => { + tcx.symbol_name(ty::Instance::new(def_id, substs)) + } + ExportedSymbol::NoDefId(symbol_name) => { + symbol_name + } + } + } + + pub fn compare_stable(&self, + tcx: ty::TyCtxt<'_, 'tcx, '_>, + other: &ExportedSymbol<'tcx>) + -> cmp::Ordering { + match *self { + ExportedSymbol::NonGeneric(self_def_id) => match *other { + ExportedSymbol::NonGeneric(other_def_id) => { + tcx.def_path_hash(self_def_id).cmp(&tcx.def_path_hash(other_def_id)) + } + ExportedSymbol::Generic(..) | + ExportedSymbol::NoDefId(_) => { + cmp::Ordering::Less + } + } + ExportedSymbol::Generic(..) => match *other { + ExportedSymbol::NonGeneric(_) => { + cmp::Ordering::Greater + } + ExportedSymbol::Generic(..) => { + self.symbol_name(tcx).cmp(&other.symbol_name(tcx)) + } + ExportedSymbol::NoDefId(_) => { + cmp::Ordering::Less + } + } + ExportedSymbol::NoDefId(self_symbol_name) => match *other { + ExportedSymbol::NonGeneric(_) | + ExportedSymbol::Generic(..) 
=> { + cmp::Ordering::Greater + } + ExportedSymbol::NoDefId(ref other_symbol_name) => { + self_symbol_name.cmp(other_symbol_name) + } + } + } + } +} + +pub fn metadata_symbol_name(tcx: ty::TyCtxt) -> String { + format!("rust_metadata_{}_{}", + tcx.original_crate_name(LOCAL_CRATE), + tcx.crate_disambiguator(LOCAL_CRATE).to_fingerprint().to_hex()) +} + +impl<'a, 'gcx> HashStable> for ExportedSymbol<'gcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + ExportedSymbol::NonGeneric(def_id) => { + def_id.hash_stable(hcx, hasher); + } + ExportedSymbol::Generic(def_id, substs) => { + def_id.hash_stable(hcx, hasher); + substs.hash_stable(hcx, hasher); + } + ExportedSymbol::NoDefId(symbol_name) => { + symbol_name.hash_stable(hcx, hasher); + } + } + } +} diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index c69005101c67..5beafe2b601b 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -27,6 +27,7 @@ use middle::region; use ty::{self, TyCtxt, adjustment}; use hir::{self, PatKind}; +use rustc_data_structures::sync::Lrc; use std::rc::Rc; use syntax::ast; use syntax::ptr::P; @@ -44,7 +45,7 @@ pub trait Delegate<'tcx> { fn consume(&mut self, consume_id: ast::NodeId, consume_span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: ConsumeMode); // The value found at `cmt` has been determined to match the @@ -61,14 +62,14 @@ pub trait Delegate<'tcx> { // called on a subpart of an input passed to `matched_pat). fn matched_pat(&mut self, matched_pat: &hir::Pat, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: MatchMode); // The value found at `cmt` is either copied or moved via the // pattern binding `consume_pat`, depending on mode. 
fn consume_pat(&mut self, consume_pat: &hir::Pat, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: ConsumeMode); // The value found at `borrow` is being borrowed at the point @@ -76,7 +77,7 @@ pub trait Delegate<'tcx> { fn borrow(&mut self, borrow_id: ast::NodeId, borrow_span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, loan_region: ty::Region<'tcx>, bk: ty::BorrowKind, loan_cause: LoanCause); @@ -90,7 +91,7 @@ pub trait Delegate<'tcx> { fn mutate(&mut self, assignment_id: ast::NodeId, assignment_span: Span, - assignee_cmt: mc::cmt<'tcx>, + assignee_cmt: &mc::cmt_<'tcx>, mode: MutateMode); } @@ -239,7 +240,7 @@ impl OverloadedCallType { // This is the code that actually walks the tree. pub struct ExprUseVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { mc: mc::MemCategorizationContext<'a, 'gcx, 'tcx>, - delegate: &'a mut Delegate<'tcx>, + delegate: &'a mut dyn Delegate<'tcx>, param_env: ty::ParamEnv<'tcx>, } @@ -274,12 +275,12 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx, 'tcx> { /// `None` means that rvalues will be given more conservative lifetimes. /// /// See also `with_infer`, which is used *during* typeck. 
- pub fn new(delegate: &'a mut (Delegate<'tcx>+'a), + pub fn new(delegate: &'a mut (dyn Delegate<'tcx>+'a), tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, region_scope_tree: &'a region::ScopeTree, tables: &'a ty::TypeckTables<'tcx>, - rvalue_promotable_map: Option>) + rvalue_promotable_map: Option>) -> Self { ExprUseVisitor { @@ -294,7 +295,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx, 'tcx> { } impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { - pub fn with_infer(delegate: &'a mut (Delegate<'tcx>+'a), + pub fn with_infer(delegate: &'a mut (dyn Delegate<'tcx>+'a), infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, region_scope_tree: &'a region::ScopeTree, @@ -312,15 +313,16 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { debug!("consume_body(body={:?})", body); for arg in &body.arguments { - let arg_ty = return_if_err!(self.mc.node_ty(arg.pat.hir_id)); + let arg_ty = return_if_err!(self.mc.pat_ty_adjusted(&arg.pat)); + debug!("consume_body: arg_ty = {:?}", arg_ty); let fn_body_scope_r = self.tcx().mk_region(ty::ReScope(region::Scope::Node(body.value.hir_id.local_id))); - let arg_cmt = self.mc.cat_rvalue( - arg.id, + let arg_cmt = Rc::new(self.mc.cat_rvalue( + arg.hir_id, arg.pat.span, fn_body_scope_r, // Args live only as long as the fn body. 
- arg_ty); + arg_ty)); self.walk_irrefutable_pat(arg_cmt, &arg.pat); } @@ -335,11 +337,11 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { fn delegate_consume(&mut self, consume_id: ast::NodeId, consume_span: Span, - cmt: mc::cmt<'tcx>) { + cmt: &mc::cmt_<'tcx>) { debug!("delegate_consume(consume_id={}, cmt={:?})", consume_id, cmt); - let mode = copy_or_move(&self.mc, self.param_env, &cmt, DirectRefMove); + let mode = copy_or_move(&self.mc, self.param_env, cmt, DirectRefMove); self.delegate.consume(consume_id, consume_span, cmt, mode); } @@ -353,7 +355,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { debug!("consume_expr(expr={:?})", expr); let cmt = return_if_err!(self.mc.cat_expr(expr)); - self.delegate_consume(expr.id, expr.span, cmt); + self.delegate_consume(expr.id, expr.span, &cmt); self.walk_expr(expr); } @@ -362,7 +364,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { expr: &hir::Expr, mode: MutateMode) { let cmt = return_if_err!(self.mc.cat_expr(expr)); - self.delegate.mutate(assignment_expr.id, assignment_expr.span, cmt, mode); + self.delegate.mutate(assignment_expr.id, assignment_expr.span, &cmt, mode); self.walk_expr(expr); } @@ -375,7 +377,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { expr, r, bk); let cmt = return_if_err!(self.mc.cat_expr(expr)); - self.delegate.borrow(expr.id, expr.span, cmt, r, bk, cause); + self.delegate.borrow(expr.id, expr.span, &cmt, r, bk, cause); self.walk_expr(expr) } @@ -390,47 +392,43 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.walk_adjustment(expr); match expr.node { - hir::ExprPath(_) => { } + hir::ExprKind::Path(_) => { } - hir::ExprType(ref subexpr, _) => { + hir::ExprKind::Type(ref subexpr, _) => { self.walk_expr(&subexpr) } - hir::ExprUnary(hir::UnDeref, ref base) => { // *base + hir::ExprKind::Unary(hir::UnDeref, ref base) => { // *base self.select_from_expr(&base); } - hir::ExprField(ref base, _) => { // base.f + hir::ExprKind::Field(ref base, _) => { // 
base.f self.select_from_expr(&base); } - hir::ExprTupField(ref base, _) => { // base. - self.select_from_expr(&base); - } - - hir::ExprIndex(ref lhs, ref rhs) => { // lhs[rhs] + hir::ExprKind::Index(ref lhs, ref rhs) => { // lhs[rhs] self.select_from_expr(&lhs); self.consume_expr(&rhs); } - hir::ExprCall(ref callee, ref args) => { // callee(args) + hir::ExprKind::Call(ref callee, ref args) => { // callee(args) self.walk_callee(expr, &callee); self.consume_exprs(args); } - hir::ExprMethodCall(.., ref args) => { // callee.m(args) + hir::ExprKind::MethodCall(.., ref args) => { // callee.m(args) self.consume_exprs(args); } - hir::ExprStruct(_, ref fields, ref opt_with) => { + hir::ExprKind::Struct(_, ref fields, ref opt_with) => { self.walk_struct_expr(fields, opt_with); } - hir::ExprTup(ref exprs) => { + hir::ExprKind::Tup(ref exprs) => { self.consume_exprs(exprs); } - hir::ExprIf(ref cond_expr, ref then_expr, ref opt_else_expr) => { + hir::ExprKind::If(ref cond_expr, ref then_expr, ref opt_else_expr) => { self.consume_expr(&cond_expr); self.walk_expr(&then_expr); if let Some(ref else_expr) = *opt_else_expr { @@ -438,8 +436,8 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { } } - hir::ExprMatch(ref discr, ref arms, _) => { - let discr_cmt = return_if_err!(self.mc.cat_expr(&discr)); + hir::ExprKind::Match(ref discr, ref arms, _) => { + let discr_cmt = Rc::new(return_if_err!(self.mc.cat_expr(&discr))); let r = self.tcx().types.re_empty; self.borrow_expr(&discr, r, ty::ImmBorrow, MatchDiscriminant); @@ -451,21 +449,21 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { } } - hir::ExprArray(ref exprs) => { + hir::ExprKind::Array(ref exprs) => { self.consume_exprs(exprs); } - hir::ExprAddrOf(m, ref base) => { // &base + hir::ExprKind::AddrOf(m, ref base) => { // &base // make sure that the thing we are pointing out stays valid // for the lifetime `scope_r` of the resulting ptr: let expr_ty = return_if_err!(self.mc.expr_ty(expr)); - if let ty::TyRef(r, _) = 
expr_ty.sty { + if let ty::TyRef(r, _, _) = expr_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); self.borrow_expr(&base, r, bk, AddrOf); } } - hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + hir::ExprKind::InlineAsm(ref ia, ref outputs, ref inputs) => { for (o, output) in ia.outputs.iter().zip(outputs) { if o.is_indirect { self.consume_expr(output); @@ -481,47 +479,47 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.consume_exprs(inputs); } - hir::ExprAgain(..) | - hir::ExprLit(..) => {} + hir::ExprKind::Continue(..) | + hir::ExprKind::Lit(..) => {} - hir::ExprLoop(ref blk, _, _) => { + hir::ExprKind::Loop(ref blk, _, _) => { self.walk_block(&blk); } - hir::ExprWhile(ref cond_expr, ref blk, _) => { + hir::ExprKind::While(ref cond_expr, ref blk, _) => { self.consume_expr(&cond_expr); self.walk_block(&blk); } - hir::ExprUnary(_, ref lhs) => { + hir::ExprKind::Unary(_, ref lhs) => { self.consume_expr(&lhs); } - hir::ExprBinary(_, ref lhs, ref rhs) => { + hir::ExprKind::Binary(_, ref lhs, ref rhs) => { self.consume_expr(&lhs); self.consume_expr(&rhs); } - hir::ExprBlock(ref blk) => { + hir::ExprKind::Block(ref blk, _) => { self.walk_block(&blk); } - hir::ExprBreak(_, ref opt_expr) | hir::ExprRet(ref opt_expr) => { + hir::ExprKind::Break(_, ref opt_expr) | hir::ExprKind::Ret(ref opt_expr) => { if let Some(ref expr) = *opt_expr { self.consume_expr(&expr); } } - hir::ExprAssign(ref lhs, ref rhs) => { + hir::ExprKind::Assign(ref lhs, ref rhs) => { self.mutate_expr(expr, &lhs, MutateMode::JustWrite); self.consume_expr(&rhs); } - hir::ExprCast(ref base, _) => { + hir::ExprKind::Cast(ref base, _) => { self.consume_expr(&base); } - hir::ExprAssignOp(_, ref lhs, ref rhs) => { + hir::ExprKind::AssignOp(_, ref lhs, ref rhs) => { if self.mc.tables.is_method_call(expr) { self.consume_expr(lhs); } else { @@ -530,19 +528,19 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.consume_expr(&rhs); } - hir::ExprRepeat(ref base, _) => { + 
hir::ExprKind::Repeat(ref base, _) => { self.consume_expr(&base); } - hir::ExprClosure(.., fn_decl_span, _) => { + hir::ExprKind::Closure(.., fn_decl_span, _) => { self.walk_captures(expr, fn_decl_span) } - hir::ExprBox(ref base) => { + hir::ExprKind::Box(ref base) => { self.consume_expr(&base); } - hir::ExprYield(ref value) => { + hir::ExprKind::Yield(ref value) => { self.consume_expr(&value); } } @@ -588,21 +586,21 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { fn walk_stmt(&mut self, stmt: &hir::Stmt) { match stmt.node { - hir::StmtDecl(ref decl, _) => { + hir::StmtKind::Decl(ref decl, _) => { match decl.node { - hir::DeclLocal(ref local) => { + hir::DeclKind::Local(ref local) => { self.walk_local(&local); } - hir::DeclItem(_) => { + hir::DeclKind::Item(_) => { // we don't visit nested items in this visitor, // only the fn body we were given. } } } - hir::StmtExpr(ref expr, _) | - hir::StmtSemi(ref expr, _) => { + hir::StmtKind::Expr(ref expr, _) | + hir::StmtKind::Semi(ref expr, _) => { self.consume_expr(&expr); } } @@ -611,9 +609,9 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { fn walk_local(&mut self, local: &hir::Local) { match local.init { None => { - let delegate = &mut self.delegate; - local.pat.each_binding(|_, id, span, _| { - delegate.decl_without_init(id, span); + local.pat.each_binding(|_, hir_id, span, _| { + let node_id = self.mc.tcx.hir.hir_to_node_id(hir_id); + self.delegate.decl_without_init(node_id, span); }) } @@ -623,7 +621,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // "assigns", which is handled by // `walk_pat`: self.walk_expr(&expr); - let init_cmt = return_if_err!(self.mc.cat_expr(&expr)); + let init_cmt = Rc::new(return_if_err!(self.mc.cat_expr(&expr))); self.walk_irrefutable_pat(init_cmt, &local.pat); } } @@ -656,22 +654,26 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { None => { return; } }; - let with_cmt = return_if_err!(self.mc.cat_expr(&with_expr)); + let with_cmt = 
Rc::new(return_if_err!(self.mc.cat_expr(&with_expr))); // Select just those fields of the `with` // expression that will actually be used match with_cmt.ty.sty { ty::TyAdt(adt, substs) if adt.is_struct() => { // Consume those fields of the with expression that are needed. - for with_field in &adt.non_enum_variant().fields { - if !contains_field_named(with_field, fields) { + for (f_index, with_field) in adt.non_enum_variant().fields.iter().enumerate() { + let is_mentioned = fields.iter().any(|f| { + self.tcx().field_index(f.id, self.mc.tables) == f_index + }); + if !is_mentioned { let cmt_field = self.mc.cat_field( &*with_expr, with_cmt.clone(), - with_field.name, + f_index, + with_field.ident, with_field.ty(self.tcx(), substs) ); - self.delegate_consume(with_expr.id, with_expr.span, cmt_field); + self.delegate_consume(with_expr.id, with_expr.span, &cmt_field); } } } @@ -691,14 +693,6 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // walk the with expression so that complex expressions // are properly handled. self.walk_expr(with_expr); - - fn contains_field_named(field: &ty::FieldDef, - fields: &[hir::Field]) - -> bool - { - fields.iter().any( - |f| f.name.node == field.name) - } } // Invoke the appropriate delegate calls for anything that gets @@ -718,7 +712,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { adjustment::Adjust::Unsize => { // Creating a closure/fn-pointer or unsizing consumes // the input and stores it into the resulting rvalue. - self.delegate_consume(expr.id, expr.span, cmt.clone()); + self.delegate_consume(expr.id, expr.span, &cmt); } adjustment::Adjust::Deref(None) => {} @@ -730,12 +724,11 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { // this is an autoref of `x`. 
adjustment::Adjust::Deref(Some(ref deref)) => { let bk = ty::BorrowKind::from_mutbl(deref.mutbl); - self.delegate.borrow(expr.id, expr.span, cmt.clone(), - deref.region, bk, AutoRef); + self.delegate.borrow(expr.id, expr.span, &cmt, deref.region, bk, AutoRef); } adjustment::Adjust::Borrow(ref autoref) => { - self.walk_autoref(expr, cmt.clone(), autoref); + self.walk_autoref(expr, &cmt, autoref); } } cmt = return_if_err!(self.mc.cat_expr_adjusted(expr, cmt, &adjustment)); @@ -747,7 +740,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { /// after all relevant autoderefs have occurred. fn walk_autoref(&mut self, expr: &hir::Expr, - cmt_base: mc::cmt<'tcx>, + cmt_base: &mc::cmt_<'tcx>, autoref: &adjustment::AutoBorrow<'tcx>) { debug!("walk_autoref(expr.id={} cmt_base={:?} autoref={:?})", expr.id, @@ -760,7 +753,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { expr.span, cmt_base, r, - ty::BorrowKind::from_mutbl(m), + ty::BorrowKind::from_mutbl(m.into()), AutoRef); } @@ -845,38 +838,48 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { /// established up front, e.g. via `determine_pat_move_mode` (see /// also `walk_irrefutable_pat` for patterns that stand alone). fn walk_pat(&mut self, cmt_discr: mc::cmt<'tcx>, pat: &hir::Pat, match_mode: MatchMode) { - debug!("walk_pat cmt_discr={:?} pat={:?}", cmt_discr, pat); + debug!("walk_pat(cmt_discr={:?}, pat={:?})", cmt_discr, pat); + let tcx = self.tcx(); let ExprUseVisitor { ref mc, ref mut delegate, param_env } = *self; return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |cmt_pat, pat| { if let PatKind::Binding(_, canonical_id, ..) 
= pat.node { - debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode); - let bm = *mc.tables.pat_binding_modes().get(pat.hir_id) - .expect("missing binding mode"); + debug!( + "walk_pat: binding cmt_pat={:?} pat={:?} match_mode={:?}", + cmt_pat, + pat, + match_mode, + ); + if let Some(&bm) = mc.tables.pat_binding_modes().get(pat.hir_id) { + debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); - // pat_ty: the type of the binding being produced. - let pat_ty = return_if_err!(mc.node_ty(pat.hir_id)); + // pat_ty: the type of the binding being produced. + let pat_ty = return_if_err!(mc.node_ty(pat.hir_id)); + debug!("walk_pat: pat_ty={:?}", pat_ty); - // Each match binding is effectively an assignment to the - // binding being produced. - let def = Def::Local(canonical_id); - if let Ok(binding_cmt) = mc.cat_def(pat.id, pat.span, pat_ty, def) { - delegate.mutate(pat.id, pat.span, binding_cmt, MutateMode::Init); - } + // Each match binding is effectively an assignment to the + // binding being produced. + let def = Def::Local(canonical_id); + if let Ok(ref binding_cmt) = mc.cat_def(pat.hir_id, pat.span, pat_ty, def) { + delegate.mutate(pat.id, pat.span, binding_cmt, MutateMode::Init); + } - // It is also a borrow or copy/move of the value being matched. - match bm { - ty::BindByReference(m) => { - if let ty::TyRef(r, _) = pat_ty.sty { - let bk = ty::BorrowKind::from_mutbl(m); - delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding); + // It is also a borrow or copy/move of the value being matched. + match bm { + ty::BindByReference(m) => { + if let ty::TyRef(r, _, _) = pat_ty.sty { + let bk = ty::BorrowKind::from_mutbl(m); + delegate.borrow(pat.id, pat.span, &cmt_pat, r, bk, RefBinding); + } + } + ty::BindByValue(..) => { + let mode = copy_or_move(mc, param_env, &cmt_pat, PatBindingMove); + debug!("walk_pat binding consuming pat"); + delegate.consume_pat(pat, &cmt_pat, mode); } } - ty::BindByValue(..) 
=> { - let mode = copy_or_move(mc, param_env, &cmt_pat, PatBindingMove); - debug!("walk_pat binding consuming pat"); - delegate.consume_pat(pat, cmt_pat, mode); - } + } else { + tcx.sess.delay_span_bug(pat.span, "missing binding mode"); } } })); @@ -899,12 +902,12 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { let downcast_cmt = mc.cat_downcast_if_needed(pat, cmt_pat, variant_did); debug!("variant downcast_cmt={:?} pat={:?}", downcast_cmt, pat); - delegate.matched_pat(pat, downcast_cmt, match_mode); + delegate.matched_pat(pat, &downcast_cmt, match_mode); } Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) => { debug!("struct cmt_pat={:?} pat={:?}", cmt_pat, pat); - delegate.matched_pat(pat, cmt_pat, match_mode); + delegate.matched_pat(pat, &cmt_pat, match_mode); } _ => {} } @@ -923,7 +926,7 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { closure_expr_id: closure_def_id.to_local(), }; let upvar_capture = self.mc.tables.upvar_capture(upvar_id); - let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id, + let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.hir_id, fn_decl_span, freevar)); match upvar_capture { @@ -932,12 +935,12 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { self.param_env, &cmt_var, CaptureMove); - self.delegate.consume(closure_expr.id, freevar.span, cmt_var, mode); + self.delegate.consume(closure_expr.id, freevar.span, &cmt_var, mode); } ty::UpvarCapture::ByRef(upvar_borrow) => { self.delegate.borrow(closure_expr.id, fn_decl_span, - cmt_var, + &cmt_var, upvar_borrow.region, upvar_borrow.kind, ClosureCapture(freevar.span)); @@ -948,21 +951,21 @@ impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { } fn cat_captured_var(&mut self, - closure_id: ast::NodeId, + closure_hir_id: hir::HirId, closure_span: Span, upvar: &hir::Freevar) - -> mc::McResult> { + -> mc::McResult> { // Create the cmt for the variable being borrowed, from the // 
caller's perspective let var_hir_id = self.tcx().hir.node_to_hir_id(upvar.var_id()); let var_ty = self.mc.node_ty(var_hir_id)?; - self.mc.cat_def(closure_id, closure_span, var_ty, upvar.def) + self.mc.cat_def(closure_hir_id, closure_span, var_ty, upvar.def) } } fn copy_or_move<'a, 'gcx, 'tcx>(mc: &mc::MemCategorizationContext<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - cmt: &mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, move_reason: MoveReason) -> ConsumeMode { diff --git a/src/librustc/middle/intrinsicck.rs b/src/librustc/middle/intrinsicck.rs index 0a4e5094cde7..8e4fb547d416 100644 --- a/src/librustc/middle/intrinsicck.rs +++ b/src/librustc/middle/intrinsicck.rs @@ -13,7 +13,7 @@ use hir::def_id::DefId; use ty::{self, Ty, TyCtxt}; use ty::layout::{LayoutError, Pointer, SizeSkeleton}; -use syntax::abi::Abi::RustIntrinsic; +use rustc_target::spec::abi::Abi::RustIntrinsic; use syntax_pos::Span; use hir::intravisit::{self, Visitor, NestedVisitorMap}; use hir; @@ -107,7 +107,7 @@ impl<'a, 'tcx> ExprVisitor<'a, 'tcx> { } Err(LayoutError::Unknown(bad)) => { if bad == ty { - format!("this type's size can vary") + "this type's size can vary".to_string() } else { format!("size can vary because of {}", bad) } @@ -145,7 +145,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ExprVisitor<'a, 'tcx> { } fn visit_expr(&mut self, expr: &'tcx hir::Expr) { - let def = if let hir::ExprPath(ref qpath) = expr.node { + let def = if let hir::ExprKind::Path(ref qpath) = expr.node { self.tables.qpath_def(qpath, expr.hir_id) } else { Def::Err diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index dca676130b9a..cf94a0fb4b4b 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -28,6 +28,7 @@ use util::nodemap::FxHashMap; use syntax::ast; use syntax::symbol::Symbol; +use syntax_pos::Span; use hir::itemlikevisit::ItemLikeVisitor; use hir; @@ -104,17 +105,18 @@ struct LanguageItemCollector<'a, 'tcx: 'a> { impl<'a, 'v, 'tcx> 
ItemLikeVisitor<'v> for LanguageItemCollector<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { - if let Some(value) = extract(&item.attrs) { + if let Some((value, span)) = extract(&item.attrs) { let item_index = self.item_refs.get(&*value.as_str()).cloned(); if let Some(item_index) = item_index { let def_id = self.tcx.hir.local_def_id(item.id); self.collect_item(item_index, def_id); } else { - let span = self.tcx.hir.span(item.id); - span_err!(self.tcx.sess, span, E0522, - "definition of an unknown language item: `{}`.", - value); + let mut err = struct_span_err!(self.tcx.sess, span, E0522, + "definition of an unknown language item: `{}`", + value); + err.span_label(span, format!("definition of unknown language item `{}`", value)); + err.emit(); } } } @@ -177,12 +179,16 @@ impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> { } } -pub fn extract(attrs: &[ast::Attribute]) -> Option { +pub fn extract(attrs: &[ast::Attribute]) -> Option<(Symbol, Span)> { for attribute in attrs { if attribute.check_name("lang") { if let Some(value) = attribute.value_str() { - return Some(value) + return Some((value, attribute.span)); } + } else if attribute.check_name("panic_implementation") { + return Some((Symbol::intern("panic_impl"), attribute.span)) + } else if attribute.check_name("alloc_error_handler") { + return Some((Symbol::intern("oom"), attribute.span)) } } @@ -212,6 +218,9 @@ language_item_table! { StrImplItem, "str", str_impl; SliceImplItem, "slice", slice_impl; SliceU8ImplItem, "slice_u8", slice_u8_impl; + StrAllocImplItem, "str_alloc", str_alloc_impl; + SliceAllocImplItem, "slice_alloc", slice_alloc_impl; + SliceU8AllocImplItem, "slice_u8_alloc", slice_u8_alloc_impl; ConstPtrImplItem, "const_ptr", const_ptr_impl; MutPtrImplItem, "mut_ptr", mut_ptr_impl; I8ImplItem, "i8", i8_impl; @@ -228,6 +237,8 @@ language_item_table! 
{ UsizeImplItem, "usize", usize_impl; F32ImplItem, "f32", f32_impl; F64ImplItem, "f64", f64_impl; + F32RuntimeImplItem, "f32_runtime", f32_runtime_impl; + F64RuntimeImplItem, "f64_runtime", f64_runtime_impl; SizedTraitLangItem, "sized", sized_trait; UnsizeTraitLangItem, "unsize", unsize_trait; @@ -278,6 +289,7 @@ language_item_table! { GeneratorTraitLangItem, "generator", gen_trait; EqTraitLangItem, "eq", eq_trait; + PartialOrdTraitLangItem, "partial_ord", partial_ord_trait; OrdTraitLangItem, "ord", ord_trait; // A number of panic-related lang items. The `panic` item corresponds to @@ -291,11 +303,14 @@ language_item_table! { // lang item, but do not have it defined. PanicFnLangItem, "panic", panic_fn; PanicBoundsCheckFnLangItem, "panic_bounds_check", panic_bounds_check_fn; - PanicFmtLangItem, "panic_fmt", panic_fmt; + PanicInfoLangItem, "panic_info", panic_info; + PanicImplLangItem, "panic_impl", panic_impl; ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn; BoxFreeFnLangItem, "box_free", box_free_fn; - DropInPlaceFnLangItem, "drop_in_place", drop_in_place_fn; + DropInPlaceFnLangItem, "drop_in_place", drop_in_place_fn; + OomLangItem, "oom", oom; + AllocLayoutLangItem, "alloc_layout", alloc_layout; StartFnLangItem, "start", start_fn; @@ -309,6 +324,8 @@ language_item_table! { NonZeroItem, "non_zero", non_zero; + ManuallyDropItem, "manually_drop", manually_drop; + DebugTraitLangItem, "debug_trait", debug_trait; // A lang item for each of the 128-bit operators we can optionally lower. @@ -339,6 +356,9 @@ language_item_table! { I128ShroFnLangItem, "i128_shro", i128_shro_fn; U128ShroFnLangItem, "u128_shro", u128_shro_fn; + // Align offset for stride != 1, must not panic. 
+ AlignOffsetLangItem, "align_offset", align_offset_fn; + TerminationTraitLangItem, "termination", termination; } diff --git a/src/librustc/middle/lib_features.rs b/src/librustc/middle/lib_features.rs new file mode 100644 index 000000000000..c21ac6218031 --- /dev/null +++ b/src/librustc/middle/lib_features.rs @@ -0,0 +1,162 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Detecting lib features (i.e. features that are not lang features). +// +// These are declared using stability attributes (e.g. `#[stable (..)]` +// and `#[unstable (..)]`), but are not declared in one single location +// (unlike lang features), which means we need to collect them instead. + +use ty::TyCtxt; +use syntax::symbol::Symbol; +use syntax::ast::{Attribute, MetaItem, MetaItemKind}; +use syntax_pos::{Span, DUMMY_SP}; +use hir::intravisit::{self, NestedVisitorMap, Visitor}; +use rustc_data_structures::fx::{FxHashSet, FxHashMap}; +use errors::DiagnosticId; + +pub struct LibFeatures { + // A map from feature to stabilisation version. 
+ pub stable: FxHashMap, + pub unstable: FxHashSet, +} + +impl LibFeatures { + fn new() -> LibFeatures { + LibFeatures { + stable: FxHashMap(), + unstable: FxHashSet(), + } + } + + pub fn to_vec(&self) -> Vec<(Symbol, Option)> { + let mut all_features: Vec<_> = self.stable.iter().map(|(f, s)| (*f, Some(*s))) + .chain(self.unstable.iter().map(|f| (*f, None))) + .collect(); + all_features.sort_unstable_by_key(|f| f.0.as_str()); + all_features + } +} + +pub struct LibFeatureCollector<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + lib_features: LibFeatures, +} + +impl<'a, 'tcx> LibFeatureCollector<'a, 'tcx> { + fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> LibFeatureCollector<'a, 'tcx> { + LibFeatureCollector { + tcx, + lib_features: LibFeatures::new(), + } + } + + fn extract(&self, attr: &Attribute) -> Option<(Symbol, Option, Span)> { + let stab_attrs = vec!["stable", "unstable", "rustc_const_unstable"]; + + // Find a stability attribute (i.e. `#[stable (..)]`, `#[unstable (..)]`, + // `#[rustc_const_unstable (..)]`). + if let Some(stab_attr) = stab_attrs.iter().find(|stab_attr| { + attr.check_name(stab_attr) + }) { + let meta_item = attr.meta(); + if let Some(MetaItem { node: MetaItemKind::List(ref metas), .. }) = meta_item { + let mut feature = None; + let mut since = None; + for meta in metas { + if let Some(mi) = meta.meta_item() { + // Find the `feature = ".."` meta-item. + match (&*mi.name().as_str(), mi.value_str()) { + ("feature", val) => feature = val, + ("since", val) => since = val, + _ => {} + } + } + } + if let Some(feature) = feature { + // This additional check for stability is to make sure we + // don't emit additional, irrelevant errors for malformed + // attributes. + if *stab_attr != "stable" || since.is_some() { + return Some((feature, since, attr.span)); + } + } + // We need to iterate over the other attributes, because + // `rustc_const_unstable` is not mutually exclusive with + // the other stability attributes, so we can't just `break` + // here. 
+ } + } + + None + } + + fn collect_feature(&mut self, feature: Symbol, since: Option, span: Span) { + let already_in_stable = self.lib_features.stable.contains_key(&feature); + let already_in_unstable = self.lib_features.unstable.contains(&feature); + + match (since, already_in_stable, already_in_unstable) { + (Some(since), _, false) => { + if let Some(prev_since) = self.lib_features.stable.get(&feature) { + if *prev_since != since { + let msg = format!( + "feature `{}` is declared stable since {}, \ + but was previously declared stable since {}", + feature, + since, + prev_since, + ); + self.tcx.sess.struct_span_err_with_code(span, &msg, + DiagnosticId::Error("E0711".into())).emit(); + return; + } + } + + self.lib_features.stable.insert(feature, since); + } + (None, false, _) => { + self.lib_features.unstable.insert(feature); + } + (Some(_), _, true) | (None, true, _) => { + let msg = format!( + "feature `{}` is declared {}, but was previously declared {}", + feature, + if since.is_some() { "stable"} else { "unstable" }, + if since.is_none() { "stable"} else { "unstable" }, + ); + self.tcx.sess.struct_span_err_with_code(span, &msg, + DiagnosticId::Error("E0711".into())).emit(); + } + } + } +} + +impl<'a, 'tcx> Visitor<'tcx> for LibFeatureCollector<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.tcx.hir) + } + + fn visit_attribute(&mut self, attr: &'tcx Attribute) { + if let Some((feature, stable, span)) = self.extract(attr) { + self.collect_feature(feature, stable, span); + } + } +} + +pub fn collect<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> LibFeatures { + let mut collector = LibFeatureCollector::new(tcx); + for &cnum in tcx.crates().iter() { + for &(feature, since) in tcx.defined_lib_features(cnum).iter() { + collector.collect_feature(feature, since, DUMMY_SP); + } + } + intravisit::walk_crate(&mut collector, tcx.hir.krate()); + collector.lib_features +} diff --git 
a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index d4fa03b50856..b828b1bd30a9 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -51,8 +51,8 @@ //! enclosing function. On the way down the tree, it identifies those AST //! nodes and variable IDs that will be needed for the liveness analysis //! and assigns them contiguous IDs. The liveness id for an AST node is -//! called a `live_node` (it's a newtype'd usize) and the id for a variable -//! is called a `variable` (another newtype'd usize). +//! called a `live_node` (it's a newtype'd u32) and the id for a variable +//! is called a `variable` (another newtype'd u32). //! //! On the way back up the tree, as we are about to exit from a function //! declaration we allocate a `liveness` instance. Now that we know @@ -109,17 +109,20 @@ use self::VarKind::*; use hir::def::*; use ty::{self, TyCtxt}; use lint; -use util::nodemap::NodeMap; +use errors::Applicability; +use util::nodemap::{NodeMap, HirIdMap, HirIdSet}; -use std::{fmt, usize}; +use std::collections::VecDeque; +use std::{fmt, u32}; use std::io::prelude::*; use std::io; use std::rc::Rc; use syntax::ast::{self, NodeId}; +use syntax::ptr::P; use syntax::symbol::keywords; use syntax_pos::Span; -use hir::Expr; +use hir::{Expr, HirId}; use hir; use hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; @@ -132,23 +135,17 @@ enum LoopKind<'a> { } #[derive(Copy, Clone, PartialEq)] -struct Variable(usize); +struct Variable(u32); -#[derive(Copy, PartialEq)] -struct LiveNode(usize); +#[derive(Copy, Clone, PartialEq)] +struct LiveNode(u32); impl Variable { - fn get(&self) -> usize { let Variable(v) = *self; v } + fn get(&self) -> usize { self.0 as usize } } impl LiveNode { - fn get(&self) -> usize { let LiveNode(v) = *self; v } -} - -impl Clone for LiveNode { - fn clone(&self) -> LiveNode { - LiveNode(self.get()) - } + fn get(&self) -> usize { self.0 as usize } } #[derive(Copy, Clone, PartialEq, Debug)] @@ -184,6 
+181,7 @@ impl<'a, 'tcx> Visitor<'tcx> for IrMaps<'a, 'tcx> { b: hir::BodyId, s: Span, id: NodeId) { visit_fn(self, fk, fd, b, s, id); } + fn visit_local(&mut self, l: &'tcx hir::Local) { visit_local(self, l); } fn visit_expr(&mut self, ex: &'tcx Expr) { visit_expr(self, ex); } fn visit_arm(&mut self, a: &'tcx hir::Arm) { visit_arm(self, a); } @@ -230,26 +228,27 @@ impl fmt::Debug for Variable { impl LiveNode { fn is_valid(&self) -> bool { - self.get() != usize::MAX + self.0 != u32::MAX } } -fn invalid_node() -> LiveNode { LiveNode(usize::MAX) } +fn invalid_node() -> LiveNode { LiveNode(u32::MAX) } struct CaptureInfo { ln: LiveNode, - var_nid: NodeId + var_hid: HirId } #[derive(Copy, Clone, Debug)] struct LocalInfo { - id: NodeId, - name: ast::Name + id: HirId, + name: ast::Name, + is_shorthand: bool, } #[derive(Copy, Clone, Debug)] enum VarKind { - Arg(NodeId, ast::Name), + Arg(HirId, ast::Name), Local(LocalInfo), CleanExit } @@ -259,8 +258,8 @@ struct IrMaps<'a, 'tcx: 'a> { num_live_nodes: usize, num_vars: usize, - live_node_map: NodeMap, - variable_map: NodeMap, + live_node_map: HirIdMap, + variable_map: HirIdMap, capture_info_map: NodeMap>>, var_kinds: Vec, lnks: Vec, @@ -272,8 +271,8 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { tcx, num_live_nodes: 0, num_vars: 0, - live_node_map: NodeMap(), - variable_map: NodeMap(), + live_node_map: HirIdMap(), + variable_map: HirIdMap(), capture_info_map: NodeMap(), var_kinds: Vec::new(), lnks: Vec::new(), @@ -281,7 +280,7 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { } fn add_live_node(&mut self, lnk: LiveNodeKind) -> LiveNode { - let ln = LiveNode(self.num_live_nodes); + let ln = LiveNode(self.num_live_nodes as u32); self.lnks.push(lnk); self.num_live_nodes += 1; @@ -291,15 +290,15 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { ln } - fn add_live_node_for_node(&mut self, node_id: NodeId, lnk: LiveNodeKind) { + fn add_live_node_for_node(&mut self, hir_id: HirId, lnk: LiveNodeKind) { let ln = self.add_live_node(lnk); - 
self.live_node_map.insert(node_id, ln); + self.live_node_map.insert(hir_id, ln); - debug!("{:?} is node {}", ln, node_id); + debug!("{:?} is node {:?}", ln, hir_id); } fn add_variable(&mut self, vk: VarKind) -> Variable { - let v = Variable(self.num_vars); + let v = Variable(self.num_vars as u32); self.var_kinds.push(vk); self.num_vars += 1; @@ -315,11 +314,11 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { v } - fn variable(&self, node_id: NodeId, span: Span) -> Variable { - match self.variable_map.get(&node_id) { + fn variable(&self, hir_id: HirId, span: Span) -> Variable { + match self.variable_map.get(&hir_id) { Some(&var) => var, None => { - span_bug!(span, "no variable registered for id {}", node_id); + span_bug!(span, "no variable registered for id {:?}", hir_id); } } } @@ -333,6 +332,13 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { } } + fn variable_is_shorthand(&self, var: Variable) -> bool { + match self.var_kinds[var.get()] { + Local(LocalInfo { is_shorthand, .. }) => is_shorthand, + Arg(..) | CleanExit => false + } + } + fn set_captures(&mut self, node_id: NodeId, cs: Vec) { self.capture_info_map.insert(node_id, Rc::new(cs)); } @@ -353,15 +359,24 @@ fn visit_fn<'a, 'tcx: 'a>(ir: &mut IrMaps<'a, 'tcx>, // swap in a new set of IR maps for this function body: let mut fn_maps = IrMaps::new(ir.tcx); + // Don't run unused pass for #[derive()] + if let FnKind::Method(..) 
= fk { + let parent = ir.tcx.hir.get_parent(id); + if let Some(hir::map::Node::NodeItem(i)) = ir.tcx.hir.find(parent) { + if i.attrs.iter().any(|a| a.check_name("automatically_derived")) { + return; + } + } + } + debug!("creating fn_maps: {:?}", &fn_maps as *const IrMaps); let body = ir.tcx.hir.body(body_id); for arg in &body.arguments { - arg.pat.each_binding(|_bm, arg_id, _x, path1| { - debug!("adding argument {}", arg_id); - let name = path1.node; - fn_maps.add_variable(Arg(arg_id, name)); + arg.pat.each_binding(|_bm, hir_id, _x, ident| { + debug!("adding argument {:?}", hir_id); + fn_maps.add_variable(Arg(hir_id, ident.name)); }) }; @@ -378,31 +393,60 @@ fn visit_fn<'a, 'tcx: 'a>(ir: &mut IrMaps<'a, 'tcx>, lsets.warn_about_unused_args(body, entry_ln); } -fn visit_local<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, local: &'tcx hir::Local) { - local.pat.each_binding(|_, p_id, sp, path1| { - debug!("adding local variable {}", p_id); - let name = path1.node; - ir.add_live_node_for_node(p_id, VarDefNode(sp)); +fn add_from_pat<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, pat: &P) { + // For struct patterns, take note of which fields used shorthand + // (`x` rather than `x: x`). 
+ let mut shorthand_field_ids = HirIdSet(); + let mut pats = VecDeque::new(); + pats.push_back(pat); + while let Some(pat) = pats.pop_front() { + use hir::PatKind::*; + match pat.node { + Binding(_, _, _, ref inner_pat) => { + pats.extend(inner_pat.iter()); + } + Struct(_, ref fields, _) => { + for field in fields { + if field.node.is_shorthand { + shorthand_field_ids.insert(field.node.pat.hir_id); + } + } + } + Ref(ref inner_pat, _) | + Box(ref inner_pat) => { + pats.push_back(inner_pat); + } + TupleStruct(_, ref inner_pats, _) | + Tuple(ref inner_pats, _) => { + pats.extend(inner_pats.iter()); + } + Slice(ref pre_pats, ref inner_pat, ref post_pats) => { + pats.extend(pre_pats.iter()); + pats.extend(inner_pat.iter()); + pats.extend(post_pats.iter()); + } + _ => {} + } + } + + pat.each_binding(|_bm, hir_id, _sp, ident| { + ir.add_live_node_for_node(hir_id, VarDefNode(ident.span)); ir.add_variable(Local(LocalInfo { - id: p_id, - name, + id: hir_id, + name: ident.name, + is_shorthand: shorthand_field_ids.contains(&hir_id) })); }); +} + +fn visit_local<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, local: &'tcx hir::Local) { + add_from_pat(ir, &local.pat); intravisit::walk_local(ir, local); } fn visit_arm<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, arm: &'tcx hir::Arm) { for pat in &arm.pats { - pat.each_binding(|bm, p_id, sp, path1| { - debug!("adding local variable {} from match with bm {:?}", - p_id, bm); - let name = path1.node; - ir.add_live_node_for_node(p_id, VarDefNode(sp)); - ir.add_variable(Local(LocalInfo { - id: p_id, - name, - })); - }) + add_from_pat(ir, pat); } intravisit::walk_arm(ir, arm); } @@ -410,17 +454,17 @@ fn visit_arm<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, arm: &'tcx hir::Arm) { fn visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) { match expr.node { // live nodes required for uses or definitions of variables: - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => { debug!("expr {}: 
path that leads to {:?}", expr.id, path.def); if let Def::Local(..) = path.def { - ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); + ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span)); } intravisit::walk_expr(ir, expr); } - hir::ExprClosure(..) => { + hir::ExprKind::Closure(..) => { // Interesting control flow (for loops can contain labeled // breaks or continues) - ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); + ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span)); // Make a live_node for each captured variable, with the span // being the location that the variable is used. This results @@ -431,8 +475,8 @@ fn visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) { for fv in freevars { if let Def::Local(rv) = fv.def { let fv_ln = ir.add_live_node(FreeVarNode(fv.span)); - call_caps.push(CaptureInfo {ln: fv_ln, - var_nid: rv}); + let var_hid = ir.tcx.hir.node_to_hir_id(rv); + call_caps.push(CaptureInfo { ln: fv_ln, var_hid }); } } }); @@ -442,25 +486,43 @@ fn visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) { } // live nodes required for interesting control flow: - hir::ExprIf(..) | hir::ExprMatch(..) | hir::ExprWhile(..) | hir::ExprLoop(..) => { - ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); + hir::ExprKind::If(..) | + hir::ExprKind::Match(..) | + hir::ExprKind::While(..) | + hir::ExprKind::Loop(..) => { + ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span)); intravisit::walk_expr(ir, expr); } - hir::ExprBinary(op, ..) if op.node.is_lazy() => { - ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); + hir::ExprKind::Binary(op, ..) if op.node.is_lazy() => { + ir.add_live_node_for_node(expr.hir_id, ExprNode(expr.span)); intravisit::walk_expr(ir, expr); } // otherwise, live nodes are not required: - hir::ExprIndex(..) | hir::ExprField(..) | hir::ExprTupField(..) | - hir::ExprArray(..) | hir::ExprCall(..) | hir::ExprMethodCall(..) | - hir::ExprTup(..) | hir::ExprBinary(..) 
| hir::ExprAddrOf(..) | - hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprBreak(..) | - hir::ExprAgain(_) | hir::ExprLit(_) | hir::ExprRet(..) | - hir::ExprBlock(..) | hir::ExprAssign(..) | hir::ExprAssignOp(..) | - hir::ExprStruct(..) | hir::ExprRepeat(..) | - hir::ExprInlineAsm(..) | hir::ExprBox(..) | hir::ExprYield(..) | - hir::ExprType(..) | hir::ExprPath(hir::QPath::TypeRelative(..)) => { + hir::ExprKind::Index(..) | + hir::ExprKind::Field(..) | + hir::ExprKind::Array(..) | + hir::ExprKind::Call(..) | + hir::ExprKind::MethodCall(..) | + hir::ExprKind::Tup(..) | + hir::ExprKind::Binary(..) | + hir::ExprKind::AddrOf(..) | + hir::ExprKind::Cast(..) | + hir::ExprKind::Unary(..) | + hir::ExprKind::Break(..) | + hir::ExprKind::Continue(_) | + hir::ExprKind::Lit(_) | + hir::ExprKind::Ret(..) | + hir::ExprKind::Block(..) | + hir::ExprKind::Assign(..) | + hir::ExprKind::AssignOp(..) | + hir::ExprKind::Struct(..) | + hir::ExprKind::Repeat(..) | + hir::ExprKind::InlineAsm(..) | + hir::ExprKind::Box(..) | + hir::ExprKind::Yield(..) | + hir::ExprKind::Type(..) 
| + hir::ExprKind::Path(hir::QPath::TypeRelative(..)) => { intravisit::walk_expr(ir, expr); } } @@ -510,9 +572,6 @@ struct Liveness<'a, 'tcx: 'a> { // it probably doesn't now) break_ln: NodeMap, cont_ln: NodeMap, - - // mappings from node ID to LiveNode for "breakable" blocks-- currently only `catch {...}` - breakable_block_ln: NodeMap, } impl<'a, 'tcx> Liveness<'a, 'tcx> { @@ -540,12 +599,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { users: vec![invalid_users(); num_live_nodes * num_vars], break_ln: NodeMap(), cont_ln: NodeMap(), - breakable_block_ln: NodeMap(), } } - fn live_node(&self, node_id: NodeId, span: Span) -> LiveNode { - match self.ir.live_node_map.get(&node_id) { + fn live_node(&self, hir_id: HirId, span: Span) -> LiveNode { + match self.ir.live_node_map.get(&hir_id) { Some(&ln) => ln, None => { // This must be a mismatch between the ir_map construction @@ -554,28 +612,28 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // creating liveness nodes for. span_bug!( span, - "no live node registered for node {}", - node_id); + "no live node registered for node {:?}", + hir_id); } } } - fn variable(&self, node_id: NodeId, span: Span) -> Variable { - self.ir.variable(node_id, span) + fn variable(&self, hir_id: HirId, span: Span) -> Variable { + self.ir.variable(hir_id, span) } fn pat_bindings(&mut self, pat: &hir::Pat, mut f: F) where - F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId), + F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, HirId), { - pat.each_binding(|_bm, p_id, sp, _n| { - let ln = self.live_node(p_id, sp); - let var = self.variable(p_id, sp); - f(self, ln, var, sp, p_id); + pat.each_binding(|_bm, hir_id, sp, n| { + let ln = self.live_node(hir_id, sp); + let var = self.variable(hir_id, n.span); + f(self, ln, var, n.span, hir_id); }) } fn arm_pats_bindings(&mut self, pat: Option<&hir::Pat>, f: F) where - F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId), + F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, 
Span, HirId), { if let Some(pat) = pat { self.pat_bindings(pat, f); @@ -647,7 +705,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { } fn write_vars(&self, - wr: &mut Write, + wr: &mut dyn Write, ln: LiveNode, mut test: F) -> io::Result<()> where @@ -657,7 +715,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { for var_idx in 0..self.ir.num_vars { let idx = node_base_idx + var_idx; if test(idx).is_valid() { - write!(wr, " {:?}", Variable(var_idx))?; + write!(wr, " {:?}", Variable(var_idx as u32))?; } } Ok(()) @@ -668,7 +726,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn ln_str(&self, ln: LiveNode) -> String { let mut wr = Vec::new(); { - let wr = &mut wr as &mut Write; + let wr = &mut wr as &mut dyn Write; write!(wr, "[ln({:?}) of kind {:?} reads", ln.get(), self.ir.lnk(ln)); self.write_vars(wr, ln, |idx| self.users[idx].reader); write!(wr, " writes"); @@ -797,7 +855,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { debug!("^^ liveness computation results for body {} (entry={:?})", { for ln_idx in 0..self.ir.num_live_nodes { - debug!("{:?}", self.ln_str(LiveNode(ln_idx))); + debug!("{:?}", self.ln_str(LiveNode(ln_idx as u32))); } body.id }, @@ -809,7 +867,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn propagate_through_block(&mut self, blk: &hir::Block, succ: LiveNode) -> LiveNode { if blk.targeted_by_break { - self.breakable_block_ln.insert(blk.id, succ); + self.break_ln.insert(blk.id, succ); } let succ = self.propagate_through_opt_expr(blk.expr.as_ref().map(|e| &**e), succ); blk.stmts.iter().rev().fold(succ, |succ, stmt| { @@ -820,11 +878,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn propagate_through_stmt(&mut self, stmt: &hir::Stmt, succ: LiveNode) -> LiveNode { match stmt.node { - hir::StmtDecl(ref decl, _) => { + hir::StmtKind::Decl(ref decl, _) => { self.propagate_through_decl(&decl, succ) } - hir::StmtExpr(ref expr, _) | hir::StmtSemi(ref expr, _) => { + hir::StmtKind::Expr(ref expr, _) | hir::StmtKind::Semi(ref expr, _) => { self.propagate_through_expr(&expr, succ) } } @@ -833,10 +891,10 @@ 
impl<'a, 'tcx> Liveness<'a, 'tcx> { fn propagate_through_decl(&mut self, decl: &hir::Decl, succ: LiveNode) -> LiveNode { match decl.node { - hir::DeclLocal(ref local) => { + hir::DeclKind::Local(ref local) => { self.propagate_through_local(&local, succ) } - hir::DeclItem(_) => succ, + hir::DeclKind::Item(_) => succ, } } @@ -880,26 +938,20 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { match expr.node { // Interesting cases with control flow or which gen/kill - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { - self.access_path(expr.id, path, succ, ACC_READ | ACC_USE) + hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => { + self.access_path(expr.hir_id, path, succ, ACC_READ | ACC_USE) } - hir::ExprField(ref e, _) => { + hir::ExprKind::Field(ref e, _) => { self.propagate_through_expr(&e, succ) } - hir::ExprTupField(ref e, _) => { - self.propagate_through_expr(&e, succ) - } + hir::ExprKind::Closure(.., blk_id, _, _) => { + debug!("{} is an ExprKind::Closure", self.ir.tcx.hir.node_to_pretty_string(expr.id)); - hir::ExprClosure(.., blk_id, _, _) => { - debug!("{} is an ExprClosure", self.ir.tcx.hir.node_to_pretty_string(expr.id)); - - /* - The next-node for a break is the successor of the entire - loop. The next-node for a continue is the top of this loop. - */ - let node = self.live_node(expr.id, expr.span); + // The next-node for a break is the successor of the entire + // loop. The next-node for a continue is the top of this loop. 
+ let node = self.live_node(expr.hir_id, expr.span); let break_ln = succ; let cont_ln = node; @@ -916,13 +968,13 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { }; caps.iter().rev().fold(succ, |succ, cap| { self.init_from_succ(cap.ln, succ); - let var = self.variable(cap.var_nid, expr.span); + let var = self.variable(cap.var_hid, expr.span); self.acc(cap.ln, var, ACC_READ | ACC_USE); cap.ln }) } - hir::ExprIf(ref cond, ref then, ref els) => { + hir::ExprKind::If(ref cond, ref then, ref els) => { // // (cond) // | @@ -938,23 +990,23 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // let else_ln = self.propagate_through_opt_expr(els.as_ref().map(|e| &**e), succ); let then_ln = self.propagate_through_expr(&then, succ); - let ln = self.live_node(expr.id, expr.span); + let ln = self.live_node(expr.hir_id, expr.span); self.init_from_succ(ln, else_ln); self.merge_from_succ(ln, then_ln, false); self.propagate_through_expr(&cond, ln) } - hir::ExprWhile(ref cond, ref blk, _) => { + hir::ExprKind::While(ref cond, ref blk, _) => { self.propagate_through_loop(expr, WhileLoop(&cond), &blk, succ) } // Note that labels have been resolved, so we don't need to look // at the label ident - hir::ExprLoop(ref blk, _, _) => { + hir::ExprKind::Loop(ref blk, _, _) => { self.propagate_through_loop(expr, LoopLoop, &blk, succ) } - hir::ExprMatch(ref e, ref arms, _) => { + hir::ExprKind::Match(ref e, ref arms, _) => { // // (e) // | @@ -969,7 +1021,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // ( succ ) // // - let ln = self.live_node(expr.id, expr.span); + let ln = self.live_node(expr.hir_id, expr.span); self.init_empty(ln, succ); let mut first_merge = true; for arm in arms { @@ -989,21 +1041,17 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(&e, ln) } - hir::ExprRet(ref o_e) => { + hir::ExprKind::Ret(ref o_e) => { // ignore succ and subst exit_ln: let exit_ln = self.s.exit_ln; self.propagate_through_opt_expr(o_e.as_ref().map(|e| &**e), exit_ln) } - hir::ExprBreak(label, ref opt_expr) => { + 
hir::ExprKind::Break(label, ref opt_expr) => { // Find which label this break jumps to let target = match label.target_id { - hir::ScopeTarget::Block(node_id) => - self.breakable_block_ln.get(&node_id), - hir::ScopeTarget::Loop(hir::LoopIdResult::Ok(node_id)) => - self.break_ln.get(&node_id), - hir::ScopeTarget::Loop(hir::LoopIdResult::Err(err)) => - span_bug!(expr.span, "loop scope error: {}", err), + Ok(node_id) => self.break_ln.get(&node_id), + Err(err) => span_bug!(expr.span, "loop scope error: {}", err), }.map(|x| *x); // Now that we know the label we're going to, @@ -1015,13 +1063,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { } } - hir::ExprAgain(label) => { + hir::ExprKind::Continue(label) => { // Find which label this expr continues to let sc = match label.target_id { - hir::ScopeTarget::Block(_) => bug!("can't `continue` to a non-loop block"), - hir::ScopeTarget::Loop(hir::LoopIdResult::Ok(node_id)) => node_id, - hir::ScopeTarget::Loop(hir::LoopIdResult::Err(err)) => - span_bug!(expr.span, "loop scope error: {}", err), + Ok(node_id) => node_id, + Err(err) => span_bug!(expr.span, "loop scope error: {}", err), }; // Now that we know the label we're going to, @@ -1033,42 +1079,42 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { } } - hir::ExprAssign(ref l, ref r) => { - // see comment on lvalues in - // propagate_through_lvalue_components() - let succ = self.write_lvalue(&l, succ, ACC_WRITE); - let succ = self.propagate_through_lvalue_components(&l, succ); + hir::ExprKind::Assign(ref l, ref r) => { + // see comment on places in + // propagate_through_place_components() + let succ = self.write_place(&l, succ, ACC_WRITE); + let succ = self.propagate_through_place_components(&l, succ); self.propagate_through_expr(&r, succ) } - hir::ExprAssignOp(_, ref l, ref r) => { + hir::ExprKind::AssignOp(_, ref l, ref r) => { // an overloaded assign op is like a method call if self.tables.is_method_call(expr) { let succ = self.propagate_through_expr(&l, succ); 
self.propagate_through_expr(&r, succ) } else { - // see comment on lvalues in - // propagate_through_lvalue_components() - let succ = self.write_lvalue(&l, succ, ACC_WRITE|ACC_READ); + // see comment on places in + // propagate_through_place_components() + let succ = self.write_place(&l, succ, ACC_WRITE|ACC_READ); let succ = self.propagate_through_expr(&r, succ); - self.propagate_through_lvalue_components(&l, succ) + self.propagate_through_place_components(&l, succ) } } // Uninteresting cases: just propagate in rev exec order - hir::ExprArray(ref exprs) => { + hir::ExprKind::Array(ref exprs) => { self.propagate_through_exprs(exprs, succ) } - hir::ExprStruct(_, ref fields, ref with_expr) => { + hir::ExprKind::Struct(_, ref fields, ref with_expr) => { let succ = self.propagate_through_opt_expr(with_expr.as_ref().map(|e| &**e), succ); fields.iter().rev().fold(succ, |succ, field| { self.propagate_through_expr(&field.expr, succ) }) } - hir::ExprCall(ref f, ref args) => { + hir::ExprKind::Call(ref f, ref args) => { // FIXME(canndrew): This is_never should really be an is_uninhabited let succ = if self.tables.expr_ty(expr).is_never() { self.s.exit_ln @@ -1079,7 +1125,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(&f, succ) } - hir::ExprMethodCall(.., ref args) => { + hir::ExprKind::MethodCall(.., ref args) => { // FIXME(canndrew): This is_never should really be an is_uninhabited let succ = if self.tables.expr_ty(expr).is_never() { self.s.exit_ln @@ -1089,46 +1135,46 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_exprs(args, succ) } - hir::ExprTup(ref exprs) => { + hir::ExprKind::Tup(ref exprs) => { self.propagate_through_exprs(exprs, succ) } - hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => { + hir::ExprKind::Binary(op, ref l, ref r) if op.node.is_lazy() => { let r_succ = self.propagate_through_expr(&r, succ); - let ln = self.live_node(expr.id, expr.span); + let ln = self.live_node(expr.hir_id, expr.span); 
self.init_from_succ(ln, succ); self.merge_from_succ(ln, r_succ, false); self.propagate_through_expr(&l, ln) } - hir::ExprIndex(ref l, ref r) | - hir::ExprBinary(_, ref l, ref r) => { + hir::ExprKind::Index(ref l, ref r) | + hir::ExprKind::Binary(_, ref l, ref r) => { let r_succ = self.propagate_through_expr(&r, succ); self.propagate_through_expr(&l, r_succ) } - hir::ExprBox(ref e) | - hir::ExprAddrOf(_, ref e) | - hir::ExprCast(ref e, _) | - hir::ExprType(ref e, _) | - hir::ExprUnary(_, ref e) | - hir::ExprYield(ref e) | - hir::ExprRepeat(ref e, _) => { + hir::ExprKind::Box(ref e) | + hir::ExprKind::AddrOf(_, ref e) | + hir::ExprKind::Cast(ref e, _) | + hir::ExprKind::Type(ref e, _) | + hir::ExprKind::Unary(_, ref e) | + hir::ExprKind::Yield(ref e) | + hir::ExprKind::Repeat(ref e, _) => { self.propagate_through_expr(&e, succ) } - hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + hir::ExprKind::InlineAsm(ref ia, ref outputs, ref inputs) => { let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| { - // see comment on lvalues - // in propagate_through_lvalue_components() + // see comment on places + // in propagate_through_place_components() if o.is_indirect { self.propagate_through_expr(output, succ) } else { let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE }; - let succ = self.write_lvalue(output, succ, acc); - self.propagate_through_lvalue_components(output, succ) + let succ = self.write_place(output, succ, acc); + self.propagate_through_place_components(output, succ) } }); @@ -1136,21 +1182,23 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_exprs(inputs, succ) } - hir::ExprLit(..) | hir::ExprPath(hir::QPath::TypeRelative(..)) => { + hir::ExprKind::Lit(..) 
| hir::ExprKind::Path(hir::QPath::TypeRelative(..)) => { succ } - hir::ExprBlock(ref blk) => { + // Note that labels have been resolved, so we don't need to look + // at the label ident + hir::ExprKind::Block(ref blk, _) => { self.propagate_through_block(&blk, succ) } } } - fn propagate_through_lvalue_components(&mut self, + fn propagate_through_place_components(&mut self, expr: &Expr, succ: LiveNode) -> LiveNode { - // # Lvalues + // # Places // // In general, the full flow graph structure for an // assignment/move/etc can be handled in one of two ways, @@ -1160,7 +1208,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // // The two kinds of graphs are: // - // Tracked lvalue Untracked lvalue + // Tracked place Untracked place // ----------------------++----------------------- // || // | || | @@ -1168,7 +1216,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // (rvalue) || (rvalue) // | || | // v || v - // (write of lvalue) || (lvalue components) + // (write of place) || (place components) // | || | // v || v // (succ) || (succ) @@ -1177,68 +1225,68 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // // I will cover the two cases in turn: // - // # Tracked lvalues + // # Tracked places // - // A tracked lvalue is a local variable/argument `x`. In + // A tracked place is a local variable/argument `x`. In // these cases, the link_node where the write occurs is linked - // to node id of `x`. The `write_lvalue()` routine generates + // to node id of `x`. The `write_place()` routine generates // the contents of this node. There are no subcomponents to // consider. // - // # Non-tracked lvalues + // # Non-tracked places // - // These are lvalues like `x[5]` or `x.f`. In that case, we + // These are places like `x[5]` or `x.f`. In that case, we // basically ignore the value which is written to but generate // reads for the components---`x` in these two examples. The // components reads are generated by - // `propagate_through_lvalue_components()` (this fn). 
+ // `propagate_through_place_components()` (this fn). // - // # Illegal lvalues + // # Illegal places // - // It is still possible to observe assignments to non-lvalues; + // It is still possible to observe assignments to non-places; // these errors are detected in the later pass borrowck. We // just ignore such cases and treat them as reads. match expr.node { - hir::ExprPath(_) => succ, - hir::ExprField(ref e, _) => self.propagate_through_expr(&e, succ), - hir::ExprTupField(ref e, _) => self.propagate_through_expr(&e, succ), + hir::ExprKind::Path(_) => succ, + hir::ExprKind::Field(ref e, _) => self.propagate_through_expr(&e, succ), _ => self.propagate_through_expr(expr, succ) } } - // see comment on propagate_through_lvalue() - fn write_lvalue(&mut self, expr: &Expr, succ: LiveNode, acc: u32) + // see comment on propagate_through_place() + fn write_place(&mut self, expr: &Expr, succ: LiveNode, acc: u32) -> LiveNode { match expr.node { - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { - self.access_path(expr.id, path, succ, acc) + hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => { + self.access_path(expr.hir_id, path, succ, acc) } - // We do not track other lvalues, so just propagate through + // We do not track other places, so just propagate through // to their subcomponents. Also, it may happen that - // non-lvalues occur here, because those are detected in the + // non-places occur here, because those are detected in the // later pass borrowck. 
_ => succ } } - fn access_var(&mut self, id: NodeId, nid: NodeId, succ: LiveNode, acc: u32, span: Span) + fn access_var(&mut self, hir_id: HirId, nid: NodeId, succ: LiveNode, acc: u32, span: Span) -> LiveNode { - let ln = self.live_node(id, span); + let ln = self.live_node(hir_id, span); if acc != 0 { self.init_from_succ(ln, succ); - let var = self.variable(nid, span); + let var_hid = self.ir.tcx.hir.node_to_hir_id(nid); + let var = self.variable(var_hid, span); self.acc(ln, var, acc); } ln } - fn access_path(&mut self, id: NodeId, path: &hir::Path, succ: LiveNode, acc: u32) + fn access_path(&mut self, hir_id: HirId, path: &hir::Path, succ: LiveNode, acc: u32) -> LiveNode { match path.def { Def::Local(nid) => { - self.access_var(id, nid, succ, acc, path.span) + self.access_var(hir_id, nid, succ, acc, path.span) } _ => succ } @@ -1272,7 +1320,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // first iteration: let mut first_merge = true; - let ln = self.live_node(expr.id, expr.span); + let ln = self.live_node(expr.hir_id, expr.span); self.init_empty(ln, succ); match kind { LoopLoop => {} @@ -1342,7 +1390,8 @@ fn check_local<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, local: &'tcx hir::Local) }, None => { this.pat_bindings(&local.pat, |this, ln, var, sp, id| { - this.warn_about_unused(sp, id, ln, var); + let span = local.pat.simple_ident().map_or(sp, |ident| ident.span); + this.warn_about_unused(span, id, ln, var); }) } } @@ -1362,29 +1411,29 @@ fn check_arm<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, arm: &'tcx hir::Arm) { fn check_expr<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, expr: &'tcx Expr) { match expr.node { - hir::ExprAssign(ref l, _) => { - this.check_lvalue(&l); + hir::ExprKind::Assign(ref l, _) => { + this.check_place(&l); intravisit::walk_expr(this, expr); } - hir::ExprAssignOp(_, ref l, _) => { + hir::ExprKind::AssignOp(_, ref l, _) => { if !this.tables.is_method_call(expr) { - this.check_lvalue(&l); + this.check_place(&l); } intravisit::walk_expr(this, expr); } - 
hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + hir::ExprKind::InlineAsm(ref ia, ref outputs, ref inputs) => { for input in inputs { this.visit_expr(input); } - // Output operands must be lvalues + // Output operands must be places for (o, output) in ia.outputs.iter().zip(outputs) { if !o.is_indirect { - this.check_lvalue(output); + this.check_place(output); } this.visit_expr(output); } @@ -1393,37 +1442,38 @@ fn check_expr<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, expr: &'tcx Expr) { } // no correctness conditions related to liveness - hir::ExprCall(..) | hir::ExprMethodCall(..) | hir::ExprIf(..) | - hir::ExprMatch(..) | hir::ExprWhile(..) | hir::ExprLoop(..) | - hir::ExprIndex(..) | hir::ExprField(..) | hir::ExprTupField(..) | - hir::ExprArray(..) | hir::ExprTup(..) | hir::ExprBinary(..) | - hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprRet(..) | - hir::ExprBreak(..) | hir::ExprAgain(..) | hir::ExprLit(_) | - hir::ExprBlock(..) | hir::ExprAddrOf(..) | - hir::ExprStruct(..) | hir::ExprRepeat(..) | - hir::ExprClosure(..) | hir::ExprPath(_) | hir::ExprYield(..) | - hir::ExprBox(..) | hir::ExprType(..) => { + hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) | hir::ExprKind::If(..) | + hir::ExprKind::Match(..) | hir::ExprKind::While(..) | hir::ExprKind::Loop(..) | + hir::ExprKind::Index(..) | hir::ExprKind::Field(..) | + hir::ExprKind::Array(..) | hir::ExprKind::Tup(..) | hir::ExprKind::Binary(..) | + hir::ExprKind::Cast(..) | hir::ExprKind::Unary(..) | hir::ExprKind::Ret(..) | + hir::ExprKind::Break(..) | hir::ExprKind::Continue(..) | hir::ExprKind::Lit(_) | + hir::ExprKind::Block(..) | hir::ExprKind::AddrOf(..) | + hir::ExprKind::Struct(..) | hir::ExprKind::Repeat(..) | + hir::ExprKind::Closure(..) | hir::ExprKind::Path(_) | hir::ExprKind::Yield(..) | + hir::ExprKind::Box(..) | hir::ExprKind::Type(..) 
=> { intravisit::walk_expr(this, expr); } } } impl<'a, 'tcx> Liveness<'a, 'tcx> { - fn check_lvalue(&mut self, expr: &'tcx Expr) { + fn check_place(&mut self, expr: &'tcx Expr) { match expr.node { - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => { if let Def::Local(nid) = path.def { // Assignment to an immutable variable or argument: only legal // if there is no later assignment. If this local is actually // mutable, then check for a reassignment to flag the mutability // as being used. - let ln = self.live_node(expr.id, expr.span); - let var = self.variable(nid, expr.span); - self.warn_about_dead_assign(expr.span, expr.id, ln, var); + let ln = self.live_node(expr.hir_id, expr.span); + let var_hid = self.ir.tcx.hir.node_to_hir_id(nid); + let var = self.variable(var_hid, expr.span); + self.warn_about_dead_assign(expr.span, expr.hir_id, ln, var); } } _ => { - // For other kinds of lvalues, no checks are required, + // For other kinds of places, no checks are required, // and any embedded expressions are actually rvalues intravisit::walk_expr(self, expr); } @@ -1441,14 +1491,14 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn warn_about_unused_args(&self, body: &hir::Body, entry_ln: LiveNode) { for arg in &body.arguments { - arg.pat.each_binding(|_bm, p_id, sp, path1| { - let var = self.variable(p_id, sp); + arg.pat.each_binding(|_bm, hir_id, _, ident| { + let sp = ident.span; + let var = self.variable(hir_id, sp); // Ignore unused self. 
- let name = path1.node; - if name != keywords::SelfValue.name() { - if !self.warn_about_unused(sp, p_id, entry_ln, var) { + if ident.name != keywords::SelfValue.name() { + if !self.warn_about_unused(sp, hir_id, entry_ln, var) { if self.live_on_entry(entry_ln, var).is_none() { - self.report_dead_assign(p_id, sp, var, true); + self.report_dead_assign(hir_id, sp, var, true); } } } @@ -1466,7 +1516,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn warn_about_unused(&self, sp: Span, - id: NodeId, + hir_id: HirId, ln: LiveNode, var: Variable) -> bool { @@ -1483,17 +1533,31 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.assigned_on_exit(ln, var).is_some() }; + let suggest_underscore_msg = format!("consider using `_{}` instead", + name); + if is_assigned { - self.ir.tcx.lint_node_note(lint::builtin::UNUSED_VARIABLES, id, sp, - &format!("variable `{}` is assigned to, but never used", - name), - &format!("to avoid this warning, consider using `_{}` instead", - name)); + self.ir.tcx + .lint_hir_note(lint::builtin::UNUSED_VARIABLES, hir_id, sp, + &format!("variable `{}` is assigned to, but never used", + name), + &suggest_underscore_msg); } else if name != "self" { - self.ir.tcx.lint_node_note(lint::builtin::UNUSED_VARIABLES, id, sp, - &format!("unused variable: `{}`", name), - &format!("to avoid this warning, consider using `_{}` instead", - name)); + let msg = format!("unused variable: `{}`", name); + let mut err = self.ir.tcx + .struct_span_lint_hir(lint::builtin::UNUSED_VARIABLES, hir_id, sp, &msg); + if self.ir.variable_is_shorthand(var) { + err.span_suggestion_with_applicability(sp, "try ignoring the field", + format!("{}: _", name), + Applicability::MachineApplicable); + } else { + err.span_suggestion_short_with_applicability( + sp, &suggest_underscore_msg, + format!("_{}", name), + Applicability::MachineApplicable, + ); + } + err.emit() } } true @@ -1504,21 +1568,21 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn warn_about_dead_assign(&self, sp: Span, - id: NodeId, + hir_id: HirId, 
ln: LiveNode, var: Variable) { if self.live_on_exit(ln, var).is_none() { - self.report_dead_assign(id, sp, var, false); + self.report_dead_assign(hir_id, sp, var, false); } } - fn report_dead_assign(&self, id: NodeId, sp: Span, var: Variable, is_argument: bool) { + fn report_dead_assign(&self, hir_id: HirId, sp: Span, var: Variable, is_argument: bool) { if let Some(name) = self.should_warn(var) { if is_argument { - self.ir.tcx.lint_node(lint::builtin::UNUSED_ASSIGNMENTS, id, sp, + self.ir.tcx.lint_hir(lint::builtin::UNUSED_ASSIGNMENTS, hir_id, sp, &format!("value passed to `{}` is never read", name)); } else { - self.ir.tcx.lint_node(lint::builtin::UNUSED_ASSIGNMENTS, id, sp, + self.ir.tcx.lint_hir(lint::builtin::UNUSED_ASSIGNMENTS, hir_id, sp, &format!("value assigned to `{}` is never read", name)); } } diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index a8955723e3ae..8feefdf5a1dd 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -26,8 +26,8 @@ //! | E.comp // access to an interior component //! //! Imagine a routine ToAddr(Expr) that evaluates an expression and returns an -//! address where the result is to be found. If Expr is an lvalue, then this -//! is the address of the lvalue. If Expr is an rvalue, this is the address of +//! address where the result is to be found. If Expr is a place, then this +//! is the address of the place. If Expr is an rvalue, this is the address of //! some temporary spot in memory where the result is stored. //! //! Now, cat_expr() classifies the expression Expr and the address A=ToAddr(Expr) @@ -46,9 +46,9 @@ //! //! ## By-reference upvars //! -//! One part of the translation which may be non-obvious is that we translate +//! One part of the codegen which may be non-obvious is that we translate //! closure upvars into the dereference of a borrowed pointer; this more closely -//! resembles the runtime translation. 
So, for example, if we had: +//! resembles the runtime codegen. So, for example, if we had: //! //! let mut x = 3; //! let y = 5; @@ -62,7 +62,6 @@ pub use self::PointerKind::*; pub use self::InteriorKind::*; -pub use self::FieldName::*; pub use self::MutabilityCategory::*; pub use self::AliasableReason::*; pub use self::Note::*; @@ -81,10 +80,12 @@ use ty::fold::TypeFoldable; use hir::{MutImmutable, MutMutable, PatKind}; use hir::pat_util::EnumerateAndAdjustIterator; use hir; -use syntax::ast; +use syntax::ast::{self, Name}; use syntax_pos::Span; use std::fmt; +use std::hash::{Hash, Hasher}; +use rustc_data_structures::sync::Lrc; use std::rc::Rc; use util::nodemap::ItemLocalSet; @@ -94,7 +95,7 @@ pub enum Categorization<'tcx> { StaticItem, Upvar(Upvar), // upvar referenced by closure env Local(ast::NodeId), // local variable - Deref(cmt<'tcx>, PointerKind<'tcx>), // deref of a ptr + Deref(cmt<'tcx>, PointerKind<'tcx>), // deref of a ptr Interior(cmt<'tcx>, InteriorKind), // something interior: field, tuple, etc Downcast(cmt<'tcx>, DefId), // selects a particular enum variant (*1) @@ -119,23 +120,31 @@ pub enum PointerKind<'tcx> { /// `*T` UnsafePtr(hir::Mutability), - - /// Implicit deref of the `&T` that results from an overloaded index `[]`. - Implicit(ty::BorrowKind, ty::Region<'tcx>), } // We use the term "interior" to mean "something reachable from the // base without a pointer dereference", e.g. a field #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum InteriorKind { - InteriorField(FieldName), + InteriorField(FieldIndex), InteriorElement(InteriorOffsetKind), } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum FieldName { - NamedField(ast::Name), - PositionalField(usize) +// Contains index of a field that is actually used for loan path comparisons and +// string representation of the field that should be used only for diagnostics. 
+#[derive(Clone, Copy, Eq)] +pub struct FieldIndex(pub usize, pub Name); + +impl PartialEq for FieldIndex { + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +impl Hash for FieldIndex { + fn hash(&self, h: &mut H) { + self.0.hash(h) + } } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] @@ -160,6 +169,7 @@ pub enum MutabilityCategory { pub enum Note { NoteClosureEnv(ty::UpvarId), // Deref through closure env NoteUpvarRef(ty::UpvarId), // Deref through by-ref upvar + NoteIndex, // Deref as part of desugaring `x[]` into its two components NoteNone // Nothing special } @@ -170,19 +180,23 @@ pub enum Note { // which the value is stored. // // *WARNING* The field `cmt.type` is NOT necessarily the same as the -// result of `node_id_to_type(cmt.id)`. This is because the `id` is -// always the `id` of the node producing the type; in an expression -// like `*x`, the type of this deref node is the deref'd type (`T`), -// but in a pattern like `@x`, the `@x` pattern is again a -// dereference, but its type is the type *before* the dereference -// (`@T`). So use `cmt.ty` to find the type of the value in a consistent -// fashion. For more details, see the method `cat_pattern` +// result of `node_id_to_type(cmt.id)`. +// +// (FIXME: rewrite the following comment given that `@x` managed +// pointers have been obsolete for quite some time.) +// +// This is because the `id` is always the `id` of the node producing the +// type; in an expression like `*x`, the type of this deref node is the +// deref'd type (`T`), but in a pattern like `@x`, the `@x` pattern is +// again a dereference, but its type is the type *before* the +// dereference (`@T`). So use `cmt.ty` to find the type of the value in +// a consistent fashion. 
For more details, see the method `cat_pattern` #[derive(Clone, Debug, PartialEq)] pub struct cmt_<'tcx> { - pub id: ast::NodeId, // id of expr/pat producing this value + pub hir_id: hir::HirId, // HIR id of expr/pat producing this value pub span: Span, // span of same expr/pat pub cat: Categorization<'tcx>, // categorization of expr - pub mutbl: MutabilityCategory, // mutability of expr as lvalue + pub mutbl: MutabilityCategory, // mutability of expr as place pub ty: Ty<'tcx>, // type of the expr (*see WARNING above*) pub note: Note, // Note about the provenance of this cmt } @@ -197,7 +211,7 @@ pub enum ImmutabilityBlame<'tcx> { } impl<'tcx> cmt_<'tcx> { - fn resolve_field(&self, field_name: FieldName) -> Option<(&'tcx ty::AdtDef, &'tcx ty::FieldDef)> + fn resolve_field(&self, field_index: usize) -> Option<(&'tcx ty::AdtDef, &'tcx ty::FieldDef)> { let adt_def = match self.ty.sty { ty::TyAdt(def, _) => def, @@ -214,23 +228,18 @@ impl<'tcx> cmt_<'tcx> { &adt_def.variants[0] } }; - let field_def = match field_name { - NamedField(name) => variant_def.field_named(name), - PositionalField(idx) => &variant_def.fields[idx] - }; - Some((adt_def, field_def)) + Some((adt_def, &variant_def.fields[field_index])) } pub fn immutability_blame(&self) -> Option> { match self.cat { - Categorization::Deref(ref base_cmt, BorrowedPtr(ty::ImmBorrow, _)) | - Categorization::Deref(ref base_cmt, Implicit(ty::ImmBorrow, _)) => { + Categorization::Deref(ref base_cmt, BorrowedPtr(ty::ImmBorrow, _)) => { // try to figure out where the immutable reference came from match base_cmt.cat { Categorization::Local(node_id) => Some(ImmutabilityBlame::LocalDeref(node_id)), - Categorization::Interior(ref base_cmt, InteriorField(field_name)) => { - base_cmt.resolve_field(field_name).map(|(adt_def, field_def)| { + Categorization::Interior(ref base_cmt, InteriorField(field_index)) => { + base_cmt.resolve_field(field_index.0).map(|(adt_def, field_def)| { ImmutabilityBlame::AdtFieldDeref(adt_def, field_def) 
}) } @@ -266,18 +275,18 @@ impl<'tcx> cmt_<'tcx> { } } -pub trait ast_node { - fn id(&self) -> ast::NodeId; +pub trait HirNode { + fn hir_id(&self) -> hir::HirId; fn span(&self) -> Span; } -impl ast_node for hir::Expr { - fn id(&self) -> ast::NodeId { self.id } +impl HirNode for hir::Expr { + fn hir_id(&self) -> hir::HirId { self.hir_id } fn span(&self) -> Span { self.span } } -impl ast_node for hir::Pat { - fn id(&self) -> ast::NodeId { self.id } +impl HirNode for hir::Pat { + fn hir_id(&self) -> hir::HirId { self.hir_id } fn span(&self) -> Span { self.span } } @@ -286,7 +295,7 @@ pub struct MemCategorizationContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'gcx, 'tcx>, pub region_scope_tree: &'a region::ScopeTree, pub tables: &'a ty::TypeckTables<'tcx>, - rvalue_promotable_map: Option>, + rvalue_promotable_map: Option>, infcx: Option<&'a InferCtxt<'a, 'gcx, 'tcx>>, } @@ -320,7 +329,7 @@ impl MutabilityCategory { Unique => { base_mutbl.inherit() } - BorrowedPtr(borrow_kind, _) | Implicit(borrow_kind, _) => { + BorrowedPtr(borrow_kind, _) => { MutabilityCategory::from_borrow_kind(borrow_kind) } UnsafePtr(m) => { @@ -395,7 +404,7 @@ impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, region_scope_tree: &'a region::ScopeTree, tables: &'a ty::TypeckTables<'tcx>, - rvalue_promotable_map: Option>) + rvalue_promotable_map: Option>) -> MemCategorizationContext<'a, 'tcx, 'tcx> { MemCategorizationContext { tcx, @@ -480,7 +489,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // FIXME None if self.is_tainted_by_errors() => Err(()), None => { - let id = self.tcx.hir.definitions().find_node_for_hir_id(id); + let id = self.tcx.hir.hir_to_node_id(id); bug!("no type for node {}: {} in mem_categorization", id, self.tcx.hir.node_to_string(id)); } @@ -502,8 +511,37 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { self.resolve_type_vars_or_error(expr.hir_id, 
self.tables.expr_ty_adjusted_opt(expr)) } - fn pat_ty(&self, pat: &hir::Pat) -> McResult> { + /// Returns the type of value that this pattern matches against. + /// Some non-obvious cases: + /// + /// - a `ref x` binding matches against a value of type `T` and gives + /// `x` the type `&T`; we return `T`. + /// - a pattern with implicit derefs (thanks to default binding + /// modes #42640) may look like `Some(x)` but in fact have + /// implicit deref patterns attached (e.g., it is really + /// `&Some(x)`). In that case, we return the "outermost" type + /// (e.g., `&Option). + pub fn pat_ty_adjusted(&self, pat: &hir::Pat) -> McResult> { + // Check for implicit `&` types wrapping the pattern; note + // that these are never attached to binding patterns, so + // actually this is somewhat "disjoint" from the code below + // that aims to account for `ref x`. + if let Some(vec) = self.tables.pat_adjustments().get(pat.hir_id) { + if let Some(first_ty) = vec.first() { + debug!("pat_ty(pat={:?}) found adjusted ty `{:?}`", pat, first_ty); + return Ok(first_ty); + } + } + + self.pat_ty_unadjusted(pat) + } + + + /// Like `pat_ty`, but ignores implicit `&` patterns. + fn pat_ty_unadjusted(&self, pat: &hir::Pat) -> McResult> { let base_ty = self.node_ty(pat.hir_id)?; + debug!("pat_ty(pat={:?}) base_ty={:?}", pat, base_ty); + // This code detects whether we are looking at a `ref x`, // and if so, figures out what the type *being borrowed* is. let ret_ty = match pat.node { @@ -517,7 +555,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // a bind-by-ref means that the base_ty will be the type of the ident itself, // but what we want here is the type of the underlying value being borrowed. // So peel off one-level, turning the &T into T. 
- match base_ty.builtin_deref(false, ty::NoPreference) { + match base_ty.builtin_deref(false) { Some(t) => t.ty, None => { debug!("By-ref binding of non-derefable type {:?}", base_ty); @@ -530,18 +568,18 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } _ => base_ty, }; - debug!("pat_ty(pat={:?}) base_ty={:?} ret_ty={:?}", - pat, base_ty, ret_ty); + debug!("pat_ty(pat={:?}) ret_ty={:?}", pat, ret_ty); + Ok(ret_ty) } - pub fn cat_expr(&self, expr: &hir::Expr) -> McResult> { + pub fn cat_expr(&self, expr: &hir::Expr) -> McResult> { // This recursion helper avoids going through *too many* // adjustments, since *only* non-overloaded deref recurses. fn helper<'a, 'gcx, 'tcx>(mc: &MemCategorizationContext<'a, 'gcx, 'tcx>, expr: &hir::Expr, adjustments: &[adjustment::Adjustment<'tcx>]) - -> McResult> { + -> McResult> { match adjustments.split_last() { None => mc.cat_expr_unadjusted(expr), Some((adjustment, previous)) => { @@ -554,33 +592,33 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } pub fn cat_expr_adjusted(&self, expr: &hir::Expr, - previous: cmt<'tcx>, + previous: cmt_<'tcx>, adjustment: &adjustment::Adjustment<'tcx>) - -> McResult> { + -> McResult> { self.cat_expr_adjusted_with(expr, || Ok(previous), adjustment) } fn cat_expr_adjusted_with(&self, expr: &hir::Expr, previous: F, adjustment: &adjustment::Adjustment<'tcx>) - -> McResult> - where F: FnOnce() -> McResult> + -> McResult> + where F: FnOnce() -> McResult> { debug!("cat_expr_adjusted_with({:?}): {:?}", adjustment, expr); let target = self.resolve_type_vars_if_possible(&adjustment.target); match adjustment.kind { adjustment::Adjust::Deref(overloaded) => { // Equivalent to *expr or something similar. 
- let base = if let Some(deref) = overloaded { + let base = Rc::new(if let Some(deref) = overloaded { let ref_ty = self.tcx.mk_ref(deref.region, ty::TypeAndMut { ty: target, mutbl: deref.mutbl, }); - self.cat_rvalue_node(expr.id, expr.span, ref_ty) + self.cat_rvalue_node(expr.hir_id, expr.span, ref_ty) } else { previous()? - }; - self.cat_deref(expr, base, false) + }); + self.cat_deref(expr, base, NoteNone) } adjustment::Adjust::NeverToAny | @@ -591,123 +629,119 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { adjustment::Adjust::Borrow(_) | adjustment::Adjust::Unsize => { // Result is an rvalue. - Ok(self.cat_rvalue_node(expr.id, expr.span, target)) + Ok(self.cat_rvalue_node(expr.hir_id, expr.span, target)) } } } - pub fn cat_expr_unadjusted(&self, expr: &hir::Expr) -> McResult> { + pub fn cat_expr_unadjusted(&self, expr: &hir::Expr) -> McResult> { debug!("cat_expr: id={} expr={:?}", expr.id, expr); let expr_ty = self.expr_ty(expr)?; match expr.node { - hir::ExprUnary(hir::UnDeref, ref e_base) => { + hir::ExprKind::Unary(hir::UnDeref, ref e_base) => { if self.tables.is_method_call(expr) { - self.cat_overloaded_lvalue(expr, e_base, false) + self.cat_overloaded_place(expr, e_base, NoteNone) } else { - let base_cmt = self.cat_expr(&e_base)?; - self.cat_deref(expr, base_cmt, false) + let base_cmt = Rc::new(self.cat_expr(&e_base)?); + self.cat_deref(expr, base_cmt, NoteNone) } } - hir::ExprField(ref base, f_name) => { - let base_cmt = self.cat_expr(&base)?; + hir::ExprKind::Field(ref base, f_ident) => { + let base_cmt = Rc::new(self.cat_expr(&base)?); debug!("cat_expr(cat_field): id={} expr={:?} base={:?}", expr.id, expr, base_cmt); - Ok(self.cat_field(expr, base_cmt, f_name.node, expr_ty)) + let f_index = self.tcx.field_index(expr.id, self.tables); + Ok(self.cat_field(expr, base_cmt, f_index, f_ident, expr_ty)) } - hir::ExprTupField(ref base, idx) => { - let base_cmt = self.cat_expr(&base)?; - Ok(self.cat_tup_field(expr, base_cmt, idx.node, expr_ty)) 
- } - - hir::ExprIndex(ref base, _) => { + hir::ExprKind::Index(ref base, _) => { if self.tables.is_method_call(expr) { // If this is an index implemented by a method call, then it // will include an implicit deref of the result. // The call to index() returns a `&T` value, which // is an rvalue. That is what we will be // dereferencing. - self.cat_overloaded_lvalue(expr, base, true) + self.cat_overloaded_place(expr, base, NoteIndex) } else { - let base_cmt = self.cat_expr(&base)?; + let base_cmt = Rc::new(self.cat_expr(&base)?); self.cat_index(expr, base_cmt, expr_ty, InteriorOffsetKind::Index) } } - hir::ExprPath(ref qpath) => { - let def = self.tables.qpath_def(qpath, expr.hir_id); - self.cat_def(expr.id, expr.span, expr_ty, def) + hir::ExprKind::Path(ref qpath) => { + let def = self.tables.qpath_def(qpath, expr.hir_id); + self.cat_def(expr.hir_id, expr.span, expr_ty, def) } - hir::ExprType(ref e, _) => { + hir::ExprKind::Type(ref e, _) => { self.cat_expr(&e) } - hir::ExprAddrOf(..) | hir::ExprCall(..) | - hir::ExprAssign(..) | hir::ExprAssignOp(..) | - hir::ExprClosure(..) | hir::ExprRet(..) | - hir::ExprUnary(..) | hir::ExprYield(..) | - hir::ExprMethodCall(..) | hir::ExprCast(..) | - hir::ExprArray(..) | hir::ExprTup(..) | hir::ExprIf(..) | - hir::ExprBinary(..) | hir::ExprWhile(..) | - hir::ExprBlock(..) | hir::ExprLoop(..) | hir::ExprMatch(..) | - hir::ExprLit(..) | hir::ExprBreak(..) | - hir::ExprAgain(..) | hir::ExprStruct(..) | hir::ExprRepeat(..) | - hir::ExprInlineAsm(..) | hir::ExprBox(..) => { - Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)) + hir::ExprKind::AddrOf(..) | hir::ExprKind::Call(..) | + hir::ExprKind::Assign(..) | hir::ExprKind::AssignOp(..) | + hir::ExprKind::Closure(..) | hir::ExprKind::Ret(..) | + hir::ExprKind::Unary(..) | hir::ExprKind::Yield(..) | + hir::ExprKind::MethodCall(..) | hir::ExprKind::Cast(..) | + hir::ExprKind::Array(..) | hir::ExprKind::Tup(..) | hir::ExprKind::If(..) | + hir::ExprKind::Binary(..) 
| hir::ExprKind::While(..) | + hir::ExprKind::Block(..) | hir::ExprKind::Loop(..) | hir::ExprKind::Match(..) | + hir::ExprKind::Lit(..) | hir::ExprKind::Break(..) | + hir::ExprKind::Continue(..) | hir::ExprKind::Struct(..) | hir::ExprKind::Repeat(..) | + hir::ExprKind::InlineAsm(..) | hir::ExprKind::Box(..) => { + Ok(self.cat_rvalue_node(expr.hir_id, expr.span, expr_ty)) } } } pub fn cat_def(&self, - id: ast::NodeId, + hir_id: hir::HirId, span: Span, expr_ty: Ty<'tcx>, def: Def) - -> McResult> { - debug!("cat_def: id={} expr={:?} def={:?}", - id, expr_ty, def); + -> McResult> { + debug!("cat_def: id={:?} expr={:?} def={:?}", + hir_id, expr_ty, def); match def { Def::StructCtor(..) | Def::VariantCtor(..) | Def::Const(..) | Def::AssociatedConst(..) | Def::Fn(..) | Def::Method(..) => { - Ok(self.cat_rvalue_node(id, span, expr_ty)) + Ok(self.cat_rvalue_node(hir_id, span, expr_ty)) } Def::Static(def_id, mutbl) => { // `#[thread_local]` statics may not outlive the current function. for attr in &self.tcx.get_attrs(def_id)[..] 
{ if attr.check_name("thread_local") { - return Ok(self.cat_rvalue_node(id, span, expr_ty)); + return Ok(self.cat_rvalue_node(hir_id, span, expr_ty)); } } - Ok(Rc::new(cmt_ { - id:id, + Ok(cmt_ { + hir_id, span:span, cat:Categorization::StaticItem, mutbl: if mutbl { McDeclared } else { McImmutable}, ty:expr_ty, note: NoteNone - })) + }) } Def::Upvar(var_id, _, fn_node_id) => { - self.cat_upvar(id, span, var_id, fn_node_id) + self.cat_upvar(hir_id, span, var_id, fn_node_id) } Def::Local(vid) => { - Ok(Rc::new(cmt_ { - id, + Ok(cmt_ { + hir_id, span, cat: Categorization::Local(vid), mutbl: MutabilityCategory::from_local(self.tcx, self.tables, vid), ty: expr_ty, note: NoteNone - })) + }) } def => span_bug!(span, "unexpected definition in memory categorization: {:?}", def) @@ -717,11 +751,11 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // Categorize an upvar, complete with invisible derefs of closure // environment and upvar reference as appropriate. fn cat_upvar(&self, - id: ast::NodeId, + hir_id: hir::HirId, span: Span, var_id: ast::NodeId, fn_node_id: ast::NodeId) - -> McResult> + -> McResult> { let fn_hir_id = self.tcx.hir.node_to_hir_id(fn_node_id); @@ -784,7 +818,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // from the environment (perhaps we should eventually desugar // this field further, but it will do for now). 
let cmt_result = cmt_ { - id, + hir_id, span, cat: Categorization::Upvar(Upvar {id: upvar_id, kind: kind}), mutbl: var_mutbl, @@ -800,10 +834,10 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { cmt_result } ty::ClosureKind::FnMut => { - self.env_deref(id, span, upvar_id, var_mutbl, ty::MutBorrow, cmt_result) + self.env_deref(hir_id, span, upvar_id, var_mutbl, ty::MutBorrow, cmt_result) } ty::ClosureKind::Fn => { - self.env_deref(id, span, upvar_id, var_mutbl, ty::ImmBorrow, cmt_result) + self.env_deref(hir_id, span, upvar_id, var_mutbl, ty::ImmBorrow, cmt_result) } }; @@ -818,7 +852,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { ty::UpvarCapture::ByRef(upvar_borrow) => { let ptr = BorrowedPtr(upvar_borrow.kind, upvar_borrow.region); cmt_ { - id, + hir_id, span, cat: Categorization::Deref(Rc::new(cmt_result), ptr), mutbl: MutabilityCategory::from_borrow_kind(upvar_borrow.kind), @@ -828,13 +862,13 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } }; - let ret = Rc::new(cmt_result); + let ret = cmt_result; debug!("cat_upvar ret={:?}", ret); Ok(ret) } fn env_deref(&self, - id: ast::NodeId, + hir_id: hir::HirId, span: Span, upvar_id: ty::UpvarId, upvar_mutbl: MutabilityCategory, @@ -878,7 +912,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } let ret = cmt_ { - id, + hir_id, span, cat: Categorization::Deref(Rc::new(cmt_result), env_ptr), mutbl: deref_mutbl, @@ -902,21 +936,35 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } pub fn cat_rvalue_node(&self, - id: ast::NodeId, + hir_id: hir::HirId, span: Span, expr_ty: Ty<'tcx>) - -> cmt<'tcx> { - let hir_id = self.tcx.hir.node_to_hir_id(id); + -> cmt_<'tcx> { + debug!( + "cat_rvalue_node(id={:?}, span={:?}, expr_ty={:?})", + hir_id, + span, + expr_ty, + ); let promotable = self.rvalue_promotable_map.as_ref().map(|m| m.contains(&hir_id.local_id)) .unwrap_or(false); + debug!( + "cat_rvalue_node: promotable = {:?}", + 
promotable, + ); + // Always promote `[T; 0]` (even when e.g. borrowed mutably). let promotable = match expr_ty.sty { - ty::TyArray(_, len) if - len.val.to_const_int().and_then(|i| i.to_u64()) == Some(0) => true, + ty::TyArray(_, len) if len.assert_usize(self.tcx) == Some(0) => true, _ => promotable, }; + debug!( + "cat_rvalue_node: promotable = {:?} (2)", + promotable, + ); + // Compute maximum lifetime of this rvalue. This is 'static if // we can promote to a constant, otherwise equal to enclosing temp // lifetime. @@ -925,101 +973,92 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } else { self.temporary_scope(hir_id.local_id) }; - let ret = self.cat_rvalue(id, span, re, expr_ty); + let ret = self.cat_rvalue(hir_id, span, re, expr_ty); debug!("cat_rvalue_node ret {:?}", ret); ret } pub fn cat_rvalue(&self, - cmt_id: ast::NodeId, + cmt_hir_id: hir::HirId, span: Span, temp_scope: ty::Region<'tcx>, - expr_ty: Ty<'tcx>) -> cmt<'tcx> { - let ret = Rc::new(cmt_ { - id:cmt_id, + expr_ty: Ty<'tcx>) -> cmt_<'tcx> { + let ret = cmt_ { + hir_id: cmt_hir_id, span:span, cat:Categorization::Rvalue(temp_scope), mutbl:McDeclared, ty:expr_ty, note: NoteNone - }); + }; debug!("cat_rvalue ret {:?}", ret); ret } - pub fn cat_field(&self, + pub fn cat_field(&self, node: &N, base_cmt: cmt<'tcx>, - f_name: ast::Name, + f_index: usize, + f_ident: ast::Ident, f_ty: Ty<'tcx>) - -> cmt<'tcx> { - let ret = Rc::new(cmt_ { - id: node.id(), + -> cmt_<'tcx> { + let ret = cmt_ { + hir_id: node.hir_id(), span: node.span(), mutbl: base_cmt.mutbl.inherit(), - cat: Categorization::Interior(base_cmt, InteriorField(NamedField(f_name))), + cat: Categorization::Interior(base_cmt, + InteriorField(FieldIndex(f_index, f_ident.name))), ty: f_ty, note: NoteNone - }); + }; debug!("cat_field ret {:?}", ret); ret } - pub fn cat_tup_field(&self, - node: &N, - base_cmt: cmt<'tcx>, - f_idx: usize, - f_ty: Ty<'tcx>) - -> cmt<'tcx> { - let ret = Rc::new(cmt_ { - id: node.id(), - span: 
node.span(), - mutbl: base_cmt.mutbl.inherit(), - cat: Categorization::Interior(base_cmt, InteriorField(PositionalField(f_idx))), - ty: f_ty, - note: NoteNone - }); - debug!("cat_tup_field ret {:?}", ret); - ret - } - - fn cat_overloaded_lvalue(&self, - expr: &hir::Expr, - base: &hir::Expr, - implicit: bool) - -> McResult> { - debug!("cat_overloaded_lvalue: implicit={}", implicit); + fn cat_overloaded_place( + &self, + expr: &hir::Expr, + base: &hir::Expr, + note: Note, + ) -> McResult> { + debug!( + "cat_overloaded_place(expr={:?}, base={:?}, note={:?})", + expr, + base, + note, + ); // Reconstruct the output assuming it's a reference with the // same region and mutability as the receiver. This holds for // `Deref(Mut)::Deref(_mut)` and `Index(Mut)::index(_mut)`. - let lvalue_ty = self.expr_ty(expr)?; + let place_ty = self.expr_ty(expr)?; let base_ty = self.expr_ty_adjusted(base)?; let (region, mutbl) = match base_ty.sty { - ty::TyRef(region, mt) => (region, mt.mutbl), + ty::TyRef(region, _, mutbl) => (region, mutbl), _ => { - span_bug!(expr.span, "cat_overloaded_lvalue: base is not a reference") + span_bug!(expr.span, "cat_overloaded_place: base is not a reference") } }; let ref_ty = self.tcx.mk_ref(region, ty::TypeAndMut { - ty: lvalue_ty, + ty: place_ty, mutbl, }); - let base_cmt = self.cat_rvalue_node(expr.id, expr.span, ref_ty); - self.cat_deref(expr, base_cmt, implicit) + let base_cmt = Rc::new(self.cat_rvalue_node(expr.hir_id, expr.span, ref_ty)); + self.cat_deref(expr, base_cmt, note) } - pub fn cat_deref(&self, - node: &N, - base_cmt: cmt<'tcx>, - implicit: bool) - -> McResult> { + pub fn cat_deref( + &self, + node: &impl HirNode, + base_cmt: cmt<'tcx>, + note: Note, + ) -> McResult> { debug!("cat_deref: base_cmt={:?}", base_cmt); let base_cmt_ty = base_cmt.ty; - let deref_ty = match base_cmt_ty.builtin_deref(true, ty::NoPreference) { + let deref_ty = match base_cmt_ty.builtin_deref(true) { Some(mt) => mt.ty, None => { debug!("Explicit deref of 
non-derefable type: {:?}", @@ -1031,31 +1070,31 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { let ptr = match base_cmt.ty.sty { ty::TyAdt(def, ..) if def.is_box() => Unique, ty::TyRawPtr(ref mt) => UnsafePtr(mt.mutbl), - ty::TyRef(r, mt) => { - let bk = ty::BorrowKind::from_mutbl(mt.mutbl); - if implicit { Implicit(bk, r) } else { BorrowedPtr(bk, r) } + ty::TyRef(r, _, mutbl) => { + let bk = ty::BorrowKind::from_mutbl(mutbl); + BorrowedPtr(bk, r) } ref ty => bug!("unexpected type in cat_deref: {:?}", ty) }; - let ret = Rc::new(cmt_ { - id: node.id(), + let ret = cmt_ { + hir_id: node.hir_id(), span: node.span(), // For unique ptrs, we inherit mutability from the owning reference. mutbl: MutabilityCategory::from_pointer_kind(base_cmt.mutbl, ptr), cat: Categorization::Deref(base_cmt, ptr), ty: deref_ty, - note: NoteNone - }); + note: note, + }; debug!("cat_deref ret {:?}", ret); Ok(ret) } - fn cat_index(&self, + fn cat_index(&self, elt: &N, base_cmt: cmt<'tcx>, element_ty: Ty<'tcx>, context: InteriorOffsetKind) - -> McResult> { + -> McResult> { //! Creates a cmt for an indexing operation (`[]`). //! //! One subtle aspect of indexing that may not be @@ -1070,35 +1109,34 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { //! presuming that `base_cmt` is not of fixed-length type. //! //! # Parameters - //! - `elt`: the AST node being indexed + //! - `elt`: the HIR node being indexed //! 
- `base_cmt`: the cmt of `elt` let interior_elem = InteriorElement(context); - let ret = - self.cat_imm_interior(elt, base_cmt, element_ty, interior_elem); + let ret = self.cat_imm_interior(elt, base_cmt, element_ty, interior_elem); debug!("cat_index ret {:?}", ret); return Ok(ret); } - pub fn cat_imm_interior(&self, + pub fn cat_imm_interior(&self, node: &N, base_cmt: cmt<'tcx>, interior_ty: Ty<'tcx>, interior: InteriorKind) - -> cmt<'tcx> { - let ret = Rc::new(cmt_ { - id: node.id(), + -> cmt_<'tcx> { + let ret = cmt_ { + hir_id: node.hir_id(), span: node.span(), mutbl: base_cmt.mutbl.inherit(), cat: Categorization::Interior(base_cmt, interior), ty: interior_ty, note: NoteNone - }); + }; debug!("cat_imm_interior ret={:?}", ret); ret } - pub fn cat_downcast_if_needed(&self, + pub fn cat_downcast_if_needed(&self, node: &N, base_cmt: cmt<'tcx>, variant_did: DefId) @@ -1108,7 +1146,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { if self.tcx.adt_def(base_did).variants.len() != 1 { let base_ty = base_cmt.ty; let ret = Rc::new(cmt_ { - id: node.id(), + hir_id: node.hir_id(), span: node.span(), mutbl: base_cmt.mutbl.inherit(), cat: Categorization::Downcast(base_cmt, variant_did), @@ -1158,6 +1196,8 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // value, and I consider them to produce the value that was // matched. So if you have something like: // + // (FIXME: `@@3` is not legal code anymore!) + // // let x = @@3; // match x { // @@y { ... } @@ -1178,7 +1218,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // step out of sync again. So you'll see below that we always // get the type of the *subpattern* and use that. - debug!("cat_pattern: {:?} cmt={:?}", pat, cmt); + debug!("cat_pattern(pat={:?}, cmt={:?})", pat, cmt); // If (pattern) adjustments are active for this pattern, adjust the `cmt` correspondingly. // `cmt`s are constructed differently from patterns. 
For example, in @@ -1216,10 +1256,13 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { .pat_adjustments() .get(pat.hir_id) .map(|v| v.len()) - .unwrap_or(0) { - cmt = self.cat_deref(pat, cmt, true /* implicit */)?; + .unwrap_or(0) + { + debug!("cat_pattern: applying adjustment to cmt={:?}", cmt); + cmt = Rc::new(self.cat_deref(pat, cmt, NoteNone)?); } let cmt = cmt; // lose mutability + debug!("cat_pattern: applied adjustment derefs to get cmt={:?}", cmt); // Invoke the callback, but only now, after the `cmt` has adjusted. // @@ -1246,7 +1289,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { self.tcx.adt_def(enum_def).variant_with_id(def_id).fields.len()) } Def::StructCtor(_, CtorKind::Fn) => { - match self.pat_ty(&pat)?.sty { + match self.pat_ty_unadjusted(&pat)?.sty { ty::TyAdt(adt_def, _) => { (cmt, adt_def.non_enum_variant().fields.len()) } @@ -1262,9 +1305,9 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { }; for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) { - let subpat_ty = self.pat_ty(&subpat)?; // see (*2) - let subcmt = self.cat_imm_interior(pat, cmt.clone(), subpat_ty, - InteriorField(PositionalField(i))); + let subpat_ty = self.pat_ty_adjusted(&subpat)?; // see (*2) + let interior = InteriorField(FieldIndex(i, Name::intern(&i.to_string()))); + let subcmt = Rc::new(self.cat_imm_interior(pat, cmt.clone(), subpat_ty, interior)); self.cat_pattern_(subcmt, &subpat, op)?; } } @@ -1285,8 +1328,10 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { }; for fp in field_pats { - let field_ty = self.pat_ty(&fp.node.pat)?; // see (*2) - let cmt_field = self.cat_field(pat, cmt.clone(), fp.node.name, field_ty); + let field_ty = self.pat_ty_adjusted(&fp.node.pat)?; // see (*2) + let f_index = self.tcx.field_index(fp.node.id, self.tables); + let cmt_field = Rc::new(self.cat_field(pat, cmt.clone(), f_index, + fp.node.ident, field_ty)); self.cat_pattern_(cmt_field, 
&fp.node.pat, op)?; } } @@ -1297,14 +1342,14 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { PatKind::Tuple(ref subpats, ddpos) => { // (p1, ..., pN) - let expected_len = match self.pat_ty(&pat)?.sty { - ty::TyTuple(ref tys, _) => tys.len(), + let expected_len = match self.pat_ty_unadjusted(&pat)?.sty { + ty::TyTuple(ref tys) => tys.len(), ref ty => span_bug!(pat.span, "tuple pattern unexpected type {:?}", ty), }; for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) { - let subpat_ty = self.pat_ty(&subpat)?; // see (*2) - let subcmt = self.cat_imm_interior(pat, cmt.clone(), subpat_ty, - InteriorField(PositionalField(i))); + let subpat_ty = self.pat_ty_adjusted(&subpat)?; // see (*2) + let interior = InteriorField(FieldIndex(i, Name::intern(&i.to_string()))); + let subcmt = Rc::new(self.cat_imm_interior(pat, cmt.clone(), subpat_ty, interior)); self.cat_pattern_(subcmt, &subpat, op)?; } } @@ -1313,7 +1358,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { // box p1, &p1, &mut p1. we can ignore the mutability of // PatKind::Ref since that information is already contained // in the type. - let subcmt = self.cat_deref(pat, cmt, false)?; + let subcmt = Rc::new(self.cat_deref(pat, cmt, NoteNone)?); self.cat_pattern_(subcmt, &subpat, op)?; } @@ -1326,7 +1371,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { } }; let context = InteriorOffsetKind::Pattern; - let elt_cmt = self.cat_index(pat, cmt, element_ty, context)?; + let elt_cmt = Rc::new(self.cat_index(pat, cmt, element_ty, context)?); for before_pat in before { self.cat_pattern_(elt_cmt.clone(), &before_pat, op)?; } @@ -1363,7 +1408,7 @@ pub enum AliasableReason { } impl<'tcx> cmt_<'tcx> { - pub fn guarantor(&self) -> cmt<'tcx> { + pub fn guarantor(&self) -> cmt_<'tcx> { //! Returns `self` after stripping away any derefs or //! interior content. The return value is basically the `cmt` which //! 
determines how long the value in `self` remains live. @@ -1374,9 +1419,8 @@ impl<'tcx> cmt_<'tcx> { Categorization::Local(..) | Categorization::Deref(_, UnsafePtr(..)) | Categorization::Deref(_, BorrowedPtr(..)) | - Categorization::Deref(_, Implicit(..)) | Categorization::Upvar(..) => { - Rc::new((*self).clone()) + (*self).clone() } Categorization::Downcast(ref b, _) | Categorization::Interior(ref b, _) | @@ -1386,7 +1430,7 @@ impl<'tcx> cmt_<'tcx> { } } - /// Returns `FreelyAliasable(_)` if this lvalue represents a freely aliasable pointer type. + /// Returns `FreelyAliasable(_)` if this place represents a freely aliasable pointer type. pub fn freely_aliasable(&self) -> Aliasability { // Maybe non-obvious: copied upvars can only be considered // non-aliasable in once closures, since any other kind can be @@ -1394,9 +1438,7 @@ impl<'tcx> cmt_<'tcx> { match self.cat { Categorization::Deref(ref b, BorrowedPtr(ty::MutBorrow, _)) | - Categorization::Deref(ref b, Implicit(ty::MutBorrow, _)) | Categorization::Deref(ref b, BorrowedPtr(ty::UniqueImmBorrow, _)) | - Categorization::Deref(ref b, Implicit(ty::UniqueImmBorrow, _)) | Categorization::Deref(ref b, Unique) | Categorization::Downcast(ref b, _) | Categorization::Interior(ref b, _) => { @@ -1419,41 +1461,40 @@ impl<'tcx> cmt_<'tcx> { } } - Categorization::Deref(_, BorrowedPtr(ty::ImmBorrow, _)) | - Categorization::Deref(_, Implicit(ty::ImmBorrow, _)) => { + Categorization::Deref(_, BorrowedPtr(ty::ImmBorrow, _)) => { FreelyAliasable(AliasableBorrowed) } } } - // Digs down through one or two layers of deref and grabs the cmt - // for the upvar if a note indicates there is one. - pub fn upvar(&self) -> Option> { + // Digs down through one or two layers of deref and grabs the + // Categorization of the cmt for the upvar if a note indicates there is + // one. + pub fn upvar_cat(&self) -> Option<&Categorization<'tcx>> { match self.note { NoteClosureEnv(..) | NoteUpvarRef(..) 
=> { Some(match self.cat { Categorization::Deref(ref inner, _) => { match inner.cat { - Categorization::Deref(ref inner, _) => inner.clone(), - Categorization::Upvar(..) => inner.clone(), + Categorization::Deref(ref inner, _) => &inner.cat, + Categorization::Upvar(..) => &inner.cat, _ => bug!() } } _ => bug!() }) } - NoteNone => None + NoteIndex | NoteNone => None } } - pub fn descriptive_string(&self, tcx: TyCtxt) -> String { match self.cat { Categorization::StaticItem => { "static item".to_string() } Categorization::Rvalue(..) => { - "non-lvalue".to_string() + "non-place".to_string() } Categorization::Local(vid) => { if tcx.hir.is_argument(vid) { @@ -1463,36 +1504,32 @@ impl<'tcx> cmt_<'tcx> { } } Categorization::Deref(_, pk) => { - let upvar = self.upvar(); - match upvar.as_ref().map(|i| &i.cat) { + match self.upvar_cat() { Some(&Categorization::Upvar(ref var)) => { var.to_string() } Some(_) => bug!(), None => { match pk { - Implicit(..) => { - format!("indexed content") - } Unique => { - format!("`Box` content") + "`Box` content".to_string() } UnsafePtr(..) => { - format!("dereference of raw pointer") + "dereference of raw pointer".to_string() } BorrowedPtr(..) 
=> { - format!("borrowed content") + match self.note { + NoteIndex => "indexed content".to_string(), + _ => "borrowed content".to_string(), + } } } } } } - Categorization::Interior(_, InteriorField(NamedField(_))) => { + Categorization::Interior(_, InteriorField(..)) => { "field".to_string() } - Categorization::Interior(_, InteriorField(PositionalField(_))) => { - "anonymous field".to_string() - } Categorization::Interior(_, InteriorElement(InteriorOffsetKind::Index)) => { "indexed content".to_string() } @@ -1512,12 +1549,9 @@ impl<'tcx> cmt_<'tcx> { pub fn ptr_sigil(ptr: PointerKind) -> &'static str { match ptr { Unique => "Box", - BorrowedPtr(ty::ImmBorrow, _) | - Implicit(ty::ImmBorrow, _) => "&", - BorrowedPtr(ty::MutBorrow, _) | - Implicit(ty::MutBorrow, _) => "&mut", - BorrowedPtr(ty::UniqueImmBorrow, _) | - Implicit(ty::UniqueImmBorrow, _) => "&unique", + BorrowedPtr(ty::ImmBorrow, _) => "&", + BorrowedPtr(ty::MutBorrow, _) => "&mut", + BorrowedPtr(ty::UniqueImmBorrow, _) => "&unique", UnsafePtr(_) => "*", } } @@ -1525,8 +1559,7 @@ pub fn ptr_sigil(ptr: PointerKind) -> &'static str { impl fmt::Debug for InteriorKind { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - InteriorField(NamedField(fld)) => write!(f, "{}", fld), - InteriorField(PositionalField(i)) => write!(f, "#{}", i), + InteriorField(FieldIndex(_, info)) => write!(f, "{}", info), InteriorElement(..) => write!(f, "[]"), } } diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index e11609ea9b79..a09942258e22 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -15,19 +15,19 @@ // makes all other generics or inline functions that it references // reachable as well. 
+use hir::{CodegenFnAttrs, CodegenFnAttrFlags}; use hir::map as hir_map; use hir::def::Def; use hir::def_id::{DefId, CrateNum}; -use std::rc::Rc; -use ty::{self, TyCtxt}; -use ty::maps::Providers; +use rustc_data_structures::sync::Lrc; +use ty::{self, TyCtxt, GenericParamDefKind}; +use ty::query::Providers; use middle::privacy; use session::config; use util::nodemap::{NodeSet, FxHashSet}; -use syntax::abi::Abi; +use rustc_target::spec::abi::Abi; use syntax::ast; -use syntax::attr; use hir; use hir::def_id::LOCAL_CRATE; use hir::intravisit::{Visitor, NestedVisitorMap}; @@ -36,21 +36,30 @@ use hir::intravisit; // Returns true if the given set of generics implies that the item it's // associated with must be inlined. -fn generics_require_inlining(generics: &hir::Generics) -> bool { - generics.params.iter().any(|param| param.is_type_param()) +fn generics_require_inlining(generics: &ty::Generics) -> bool { + for param in &generics.params { + match param.kind { + GenericParamDefKind::Lifetime { .. } => {} + GenericParamDefKind::Type { .. } => return true, + } + } + false } // Returns true if the given item must be inlined because it may be // monomorphized or it was marked with `#[inline]`. This will only return // true for functions. -fn item_might_be_inlined(item: &hir::Item) -> bool { - if attr::requests_inline(&item.attrs) { +fn item_might_be_inlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, + item: &hir::Item, + attrs: CodegenFnAttrs) -> bool { + if attrs.requests_inline() { return true } match item.node { - hir::ItemImpl(_, _, _, ref generics, ..) | - hir::ItemFn(.., ref generics, _) => { + hir::ItemKind::Impl(..) | + hir::ItemKind::Fn(..) 
=> { + let generics = tcx.generics_of(tcx.hir.local_def_id(item.id)); generics_require_inlining(generics) } _ => false, @@ -60,14 +69,15 @@ fn item_might_be_inlined(item: &hir::Item) -> bool { fn method_might_be_inlined<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item: &hir::ImplItem, impl_src: DefId) -> bool { - if attr::requests_inline(&impl_item.attrs) || - generics_require_inlining(&impl_item.generics) { + let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id.owner_def_id()); + let generics = tcx.generics_of(tcx.hir.local_def_id(impl_item.id)); + if codegen_fn_attrs.requests_inline() || generics_require_inlining(generics) { return true } if let Some(impl_node_id) = tcx.hir.as_local_node_id(impl_src) { match tcx.hir.find(impl_node_id) { Some(hir_map::NodeItem(item)) => - item_might_be_inlined(&item), + item_might_be_inlined(tcx, &item, codegen_fn_attrs), Some(..) | None => span_bug!(impl_item.span, "impl did is not an item") } @@ -105,11 +115,11 @@ impl<'a, 'tcx> Visitor<'tcx> for ReachableContext<'a, 'tcx> { fn visit_expr(&mut self, expr: &'tcx hir::Expr) { let def = match expr.node { - hir::ExprPath(ref qpath) => { + hir::ExprKind::Path(ref qpath) => { Some(self.tables.qpath_def(qpath, expr.hir_id)) } - hir::ExprMethodCall(..) => { - Some(self.tables.type_dependent_defs()[expr.hir_id]) + hir::ExprKind::MethodCall(..) => { + self.tables.type_dependent_defs().get(expr.hir_id).cloned() } _ => None }; @@ -160,7 +170,8 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { match self.tcx.hir.find(node_id) { Some(hir_map::NodeItem(item)) => { match item.node { - hir::ItemFn(..) => item_might_be_inlined(&item), + hir::ItemKind::Fn(..) => + item_might_be_inlined(self.tcx, &item, self.tcx.codegen_fn_attrs(def_id)), _ => false, } } @@ -176,8 +187,10 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { match impl_item.node { hir::ImplItemKind::Const(..) => true, hir::ImplItemKind::Method(..) 
=> { - if generics_require_inlining(&impl_item.generics) || - attr::requests_inline(&impl_item.attrs) { + let attrs = self.tcx.codegen_fn_attrs(def_id); + let generics = self.tcx.generics_of(def_id); + if generics_require_inlining(&generics) || + attrs.requests_inline() { true } else { let impl_did = self.tcx @@ -188,13 +201,15 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // does too. let impl_node_id = self.tcx.hir.as_local_node_id(impl_did).unwrap(); match self.tcx.hir.expect_item(impl_node_id).node { - hir::ItemImpl(_, _, _, ref generics, ..) => { - generics_require_inlining(generics) + hir::ItemKind::Impl(..) => { + let generics = self.tcx.generics_of(impl_did); + generics_require_inlining(&generics) } _ => false } } } + hir::ImplItemKind::Existential(..) | hir::ImplItemKind::Type(_) => false, } } @@ -206,11 +221,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Step 2: Mark all symbols that the symbols on the worklist touch. fn propagate(&mut self) { let mut scanned = FxHashSet(); - loop { - let search_item = match self.worklist.pop() { - Some(item) => item, - None => break, - }; + while let Some(search_item) = self.worklist.pop() { if !scanned.insert(search_item) { continue } @@ -227,13 +238,13 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // If we are building an executable, only explicitly extern // types need to be exported. if let hir_map::NodeItem(item) = *node { - let reachable = if let hir::ItemFn(.., abi, _, _) = item.node { - abi != Abi::Rust + let reachable = if let hir::ItemKind::Fn(_, header, ..) 
= item.node { + header.abi != Abi::Rust } else { false }; let def_id = self.tcx.hir.local_def_id(item.id); - let is_extern = self.tcx.contains_extern_indicator(def_id); + let is_extern = self.tcx.codegen_fn_attrs(def_id).contains_extern_indicator(); if reachable || is_extern { self.reachable_symbols.insert(search_item); } @@ -249,8 +260,11 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { match *node { hir_map::NodeItem(item) => { match item.node { - hir::ItemFn(.., body) => { - if item_might_be_inlined(&item) { + hir::ItemKind::Fn(.., body) => { + let def_id = self.tcx.hir.local_def_id(item.id); + if item_might_be_inlined(self.tcx, + &item, + self.tcx.codegen_fn_attrs(def_id)) { self.visit_nested_body(body); } } @@ -258,19 +272,27 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Reachable constants will be inlined into other crates // unconditionally, so we need to make sure that their // contents are also reachable. - hir::ItemConst(_, init) => { + hir::ItemKind::Const(_, init) => { self.visit_nested_body(init); } // These are normal, nothing reachable about these // inherently and their children are already in the // worklist, as determined by the privacy pass - hir::ItemExternCrate(_) | hir::ItemUse(..) | - hir::ItemTy(..) | hir::ItemStatic(..) | - hir::ItemMod(..) | hir::ItemForeignMod(..) | - hir::ItemImpl(..) | hir::ItemTrait(..) | hir::ItemTraitAlias(..) | - hir::ItemStruct(..) | hir::ItemEnum(..) | - hir::ItemUnion(..) | hir::ItemGlobalAsm(..) => {} + hir::ItemKind::ExternCrate(_) | + hir::ItemKind::Use(..) | + hir::ItemKind::Existential(..) | + hir::ItemKind::Ty(..) | + hir::ItemKind::Static(..) | + hir::ItemKind::Mod(..) | + hir::ItemKind::ForeignMod(..) | + hir::ItemKind::Impl(..) | + hir::ItemKind::Trait(..) | + hir::ItemKind::TraitAlias(..) | + hir::ItemKind::Struct(..) | + hir::ItemKind::Enum(..) | + hir::ItemKind::Union(..) | + hir::ItemKind::GlobalAsm(..) 
=> {} } } hir_map::NodeTraitItem(trait_method) => { @@ -297,10 +319,11 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { self.visit_nested_body(body) } } + hir::ImplItemKind::Existential(..) | hir::ImplItemKind::Type(_) => {} } } - hir_map::NodeExpr(&hir::Expr { node: hir::ExprClosure(.., body, _, _), .. }) => { + hir_map::NodeExpr(&hir::Expr { node: hir::ExprKind::Closure(.., body, _, _), .. }) => { self.visit_nested_body(body); } // Nothing to recurse on for these @@ -335,17 +358,19 @@ struct CollectPrivateImplItemsVisitor<'a, 'tcx: 'a> { impl<'a, 'tcx: 'a> ItemLikeVisitor<'tcx> for CollectPrivateImplItemsVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { // Anything which has custom linkage gets thrown on the worklist no - // matter where it is in the crate. - if attr::contains_name(&item.attrs, "linkage") { + // matter where it is in the crate, along with "special std symbols" + // which are currently akin to allocator symbols. + let def_id = self.tcx.hir.local_def_id(item.id); + let codegen_attrs = self.tcx.codegen_fn_attrs(def_id); + if codegen_attrs.linkage.is_some() || + codegen_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) { self.worklist.push(item.id); } // We need only trait impls here, not inherent impls, and only non-exported ones - if let hir::ItemImpl(.., Some(ref trait_ref), _, ref impl_item_refs) = item.node { + if let hir::ItemKind::Impl(.., Some(ref trait_ref), _, ref impl_item_refs) = item.node { if !self.access_levels.is_reachable(item.id) { - for impl_item_ref in impl_item_refs { - self.worklist.push(impl_item_ref.id.node_id); - } + self.worklist.extend(impl_item_refs.iter().map(|r| r.id.node_id)); let trait_def_id = match trait_ref.path.def { Def::Trait(def_id) => def_id, @@ -377,7 +402,7 @@ impl<'a, 'tcx: 'a> ItemLikeVisitor<'tcx> for CollectPrivateImplItemsVisitor<'a, // We introduce a new-type here, so we can have a specialized HashStable // implementation for it. 
#[derive(Clone)] -pub struct ReachableSet(pub Rc); +pub struct ReachableSet(pub Lrc); fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> ReachableSet { @@ -386,8 +411,8 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| { - *ty == config::CrateTypeRlib || *ty == config::CrateTypeDylib || - *ty == config::CrateTypeProcMacro + *ty == config::CrateType::Rlib || *ty == config::CrateType::Dylib || + *ty == config::CrateType::ProcMacro }); let mut reachable_context = ReachableContext { tcx, @@ -402,9 +427,7 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> // If other crates link to us, they're going to expect to be able to // use the lang items, so we need to be sure to mark them as // exported. - for (id, _) in &access_levels.map { - reachable_context.worklist.push(*id); - } + reachable_context.worklist.extend(access_levels.map.iter().map(|(id, _)| *id)); for item in tcx.lang_items().items().iter() { if let Some(did) = *item { if let Some(node_id) = tcx.hir.as_local_node_id(did) { @@ -425,7 +448,7 @@ fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> reachable_context.propagate(); // Return the set of reachable symbols. 
- ReachableSet(Rc::new(reachable_context.reachable_symbols)) + ReachableSet(Lrc::new(reachable_context.reachable_symbols)) } pub fn provide(providers: &mut Providers) { diff --git a/src/librustc/middle/recursion_limit.rs b/src/librustc/middle/recursion_limit.rs index 6c87f750376f..077a20315a2a 100644 --- a/src/librustc/middle/recursion_limit.rs +++ b/src/librustc/middle/recursion_limit.rs @@ -18,17 +18,17 @@ use session::Session; use syntax::ast; -use std::cell::Cell; +use rustc_data_structures::sync::Once; pub fn update_limits(sess: &Session, krate: &ast::Crate) { update_limit(sess, krate, &sess.recursion_limit, "recursion_limit", - "recursion limit"); + "recursion limit", 64); update_limit(sess, krate, &sess.type_length_limit, "type_length_limit", - "type length limit"); + "type length limit", 1048576); } -fn update_limit(sess: &Session, krate: &ast::Crate, limit: &Cell, - name: &str, description: &str) { +fn update_limit(sess: &Session, krate: &ast::Crate, limit: &Once, + name: &str, description: &str, default: usize) { for attr in &krate.attrs { if !attr.check_name(name) { continue; @@ -45,4 +45,5 @@ fn update_limit(sess: &Session, krate: &ast::Crate, limit: &Cell, "malformed {} attribute, expected #![{}=\"N\"]", description, name); } + limit.set(default); } diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 049bf4470cbc..ebdc9c922b1d 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -11,8 +11,10 @@ //! This file builds up the `ScopeTree`, which describes //! the parent links in the region hierarchy. //! -//! Most of the documentation on regions can be found in -//! `middle/infer/region_constraints/README.md` +//! For more information about how MIR-based region-checking works, +//! see the [rustc guide]. +//! +//! 
[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html use ich::{StableHashingContext, NodeIdHashingMode}; use util::nodemap::{FxHashMap, FxHashSet}; @@ -20,12 +22,12 @@ use ty; use std::fmt; use std::mem; -use std::rc::Rc; +use rustc_data_structures::sync::Lrc; use syntax::codemap; use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; use ty::TyCtxt; -use ty::maps::Providers; +use ty::query::Providers; use hir; use hir::def_id::DefId; @@ -277,6 +279,8 @@ impl Scope { } } +pub type ScopeDepth = u32; + /// The region scope tree encodes information about region relationships. #[derive(Default, Debug)] pub struct ScopeTree { @@ -294,7 +298,7 @@ pub struct ScopeTree { /// conditional expression or repeating block. (Note that the /// enclosing scope id for the block associated with a closure is /// the closure itself.) - parent_map: FxHashMap, + parent_map: FxHashMap, /// `var_map` maps from a variable or binding id to the block in /// which that variable is declared. @@ -351,8 +355,8 @@ pub struct ScopeTree { /// the result of `g()` occurs after the yield (and therefore /// doesn't). If we want to infer that, we can look at the /// postorder traversal: - /// ``` - /// `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0 + /// ```plain,ignore + /// `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0 /// ``` /// /// In which we can easily see that `Call#1` occurs before the yield, @@ -412,11 +416,12 @@ pub struct Context { /// details. root_id: Option, - /// the scope that contains any new variables declared - var_parent: Option, + /// The scope that contains any new variables declared, plus its depth in + /// the scope tree. + var_parent: Option<(Scope, ScopeDepth)>, - /// region parent of expressions etc - parent: Option, + /// Region parent of expressions, etc., plus its depth in the scope tree. 
+ parent: Option<(Scope, ScopeDepth)>, } struct RegionResolutionVisitor<'a, 'tcx: 'a> { @@ -453,9 +458,50 @@ struct RegionResolutionVisitor<'a, 'tcx: 'a> { terminating_scopes: FxHashSet, } +struct ExprLocatorVisitor { + hir_id: hir::HirId, + result: Option, + expr_and_pat_count: usize, +} + +// This visitor has to have the same visit_expr calls as RegionResolutionVisitor +// since `expr_count` is compared against the results there. +impl<'tcx> Visitor<'tcx> for ExprLocatorVisitor { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::None + } + + fn visit_pat(&mut self, pat: &'tcx Pat) { + intravisit::walk_pat(self, pat); + + self.expr_and_pat_count += 1; + + if pat.hir_id == self.hir_id { + self.result = Some(self.expr_and_pat_count); + } + } + + fn visit_expr(&mut self, expr: &'tcx Expr) { + debug!("ExprLocatorVisitor - pre-increment {} expr = {:?}", + self.expr_and_pat_count, + expr); + + intravisit::walk_expr(self, expr); + + self.expr_and_pat_count += 1; + + debug!("ExprLocatorVisitor - post-increment {} expr = {:?}", + self.expr_and_pat_count, + expr); + + if expr.hir_id == self.hir_id { + self.result = Some(self.expr_and_pat_count); + } + } +} impl<'tcx> ScopeTree { - pub fn record_scope_parent(&mut self, child: Scope, parent: Option) { + pub fn record_scope_parent(&mut self, child: Scope, parent: Option<(Scope, ScopeDepth)>) { debug!("{:?}.parent = {:?}", child, parent); if let Some(p) = parent { @@ -471,7 +517,7 @@ impl<'tcx> ScopeTree { pub fn each_encl_scope(&self, mut e:E) where E: FnMut(Scope, Scope) { for (&child, &parent) in &self.parent_map { - e(child, parent) + e(child, parent.0) } } @@ -498,18 +544,6 @@ impl<'tcx> ScopeTree { assert!(previous.is_none()); } - fn closure_is_enclosed_by(&self, - mut sub_closure: hir::ItemLocalId, - sup_closure: hir::ItemLocalId) -> bool { - loop { - if sub_closure == sup_closure { return true; } - match self.closure_tree.get(&sub_closure) { - Some(&s) => { sub_closure 
= s; } - None => { return false; } - } - } - } - fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) { debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime); assert!(var != lifetime.item_local_id()); @@ -526,7 +560,7 @@ impl<'tcx> ScopeTree { pub fn opt_encl_scope(&self, id: Scope) -> Option { //! Returns the narrowest scope that encloses `id`, if any. - self.parent_map.get(&id).cloned() + self.parent_map.get(&id).cloned().map(|(p, _)| p) } #[allow(dead_code)] // used in cfg @@ -558,7 +592,7 @@ impl<'tcx> ScopeTree { // returned. let mut id = Scope::Node(expr_id); - while let Some(&p) = self.parent_map.get(&id) { + while let Some(&(p, _)) = self.parent_map.get(&id) { match p.data() { ScopeData::Destruction(..) => { debug!("temporary_scope({:?}) = {:?} [enclosing]", @@ -612,105 +646,75 @@ impl<'tcx> ScopeTree { return true; } - /// Finds the nearest common ancestor (if any) of two scopes. That is, finds the smallest - /// scope which is greater than or equal to both `scope_a` and `scope_b`. - pub fn nearest_common_ancestor(&self, - scope_a: Scope, - scope_b: Scope) - -> Scope { + /// Returns the id of the innermost containing body + pub fn containing_body(&self, mut scope: Scope)-> Option { + loop { + if let ScopeData::CallSite(id) = scope.data() { + return Some(id); + } + + match self.opt_encl_scope(scope) { + None => return None, + Some(parent) => scope = parent, + } + } + } + + /// Finds the nearest common ancestor of two scopes. That is, finds the + /// smallest scope which is greater than or equal to both `scope_a` and + /// `scope_b`. + pub fn nearest_common_ancestor(&self, scope_a: Scope, scope_b: Scope) -> Scope { if scope_a == scope_b { return scope_a; } - // [1] The initial values for `a_buf` and `b_buf` are not used. - // The `ancestors_of` function will return some prefix that - // is re-initialized with new values (or else fallback to a - // heap-allocated vector). 
- let mut a_buf: [Scope; 32] = [scope_a /* [1] */; 32]; - let mut a_vec: Vec = vec![]; - let mut b_buf: [Scope; 32] = [scope_b /* [1] */; 32]; - let mut b_vec: Vec = vec![]; - let parent_map = &self.parent_map; - let a_ancestors = ancestors_of(parent_map, scope_a, &mut a_buf, &mut a_vec); - let b_ancestors = ancestors_of(parent_map, scope_b, &mut b_buf, &mut b_vec); - let mut a_index = a_ancestors.len() - 1; - let mut b_index = b_ancestors.len() - 1; + let mut a = scope_a; + let mut b = scope_b; - // Here, [ab]_ancestors is a vector going from narrow to broad. - // The end of each vector will be the item where the scope is - // defined; if there are any common ancestors, then the tails of - // the vector will be the same. So basically we want to walk - // backwards from the tail of each vector and find the first point - // where they diverge. If one vector is a suffix of the other, - // then the corresponding scope is a superscope of the other. + // Get the depth of each scope's parent. If either scope has no parent, + // it must be the root, which means we can stop immediately because the + // root must be the nearest common ancestor. (In practice, this is + // moderately common.) + let (parent_a, parent_a_depth) = match self.parent_map.get(&a) { + Some(pd) => *pd, + None => return a, + }; + let (parent_b, parent_b_depth) = match self.parent_map.get(&b) { + Some(pd) => *pd, + None => return b, + }; - if a_ancestors[a_index] != b_ancestors[b_index] { - // In this case, the two regions belong to completely - // different functions. Compare those fn for lexical - // nesting. The reasoning behind this is subtle. See the - // "Modeling closures" section of the README in - // infer::region_constraints for more details. 
- let a_root_scope = a_ancestors[a_index]; - let b_root_scope = a_ancestors[a_index]; - return match (a_root_scope.data(), b_root_scope.data()) { - (ScopeData::Destruction(a_root_id), - ScopeData::Destruction(b_root_id)) => { - if self.closure_is_enclosed_by(a_root_id, b_root_id) { - // `a` is enclosed by `b`, hence `b` is the ancestor of everything in `a` - scope_b - } else if self.closure_is_enclosed_by(b_root_id, a_root_id) { - // `b` is enclosed by `a`, hence `a` is the ancestor of everything in `b` - scope_a - } else { - // neither fn encloses the other - bug!() - } - } - _ => { - // root ids are always Node right now - bug!() - } - }; + if parent_a_depth > parent_b_depth { + // `a` is lower than `b`. Move `a` up until it's at the same depth + // as `b`. The first move up is trivial because we already found + // `parent_a` above; the loop does the remaining N-1 moves. + a = parent_a; + for _ in 0..(parent_a_depth - parent_b_depth - 1) { + a = self.parent_map.get(&a).unwrap().0; + } + } else if parent_b_depth > parent_a_depth { + // `b` is lower than `a`. + b = parent_b; + for _ in 0..(parent_b_depth - parent_a_depth - 1) { + b = self.parent_map.get(&b).unwrap().0; + } + } else { + // Both scopes are at the same depth, and we know they're not equal + // because that case was tested for at the top of this function. So + // we can trivially move them both up one level now. + assert!(parent_a_depth != 0); + a = parent_a; + b = parent_b; } - loop { - // Loop invariant: a_ancestors[a_index] == b_ancestors[b_index] - // for all indices between a_index and the end of the array - if a_index == 0 { return scope_a; } - if b_index == 0 { return scope_b; } - a_index -= 1; - b_index -= 1; - if a_ancestors[a_index] != b_ancestors[b_index] { - return a_ancestors[a_index + 1]; - } - } + // Now both scopes are at the same level. We move upwards in lockstep + // until they match. 
In practice, this loop is almost always executed + // zero times because `a` is almost always a direct ancestor of `b` or + // vice versa. + while a != b { + a = self.parent_map.get(&a).unwrap().0; + b = self.parent_map.get(&b).unwrap().0; + }; - fn ancestors_of<'a, 'tcx>(parent_map: &FxHashMap, - scope: Scope, - buf: &'a mut [Scope; 32], - vec: &'a mut Vec) - -> &'a [Scope] { - // debug!("ancestors_of(scope={:?})", scope); - let mut scope = scope; - - let mut i = 0; - while i < 32 { - buf[i] = scope; - match parent_map.get(&scope) { - Some(&superscope) => scope = superscope, - _ => return &buf[..i+1] - } - i += 1; - } - - *vec = Vec::with_capacity(64); - vec.extend_from_slice(buf); - loop { - vec.push(scope); - match parent_map.get(&scope) { - Some(&superscope) => scope = superscope, - _ => return &*vec - } - } - } + a } /// Assuming that the provided region was defined within this `ScopeTree`, @@ -763,11 +767,34 @@ impl<'tcx> ScopeTree { /// Checks whether the given scope contains a `yield`. If so, /// returns `Some((span, expr_count))` with the span of a yield we found and - /// the number of expressions appearing before the `yield` in the body. + /// the number of expressions and patterns appearing before the `yield` in the body + 1. + /// If there a are multiple yields in a scope, the one with the highest number is returned. pub fn yield_in_scope(&self, scope: Scope) -> Option<(Span, usize)> { self.yield_in_scope.get(&scope).cloned() } + /// Checks whether the given scope contains a `yield` and if that yield could execute + /// after `expr`. If so, it returns the span of that `yield`. + /// `scope` must be inside the body. 
+ pub fn yield_in_scope_for_expr(&self, + scope: Scope, + expr_hir_id: hir::HirId, + body: &'tcx hir::Body) -> Option { + self.yield_in_scope(scope).and_then(|(span, count)| { + let mut visitor = ExprLocatorVisitor { + hir_id: expr_hir_id, + result: None, + expr_and_pat_count: 0, + }; + visitor.visit_body(body); + if count >= visitor.result.unwrap() { + Some(span) + } else { + None + } + }) + } + /// Gives the number of expressions visited in a body. /// Used to sanity check visit_expr call count when /// calculating generator interiors. @@ -786,7 +813,7 @@ fn record_var_lifetime(visitor: &mut RegionResolutionVisitor, // // extern fn isalnum(c: c_int) -> c_int } - Some(parent_scope) => + Some((parent_scope, _)) => visitor.scope_tree.record_var_scope(var_id, parent_scope), } } @@ -808,7 +835,7 @@ fn resolve_block<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, blk: // // Each of the statements within the block is a terminating // scope, and thus a temporary (e.g. the result of calling - // `bar()` in the initalizer expression for `let inner = ...;`) + // `bar()` in the initializer expression for `let inner = ...;`) // will be cleaned up immediately after its corresponding // statement (i.e. `let inner = ...;`) executes. // @@ -831,8 +858,8 @@ fn resolve_block<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, blk: // index information.) for (i, statement) in blk.stmts.iter().enumerate() { - if let hir::StmtDecl(..) = statement.node { - // Each StmtDecl introduces a subscope for bindings + if let hir::StmtKind::Decl(..) = statement.node { + // Each StmtKind::Decl introduces a subscope for bindings // introduced by the declaration; this subscope covers // a suffix of the block . 
Each subscope in a block // has the previous subscope in the block as a parent, @@ -872,9 +899,13 @@ fn resolve_pat<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, pat: & record_var_lifetime(visitor, pat.hir_id.local_id, pat.span); } + debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat); + intravisit::walk_pat(visitor, pat); visitor.expr_and_pat_count += 1; + + debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat); } fn resolve_stmt<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, stmt: &'tcx hir::Stmt) { @@ -897,7 +928,7 @@ fn resolve_stmt<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, stmt: } fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: &'tcx hir::Expr) { - debug!("resolve_expr(expr.id={:?})", expr.id); + debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr); let prev_cx = visitor.cx; visitor.enter_node_scope_with_dtor(expr.hir_id.local_id); @@ -912,39 +943,39 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: // scopes, meaning that temporaries cannot outlive them. // This ensures fixed size stacks. - hir::ExprBinary(codemap::Spanned { node: hir::BiAnd, .. }, _, ref r) | - hir::ExprBinary(codemap::Spanned { node: hir::BiOr, .. }, _, ref r) => { + hir::ExprKind::Binary(codemap::Spanned { node: hir::BinOpKind::And, .. }, _, ref r) | + hir::ExprKind::Binary(codemap::Spanned { node: hir::BinOpKind::Or, .. }, _, ref r) => { // For shortcircuiting operators, mark the RHS as a terminating // scope since it only executes conditionally. 
terminating(r.hir_id.local_id); } - hir::ExprIf(ref expr, ref then, Some(ref otherwise)) => { + hir::ExprKind::If(ref expr, ref then, Some(ref otherwise)) => { terminating(expr.hir_id.local_id); terminating(then.hir_id.local_id); terminating(otherwise.hir_id.local_id); } - hir::ExprIf(ref expr, ref then, None) => { + hir::ExprKind::If(ref expr, ref then, None) => { terminating(expr.hir_id.local_id); terminating(then.hir_id.local_id); } - hir::ExprLoop(ref body, _, _) => { + hir::ExprKind::Loop(ref body, _, _) => { terminating(body.hir_id.local_id); } - hir::ExprWhile(ref expr, ref body, _) => { + hir::ExprKind::While(ref expr, ref body, _) => { terminating(expr.hir_id.local_id); terminating(body.hir_id.local_id); } - hir::ExprMatch(..) => { + hir::ExprKind::Match(..) => { visitor.cx.var_parent = visitor.cx.parent; } - hir::ExprAssignOp(..) | hir::ExprIndex(..) | - hir::ExprUnary(..) | hir::ExprCall(..) | hir::ExprMethodCall(..) => { + hir::ExprKind::AssignOp(..) | hir::ExprKind::Index(..) | + hir::ExprKind::Unary(..) | hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) => { // FIXME(https://github.com/rust-lang/rfcs/issues/811) Nested method calls // // The lifetimes for a call or method call look as follows: @@ -972,7 +1003,7 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: match expr.node { // Manually recurse over closures, because they are the only // case of nested bodies that share the parent environment. - hir::ExprClosure(.., body, _, _) => { + hir::ExprKind::Closure(.., body, _, _) => { let body = visitor.tcx.hir.body(body); visitor.visit_body(body); } @@ -982,7 +1013,9 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: visitor.expr_and_pat_count += 1; - if let hir::ExprYield(..) = expr.node { + debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr); + + if let hir::ExprKind::Yield(..) 
= expr.node { // Mark this expr's scope and all parent scopes as containing `yield`. let mut scope = Scope::Node(expr.hir_id.local_id); loop { @@ -992,7 +1025,7 @@ fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: // Keep traversing up while we can. match visitor.scope_tree.parent_map.get(&scope) { // Don't cross from closure bodies to their parent. - Some(&superscope) => match superscope.data() { + Some(&(superscope, _)) => match superscope.data() { ScopeData::CallSite(_) => break, _ => scope = superscope }, @@ -1009,7 +1042,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, init: Option<&'tcx hir::Expr>) { debug!("resolve_local(pat={:?}, init={:?})", pat, init); - let blk_scope = visitor.cx.var_parent; + let blk_scope = visitor.cx.var_parent.map(|(p, _)| p); // As an exception to the normal rules governing temporary // lifetimes, initializers in a let have a temporary lifetime @@ -1033,7 +1066,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, // I mean that creating a binding into a ref-counted or managed value // would still count.) // - // 3. `ET`, which matches both rvalues like `foo()` as well as lvalues + // 3. `ET`, which matches both rvalues like `foo()` as well as places // based on rvalues like `foo().x[2].y`. 
// // A subexpression `` that appears in a let initializer @@ -1077,12 +1110,13 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, } } - if let Some(pat) = pat { - visitor.visit_pat(pat); - } + // Make sure we visit the initializer first, so expr_and_pat_count remains correct if let Some(expr) = init { visitor.visit_expr(expr); } + if let Some(pat) = pat { + visitor.visit_pat(pat); + } /// True if `pat` match the `P&` nonterminal: /// @@ -1159,27 +1193,27 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, blk_id: Option) { match expr.node { - hir::ExprAddrOf(_, ref subexpr) => { + hir::ExprKind::AddrOf(_, ref subexpr) => { record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id); record_rvalue_scope(visitor, &subexpr, blk_id); } - hir::ExprStruct(_, ref fields, _) => { + hir::ExprKind::Struct(_, ref fields, _) => { for field in fields { record_rvalue_scope_if_borrow_expr( visitor, &field.expr, blk_id); } } - hir::ExprArray(ref subexprs) | - hir::ExprTup(ref subexprs) => { + hir::ExprKind::Array(ref subexprs) | + hir::ExprKind::Tup(ref subexprs) => { for subexpr in subexprs { record_rvalue_scope_if_borrow_expr( visitor, &subexpr, blk_id); } } - hir::ExprCast(ref subexpr, _) => { + hir::ExprKind::Cast(ref subexpr, _) => { record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id) } - hir::ExprBlock(ref block) => { + hir::ExprKind::Block(ref block, _) => { if let Some(ref subexpr) = block.expr { record_rvalue_scope_if_borrow_expr( visitor, &subexpr, blk_id); @@ -1203,7 +1237,7 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, /// | (ET) /// | /// - /// Note: ET is intended to match "rvalues or lvalues based on rvalues". + /// Note: ET is intended to match "rvalues or places based on rvalues". 
fn record_rvalue_scope<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, expr: &hir::Expr, blk_scope: Option) { @@ -1211,17 +1245,16 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, loop { // Note: give all the expressions matching `ET` with the // extended temporary lifetime, not just the innermost rvalue, - // because in trans if we must compile e.g. `*rvalue()` + // because in codegen if we must compile e.g. `*rvalue()` // into a temporary, we request the temporary scope of the // outer expression. visitor.scope_tree.record_rvalue_scope(expr.hir_id.local_id, blk_scope); match expr.node { - hir::ExprAddrOf(_, ref subexpr) | - hir::ExprUnary(hir::UnDeref, ref subexpr) | - hir::ExprField(ref subexpr, _) | - hir::ExprTupField(ref subexpr, _) | - hir::ExprIndex(ref subexpr, _) => { + hir::ExprKind::AddrOf(_, ref subexpr) | + hir::ExprKind::Unary(hir::UnDeref, ref subexpr) | + hir::ExprKind::Field(ref subexpr, _) | + hir::ExprKind::Index(ref subexpr, _) => { expr = &subexpr; } _ => { @@ -1234,16 +1267,20 @@ fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'a, 'tcx>, impl<'a, 'tcx> RegionResolutionVisitor<'a, 'tcx> { /// Records the current parent (if any) as the parent of `child_scope`. - fn record_child_scope(&mut self, child_scope: Scope) { + /// Returns the depth of `child_scope`. + fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth { let parent = self.cx.parent; self.scope_tree.record_scope_parent(child_scope, parent); + // If `child_scope` has no parent, it must be the root node, and so has + // a depth of 1. Otherwise, its depth is one more than its parent's. + parent.map_or(1, |(_p, d)| d + 1) } /// Records the current parent (if any) as the parent of `child_scope`, /// and sets `child_scope` as the new current parent. 
fn enter_scope(&mut self, child_scope: Scope) { - self.record_child_scope(child_scope); - self.cx.parent = Some(child_scope); + let child_depth = self.record_child_scope(child_scope); + self.cx.parent = Some((child_scope, child_depth)); } fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) { @@ -1351,7 +1388,7 @@ impl<'a, 'tcx> Visitor<'tcx> for RegionResolutionVisitor<'a, 'tcx> { } fn region_scope_tree<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> Rc + -> Lrc { let closure_base_def_id = tcx.closure_base_def_id(def_id); if closure_base_def_id != def_id { @@ -1393,7 +1430,7 @@ fn region_scope_tree<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) ScopeTree::default() }; - Rc::new(scope_tree) + Lrc::new(scope_tree) } pub fn provide(providers: &mut Providers) { @@ -1403,9 +1440,9 @@ pub fn provide(providers: &mut Providers) { }; } -impl<'gcx> HashStable> for ScopeTree { +impl<'a> HashStable> for ScopeTree { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let ScopeTree { root_body, diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index 935dfd75dd8b..6ae027dac7e2 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -15,26 +15,27 @@ //! used between functions, and they operate in a purely top-down //! way. Therefore we break lifetime name resolution into a separate pass. 
-use hir::map::Map; use hir::def::Def; use hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE}; -use hir::ItemLocalId; -use ty::{self, TyCtxt}; +use hir::map::Map; +use hir::{GenericArg, GenericParam, ItemLocalId, LifetimeName, ParamName}; +use ty::{self, TyCtxt, GenericParamDefKind}; +use errors::DiagnosticBuilder; +use rustc::lint; +use rustc_data_structures::sync::Lrc; +use session::Session; use std::cell::Cell; use std::mem::replace; -use std::rc::Rc; use syntax::ast; use syntax::attr; use syntax::ptr::P; +use syntax::symbol::keywords; use syntax_pos::Span; -use errors::DiagnosticBuilder; use util::nodemap::{DefIdMap, FxHashMap, FxHashSet, NodeMap, NodeSet}; -use std::slice; -use rustc::lint; -use hir::{self, GenericParamsExt}; use hir::intravisit::{self, NestedVisitorMap, Visitor}; +use hir::{self, GenericParamKind}; /// The origin of a named lifetime definition. /// @@ -48,11 +49,16 @@ pub enum LifetimeDefOrigin { } impl LifetimeDefOrigin { - fn from_is_in_band(is_in_band: bool) -> Self { - if is_in_band { - LifetimeDefOrigin::InBand - } else { - LifetimeDefOrigin::Explicit + fn from_param(param: &GenericParam) -> Self { + match param.kind { + GenericParamKind::Lifetime { in_band } => { + if in_band { + LifetimeDefOrigin::InBand + } else { + LifetimeDefOrigin::Explicit + } + } + _ => bug!("expected a lifetime param"), } } } @@ -82,30 +88,33 @@ pub enum Region { } impl Region { - fn early( - hir_map: &Map, - index: &mut u32, - def: &hir::LifetimeDef, - ) -> (hir::LifetimeName, Region) { + fn early(hir_map: &Map, index: &mut u32, param: &GenericParam) -> (ParamName, Region) { let i = *index; *index += 1; - let def_id = hir_map.local_def_id(def.lifetime.id); - let origin = LifetimeDefOrigin::from_is_in_band(def.in_band); + let def_id = hir_map.local_def_id(param.id); + let origin = LifetimeDefOrigin::from_param(param); debug!("Region::early: index={} def_id={:?}", i, def_id); - (def.lifetime.name, Region::EarlyBound(i, def_id, origin)) + 
(param.name.modern(), Region::EarlyBound(i, def_id, origin)) } - fn late(hir_map: &Map, def: &hir::LifetimeDef) -> (hir::LifetimeName, Region) { - let depth = ty::DebruijnIndex::new(1); - let def_id = hir_map.local_def_id(def.lifetime.id); - let origin = LifetimeDefOrigin::from_is_in_band(def.in_band); - (def.lifetime.name, Region::LateBound(depth, def_id, origin)) + fn late(hir_map: &Map, param: &GenericParam) -> (ParamName, Region) { + let depth = ty::INNERMOST; + let def_id = hir_map.local_def_id(param.id); + let origin = LifetimeDefOrigin::from_param(param); + debug!( + "Region::late: param={:?} depth={:?} def_id={:?} origin={:?}", + param, + depth, + def_id, + origin, + ); + (param.name.modern(), Region::LateBound(depth, def_id, origin)) } fn late_anon(index: &Cell) -> Region { let i = index.get(); index.set(i + 1); - let depth = ty::DebruijnIndex::new(1); + let depth = ty::INNERMOST; Region::LateBoundAnon(depth, i) } @@ -121,40 +130,35 @@ impl Region { fn shifted(self, amount: u32) -> Region { match self { - Region::LateBound(depth, id, origin) => { - Region::LateBound(depth.shifted(amount), id, origin) + Region::LateBound(debruijn, id, origin) => { + Region::LateBound(debruijn.shifted_in(amount), id, origin) } - Region::LateBoundAnon(depth, index) => { - Region::LateBoundAnon(depth.shifted(amount), index) + Region::LateBoundAnon(debruijn, index) => { + Region::LateBoundAnon(debruijn.shifted_in(amount), index) } _ => self, } } - fn from_depth(self, depth: u32) -> Region { + fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region { match self { Region::LateBound(debruijn, id, origin) => Region::LateBound( - ty::DebruijnIndex { - depth: debruijn.depth - (depth - 1), - }, + debruijn.shifted_out_to_binder(binder), id, origin, ), Region::LateBoundAnon(debruijn, index) => Region::LateBoundAnon( - ty::DebruijnIndex { - depth: debruijn.depth - (depth - 1), - }, + debruijn.shifted_out_to_binder(binder), index, ), _ => self, } } - fn subst(self, params: 
&[hir::Lifetime], map: &NamedRegionMap) -> Option { + fn subst<'a, L>(self, mut params: L, map: &NamedRegionMap) -> Option + where L: Iterator { if let Region::EarlyBound(index, _, _) = self { - params - .get(index as usize) - .and_then(|lifetime| map.defs.get(&lifetime.id).cloned()) + params.nth(index as usize).and_then(|lifetime| map.defs.get(&lifetime.id).cloned()) } else { Some(self) } @@ -212,10 +216,10 @@ struct NamedRegionMap { /// See `NamedRegionMap`. pub struct ResolveLifetimes { - defs: FxHashMap>>, - late_bound: FxHashMap>>, + defs: FxHashMap>>, + late_bound: FxHashMap>>, object_lifetime_defaults: - FxHashMap>>>>, + FxHashMap>>>>, } impl_stable_hash_for!(struct ::middle::resolve_lifetime::ResolveLifetimes { @@ -228,33 +232,34 @@ struct LifetimeContext<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &'a mut NamedRegionMap, scope: ScopeRef<'a>, - // Deep breath. Our representation for poly trait refs contains a single - // binder and thus we only allow a single level of quantification. However, - // the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>` - // and `for <'a, 'b> &'b T: Foo<'a>`. In order to get the de Bruijn indices - // correct when representing these constraints, we should only introduce one - // scope. However, we want to support both locations for the quantifier and - // during lifetime resolution we want precise information (so we can't - // desugar in an earlier phase). - // SO, if we encounter a quantifier at the outer scope, we set - // trait_ref_hack to true (and introduce a scope), and then if we encounter - // a quantifier at the inner scope, we error. If trait_ref_hack is false, - // then we introduce the scope at the inner quantifier. - - // I'm sorry. + /// Deep breath. Our representation for poly trait refs contains a single + /// binder and thus we only allow a single level of quantification. 
However, + /// the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>` + /// and `for <'a, 'b> &'b T: Foo<'a>`. In order to get the de Bruijn indices + /// correct when representing these constraints, we should only introduce one + /// scope. However, we want to support both locations for the quantifier and + /// during lifetime resolution we want precise information (so we can't + /// desugar in an earlier phase). + /// + /// SO, if we encounter a quantifier at the outer scope, we set + /// trait_ref_hack to true (and introduce a scope), and then if we encounter + /// a quantifier at the inner scope, we error. If trait_ref_hack is false, + /// then we introduce the scope at the inner quantifier. + /// + /// I'm sorry. trait_ref_hack: bool, - // Used to disallow the use of in-band lifetimes in `fn` or `Fn` syntax. + /// Used to disallow the use of in-band lifetimes in `fn` or `Fn` syntax. is_in_fn_syntax: bool, - // List of labels in the function/method currently under analysis. - labels_in_fn: Vec<(ast::Name, Span)>, + /// List of labels in the function/method currently under analysis. + labels_in_fn: Vec, - // Cache for cross-crate per-definition object lifetime defaults. + /// Cache for cross-crate per-definition object lifetime defaults. xcrate_object_lifetime_defaults: DefIdMap>, - lifetime_uses: DefIdMap>, + lifetime_uses: &'a mut DefIdMap>, } #[derive(Debug)] @@ -264,12 +269,30 @@ enum Scope<'a> { /// it should be shifted by the number of `Binder`s in between the /// declaration `Binder` and the location it's referenced from. Binder { - lifetimes: FxHashMap, + lifetimes: FxHashMap, /// if we extend this scope with another scope, what is the next index /// we should use for an early-bound region? next_early_index: u32, + /// Flag is set to true if, in this binder, `'_` would be + /// equivalent to a "single-use region". This is true on + /// impls, but not other kinds of items. 
+ track_lifetime_uses: bool, + + /// Whether or not this binder would serve as the parent + /// binder for abstract types introduced within. For example: + /// + /// fn foo<'a>() -> impl for<'b> Trait> + /// + /// Here, the abstract types we create for the `impl Trait` + /// and `impl Trait2` references will both have the `foo` item + /// as their parent. When we get to `impl Trait2`, we find + /// that it is nested within the `for<>` binder -- this flag + /// allows us to skip that when looking for the parent binder + /// of the resulting abstract type. + abstract_type_parent: bool, + s: ScopeRef<'a>, }, @@ -325,8 +348,8 @@ type ScopeRef<'a> = &'a Scope<'a>; const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root; -pub fn provide(providers: &mut ty::maps::Providers) { - *providers = ty::maps::Providers { +pub fn provide(providers: &mut ty::query::Providers) { + *providers = ty::query::Providers { resolve_lifetimes, named_region_map: |tcx, id| { @@ -363,42 +386,38 @@ pub fn provide(providers: &mut ty::maps::Providers) { fn resolve_lifetimes<'tcx>( tcx: TyCtxt<'_, 'tcx, 'tcx>, for_krate: CrateNum, -) -> Rc { +) -> Lrc { assert_eq!(for_krate, LOCAL_CRATE); let named_region_map = krate(tcx); - let mut defs = FxHashMap(); + let mut rl = ResolveLifetimes { + defs: FxHashMap(), + late_bound: FxHashMap(), + object_lifetime_defaults: FxHashMap(), + }; + for (k, v) in named_region_map.defs { let hir_id = tcx.hir.node_to_hir_id(k); - let map = defs.entry(hir_id.owner_local_def_id()) - .or_insert_with(|| Rc::new(FxHashMap())); - Rc::get_mut(map).unwrap().insert(hir_id.local_id, v); + let map = rl.defs.entry(hir_id.owner_local_def_id()).or_default(); + Lrc::get_mut(map).unwrap().insert(hir_id.local_id, v); } - let mut late_bound = FxHashMap(); for k in named_region_map.late_bound { let hir_id = tcx.hir.node_to_hir_id(k); - let map = late_bound - .entry(hir_id.owner_local_def_id()) - .or_insert_with(|| Rc::new(FxHashSet())); - Rc::get_mut(map).unwrap().insert(hir_id.local_id); + let 
map = rl.late_bound.entry(hir_id.owner_local_def_id()).or_default(); + Lrc::get_mut(map).unwrap().insert(hir_id.local_id); } - let mut object_lifetime_defaults = FxHashMap(); for (k, v) in named_region_map.object_lifetime_defaults { let hir_id = tcx.hir.node_to_hir_id(k); - let map = object_lifetime_defaults + let map = rl.object_lifetime_defaults .entry(hir_id.owner_local_def_id()) - .or_insert_with(|| Rc::new(FxHashMap())); - Rc::get_mut(map) + .or_default(); + Lrc::get_mut(map) .unwrap() - .insert(hir_id.local_id, Rc::new(v)); + .insert(hir_id.local_id, Lrc::new(v)); } - Rc::new(ResolveLifetimes { - defs, - late_bound, - object_lifetime_defaults, - }) + Lrc::new(rl) } fn krate<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> NamedRegionMap { @@ -417,7 +436,7 @@ fn krate<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> NamedRegionMap { is_in_fn_syntax: false, labels_in_fn: vec![], xcrate_object_lifetime_defaults: DefIdMap(), - lifetime_uses: DefIdMap(), + lifetime_uses: &mut DefIdMap(), }; for (_, item) in &krate.items { visitor.visit_item(item); @@ -453,24 +472,21 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { match item.node { - hir::ItemFn(ref decl, _, _, _, ref generics, _) => { - self.visit_early_late(None, - decl, - generics, - |this| { - intravisit::walk_item(this, item); + hir::ItemKind::Fn(ref decl, _, ref generics, _) => { + self.visit_early_late(None, decl, generics, |this| { + intravisit::walk_item(this, item); }); } - hir::ItemExternCrate(_) - | hir::ItemUse(..) - | hir::ItemMod(..) - | hir::ItemForeignMod(..) - | hir::ItemGlobalAsm(..) => { + hir::ItemKind::ExternCrate(_) + | hir::ItemKind::Use(..) + | hir::ItemKind::Mod(..) + | hir::ItemKind::ForeignMod(..) + | hir::ItemKind::GlobalAsm(..) => { // These sorts of items have no lifetime parameters at all. intravisit::walk_item(self, item); } - hir::ItemStatic(..) | hir::ItemConst(..) => { + hir::ItemKind::Static(..) | hir::ItemKind::Const(..) 
=> { // No lifetime parameters, but implied 'static. let scope = Scope::Elision { elide: Elide::Exact(Region::Static), @@ -478,26 +494,46 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { }; self.with(scope, |_, this| intravisit::walk_item(this, item)); } - hir::ItemTy(_, ref generics) - | hir::ItemEnum(_, ref generics) - | hir::ItemStruct(_, ref generics) - | hir::ItemUnion(_, ref generics) - | hir::ItemTrait(_, _, ref generics, ..) - | hir::ItemTraitAlias(ref generics, ..) - | hir::ItemImpl(_, _, _, ref generics, ..) => { + hir::ItemKind::Existential(hir::ExistTy { impl_trait_fn: Some(_), .. }) => { + // currently existential type declarations are just generated from impl Trait + // items. doing anything on this node is irrelevant, as we currently don't need + // it. + } + hir::ItemKind::Ty(_, ref generics) + | hir::ItemKind::Existential(hir::ExistTy { impl_trait_fn: None, ref generics, .. }) + | hir::ItemKind::Enum(_, ref generics) + | hir::ItemKind::Struct(_, ref generics) + | hir::ItemKind::Union(_, ref generics) + | hir::ItemKind::Trait(_, _, ref generics, ..) + | hir::ItemKind::TraitAlias(ref generics, ..) + | hir::ItemKind::Impl(_, _, _, ref generics, ..) => { + // Impls permit `'_` to be used and it is equivalent to "some fresh lifetime name". + // This is not true for other kinds of items.x + let track_lifetime_uses = match item.node { + hir::ItemKind::Impl(..) => true, + _ => false, + }; // These kinds of items have only early bound lifetime parameters. - let mut index = if let hir::ItemTrait(..) = item.node { + let mut index = if let hir::ItemKind::Trait(..) = item.node { 1 // Self comes before lifetimes } else { 0 }; - let lifetimes = generics.lifetimes() - .map(|def| Region::early(&self.tcx.hir, &mut index, def)) - .collect(); - let next_early_index = index + generics.ty_params().count() as u32; + let mut type_count = 0; + let lifetimes = generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. 
} => { + Some(Region::early(&self.tcx.hir, &mut index, param)) + } + GenericParamKind::Type { .. } => { + type_count += 1; + None + } + }).collect(); let scope = Scope::Binder { lifetimes, - next_early_index, + next_early_index: index + type_count, + abstract_type_parent: true, + track_lifetime_uses, s: ROOT_SCOPE, }; self.with(scope, |old_scope, this| { @@ -510,37 +546,38 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { match item.node { - hir::ForeignItemFn(ref decl, _, ref generics) => { - self.visit_early_late(None, - decl, - generics, - |this| { - intravisit::walk_foreign_item(this, item); - }) + hir::ForeignItemKind::Fn(ref decl, _, ref generics) => { + self.visit_early_late(None, decl, generics, |this| { + intravisit::walk_foreign_item(this, item); + }) } - hir::ForeignItemStatic(..) => { + hir::ForeignItemKind::Static(..) => { intravisit::walk_foreign_item(self, item); } - hir::ForeignItemType => { + hir::ForeignItemKind::Type => { intravisit::walk_foreign_item(self, item); } } } fn visit_ty(&mut self, ty: &'tcx hir::Ty) { - debug!("visit_ty: ty={:?}", ty); + debug!("visit_ty: id={:?} ty={:?}", ty.id, ty); match ty.node { - hir::TyBareFn(ref c) => { + hir::TyKind::BareFn(ref c) => { let next_early_index = self.next_early_index(); let was_in_fn_syntax = self.is_in_fn_syntax; self.is_in_fn_syntax = true; let scope = Scope::Binder { - lifetimes: c.generic_params - .lifetimes() - .map(|def| Region::late(&self.tcx.hir, def)) - .collect(), + lifetimes: c.generic_params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. 
} => { + Some(Region::late(&self.tcx.hir, param)) + } + _ => None, + }).collect(), s: self.scope, next_early_index, + track_lifetime_uses: true, + abstract_type_parent: false, }; self.with(scope, |old_scope, this| { // a bare fn has no bounds, so everything @@ -550,17 +587,33 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { }); self.is_in_fn_syntax = was_in_fn_syntax; } - hir::TyTraitObject(ref bounds, ref lifetime) => { + hir::TyKind::TraitObject(ref bounds, ref lifetime) => { for bound in bounds { self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None); } - if lifetime.is_elided() { - self.resolve_object_lifetime_default(lifetime) - } else { - self.visit_lifetime(lifetime); + match lifetime.name { + LifetimeName::Implicit => { + // If the user does not write *anything*, we + // use the object lifetime defaulting + // rules. So e.g. `Box` becomes + // `Box`. + self.resolve_object_lifetime_default(lifetime) + } + LifetimeName::Underscore => { + // If the user writes `'_`, we use the *ordinary* elision + // rules. So the `'_` in e.g. `Box` will be + // resolved the same as the `'_` in `&'_ Foo`. + // + // cc #48468 + self.resolve_elided_lifetimes(vec![lifetime]) + } + LifetimeName::Param(_) | LifetimeName::Static => { + // If the user wrote an explicit name, use that. + self.visit_lifetime(lifetime); + } } } - hir::TyRptr(ref lifetime_ref, ref mt) => { + hir::TyKind::Rptr(ref lifetime_ref, ref mt) => { self.visit_lifetime(lifetime_ref); let scope = Scope::ObjectLifetimeDefault { lifetime: self.map.defs.get(&lifetime_ref.id).cloned(), @@ -568,92 +621,140 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { }; self.with(scope, |_, this| this.visit_ty(&mt.ty)); } - hir::TyImplTraitExistential(ref exist_ty, ref lifetimes) => { - // Resolve the lifetimes that are applied to the existential type. - // These are resolved in the current scope. - // `fn foo<'a>() -> impl MyTrait<'a> { ... 
}` desugars to - // `fn foo<'a>() -> MyAnonTy<'a> { ... }` - // ^ ^this gets resolved in the current scope - for lifetime in lifetimes { - self.visit_lifetime(lifetime); + hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => { + if let Def::Existential(exist_ty_did) = path.def { + let id = self.tcx.hir.as_local_node_id(exist_ty_did).unwrap(); - // Check for predicates like `impl for<'a> SomeTrait>` - // and ban them. Type variables instantiated inside binders aren't - // well-supported at the moment, so this doesn't work. - // In the future, this should be fixed and this error should be removed. - let def = self.map.defs.get(&lifetime.id); - if let Some(&Region::LateBound(_, def_id, _)) = def { - if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { - // Ensure that the parent of the def is an item, not HRTB - let parent_id = self.tcx.hir.get_parent_node(node_id); - let parent_impl_id = hir::ImplItemId { node_id: parent_id }; - let parent_trait_id = hir::TraitItemId { node_id: parent_id }; - let krate = self.tcx.hir.forest.krate(); - if !(krate.items.contains_key(&parent_id) - || krate.impl_items.contains_key(&parent_impl_id) - || krate.trait_items.contains_key(&parent_trait_id)) - { - span_err!( - self.tcx.sess, - lifetime.span, - E0657, - "`impl Trait` can only capture lifetimes \ - bound at the fn or impl level" - ); + // Resolve the lifetimes in the bounds to the lifetime defs in the generics. + // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to + // `abstract type MyAnonTy<'b>: MyTrait<'b>;` + // ^ ^ this gets resolved in the scope of + // the exist_ty generics + let (generics, bounds) = match self.tcx.hir.expect_item(id).node { + // named existential types don't need these hacks + hir::ItemKind::Existential(hir::ExistTy{ impl_trait_fn: None, .. }) => { + intravisit::walk_ty(self, ty); + return; + }, + hir::ItemKind::Existential(hir::ExistTy{ + ref generics, + ref bounds, + .. 
+ }) => ( + generics, + bounds, + ), + ref i => bug!("impl Trait pointed to non-existential type?? {:#?}", i), + }; + + assert!(exist_ty_did.is_local()); + // Resolve the lifetimes that are applied to the existential type. + // These are resolved in the current scope. + // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to + // `fn foo<'a>() -> MyAnonTy<'a> { ... }` + // ^ ^this gets resolved in the current scope + for lifetime in &path.segments[0].args.as_ref().unwrap().args { + if let hir::GenericArg::Lifetime(lifetime) = lifetime { + self.visit_lifetime(lifetime); + + // Check for predicates like `impl for<'a> Trait>` + // and ban them. Type variables instantiated inside binders aren't + // well-supported at the moment, so this doesn't work. + // In the future, this should be fixed and this error should be removed. + let def = self.map.defs.get(&lifetime.id).cloned(); + if let Some(Region::LateBound(_, def_id, _)) = def { + if let Some(node_id) = self.tcx.hir.as_local_node_id(def_id) { + // Ensure that the parent of the def is an item, not HRTB + let parent_id = self.tcx.hir.get_parent_node(node_id); + let parent_impl_id = hir::ImplItemId { node_id: parent_id }; + let parent_trait_id = hir::TraitItemId { node_id: parent_id }; + let krate = self.tcx.hir.forest.krate(); + if !(krate.items.contains_key(&parent_id) + || krate.impl_items.contains_key(&parent_impl_id) + || krate.trait_items.contains_key(&parent_trait_id)) + { + span_err!( + self.tcx.sess, + lifetime.span, + E0657, + "`impl Trait` can only capture lifetimes \ + bound at the fn or impl level" + ); + self.uninsert_lifetime_on_error(lifetime, def.unwrap()); + } + } } } } - } - // Resolve the lifetimes in the bounds to the lifetime defs in the generics. - // `fn foo<'a>() -> impl MyTrait<'a> { ... 
}` desugars to - // `abstract type MyAnonTy<'b>: MyTrait<'b>;` - // ^ ^ this gets resolved in the scope of - // the exist_ty generics - let hir::ExistTy { - ref generics, - ref bounds, - } = *exist_ty; - let mut index = self.next_early_index(); - debug!("visit_ty: index = {}", index); + // We want to start our early-bound indices at the end of the parent scope, + // not including any parent `impl Trait`s. + let mut index = self.next_early_index_for_abstract_type(); + debug!("visit_ty: index = {}", index); - let mut elision = None; - let mut lifetimes = FxHashMap(); - for lt_def in generics.lifetimes() { - let (lt_name, region) = Region::early(&self.tcx.hir, &mut index, <_def); - if let hir::LifetimeName::Underscore = lt_name { - // Pick the elided lifetime "definition" if one exists and use it to make an - // elision scope. - elision = Some(region); - } else { - lifetimes.insert(lt_name, region); + let mut elision = None; + let mut lifetimes = FxHashMap(); + let mut type_count = 0; + for param in &generics.params { + match param.kind { + GenericParamKind::Lifetime { .. } => { + let (name, reg) = Region::early(&self.tcx.hir, &mut index, ¶m); + if let hir::ParamName::Plain(param_name) = name { + if param_name.name == keywords::UnderscoreLifetime.name() { + // Pick the elided lifetime "definition" if one exists + // and use it to make an elision scope. + elision = Some(reg); + } else { + lifetimes.insert(name, reg); + } + } else { + lifetimes.insert(name, reg); + } + } + GenericParamKind::Type { .. 
} => { + type_count += 1; + } + } } - } + let next_early_index = index + type_count; - let next_early_index = index + generics.ty_params().count() as u32; - - if let Some(elision_region) = elision { - let scope = Scope::Elision { - elide: Elide::Exact(elision_region), - s: self.scope - }; - self.with(scope, |_old_scope, this| { - let scope = Scope::Binder { lifetimes, next_early_index, s: this.scope }; - this.with(scope, |_old_scope, this| { + if let Some(elision_region) = elision { + let scope = Scope::Elision { + elide: Elide::Exact(elision_region), + s: self.scope, + }; + self.with(scope, |_old_scope, this| { + let scope = Scope::Binder { + lifetimes, + next_early_index, + s: this.scope, + track_lifetime_uses: true, + abstract_type_parent: false, + }; + this.with(scope, |_old_scope, this| { + this.visit_generics(generics); + for bound in bounds { + this.visit_param_bound(bound); + } + }); + }); + } else { + let scope = Scope::Binder { + lifetimes, + next_early_index, + s: self.scope, + track_lifetime_uses: true, + abstract_type_parent: false, + }; + self.with(scope, |_old_scope, this| { this.visit_generics(generics); for bound in bounds { - this.visit_ty_param_bound(bound); + this.visit_param_bound(bound); } }); - }); + } } else { - let scope = Scope::Binder { lifetimes, next_early_index, s: self.scope }; - self.with(scope, |_old_scope, this| { - this.visit_generics(generics); - for bound in bounds { - this.visit_ty_param_bound(bound); - } - }); + intravisit::walk_ty(self, ty) } } _ => intravisit::walk_ty(self, ty), @@ -671,32 +772,43 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { &trait_item.generics, |this| intravisit::walk_trait_item(this, trait_item), ); - }, + } Type(ref bounds, ref ty) => { let generics = &trait_item.generics; let mut index = self.next_early_index(); debug!("visit_ty: index = {}", index); - let lifetimes = generics.lifetimes() - .map(|lt_def| Region::early(&self.tcx.hir, &mut index, lt_def)) - .collect(); - - let 
next_early_index = index + generics.ty_params().count() as u32; - let scope = Scope::Binder { lifetimes, next_early_index, s: self.scope }; + let mut type_count = 0; + let lifetimes = generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => { + Some(Region::early(&self.tcx.hir, &mut index, param)) + } + GenericParamKind::Type { .. } => { + type_count += 1; + None + } + }).collect(); + let scope = Scope::Binder { + lifetimes, + next_early_index: index + type_count, + s: self.scope, + track_lifetime_uses: true, + abstract_type_parent: true, + }; self.with(scope, |_old_scope, this| { this.visit_generics(generics); for bound in bounds { - this.visit_ty_param_bound(bound); + this.visit_param_bound(bound); } if let Some(ty) = ty { this.visit_ty(ty); } }); - }, + } Const(_, _) => { // Only methods and types support generics. assert!(trait_item.generics.params.is_empty()); intravisit::walk_trait_item(self, trait_item); - }, + } } } @@ -711,33 +823,73 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { &impl_item.generics, |this| intravisit::walk_impl_item(this, impl_item), ) - }, + } Type(ref ty) => { let generics = &impl_item.generics; let mut index = self.next_early_index(); + let mut next_early_index = index; debug!("visit_ty: index = {}", index); - let lifetimes = generics.lifetimes() - .map(|lt_def| Region::early(&self.tcx.hir, &mut index, lt_def)) - .collect(); - - let next_early_index = index + generics.ty_params().count() as u32; - let scope = Scope::Binder { lifetimes, next_early_index, s: self.scope }; + let lifetimes = generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => { + Some(Region::early(&self.tcx.hir, &mut index, param)) + } + GenericParamKind::Type { .. 
} => { + next_early_index += 1; + None + } + }).collect(); + let scope = Scope::Binder { + lifetimes, + next_early_index, + s: self.scope, + track_lifetime_uses: true, + abstract_type_parent: true, + }; self.with(scope, |_old_scope, this| { this.visit_generics(generics); this.visit_ty(ty); }); - }, + } + Existential(ref bounds) => { + let generics = &impl_item.generics; + let mut index = self.next_early_index(); + let mut next_early_index = index; + debug!("visit_ty: index = {}", index); + let lifetimes = generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => { + Some(Region::early(&self.tcx.hir, &mut index, param)) + } + GenericParamKind::Type { .. } => { + next_early_index += 1; + None + } + }).collect(); + + let scope = Scope::Binder { + lifetimes, + next_early_index, + s: self.scope, + track_lifetime_uses: true, + abstract_type_parent: true, + }; + self.with(scope, |_old_scope, this| { + this.visit_generics(generics); + for bound in bounds { + this.visit_param_bound(bound); + } + }); + } Const(_, _) => { // Only methods and types support generics. 
assert!(impl_item.generics.params.is_empty()); intravisit::walk_impl_item(self, impl_item); - }, + } } } fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) { if lifetime_ref.is_elided() { - self.resolve_elided_lifetimes(slice::from_ref(lifetime_ref)); + self.resolve_elided_lifetimes(vec![lifetime_ref]); return; } if lifetime_ref.is_static() { @@ -747,11 +899,11 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { self.resolve_lifetime_ref(lifetime_ref); } - fn visit_path(&mut self, path: &'tcx hir::Path, _: ast::NodeId) { + fn visit_path(&mut self, path: &'tcx hir::Path, _: hir::HirId) { for (i, segment) in path.segments.iter().enumerate() { let depth = path.segments.len() - i - 1; - if let Some(ref parameters) = segment.parameters { - self.visit_segment_parameters(path.def, depth, parameters); + if let Some(ref args) = segment.args { + self.visit_segment_args(path.def, depth, args); } } } @@ -765,14 +917,16 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { } fn visit_generics(&mut self, generics: &'tcx hir::Generics) { - check_mixed_explicit_and_in_band_defs( - self.tcx, - &generics.lifetimes().cloned().collect::>() - ); - for ty_param in generics.ty_params() { - walk_list!(self, visit_ty_param_bound, &ty_param.bounds); - if let Some(ref ty) = ty_param.default { - self.visit_ty(&ty); + check_mixed_explicit_and_in_band_defs(self.tcx, &generics.params); + for param in &generics.params { + match param.kind { + GenericParamKind::Lifetime { .. } => {} + GenericParamKind::Type { ref default, .. } => { + walk_list!(self, visit_param_bound, ¶m.bounds); + if let Some(ref ty) = default { + self.visit_ty(&ty); + } + } } } for predicate in &generics.where_clause.predicates { @@ -783,26 +937,33 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { ref bound_generic_params, .. 
}) => { - if bound_generic_params.iter().any(|p| p.is_lifetime_param()) { + let lifetimes: FxHashMap<_, _> = bound_generic_params.iter() + .filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => { + Some(Region::late(&self.tcx.hir, param)) + } + _ => None, + }).collect(); + if !lifetimes.is_empty() { self.trait_ref_hack = true; let next_early_index = self.next_early_index(); let scope = Scope::Binder { - lifetimes: bound_generic_params.lifetimes() - .map(|def| Region::late(&self.tcx.hir, def)) - .collect(), + lifetimes, s: self.scope, next_early_index, + track_lifetime_uses: true, + abstract_type_parent: false, }; let result = self.with(scope, |old_scope, this| { this.check_lifetime_params(old_scope, &bound_generic_params); this.visit_ty(&bounded_ty); - walk_list!(this, visit_ty_param_bound, bounds); + walk_list!(this, visit_param_bound, bounds); }); self.trait_ref_hack = false; result } else { self.visit_ty(&bounded_ty); - walk_list!(self, visit_ty_param_bound, bounds); + walk_list!(self, visit_param_bound, bounds); } } &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { @@ -811,9 +972,7 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { .. }) => { self.visit_lifetime(lifetime); - for bound in bounds { - self.visit_lifetime(bound); - } + walk_list!(self, visit_param_bound, bounds); } &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { ref lhs_ty, @@ -834,8 +993,14 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { ) { debug!("visit_poly_trait_ref trait_ref={:?}", trait_ref); - if !self.trait_ref_hack || - trait_ref.bound_generic_params.iter().any(|p| p.is_lifetime_param()) + if !self.trait_ref_hack + || trait_ref + .bound_generic_params + .iter() + .any(|param| match param.kind { + GenericParamKind::Lifetime { .. 
} => true, + _ => false, + }) { if self.trait_ref_hack { span_err!( @@ -847,12 +1012,17 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { } let next_early_index = self.next_early_index(); let scope = Scope::Binder { - lifetimes: trait_ref.bound_generic_params - .lifetimes() - .map(|def| Region::late(&self.tcx.hir, def)) - .collect(), + lifetimes: trait_ref.bound_generic_params.iter() + .filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => { + Some(Region::late(&self.tcx.hir, param)) + } + _ => None, + }).collect(), s: self.scope, next_early_index, + track_lifetime_uses: true, + abstract_type_parent: false, }; self.with(scope, |old_scope, this| { this.check_lifetime_params(old_scope, &trait_ref.bound_generic_params); @@ -897,10 +1067,10 @@ fn original_lifetime(span: Span) -> Original { span: span, } } -fn shadower_lifetime(l: &hir::Lifetime) -> Shadower { +fn shadower_lifetime(param: &hir::GenericParam) -> Shadower { Shadower { kind: ShadowKind::Lifetime, - span: l.span, + span: param.span, } } @@ -915,23 +1085,27 @@ impl ShadowKind { fn check_mixed_explicit_and_in_band_defs( tcx: TyCtxt<'_, '_, '_>, - lifetime_defs: &[hir::LifetimeDef], + params: &P<[hir::GenericParam]>, ) { - let oob_def = lifetime_defs.iter().find(|lt| !lt.in_band); - let in_band_def = lifetime_defs.iter().find(|lt| lt.in_band); + let in_bands: Vec<_> = params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { in_band, .. 
} => Some((in_band, param.span)), + _ => None, + }).collect(); + let out_of_band = in_bands.iter().find(|(in_band, _)| !in_band); + let in_band = in_bands.iter().find(|(in_band, _)| *in_band); - if let (Some(oob_def), Some(in_band_def)) = (oob_def, in_band_def) { + if let (Some((_, out_of_band_span)), Some((_, in_band_span))) + = (out_of_band, in_band) { struct_span_err!( tcx.sess, - in_band_def.lifetime.span, + *in_band_span, E0688, "cannot mix in-band and explicit lifetime definitions" ).span_label( - in_band_def.lifetime.span, + *in_band_span, "in-band lifetime definition here", - ) - .span_label(oob_def.lifetime.span, "explicit lifetime definition here") - .emit(); + ).span_label(*out_of_band_span, "explicit lifetime definition here") + .emit(); } } @@ -978,7 +1152,7 @@ fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body) { struct GatherLabels<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, scope: ScopeRef<'a>, - labels_in_fn: &'a mut Vec<(ast::Name, Span)>, + labels_in_fn: &'a mut Vec, } let mut gather = GatherLabels { @@ -994,32 +1168,31 @@ fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body) { } fn visit_expr(&mut self, ex: &hir::Expr) { - if let Some((label, label_span)) = expression_label(ex) { - for &(prior, prior_span) in &self.labels_in_fn[..] { + if let Some(label) = expression_label(ex) { + for prior_label in &self.labels_in_fn[..] 
{ // FIXME (#24278): non-hygienic comparison - if label == prior { + if label.name == prior_label.name { signal_shadowing_problem( self.tcx, - label, - original_label(prior_span), - shadower_label(label_span), + label.name, + original_label(prior_label.span), + shadower_label(label.span), ); } } - check_if_label_shadows_lifetime(self.tcx, self.scope, label, label_span); + check_if_label_shadows_lifetime(self.tcx, self.scope, label); - self.labels_in_fn.push((label, label_span)); + self.labels_in_fn.push(label); } intravisit::walk_expr(self, ex) } } - fn expression_label(ex: &hir::Expr) -> Option<(ast::Name, Span)> { + fn expression_label(ex: &hir::Expr) -> Option { match ex.node { - hir::ExprWhile(.., Some(label)) | hir::ExprLoop(_, Some(label), _) => { - Some((label.node, label.span)) - } + hir::ExprKind::While(.., Some(label)) | + hir::ExprKind::Loop(_, Some(label), _) => Some(label.ident), _ => None, } } @@ -1027,8 +1200,7 @@ fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body) { fn check_if_label_shadows_lifetime( tcx: TyCtxt<'_, '_, '_>, mut scope: ScopeRef<'_>, - label: ast::Name, - label_span: Span, + label: ast::Ident, ) { loop { match *scope { @@ -1043,19 +1215,17 @@ fn extract_labels(ctxt: &mut LifetimeContext<'_, '_>, body: &hir::Body) { } Scope::Binder { - ref lifetimes, - s, - next_early_index: _, + ref lifetimes, s, .. 
} => { // FIXME (#24278): non-hygienic comparison - if let Some(def) = lifetimes.get(&hir::LifetimeName::Name(label)) { + if let Some(def) = lifetimes.get(&hir::ParamName::Plain(label.modern())) { let node_id = tcx.hir.as_local_node_id(def.id().unwrap()).unwrap(); signal_shadowing_problem( tcx, - label, + label.name, original_lifetime(tcx.hir.span(node_id)), - shadower_label(label_span), + shadower_label(label.span), ); return; } @@ -1072,11 +1242,12 @@ fn compute_object_lifetime_defaults( let mut map = NodeMap(); for item in tcx.hir.krate().items.values() { match item.node { - hir::ItemStruct(_, ref generics) - | hir::ItemUnion(_, ref generics) - | hir::ItemEnum(_, ref generics) - | hir::ItemTy(_, ref generics) - | hir::ItemTrait(_, _, ref generics, ..) => { + hir::ItemKind::Struct(_, ref generics) + | hir::ItemKind::Union(_, ref generics) + | hir::ItemKind::Enum(_, ref generics) + | hir::ItemKind::Existential(hir::ExistTy { ref generics, impl_trait_fn: None, .. }) + | hir::ItemKind::Ty(_, ref generics) + | hir::ItemKind::Trait(_, _, ref generics, ..) => { let result = object_lifetime_defaults_for_item(tcx, generics); // Debugging aid. @@ -1086,13 +1257,18 @@ fn compute_object_lifetime_defaults( .map(|set| match *set { Set1::Empty => "BaseDefault".to_string(), Set1::One(Region::Static) => "'static".to_string(), - Set1::One(Region::EarlyBound(i, _, _)) => generics.lifetimes() - .nth(i as usize) - .unwrap() - .lifetime - .name - .name() - .to_string(), + Set1::One(Region::EarlyBound(mut i, _, _)) => { + generics.params.iter().find_map(|param| match param.kind { + GenericParamKind::Lifetime { .. 
} => { + if i == 0 { + return Some(param.name.ident().to_string()); + } + i -= 1; + None + } + _ => None, + }).unwrap() + } Set1::One(_) => bug!(), Set1::Many => "Ambiguous".to_string(), }) @@ -1116,16 +1292,17 @@ fn object_lifetime_defaults_for_item( tcx: TyCtxt<'_, '_, '_>, generics: &hir::Generics, ) -> Vec { - fn add_bounds(set: &mut Set1, bounds: &[hir::TyParamBound]) { + fn add_bounds(set: &mut Set1, bounds: &[hir::GenericBound]) { for bound in bounds { - if let hir::RegionTyParamBound(ref lifetime) = *bound { - set.insert(lifetime.name); + if let hir::GenericBound::Outlives(ref lifetime) = *bound { + set.insert(lifetime.name.modern()); } } } - generics.ty_params() - .map(|param| { + generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => None, + GenericParamKind::Type { .. } => { let mut set = Set1::Empty; add_bounds(&mut set, ¶m.bounds); @@ -1145,7 +1322,7 @@ fn object_lifetime_defaults_for_item( } let def = match data.bounded_ty.node { - hir::TyPath(hir::QPath::Resolved(None, ref path)) => path.def, + hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => path.def, _ => continue, }; @@ -1154,27 +1331,35 @@ fn object_lifetime_defaults_for_item( } } - match set { + Some(match set { Set1::Empty => Set1::Empty, Set1::One(name) => { if name == hir::LifetimeName::Static { Set1::One(Region::Static) } else { - generics - .lifetimes() - .enumerate() - .find(|&(_, def)| def.lifetime.name == name) - .map_or(Set1::Many, |(i, def)| { - let def_id = tcx.hir.local_def_id(def.lifetime.id); - let origin = LifetimeDefOrigin::from_is_in_band(def.in_band); - Set1::One(Region::EarlyBound(i as u32, def_id, origin)) - }) + generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. 
} => { + Some(( + param.id, + hir::LifetimeName::Param(param.name), + LifetimeDefOrigin::from_param(param), + )) + } + _ => None, + }) + .enumerate() + .find(|&(_, (_, lt_name, _))| lt_name == name) + .map_or(Set1::Many, |(i, (id, _, origin))| { + let def_id = tcx.hir.local_def_id(id); + Set1::One(Region::EarlyBound(i as u32, def_id, origin)) + }) } } Set1::Many => Set1::Many, - } - }) - .collect() + }) + } + }) + .collect() } impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { @@ -1191,52 +1376,113 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { F: for<'b> FnOnce(ScopeRef, &mut LifetimeContext<'b, 'tcx>), { let LifetimeContext { - tcx, ref mut map, .. - } = *self; + tcx, + map, + lifetime_uses, + .. + } = self; let labels_in_fn = replace(&mut self.labels_in_fn, vec![]); let xcrate_object_lifetime_defaults = replace(&mut self.xcrate_object_lifetime_defaults, DefIdMap()); let mut this = LifetimeContext { - tcx, - map: *map, + tcx: *tcx, + map: map, scope: &wrap_scope, trait_ref_hack: self.trait_ref_hack, is_in_fn_syntax: self.is_in_fn_syntax, labels_in_fn, xcrate_object_lifetime_defaults, - lifetime_uses: DefIdMap(), + lifetime_uses: lifetime_uses, }; debug!("entering scope {:?}", this.scope); f(self.scope, &mut this); + this.check_uses_for_lifetimes_defined_by_scope(); debug!("exiting scope {:?}", this.scope); self.labels_in_fn = this.labels_in_fn; self.xcrate_object_lifetime_defaults = this.xcrate_object_lifetime_defaults; + } - for (def_id, lifetimeuseset) in &this.lifetime_uses { + fn check_uses_for_lifetimes_defined_by_scope(&mut self) { + let defined_by = match self.scope { + Scope::Binder { lifetimes, .. } => lifetimes, + _ => { + debug!("check_uses_for_lifetimes_defined_by_scope: not in a binder scope"); + return; + } + }; + + let mut def_ids: Vec<_> = defined_by.values() + .flat_map(|region| match region { + Region::EarlyBound(_, def_id, _) + | Region::LateBound(_, def_id, _) + | Region::Free(_, def_id) => Some(*def_id), + + Region::LateBoundAnon(..) 
| Region::Static => None, + }) + .collect(); + + // ensure that we issue lints in a repeatable order + def_ids.sort_by_key(|&def_id| self.tcx.def_path_hash(def_id)); + + for def_id in def_ids { + debug!( + "check_uses_for_lifetimes_defined_by_scope: def_id = {:?}", + def_id, + ); + + let lifetimeuseset = self.lifetime_uses.remove(&def_id); + debug!( + "check_uses_for_lifetimes_defined_by_scope: lifetimeuseset = {:?}", + lifetimeuseset + ); match lifetimeuseset { - &LifetimeUseSet::One(_) => { - let node_id = this.tcx.hir.as_local_node_id(*def_id).unwrap(); + Some(LifetimeUseSet::One(lifetime)) => { + let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap(); debug!("node id first={:?}", node_id); - if let hir::map::NodeLifetime(hir_lifetime) = this.tcx.hir.get(node_id) { - let span = hir_lifetime.span; - let id = hir_lifetime.id; - debug!("id ={:?} span = {:?} hir_lifetime = {:?}", - node_id, + if let Some((id, span, name)) = match self.tcx.hir.get(node_id) { + hir::map::NodeLifetime(hir_lifetime) => { + Some((hir_lifetime.id, hir_lifetime.span, hir_lifetime.name.ident())) + } + hir::map::NodeGenericParam(param) => { + Some((param.id, param.span, param.name.ident())) + } + _ => None, + } { + debug!("id = {:?} span = {:?} name = {:?}", node_id, span, name); + let mut err = self.tcx.struct_span_lint_node( + lint::builtin::SINGLE_USE_LIFETIMES, + id, span, - hir_lifetime); - - this.tcx - .struct_span_lint_node(lint::builtin::SINGLE_USE_LIFETIME, - id, - span, - &format!("lifetime name `{}` only used once", - hir_lifetime.name.name())) - .emit(); + &format!("lifetime parameter `{}` only used once", name), + ); + err.span_label(span, "this lifetime..."); + err.span_label(lifetime.span, "...is used only here"); + err.emit(); } } - _ => { + Some(LifetimeUseSet::Many) => { debug!("Not one use lifetime"); } + None => { + let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap(); + if let Some((id, span, name)) = match self.tcx.hir.get(node_id) { + 
hir::map::NodeLifetime(hir_lifetime) => { + Some((hir_lifetime.id, hir_lifetime.span, hir_lifetime.name.ident())) + } + hir::map::NodeGenericParam(param) => { + Some((param.id, param.span, param.name.ident())) + } + _ => None, + } { + debug!("id ={:?} span = {:?} name = {:?}", node_id, span, name); + self.tcx.struct_span_lint_node( + lint::builtin::UNUSED_LIFETIMES, + id, + span, + &format!("lifetime parameter `{}` never used", name) + ).emit(); + } + } } } } @@ -1274,35 +1520,40 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let mut index = 0; if let Some(parent_id) = parent_id { let parent = self.tcx.hir.expect_item(parent_id); - if let hir::ItemTrait(..) = parent.node { + if let hir::ItemKind::Trait(..) = parent.node { index += 1; // Self comes first. } match parent.node { - hir::ItemTrait(_, _, ref generics, ..) - | hir::ItemImpl(_, _, _, ref generics, ..) => { + hir::ItemKind::Trait(_, _, ref generics, ..) + | hir::ItemKind::Impl(_, _, _, ref generics, ..) => { index += generics.params.len() as u32; } _ => {} } } - let lifetimes = generics - .lifetimes() - .map(|def| { - if self.map.late_bound.contains(&def.lifetime.id) { - Region::late(&self.tcx.hir, def) + let mut type_count = 0; + let lifetimes = generics.params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => { + if self.map.late_bound.contains(¶m.id) { + Some(Region::late(&self.tcx.hir, param)) } else { - Region::early(&self.tcx.hir, &mut index, def) + Some(Region::early(&self.tcx.hir, &mut index, param)) } - }) - .collect(); - - let next_early_index = index + generics.ty_params().count() as u32; + } + GenericParamKind::Type { .. 
} => { + type_count += 1; + None + } + }).collect(); + let next_early_index = index + type_count; let scope = Scope::Binder { lifetimes, next_early_index, s: self.scope, + abstract_type_parent: true, + track_lifetime_uses: false, }; self.with(scope, move |old_scope, this| { this.check_lifetime_params(old_scope, &generics.params); @@ -1310,25 +1561,43 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { }); } - /// Returns the next index one would use for an early-bound-region - /// if extending the current scope. - fn next_early_index(&self) -> u32 { + fn next_early_index_helper(&self, only_abstract_type_parent: bool) -> u32 { let mut scope = self.scope; loop { match *scope { Scope::Root => return 0, Scope::Binder { - next_early_index, .. - } => return next_early_index, + next_early_index, + abstract_type_parent, + .. + } if (!only_abstract_type_parent || abstract_type_parent) => + { + return next_early_index + } - Scope::Body { s, .. } + Scope::Binder { s, .. } + | Scope::Body { s, .. } | Scope::Elision { s, .. } | Scope::ObjectLifetimeDefault { s, .. } => scope = s, } } } + /// Returns the next index one would use for an early-bound-region + /// if extending the current scope. + fn next_early_index(&self) -> u32 { + self.next_early_index_helper(true) + } + + /// Returns the next index one would use for an `impl Trait` that + /// is being converted into an `abstract type`. This will be the + /// next early index from the enclosing item, for the most + /// part. See the `abstract_type_parent` field for more info. 
+ fn next_early_index_for_abstract_type(&self) -> u32 { + self.next_early_index_helper(false) + } + fn resolve_lifetime_ref(&mut self, lifetime_ref: &'tcx hir::Lifetime) { debug!("resolve_lifetime_ref(lifetime_ref={:?})", lifetime_ref); // Walk up the scope chain, tracking the number of fn scopes @@ -1349,12 +1618,12 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { break None; } - Scope::Binder { - ref lifetimes, - s, - next_early_index: _, - } => { - if let Some(&def) = lifetimes.get(&lifetime_ref.name) { + Scope::Binder { ref lifetimes, s, .. } => { + let name = match lifetime_ref.name { + LifetimeName::Param(param_name) => param_name, + _ => bug!("expected LifetimeName::Param"), + }; + if let Some(&def) = lifetimes.get(&name.modern()) { break Some(def.shifted(late_depth)); } else { late_depth += 1; @@ -1375,7 +1644,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let fn_id = self.tcx.hir.body_owner(body_id); match self.tcx.hir.get(fn_id) { hir::map::NodeItem(&hir::Item { - node: hir::ItemFn(..), + node: hir::ItemKind::Fn(..), .. 
}) | hir::map::NodeTraitItem(&hir::TraitItem { @@ -1423,32 +1692,41 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { lifetime_ref.span, E0261, "use of undeclared lifetime name `{}`", - lifetime_ref.name.name() + lifetime_ref ).span_label(lifetime_ref.span, "undeclared lifetime") .emit(); } } - fn visit_segment_parameters( + fn visit_segment_args( &mut self, def: Def, depth: usize, - params: &'tcx hir::PathParameters, + generic_args: &'tcx hir::GenericArgs, ) { - if params.parenthesized { + if generic_args.parenthesized { let was_in_fn_syntax = self.is_in_fn_syntax; self.is_in_fn_syntax = true; - self.visit_fn_like_elision(params.inputs(), Some(¶ms.bindings[0].ty)); + self.visit_fn_like_elision(generic_args.inputs(), + Some(&generic_args.bindings[0].ty)); self.is_in_fn_syntax = was_in_fn_syntax; return; } - if params.lifetimes.iter().all(|l| l.is_elided()) { - self.resolve_elided_lifetimes(¶ms.lifetimes); - } else { - for l in ¶ms.lifetimes { - self.visit_lifetime(l); + let mut elide_lifetimes = true; + let lifetimes = generic_args.args.iter().filter_map(|arg| match arg { + hir::GenericArg::Lifetime(lt) => { + if !lt.is_elided() { + elide_lifetimes = false; + } + Some(lt) } + _ => None, + }).collect(); + if elide_lifetimes { + self.resolve_elided_lifetimes(lifetimes); + } else { + lifetimes.iter().for_each(|lt| self.visit_lifetime(lt)); } // Figure out if this is a type/trait segment, @@ -1500,49 +1778,65 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { self.xcrate_object_lifetime_defaults .entry(def_id) .or_insert_with(|| { - tcx.generics_of(def_id) - .types - .iter() - .map(|def| def.object_lifetime_default) - .collect() + tcx.generics_of(def_id).params.iter().filter_map(|param| { + match param.kind { + GenericParamDefKind::Type { object_lifetime_default, .. 
} => { + Some(object_lifetime_default) + } + GenericParamDefKind::Lifetime => None, + } + }).collect() }) }; - unsubst - .iter() - .map(|set| match *set { - Set1::Empty => if in_body { - None - } else { - Some(Region::Static) - }, - Set1::One(r) => r.subst(¶ms.lifetimes, map), - Set1::Many => None, - }) - .collect() + unsubst.iter() + .map(|set| match *set { + Set1::Empty => if in_body { + None + } else { + Some(Region::Static) + }, + Set1::One(r) => { + let lifetimes = generic_args.args.iter().filter_map(|arg| match arg { + GenericArg::Lifetime(lt) => Some(lt), + _ => None, + }); + r.subst(lifetimes, map) + } + Set1::Many => None, + }) + .collect() }); - for (i, ty) in params.types.iter().enumerate() { - if let Some(<) = object_lifetime_defaults.get(i) { - let scope = Scope::ObjectLifetimeDefault { - lifetime: lt, - s: self.scope, - }; - self.with(scope, |_, this| this.visit_ty(ty)); - } else { - self.visit_ty(ty); + let mut i = 0; + for arg in &generic_args.args { + match arg { + GenericArg::Lifetime(_) => {} + GenericArg::Type(ty) => { + if let Some(<) = object_lifetime_defaults.get(i) { + let scope = Scope::ObjectLifetimeDefault { + lifetime: lt, + s: self.scope, + }; + self.with(scope, |_, this| this.visit_ty(ty)); + } else { + self.visit_ty(ty); + } + i += 1; + } } } - for b in ¶ms.bindings { + for b in &generic_args.bindings { self.visit_assoc_type_binding(b); } } fn visit_fn_like_elision( &mut self, - inputs: &'tcx [P], + inputs: &'tcx [hir::Ty], output: Option<&'tcx P>, ) { + debug!("visit_fn_like_elision: enter"); let mut arg_elide = Elide::FreshLateAnon(Cell::new(0)); let arg_scope = Scope::Elision { elide: arg_elide.clone(), @@ -1565,6 +1859,8 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { None => return, }; + debug!("visit_fn_like_elision: determine output"); + // Figure out if there's a body we can get argument names from, // and whether there's a `self` argument (treated specially). 
let mut assoc_item_kind = None; @@ -1573,7 +1869,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { let body = match self.tcx.hir.get(parent) { // `fn` definitions and methods. hir::map::NodeItem(&hir::Item { - node: hir::ItemFn(.., body), + node: hir::ItemKind::Fn(.., body), .. }) => Some(body), @@ -1586,7 +1882,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { .expect_item(self.tcx.hir.get_parent(parent)) .node { - hir::ItemTrait(.., ref trait_items) => { + hir::ItemKind::Trait(.., ref trait_items) => { assoc_item_kind = trait_items .iter() .find(|ti| ti.id.node_id == parent) @@ -1609,7 +1905,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { .expect_item(self.tcx.hir.get_parent(parent)) .node { - hir::ItemImpl(.., ref self_ty, ref impl_items) => { + hir::ItemKind::Impl(.., ref self_ty, ref impl_items) => { impl_self = Some(self_ty); assoc_item_kind = impl_items .iter() @@ -1622,8 +1918,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } // Foreign functions, `fn(...) -> R` and `Trait(...) -> R` (both types and bounds). - hir::map::NodeForeignItem(_) | hir::map::NodeTy(_) | hir::map::NodeTraitRef(_) => - None, + hir::map::NodeForeignItem(_) | hir::map::NodeTy(_) | hir::map::NodeTraitRef(_) => None, // Everything else (only closures?) doesn't // actually enjoy elision in return types. _ => { @@ -1652,7 +1947,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { // Can't always rely on literal (or implied) `Self` due // to the way elision rules were originally specified. 
let impl_self = impl_self.map(|ty| &ty.node); - if let Some(&hir::TyPath(hir::QPath::Resolved(None, ref path))) = impl_self { + if let Some(&hir::TyKind::Path(hir::QPath::Resolved(None, ref path))) = impl_self { match path.def { // Whitelist the types that unambiguously always // result in the same type constructor being used @@ -1667,8 +1962,8 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { false }; - if let hir::TyRptr(lifetime_ref, ref mt) = inputs[0].node { - if let hir::TyPath(hir::QPath::Resolved(None, ref path)) = mt.ty.node { + if let hir::TyKind::Rptr(lifetime_ref, ref mt) = inputs[0].node { + if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) = mt.ty.node { if is_self_ty(path.def) { if let Some(&lifetime) = self.map.defs.get(&lifetime_ref.id) { let scope = Scope::Elision { @@ -1695,7 +1990,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { .map(|(i, input)| { let mut gather = GatherLifetimes { map: self.map, - binder_depth: 1, + outer_index: ty::INNERMOST, have_bound_regions: false, lifetimes: FxHashSet(), }; @@ -1725,15 +2020,18 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { Elide::Error(arg_lifetimes) }; + debug!("visit_fn_like_elision: elide={:?}", elide); + let scope = Scope::Elision { elide, s: self.scope, }; self.with(scope, |_, this| this.visit_ty(output)); + debug!("visit_fn_like_elision: exit"); struct GatherLifetimes<'a> { map: &'a NamedRegionMap, - binder_depth: u32, + outer_index: ty::DebruijnIndex, have_bound_regions: bool, lifetimes: FxHashSet, } @@ -1744,10 +2042,10 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } fn visit_ty(&mut self, ty: &hir::Ty) { - if let hir::TyBareFn(_) = ty.node { - self.binder_depth += 1; + if let hir::TyKind::BareFn(_) = ty.node { + self.outer_index.shift_in(1); } - if let hir::TyTraitObject(ref bounds, ref lifetime) = ty.node { + if let hir::TyKind::TraitObject(ref bounds, ref lifetime) = ty.node { for bound in bounds { self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None); } @@ -1760,16 +2058,16 
@@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } else { intravisit::walk_ty(self, ty); } - if let hir::TyBareFn(_) = ty.node { - self.binder_depth -= 1; + if let hir::TyKind::BareFn(_) = ty.node { + self.outer_index.shift_out(1); } } fn visit_generic_param(&mut self, param: &hir::GenericParam) { - if let hir::GenericParam::Lifetime(ref lifetime_def) = *param { - for l in &lifetime_def.bounds { - self.visit_lifetime(l); - } + if let hir::GenericParamKind::Lifetime { .. } = param.kind { + // FIXME(eddyb) Do we want this? It only makes a difference + // if this `for<'a>` lifetime parameter is never used. + self.have_bound_regions = true; } intravisit::walk_generic_param(self, param); @@ -1780,22 +2078,22 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { trait_ref: &hir::PolyTraitRef, modifier: hir::TraitBoundModifier, ) { - self.binder_depth += 1; + self.outer_index.shift_in(1); intravisit::walk_poly_trait_ref(self, trait_ref, modifier); - self.binder_depth -= 1; + self.outer_index.shift_out(1); } fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) { if let Some(&lifetime) = self.map.defs.get(&lifetime_ref.id) { match lifetime { Region::LateBound(debruijn, _, _) | Region::LateBoundAnon(debruijn, _) - if debruijn.depth < self.binder_depth => + if debruijn < self.outer_index => { self.have_bound_regions = true; } _ => { self.lifetimes - .insert(lifetime.from_depth(self.binder_depth)); + .insert(lifetime.shifted_out_to_binder(self.outer_index)); } } } @@ -1803,7 +2101,8 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } } - fn resolve_elided_lifetimes(&mut self, lifetime_refs: &'tcx [hir::Lifetime]) { + fn resolve_elided_lifetimes(&mut self, + lifetime_refs: Vec<&'tcx hir::Lifetime>) { if lifetime_refs.is_empty() { return; } @@ -1847,25 +2146,14 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } }; - let mut err = struct_span_err!( - self.tcx.sess, - span, - E0106, - "missing lifetime specifier{}", - if lifetime_refs.len() > 1 { "s" } else { "" } - ); - let msg = if 
lifetime_refs.len() > 1 { - format!("expected {} lifetime parameters", lifetime_refs.len()) - } else { - format!("expected lifetime parameter") - }; - err.span_label(span, msg); + let mut err = report_missing_lifetime_specifiers(self.tcx.sess, span, lifetime_refs.len()); if let Some(params) = error { if lifetime_refs.len() == 1 { self.report_elision_failure(&mut err, params); } } + err.emit(); } @@ -1981,96 +2269,99 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } fn check_lifetime_params(&mut self, old_scope: ScopeRef, params: &'tcx [hir::GenericParam]) { - for (i, lifetime_i) in params.lifetimes().enumerate() { - for lifetime in params.lifetimes() { - match lifetime.lifetime.name { - hir::LifetimeName::Static | hir::LifetimeName::Underscore => { - let lifetime = lifetime.lifetime; - let name = lifetime.name.name(); - let mut err = struct_span_err!( - self.tcx.sess, - lifetime.span, - E0262, - "invalid lifetime parameter name: `{}`", - name - ); - err.span_label( - lifetime.span, - format!("{} is a reserved lifetime name", name), - ); - err.emit(); - } - hir::LifetimeName::Implicit | hir::LifetimeName::Name(_) => {} + let lifetimes: Vec<_> = params.iter().filter_map(|param| match param.kind { + GenericParamKind::Lifetime { .. } => Some((param, param.name)), + _ => None, + }).collect(); + for (i, (lifetime_i, lifetime_i_name)) in lifetimes.iter().enumerate() { + if let hir::ParamName::Plain(_) = lifetime_i_name { + let name = lifetime_i_name.ident().name; + if name == keywords::UnderscoreLifetime.name() || + name == keywords::StaticLifetime.name() { + let mut err = struct_span_err!( + self.tcx.sess, + lifetime_i.span, + E0262, + "invalid lifetime parameter name: `{}`", + lifetime_i.name.ident(), + ); + err.span_label( + lifetime_i.span, + format!("{} is a reserved lifetime name", name), + ); + err.emit(); } } // It is a hard error to shadow a lifetime within the same scope. 
- for lifetime_j in params.lifetimes().skip(i + 1) { - if lifetime_i.lifetime.name == lifetime_j.lifetime.name { + for (lifetime_j, lifetime_j_name) in lifetimes.iter().skip(i + 1) { + if lifetime_i_name == lifetime_j_name { struct_span_err!( self.tcx.sess, - lifetime_j.lifetime.span, + lifetime_j.span, E0263, "lifetime name `{}` declared twice in the same scope", - lifetime_j.lifetime.name.name() - ).span_label(lifetime_j.lifetime.span, "declared twice") - .span_label(lifetime_i.lifetime.span, "previous declaration here") - .emit(); + lifetime_j.name.ident() + ).span_label(lifetime_j.span, "declared twice") + .span_label(lifetime_i.span, "previous declaration here") + .emit(); } } // It is a soft error to shadow a lifetime within a parent scope. - self.check_lifetime_def_for_shadowing(old_scope, &lifetime_i.lifetime); + self.check_lifetime_param_for_shadowing(old_scope, &lifetime_i); for bound in &lifetime_i.bounds { - match bound.name { - hir::LifetimeName::Underscore => { - let mut err = struct_span_err!( - self.tcx.sess, - bound.span, - E0637, - "invalid lifetime bound name: `'_`" - ); - err.span_label(bound.span, "`'_` is a reserved lifetime name"); - err.emit(); - } - hir::LifetimeName::Static => { - self.insert_lifetime(bound, Region::Static); - self.tcx - .sess - .struct_span_warn( - lifetime_i.lifetime.span.to(bound.span), + match bound { + hir::GenericBound::Outlives(lt) => match lt.name { + hir::LifetimeName::Underscore => { + let mut err = struct_span_err!( + self.tcx.sess, + lt.span, + E0637, + "invalid lifetime bound name: `'_`" + ); + err.span_label(lt.span, "`'_` is a reserved lifetime name"); + err.emit(); + } + hir::LifetimeName::Static => { + self.insert_lifetime(lt, Region::Static); + self.tcx.sess.struct_span_warn( + lifetime_i.span.to(lt.span), &format!( "unnecessary lifetime parameter `{}`", - lifetime_i.lifetime.name.name() + lifetime_i.name.ident(), ), - ) - .help(&format!( + ).help(&format!( "you can use the `'static` lifetime directly, in 
place \ - of `{}`", - lifetime_i.lifetime.name.name() - )) - .emit(); - } - hir::LifetimeName::Implicit | hir::LifetimeName::Name(_) => { - self.resolve_lifetime_ref(bound); + of `{}`", + lifetime_i.name.ident(), + )).emit(); + } + hir::LifetimeName::Param(_) + | hir::LifetimeName::Implicit => { + self.resolve_lifetime_ref(lt); + } } + _ => bug!(), } } } } - fn check_lifetime_def_for_shadowing(&self, - mut old_scope: ScopeRef, - lifetime: &'tcx hir::Lifetime) { - for &(label, label_span) in &self.labels_in_fn { + fn check_lifetime_param_for_shadowing( + &self, + mut old_scope: ScopeRef, + param: &'tcx hir::GenericParam, + ) { + for label in &self.labels_in_fn { // FIXME (#24278): non-hygienic comparison - if lifetime.name.name() == label { + if param.name.ident().name == label.name { signal_shadowing_problem( self.tcx, - label, - original_label(label_span), - shadower_lifetime(&lifetime), + label.name, + original_label(label.span), + shadower_lifetime(¶m), ); return; } @@ -2089,18 +2380,16 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } Scope::Binder { - ref lifetimes, - s, - next_early_index: _, + ref lifetimes, s, .. } => { - if let Some(&def) = lifetimes.get(&lifetime.name) { + if let Some(&def) = lifetimes.get(¶m.name.modern()) { let node_id = self.tcx.hir.as_local_node_id(def.id().unwrap()).unwrap(); signal_shadowing_problem( self.tcx, - lifetime.name.name(), + param.name.ident().name, original_lifetime(self.tcx.hir.span(node_id)), - shadower_lifetime(&lifetime), + shadower_lifetime(¶m), ); return; } @@ -2111,6 +2400,50 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { } } + /// Returns true if, in the current scope, replacing `'_` would be + /// equivalent to a single-use lifetime. + fn track_lifetime_uses(&self) -> bool { + let mut scope = self.scope; + loop { + match *scope { + Scope::Root => break false, + + // Inside of items, it depends on the kind of item. + Scope::Binder { + track_lifetime_uses, + .. 
+ } => break track_lifetime_uses, + + // Inside a body, `'_` will use an inference variable, + // should be fine. + Scope::Body { .. } => break true, + + // A lifetime only used in a fn argument could as well + // be replaced with `'_`, as that would generate a + // fresh name, too. + Scope::Elision { + elide: Elide::FreshLateAnon(_), + .. + } => break true, + + // In the return type or other such place, `'_` is not + // going to make a fresh name, so we cannot + // necessarily replace a single-use lifetime with + // `'_`. + Scope::Elision { + elide: Elide::Exact(_), + .. + } => break false, + Scope::Elision { + elide: Elide::Error(_), + .. + } => break false, + + Scope::ObjectLifetimeDefault { s, .. } => scope = s, + } + } + } + fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: Region) { if lifetime_ref.id == ast::DUMMY_NODE_ID { span_bug!( @@ -2129,24 +2462,38 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { self.map.defs.insert(lifetime_ref.id, def); match def { - Region::LateBoundAnon(..) | - Region::Static => { + Region::LateBoundAnon(..) | Region::Static => { // These are anonymous lifetimes or lifetimes that are not declared. } - Region::Free(_, def_id) | - Region::LateBound(_, def_id, _) | - Region::EarlyBound(_, def_id, _) => { + Region::Free(_, def_id) + | Region::LateBound(_, def_id, _) + | Region::EarlyBound(_, def_id, _) => { // A lifetime declared by the user. 
- if !self.lifetime_uses.contains_key(&def_id) { + let track_lifetime_uses = self.track_lifetime_uses(); + debug!( + "insert_lifetime: track_lifetime_uses={}", + track_lifetime_uses + ); + if track_lifetime_uses && !self.lifetime_uses.contains_key(&def_id) { + debug!("insert_lifetime: first use of {:?}", def_id); self.lifetime_uses .insert(def_id, LifetimeUseSet::One(lifetime_ref)); } else { + debug!("insert_lifetime: many uses of {:?}", def_id); self.lifetime_uses.insert(def_id, LifetimeUseSet::Many); } } } } + + /// Sometimes we resolve a lifetime, but later find that it is an + /// error (esp. around impl trait). In that case, we remove the + /// entry into `map.defs` so as not to confuse later code. + fn uninsert_lifetime_on_error(&mut self, lifetime_ref: &'tcx hir::Lifetime, bad_def: Region) { + let old_value = self.map.defs.remove(&lifetime_ref.id); + assert_eq!(old_value, Some(bad_def)); + } } /////////////////////////////////////////////////////////////////////////// @@ -2168,8 +2515,7 @@ fn insert_late_bound_lifetimes( ) { debug!( "insert_late_bound_lifetimes(decl={:?}, generics={:?})", - decl, - generics + decl, generics ); let mut constrained_by_input = ConstrainedCollector { @@ -2196,31 +2542,21 @@ fn insert_late_bound_lifetimes( let mut appears_in_where_clause = AllCollector { regions: FxHashSet(), }; + appears_in_where_clause.visit_generics(generics); for param in &generics.params { - match *param { - hir::GenericParam::Lifetime(ref lifetime_def) => { - if !lifetime_def.bounds.is_empty() { + match param.kind { + hir::GenericParamKind::Lifetime { .. 
} => { + if !param.bounds.is_empty() { // `'a: 'b` means both `'a` and `'b` are referenced - appears_in_where_clause.visit_generic_param(param); + appears_in_where_clause + .regions.insert(hir::LifetimeName::Param(param.name.modern())); } } - hir::GenericParam::Type(ref ty_param) => { - walk_list!( - &mut appears_in_where_clause, - visit_ty_param_bound, - &ty_param.bounds - ); - } + hir::GenericParamKind::Type { .. } => {} } } - walk_list!( - &mut appears_in_where_clause, - visit_where_predicate, - &generics.where_clause.predicates - ); - debug!( "insert_late_bound_lifetimes: appears_in_where_clause={:?}", appears_in_where_clause.regions @@ -2230,34 +2566,26 @@ fn insert_late_bound_lifetimes( // - appear in the inputs // - do not appear in the where-clauses // - are not implicitly captured by `impl Trait` - for lifetime in generics.lifetimes() { - let name = lifetime.lifetime.name; - + for param in &generics.params { + let lt_name = hir::LifetimeName::Param(param.name.modern()); // appears in the where clauses? early-bound. - if appears_in_where_clause.regions.contains(&name) { + if appears_in_where_clause.regions.contains(<_name) { continue; } // does not appear in the inputs, but appears in the return type? early-bound. 
- if !constrained_by_input.regions.contains(&name) - && appears_in_output.regions.contains(&name) + if !constrained_by_input.regions.contains(<_name) + && appears_in_output.regions.contains(<_name) { continue; } - debug!( - "insert_late_bound_lifetimes: \ - lifetime {:?} with id {:?} is late-bound", - lifetime.lifetime.name, - lifetime.lifetime.id - ); + debug!("insert_late_bound_lifetimes: lifetime {:?} with id {:?} is late-bound", + param.name.ident(), + param.id); - let inserted = map.late_bound.insert(lifetime.lifetime.id); - assert!( - inserted, - "visited lifetime {:?} twice", - lifetime.lifetime.id - ); + let inserted = map.late_bound.insert(param.id); + assert!(inserted, "visited lifetime {:?} twice", param.id); } return; @@ -2273,14 +2601,14 @@ fn insert_late_bound_lifetimes( fn visit_ty(&mut self, ty: &'v hir::Ty) { match ty.node { - hir::TyPath(hir::QPath::Resolved(Some(_), _)) - | hir::TyPath(hir::QPath::TypeRelative(..)) => { + hir::TyKind::Path(hir::QPath::Resolved(Some(_), _)) + | hir::TyKind::Path(hir::QPath::TypeRelative(..)) => { // ignore lifetimes appearing in associated type // projections, as they are not *constrained* // (defined above) } - hir::TyPath(hir::QPath::Resolved(None, ref path)) => { + hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => { // consider only the lifetimes on the final // segment; I am not sure it's even currently // valid to have them elsewhere, but even if it @@ -2298,7 +2626,7 @@ fn insert_late_bound_lifetimes( } fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) { - self.regions.insert(lifetime_ref.name); + self.regions.insert(lifetime_ref.name.modern()); } } @@ -2312,7 +2640,31 @@ fn insert_late_bound_lifetimes( } fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) { - self.regions.insert(lifetime_ref.name); + self.regions.insert(lifetime_ref.name.modern()); } } } + +pub fn report_missing_lifetime_specifiers( + sess: &Session, + span: Span, + count: usize, +) -> DiagnosticBuilder<'_> { 
+ let mut err = struct_span_err!( + sess, + span, + E0106, + "missing lifetime specifier{}", + if count > 1 { "s" } else { "" } + ); + + let msg = if count > 1 { + format!("expected {} lifetime parameters", count) + } else { + "expected lifetime parameter".to_string() + }; + + err.span_label(span, msg); + + err +} diff --git a/src/librustc/middle/stability.rs b/src/librustc/middle/stability.rs index e80ea16f565a..8af8d463b112 100644 --- a/src/librustc/middle/stability.rs +++ b/src/librustc/middle/stability.rs @@ -18,12 +18,12 @@ use hir::def::Def; use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE}; use ty::{self, TyCtxt}; use middle::privacy::AccessLevels; -use session::DiagnosticMessageId; +use session::{DiagnosticMessageId, Session}; use syntax::symbol::Symbol; -use syntax_pos::{Span, MultiSpan, DUMMY_SP}; +use syntax_pos::{Span, MultiSpan}; use syntax::ast; use syntax::ast::{NodeId, Attribute}; -use syntax::feature_gate::{GateIssue, emit_feature_err, find_lang_feature_accepted_version}; +use syntax::feature_gate::{GateIssue, emit_feature_err}; use syntax::attr::{self, Stability, Deprecation}; use util::nodemap::{FxHashSet, FxHashMap}; @@ -131,7 +131,7 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { item_sp: Span, kind: AnnotationKind, visit_children: F) where F: FnOnce(&mut Self) { - if self.tcx.sess.features.borrow().staged_api { + if self.tcx.features().staged_api { // This crate explicitly wants staged API. debug!("annotate(id = {:?}, attrs = {:?})", id, attrs); if let Some(..) 
= attr::find_deprecation(self.tcx.sess.diagnostic(), attrs, item_sp) { @@ -165,7 +165,7 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { &attr::Stable {since: stab_since}) = (&stab.rustc_depr, &stab.level) { // Explicit version of iter::order::lt to handle parse errors properly for (dep_v, stab_v) in - dep_since.as_str().split(".").zip(stab_since.as_str().split(".")) { + dep_since.as_str().split('.').zip(stab_since.as_str().split('.')) { if let (Ok(dep_v), Ok(stab_v)) = (dep_v.parse::(), stab_v.parse()) { match dep_v.cmp(&stab_v) { Ordering::Less => { @@ -205,7 +205,7 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { } else { // Emit errors for non-staged-api crates. for attr in attrs { - let tag = unwrap_or!(attr.name(), continue); + let tag = attr.name(); if tag == "unstable" || tag == "stable" || tag == "rustc_deprecated" { attr::mark_used(attr); self.tcx.sess.span_err(attr.span(), "stability attributes may not be used \ @@ -263,14 +263,14 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> { // they don't have their own stability. They still can be annotated as unstable // and propagate this unstability to children, but this annotation is completely // optional. They inherit stability from their parents when unannotated. - hir::ItemImpl(.., None, _, _) | hir::ItemForeignMod(..) => { + hir::ItemKind::Impl(.., None, _, _) | hir::ItemKind::ForeignMod(..) => { self.in_trait_impl = false; kind = AnnotationKind::Container; } - hir::ItemImpl(.., Some(_), _, _) => { + hir::ItemKind::Impl(.., Some(_), _, _) => { self.in_trait_impl = true; } - hir::ItemStruct(ref sd, _) => { + hir::ItemKind::Struct(ref sd, _) => { if !sd.is_struct() { self.annotate(sd.id(), &i.attrs, i.span, AnnotationKind::Required, |_| {}) } @@ -353,7 +353,7 @@ impl<'a, 'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'a, 'tcx> { // they don't have their own stability. They still can be annotated as unstable // and propagate this unstability to children, but this annotation is completely // optional. 
They inherit stability from their parents when unannotated. - hir::ItemImpl(.., None, _, _) | hir::ItemForeignMod(..) => {} + hir::ItemKind::Impl(.., None, _, _) | hir::ItemKind::ForeignMod(..) => {} _ => self.check_missing_stability(i.id, i.span) } @@ -398,7 +398,7 @@ impl<'a, 'tcx> Index<'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Index<'tcx> { let is_staged_api = tcx.sess.opts.debugging_opts.force_unstable_if_unmarked || - tcx.sess.features.borrow().staged_api; + tcx.features().staged_api; let mut staged_api = FxHashMap(); staged_api.insert(LOCAL_CRATE, is_staged_api); let mut index = Index { @@ -408,7 +408,7 @@ impl<'a, 'tcx> Index<'tcx> { active_features: FxHashSet(), }; - let ref active_lib_features = tcx.sess.features.borrow().declared_lib_features; + let ref active_lib_features = tcx.features().declared_lib_features; // Put the active features into a map for quick lookup index.active_features = active_lib_features.iter().map(|&(ref s, _)| s.clone()).collect(); @@ -470,10 +470,50 @@ pub fn check_unstable_api_usage<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { tcx.hir.krate().visit_all_item_likes(&mut checker.as_deep_visitor()); } +/// Check whether an item marked with `deprecated(since="X")` is currently +/// deprecated (i.e. whether X is not greater than the current rustc version). +pub fn deprecation_in_effect(since: &str) -> bool { + fn parse_version(ver: &str) -> Vec { + // We ignore non-integer components of the version (e.g. "nightly"). + ver.split(|c| c == '.' || c == '-').flat_map(|s| s.parse()).collect() + } + + if let Some(rustc) = option_env!("CFG_RELEASE") { + let since: Vec = parse_version(since); + let rustc: Vec = parse_version(rustc); + // We simply treat invalid `since` attributes as relating to a previous + // Rust version, thus always displaying the warning. + if since.len() != 3 { + return true; + } + since <= rustc + } else { + // By default, a deprecation warning applies to + // the current version of the compiler. 
+ true + } +} + struct Checker<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, } +/// Result of `TyCtxt::eval_stability`. +pub enum EvalResult { + /// We can use the item because it is stable or we provided the + /// corresponding feature gate. + Allow, + /// We cannot use the item because it is unstable and we did not provide the + /// corresponding feature gate. + Deny { + feature: Symbol, + reason: Option, + issue: u32, + }, + /// The item does not have the `#[stable]` or `#[unstable]` marker assigned. + Unmarked, +} + impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // (See issue #38412) fn skip_stability_check_due_to_privacy(self, mut def_id: DefId) -> bool { @@ -509,14 +549,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn check_stability(self, def_id: DefId, id: NodeId, span: Span) { + /// Evaluates the stability of an item. + /// + /// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding + /// `#![feature]` has been provided. Returns `EvalResult::Deny` which describes the offending + /// unstable feature otherwise. + /// + /// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been + /// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to + /// `id`. 
+ pub fn eval_stability(self, def_id: DefId, id: Option, span: Span) -> EvalResult { if span.allows_unstable() { debug!("stability: \ skipping span={:?} since it is internal", span); - return; + return EvalResult::Allow; } - let lint_deprecated = |def_id: DefId, note: Option| { + let lint_deprecated = |def_id: DefId, id: NodeId, note: Option| { let path = self.item_path_str(def_id); let msg = if let Some(note) = note { @@ -526,22 +575,31 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }; self.lint_node(lint::builtin::DEPRECATED, id, span, &msg); + if id == ast::DUMMY_NODE_ID { + span_bug!(span, "emitted a deprecated lint with dummy node id: {:?}", def_id); + } }; // Deprecated attributes apply in-crate and cross-crate. - if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) { - let skip = if id == ast::DUMMY_NODE_ID { - true - } else { - let parent_def_id = self.hir.local_def_id(self.hir.get_parent(id)); - self.lookup_deprecation_entry(parent_def_id).map_or(false, |parent_depr| { - parent_depr.same_origin(&depr_entry) - }) - }; + if let Some(id) = id { + if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) { + // If the deprecation is scheduled for a future Rust + // version, then we should display no warning message. 
+ let deprecated_in_future_version = if let Some(sym) = depr_entry.attr.since { + let since = sym.as_str(); + !deprecation_in_effect(&since) + } else { + false + }; - if !skip { - lint_deprecated(def_id, depr_entry.attr.note); - } + let parent_def_id = self.hir.local_def_id(self.hir.get_parent(id)); + let skip = deprecated_in_future_version || + self.lookup_deprecation_entry(parent_def_id) + .map_or(false, |parent_depr| parent_depr.same_origin(&depr_entry)); + if !skip { + lint_deprecated(def_id, id, depr_entry.attr.note); + } + }; } let is_staged_api = self.lookup_stability(DefId { @@ -549,35 +607,37 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ..def_id }).is_some(); if !is_staged_api { - return; + return EvalResult::Allow; } let stability = self.lookup_stability(def_id); debug!("stability: \ inspecting def_id={:?} span={:?} of stability={:?}", def_id, span, stability); - if let Some(&Stability{rustc_depr: Some(attr::RustcDeprecation { reason, .. }), ..}) + if let Some(&Stability{rustc_depr: Some(attr::RustcDeprecation { reason, since }), ..}) = stability { - if id != ast::DUMMY_NODE_ID { - lint_deprecated(def_id, Some(reason)); + if let Some(id) = id { + if deprecation_in_effect(&since.as_str()) { + lint_deprecated(def_id, id, Some(reason)); + } } } // Only the cross-crate scenario matters when checking unstable APIs let cross_crate = !def_id.is_local(); if !cross_crate { - return + return EvalResult::Allow; } // Issue 38412: private items lack stability markers. if self.skip_stability_check_due_to_privacy(def_id) { - return + return EvalResult::Allow; } match stability { - Some(&Stability { level: attr::Unstable {ref reason, issue}, ref feature, .. }) => { - if self.stability().active_features.contains(feature) { - return + Some(&Stability { level: attr::Unstable { reason, issue }, feature, .. 
}) => { + if self.stability().active_features.contains(&feature) { + return EvalResult::Allow; } // When we're compiling the compiler itself we may pull in @@ -589,23 +649,45 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // the `-Z force-unstable-if-unmarked` flag present (we're // compiling a compiler crate), then let this missing feature // annotation slide. - if *feature == "rustc_private" && issue == 27812 { + if feature == "rustc_private" && issue == 27812 { if self.sess.opts.debugging_opts.force_unstable_if_unmarked { - return + return EvalResult::Allow; } } - let msg = match *reason { - Some(ref r) => format!("use of unstable library feature '{}': {}", - feature.as_str(), &r), + EvalResult::Deny { feature, reason, issue } + } + Some(_) => { + // Stable APIs are always ok to call and deprecated APIs are + // handled by the lint emitting logic above. + EvalResult::Allow + } + None => { + EvalResult::Unmarked + } + } + } + + /// Checks if an item is stable or error out. + /// + /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not + /// exist, emits an error. + /// + /// Additionally, this function will also check if the item is deprecated. If so, and `id` is + /// not `None`, a deprecated lint attached to `id` will be emitted. 
+ pub fn check_stability(self, def_id: DefId, id: Option, span: Span) { + match self.eval_stability(def_id, id, span) { + EvalResult::Allow => {} + EvalResult::Deny { feature, reason, issue } => { + let msg = match reason { + Some(r) => format!("use of unstable library feature '{}': {}", feature, r), None => format!("use of unstable library feature '{}'", &feature) }; - let msp: MultiSpan = span.into(); let cm = &self.sess.parse_sess.codemap(); let span_key = msp.primary_span().and_then(|sp: Span| - if sp != DUMMY_SP { + if !sp.is_dummy() { let file = cm.lookup_char_pos(sp.lo()).file; if file.name.is_macros() { None @@ -624,12 +706,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { GateIssue::Library(Some(issue)), &msg); } } - Some(_) => { - // Stable APIs are always ok to call and deprecated APIs are - // handled by the lint emitting logic above. - } - None => { - span_bug!(span, "encountered unmarked API"); + EvalResult::Unmarked => { + span_bug!(span, "encountered unmarked API: {:?}", def_id); } } } @@ -645,9 +723,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { match item.node { - hir::ItemExternCrate(_) => { + hir::ItemKind::ExternCrate(_) => { // compiler-generated `extern crate` items have a dummy span. - if item.span == DUMMY_SP { return } + if item.span.is_dummy() { return } let def_id = self.tcx.hir.local_def_id(item.id); let cnum = match self.tcx.extern_mod_stmt_cnum(def_id) { @@ -655,21 +733,22 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { None => return, }; let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX }; - self.tcx.check_stability(def_id, item.id, item.span); + self.tcx.check_stability(def_id, Some(item.id), item.span); } // For implementations of traits, check the stability of each item // individually as it's possible to have a stable trait with unstable // items. 
- hir::ItemImpl(.., Some(ref t), _, ref impl_item_refs) => { + hir::ItemKind::Impl(.., Some(ref t), _, ref impl_item_refs) => { if let Def::Trait(trait_did) = t.path.def { for impl_item_ref in impl_item_refs { let impl_item = self.tcx.hir.impl_item(impl_item_ref.id); let trait_item_def_id = self.tcx.associated_items(trait_did) - .find(|item| item.name == impl_item.name).map(|item| item.def_id); + .find(|item| item.ident.name == impl_item.ident.name) + .map(|item| item.def_id); if let Some(def_id) = trait_item_def_id { - // Pass `DUMMY_NODE_ID` to skip deprecation warnings. - self.tcx.check_stability(def_id, ast::DUMMY_NODE_ID, impl_item.span); + // Pass `None` to skip deprecation warnings. + self.tcx.check_stability(def_id, None, impl_item.span); } } } @@ -677,7 +756,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { // There's no good place to insert stability check for non-Copy unions, // so semi-randomly perform it here in stability.rs - hir::ItemUnion(..) if !self.tcx.sess.features.borrow().untagged_unions => { + hir::ItemKind::Union(..) 
if !self.tcx.features().untagged_unions => { let def_id = self.tcx.hir.local_def_id(item.id); let adt_def = self.tcx.adt_def(def_id); let ty = self.tcx.type_of(def_id); @@ -688,7 +767,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { "unions with `Drop` implementations are unstable"); } else { let param_env = self.tcx.param_env(def_id); - if !param_env.can_type_implement_copy(self.tcx, ty, item.span).is_ok() { + if !param_env.can_type_implement_copy(self.tcx, ty).is_ok() { emit_feature_err(&self.tcx.sess.parse_sess, "untagged_unions", item.span, GateIssue::Language, "unions with non-`Copy` fields are unstable"); @@ -701,11 +780,12 @@ impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { intravisit::walk_item(self, item); } - fn visit_path(&mut self, path: &'tcx hir::Path, id: ast::NodeId) { + fn visit_path(&mut self, path: &'tcx hir::Path, id: hir::HirId) { + let id = self.tcx.hir.hir_to_node_id(id); match path.def { Def::Local(..) | Def::Upvar(..) | Def::PrimTy(..) | Def::SelfTy(..) | Def::Err => {} - _ => self.tcx.check_stability(path.def.def_id(), id, path.span) + _ => self.tcx.check_stability(path.def.def_id(), Some(id), path.span) } intravisit::walk_path(self, path) } @@ -721,8 +801,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// were expected to be library features), and the list of features used from /// libraries, identify activated features that don't exist and error about them. 
pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let sess = &tcx.sess; - let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE); if tcx.stability().staged_api[&LOCAL_CRATE] { @@ -736,37 +814,70 @@ pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { krate.visit_all_item_likes(&mut missing.as_deep_visitor()); } - let ref declared_lib_features = sess.features.borrow().declared_lib_features; - let mut remaining_lib_features: FxHashMap - = declared_lib_features.clone().into_iter().collect(); - remaining_lib_features.remove(&Symbol::intern("proc_macro")); - - for &(ref stable_lang_feature, span) in &sess.features.borrow().declared_stable_lang_features { - let version = find_lang_feature_accepted_version(&stable_lang_feature.as_str()) - .expect("unexpectedly couldn't find version feature was stabilized"); - tcx.lint_node(lint::builtin::STABLE_FEATURES, - ast::CRATE_NODE_ID, - span, - &format_stable_since_msg(version)); + let declared_lang_features = &tcx.features().declared_lang_features; + let mut lang_features = FxHashSet(); + for &(feature, span, since) in declared_lang_features { + if let Some(since) = since { + // Warn if the user has enabled an already-stable lang feature. + unnecessary_stable_feature_lint(tcx, span, feature, since); + } + if lang_features.contains(&feature) { + // Warn if the user enables a lang feature multiple times. + duplicate_feature_err(tcx.sess, span, feature); + } + lang_features.insert(feature); } - // FIXME(#44232) the `used_features` table no longer exists, so we don't - // lint about unknown or unused features. We should reenable - // this one day! 
- // - // let index = tcx.stability(); - // for (used_lib_feature, level) in &index.used_features { - // remaining_lib_features.remove(used_lib_feature); - // } - // - // for &span in remaining_lib_features.values() { - // tcx.lint_node(lint::builtin::UNUSED_FEATURES, - // ast::CRATE_NODE_ID, - // span, - // "unused or unknown feature"); - // } + let declared_lib_features = &tcx.features().declared_lib_features; + let mut remaining_lib_features = FxHashMap(); + for (feature, span) in declared_lib_features { + if remaining_lib_features.contains_key(&feature) { + // Warn if the user enables a lib feature multiple times. + duplicate_feature_err(tcx.sess, *span, *feature); + } + remaining_lib_features.insert(feature, span.clone()); + } + // `stdbuild` has special handling for `libc`, so we need to + // recognise the feature when building std. + // Likewise, libtest is handled specially, so `test` isn't + // available as we'd like it to be. + // FIXME: only remove `libc` when `stdbuild` is active. + // FIXME: remove special casing for `test`. + remaining_lib_features.remove(&Symbol::intern("libc")); + remaining_lib_features.remove(&Symbol::intern("test")); + + for (feature, stable) in tcx.lib_features().to_vec() { + if let Some(since) = stable { + if let Some(span) = remaining_lib_features.get(&feature) { + // Warn if the user has enabled an already-stable lib feature. + unnecessary_stable_feature_lint(tcx, *span, feature, since); + } + } + remaining_lib_features.remove(&feature); + } + + for (feature, span) in remaining_lib_features { + struct_span_err!(tcx.sess, span, E0635, "unknown feature `{}`", feature).emit(); + } + + // FIXME(#44232): the `used_features` table no longer exists, so we + // don't lint about unused features. We should reenable this one day! } -fn format_stable_since_msg(version: &str) -> String { - format!("this feature has been stable since {}. 
Attribute no longer needed", version) +fn unnecessary_stable_feature_lint<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + span: Span, + feature: Symbol, + since: Symbol +) { + tcx.lint_node(lint::builtin::STABLE_FEATURES, + ast::CRATE_NODE_ID, + span, + &format!("the feature `{}` has been stable since {} and no longer requires \ + an attribute to enable", feature, since)); +} + +fn duplicate_feature_err(sess: &Session, span: Span, feature: Symbol) { + struct_span_err!(sess, span, E0636, "the feature `{}` has already been declared", feature) + .emit(); } diff --git a/src/librustc/middle/weak_lang_items.rs b/src/librustc/middle/weak_lang_items.rs index 50fb58407026..e8431ce3e109 100644 --- a/src/librustc/middle/weak_lang_items.rs +++ b/src/librustc/middle/weak_lang_items.rs @@ -13,10 +13,11 @@ use session::config; use middle::lang_items; -use rustc_back::PanicStrategy; +use rustc_target::spec::PanicStrategy; use syntax::ast; use syntax::symbol::Symbol; use syntax_pos::Span; +use hir::def_id::DefId; use hir::intravisit::{Visitor, NestedVisitorMap}; use hir::intravisit; use hir; @@ -55,7 +56,7 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } pub fn link_name(attrs: &[ast::Attribute]) -> Option { - lang_items::extract(attrs).and_then(|name| { + lang_items::extract(attrs).and_then(|(name, _)| { $(if name == stringify!($name) { Some(Symbol::intern(stringify!($sym))) } else)* { @@ -64,18 +65,36 @@ pub fn link_name(attrs: &[ast::Attribute]) -> Option { }) } +/// Returns whether the specified `lang_item` doesn't actually need to be +/// present for this compilation. +/// +/// Not all lang items are always required for each compilation, particularly in +/// the case of panic=abort. In these situations some lang items are injected by +/// crates and don't actually need to be defined in libstd. +pub fn whitelisted(tcx: TyCtxt, lang_item: lang_items::LangItem) -> bool { + // If we're not compiling with unwinding, we won't actually need these + // symbols. 
Other panic runtimes ensure that the relevant symbols are + // available to link things together, but they're never exercised. + if tcx.sess.panic_strategy() != PanicStrategy::Unwind { + return lang_item == lang_items::EhPersonalityLangItem || + lang_item == lang_items::EhUnwindResumeLangItem + } + + false +} + fn verify<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, items: &lang_items::LanguageItems) { // We only need to check for the presence of weak lang items if we're // emitting something that's not an rlib. let needs_check = tcx.sess.crate_types.borrow().iter().any(|kind| { match *kind { - config::CrateTypeDylib | - config::CrateTypeProcMacro | - config::CrateTypeCdylib | - config::CrateTypeExecutable | - config::CrateTypeStaticlib => true, - config::CrateTypeRlib => false, + config::CrateType::Dylib | + config::CrateType::ProcMacro | + config::CrateType::Cdylib | + config::CrateType::Executable | + config::CrateType::Staticlib => true, + config::CrateType::Rlib => false, } }); if !needs_check { @@ -89,22 +108,20 @@ fn verify<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - // If we're not compiling with unwinding, we won't actually need these - // symbols. Other panic runtimes ensure that the relevant symbols are - // available to link things together, but they're never exercised. 
- let mut whitelisted = HashSet::new(); - if tcx.sess.panic_strategy() != PanicStrategy::Unwind { - whitelisted.insert(lang_items::EhPersonalityLangItem); - whitelisted.insert(lang_items::EhUnwindResumeLangItem); - } - $( if missing.contains(&lang_items::$item) && - !whitelisted.contains(&lang_items::$item) && + !whitelisted(tcx, lang_items::$item) && items.$name().is_none() { - tcx.sess.err(&format!("language item required, but not found: `{}`", - stringify!($name))); - + if lang_items::$item == lang_items::PanicImplLangItem { + tcx.sess.err(&format!("`#[panic_implementation]` function required, \ + but not found")); + } else if lang_items::$item == lang_items::OomLangItem { + tcx.sess.err(&format!("`#[alloc_error_handler]` function required, \ + but not found")); + } else { + tcx.sess.err(&format!("language item required, but not found: `{}`", + stringify!($name))); + } } )* } @@ -129,17 +146,27 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Context<'a, 'tcx> { } fn visit_foreign_item(&mut self, i: &hir::ForeignItem) { - if let Some(lang_item) = lang_items::extract(&i.attrs) { + if let Some((lang_item, _)) = lang_items::extract(&i.attrs) { self.register(&lang_item.as_str(), i.span); } intravisit::walk_foreign_item(self, i) } } +impl<'a, 'tcx, 'gcx> TyCtxt<'a, 'tcx, 'gcx> { + pub fn is_weak_lang_item(&self, item_def_id: DefId) -> bool { + let lang_items = self.lang_items(); + let did = Some(item_def_id); + + $(lang_items.$name() == did)||+ + } +} + ) } weak_lang_items! 
{ - panic_fmt, PanicFmtLangItem, rust_begin_unwind; + panic_impl, PanicImplLangItem, rust_begin_unwind; eh_personality, EhPersonalityLangItem, rust_eh_personality; eh_unwind_resume, EhUnwindResumeLangItem, rust_eh_unwind_resume; + oom, OomLangItem, rust_oom; } diff --git a/src/librustc/mir/README.md b/src/librustc/mir/README.md deleted file mode 100644 index cac86be0fcb7..000000000000 --- a/src/librustc/mir/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# MIR definition and pass system - -This file contains the definition of the MIR datatypes along with the -various types for the "MIR Pass" system, which lets you easily -register and define new MIR transformations and analyses. - -Most of the code that operates on MIR can be found in the -`librustc_mir` crate or other crates. The code found here in -`librustc` is just the datatype definitions, along with the functions -which operate on MIR to be placed everywhere else. - -## MIR Data Types and visitor - -The main MIR data type is `rustc::mir::Mir`, defined in `mod.rs`. -There is also the MIR visitor (in `visit.rs`) which allows you to walk -the MIR and override what actions will be taken at various points (you -can visit in either shared or mutable mode; the latter allows changing -the MIR in place). Finally `traverse.rs` contains various traversal -routines for visiting the MIR CFG in [different standard orders][traversal] -(e.g. pre-order, reverse post-order, and so forth). - -[traversal]: https://en.wikipedia.org/wiki/Tree_traversal - -## MIR pass suites and their integration into the query system - -As a MIR *consumer*, you are expected to use one of the queries that -returns a "final MIR". As of the time of this writing, there is only -one: `optimized_mir(def_id)`, but more are expected to come in the -future. For foreign def-ids, we simply read the MIR from the other -crate's metadata. 
But for local def-ids, the query will construct the -MIR and then iteratively optimize it by putting it through various -pipeline stages. This section describes those pipeline stages and how -you can extend them. - -To produce the `optimized_mir(D)` for a given def-id `D`, the MIR -passes through several suites of optimizations, each represented by a -query. Each suite consists of multiple optimizations and -transformations. These suites represent useful intermediate points -where we want to access the MIR for type checking or other purposes: - -- `mir_build(D)` -- not a query, but this constructs the initial MIR -- `mir_const(D)` -- applies some simple transformations to make MIR ready for constant evaluation; -- `mir_validated(D)` -- applies some more transformations, making MIR ready for borrow checking; -- `optimized_mir(D)` -- the final state, after all optimizations have been performed. - -### Stealing - -The intermediate queries `mir_const()` and `mir_validated()` yield up -a `&'tcx Steal>`, allocated using -`tcx.alloc_steal_mir()`. This indicates that the result may be -**stolen** by the next suite of optimizations -- this is an -optimization to avoid cloning the MIR. Attempting to use a stolen -result will cause a panic in the compiler. Therefore, it is important -that you do not read directly from these intermediate queries except as -part of the MIR processing pipeline. - -Because of this stealing mechanism, some care must also be taken to -ensure that, before the MIR at a particular phase in the processing -pipeline is stolen, anyone who may want to read from it has already -done so. Concretely, this means that if you have some query `foo(D)` -that wants to access the result of `mir_const(D)` or -`mir_validated(D)`, you need to have the successor pass "force" -`foo(D)` using `ty::queries::foo::force(...)`. This will force a query -to execute even though you don't directly require its result. - -As an example, consider MIR const qualification. 
It wants to read the -result produced by the `mir_const()` suite. However, that result will -be **stolen** by the `mir_validated()` suite. If nothing was done, -then `mir_const_qualif(D)` would succeed if it came before -`mir_validated(D)`, but fail otherwise. Therefore, `mir_validated(D)` -will **force** `mir_const_qualif` before it actually steals, thus -ensuring that the reads have already happened: - -``` -mir_const(D) --read-by--> mir_const_qualif(D) - | ^ - stolen-by | - | (forces) - v | -mir_validated(D) ------------+ -``` - -### Implementing and registering a pass - -To create a new MIR pass, you simply implement the `MirPass` trait for -some fresh singleton type `Foo`. Once you have implemented a trait for -your type `Foo`, you then have to insert `Foo` into one of the suites; -this is done in `librustc_driver/driver.rs` by invoking `push_pass(S, -Foo)` with the appropriate suite substituted for `S`. - diff --git a/src/librustc/mir/cache.rs b/src/librustc/mir/cache.rs index efc2f647cfdf..41ba526b73fe 100644 --- a/src/librustc/mir/cache.rs +++ b/src/librustc/mir/cache.rs @@ -8,8 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cell::{Ref, RefCell}; use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::sync::{RwLock, ReadGuard}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; use ich::StableHashingContext; @@ -19,7 +19,7 @@ use rustc_serialize as serialize; #[derive(Clone, Debug)] pub struct Cache { - predecessors: RefCell>>> + predecessors: RwLock>>> } @@ -35,9 +35,9 @@ impl serialize::Decodable for Cache { } } -impl<'gcx> HashStable> for Cache { +impl<'a> HashStable> for Cache { fn hash_stable(&self, - _: &mut StableHashingContext<'gcx>, + _: &mut StableHashingContext<'a>, _: &mut StableHasher) { // do nothing } @@ -46,7 +46,7 @@ impl<'gcx> HashStable> for Cache { impl Cache { pub fn new() -> Self { Cache { - predecessors: RefCell::new(None) + predecessors: RwLock::new(None) } } @@ -55,12 +55,12 @@ impl Cache { *self.predecessors.borrow_mut() = None; } - pub fn predecessors(&self, mir: &Mir) -> Ref>> { + pub fn predecessors(&self, mir: &Mir) -> ReadGuard>> { if self.predecessors.borrow().is_none() { *self.predecessors.borrow_mut() = Some(calculate_predecessors(mir)); } - Ref::map(self.predecessors.borrow(), |p| p.as_ref().unwrap()) + ReadGuard::map(self.predecessors.borrow(), |p| p.as_ref().unwrap()) } } @@ -68,7 +68,7 @@ fn calculate_predecessors(mir: &Mir) -> IndexVec> { let mut result = IndexVec::from_elem(vec![], mir.basic_blocks()); for (bb, data) in mir.basic_blocks().iter_enumerated() { if let Some(ref term) = data.terminator { - for &tgt in term.successors().iter() { + for &tgt in term.successors() { result[tgt].push(bb); } } @@ -76,3 +76,7 @@ fn calculate_predecessors(mir: &Mir) -> IndexVec> { result } + +CloneTypeFoldableAndLiftImpls! 
{ + Cache, +} diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index 9ebfe25c107a..1e9584bc55bd 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -1,44 +1,179 @@ -use std::error::Error; use std::{fmt, env}; use mir; use ty::{FnSig, Ty, layout}; +use ty::layout::{Size, Align}; +use rustc_data_structures::sync::Lrc; use super::{ - MemoryPointer, Lock, AccessKind + Pointer, Lock, AccessKind }; -use rustc_const_math::ConstMathErr; -use syntax::codemap::Span; use backtrace::Backtrace; -#[derive(Debug)] -pub struct EvalError<'tcx> { - pub kind: EvalErrorKind<'tcx>, - pub backtrace: Option, +use ty; +use ty::query::TyCtxtAt; +use errors::DiagnosticBuilder; + +use syntax_pos::Span; +use syntax::ast; + +pub type ConstEvalResult<'tcx> = Result<&'tcx ty::Const<'tcx>, Lrc>>; + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct ConstEvalErr<'tcx> { + pub span: Span, + pub error: ::mir::interpret::EvalError<'tcx>, + pub stacktrace: Vec, } -impl<'tcx> From> for EvalError<'tcx> { - fn from(kind: EvalErrorKind<'tcx>) -> Self { - let backtrace = match env::var("RUST_BACKTRACE") { - Ok(ref val) if !val.is_empty() => Some(Backtrace::new_unresolved()), - _ => None +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct FrameInfo { + pub span: Span, + pub location: String, + pub lint_root: Option, +} + +impl<'a, 'gcx, 'tcx> ConstEvalErr<'tcx> { + pub fn struct_error(&self, + tcx: TyCtxtAt<'a, 'gcx, 'tcx>, + message: &str) + -> Option> + { + self.struct_generic(tcx, message, None) + } + + pub fn report_as_error(&self, + tcx: TyCtxtAt<'a, 'gcx, 'tcx>, + message: &str + ) { + let err = self.struct_error(tcx, message); + if let Some(mut err) = err { + err.emit(); + } + } + + pub fn report_as_lint(&self, + tcx: TyCtxtAt<'a, 'gcx, 'tcx>, + message: &str, + lint_root: ast::NodeId, + ) { + let lint = self.struct_generic( + tcx, + message, + Some(lint_root), + ); + if let Some(mut 
lint) = lint { + lint.emit(); + } + } + + fn struct_generic( + &self, + tcx: TyCtxtAt<'a, 'gcx, 'tcx>, + message: &str, + lint_root: Option, + ) -> Option> { + match self.error.kind { + ::mir::interpret::EvalErrorKind::TypeckError | + ::mir::interpret::EvalErrorKind::TooGeneric | + ::mir::interpret::EvalErrorKind::CheckMatchError | + ::mir::interpret::EvalErrorKind::Layout(_) => return None, + ::mir::interpret::EvalErrorKind::ReferencedConstant(ref inner) => { + inner.struct_generic(tcx, "referenced constant has errors", lint_root)?.emit(); + }, + _ => {}, + } + trace!("reporting const eval failure at {:?}", self.span); + let mut err = if let Some(lint_root) = lint_root { + let node_id = self.stacktrace + .iter() + .rev() + .filter_map(|frame| frame.lint_root) + .next() + .unwrap_or(lint_root); + tcx.struct_span_lint_node( + ::rustc::lint::builtin::CONST_ERR, + node_id, + tcx.span, + message, + ) + } else { + struct_error(tcx, message) }; + err.span_label(self.span, self.error.to_string()); + for FrameInfo { span, location, .. 
} in &self.stacktrace { + err.span_label(*span, format!("inside call to `{}`", location)); + } + Some(err) + } +} + +pub fn struct_error<'a, 'gcx, 'tcx>( + tcx: TyCtxtAt<'a, 'gcx, 'tcx>, + msg: &str, +) -> DiagnosticBuilder<'tcx> { + struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg) +} + +#[derive(Debug, Clone, RustcEncodable, RustcDecodable)] +pub struct EvalError<'tcx> { + pub kind: EvalErrorKind<'tcx, u64>, +} + +impl<'tcx> From> for EvalError<'tcx> { + fn from(kind: EvalErrorKind<'tcx, u64>) -> Self { + match env::var("MIRI_BACKTRACE") { + Ok(ref val) if !val.is_empty() => { + let backtrace = Backtrace::new(); + + use std::fmt::Write; + let mut trace_text = "\n\nAn error occurred in miri:\n".to_string(); + write!(trace_text, "backtrace frames: {}\n", backtrace.frames().len()).unwrap(); + 'frames: for (i, frame) in backtrace.frames().iter().enumerate() { + if frame.symbols().is_empty() { + write!(trace_text, "{}: no symbols\n", i).unwrap(); + } + for symbol in frame.symbols() { + write!(trace_text, "{}: ", i).unwrap(); + if let Some(name) = symbol.name() { + write!(trace_text, "{}\n", name).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + write!(trace_text, "\tat ").unwrap(); + if let Some(file_path) = symbol.filename() { + write!(trace_text, "{}", file_path.display()).unwrap(); + } else { + write!(trace_text, "").unwrap(); + } + if let Some(line) = symbol.lineno() { + write!(trace_text, ":{}\n", line).unwrap(); + } else { + write!(trace_text, "\n").unwrap(); + } + } + } + error!("{}", trace_text); + }, + _ => {}, + } EvalError { kind, - backtrace, } } } -#[derive(Debug)] -pub enum EvalErrorKind<'tcx> { +pub type AssertMessage<'tcx> = EvalErrorKind<'tcx, mir::Operand<'tcx>>; + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub enum EvalErrorKind<'tcx, O> { /// This variant is used by machines to signal their own errors that do not /// match an existing variant - MachineError(Box), + MachineError(String), 
FunctionPointerTyMismatch(FnSig<'tcx>, FnSig<'tcx>), NoMirFor(String), - UnterminatedCString(MemoryPointer), + UnterminatedCString(Pointer), DanglingPointerDeref, DoubleFree, InvalidMemoryAccess, @@ -46,13 +181,14 @@ pub enum EvalErrorKind<'tcx> { InvalidBool, InvalidDiscriminant, PointerOutOfBounds { - ptr: MemoryPointer, + ptr: Pointer, access: bool, - allocation_size: u64, + allocation_size: Size, }, InvalidNullPointerUsage, ReadPointerAsBytes, ReadBytesAsPointer, + ReadForeignStatic, InvalidPointerMath, ReadUndefBytes, DeadLocal, @@ -60,46 +196,42 @@ pub enum EvalErrorKind<'tcx> { Unimplemented(String), DerefFunctionPointer, ExecuteMemory, - ArrayIndexOutOfBounds(Span, u64, u64), - Math(Span, ConstMathErr), + BoundsCheck { len: O, index: O }, + Overflow(mir::BinOp), + OverflowNeg, + DivisionByZero, + RemainderByZero, Intrinsic(String), - OverflowingMath, InvalidChar(u128), - OutOfMemory { - allocation_size: u64, - memory_size: u64, - memory_usage: u64, - }, - ExecutionTimeLimitReached, StackFrameLimitReached, OutOfTls, TlsOutOfBounds, AbiViolation(String), AlignmentCheckFailed { - required: u64, - has: u64, + required: Align, + has: Align, }, MemoryLockViolation { - ptr: MemoryPointer, + ptr: Pointer, len: u64, frame: usize, access: AccessKind, lock: Lock, }, MemoryAcquireConflict { - ptr: MemoryPointer, + ptr: Pointer, len: u64, kind: AccessKind, lock: Lock, }, InvalidMemoryLockRelease { - ptr: MemoryPointer, + ptr: Pointer, len: u64, frame: usize, lock: Lock, }, DeallocatedLockedMemory { - ptr: MemoryPointer, + ptr: Pointer, lock: Lock, }, ValidationFailure(String), @@ -113,7 +245,7 @@ pub enum EvalErrorKind<'tcx> { DeallocatedWrongMemoryKind(String, String), ReallocateNonBasePtr, DeallocateNonBasePtr, - IncorrectAllocationInformation(u64, usize, u64, u64), + IncorrectAllocationInformation(Size, Size, Align, Align), Layout(layout::LayoutError<'tcx>), HeapAllocZeroBytes, HeapAllocNonPowerOfTwoAlignment(u64), @@ -124,15 +256,24 @@ pub enum EvalErrorKind<'tcx> { 
UnimplementedTraitSelection, /// Abort in case type errors are reached TypeckError, + /// Resolution can fail if we are in a too generic context + TooGeneric, + CheckMatchError, + /// Cannot compute this constant because it depends on another one + /// which already produced an error + ReferencedConstant(Lrc>), + GeneratorResumedAfterReturn, + GeneratorResumedAfterPanic, + InfiniteLoop, } pub type EvalResult<'tcx, T = ()> = Result>; -impl<'tcx> Error for EvalError<'tcx> { - fn description(&self) -> &str { +impl<'tcx, O> EvalErrorKind<'tcx, O> { + pub fn description(&self) -> &str { use self::EvalErrorKind::*; - match self.kind { - MachineError(ref inner) => inner.description(), + match *self { + MachineError(ref inner) => inner, FunctionPointerTyMismatch(..) => "tried to call a function through a function pointer of a different type", InvalidMemoryAccess => @@ -165,6 +306,8 @@ impl<'tcx> Error for EvalError<'tcx> { "a raw memory access tried to access part of a pointer value as raw bytes", ReadBytesAsPointer => "a memory access tried to interpret some bytes as a pointer", + ReadForeignStatic => + "tried to read from foreign (extern) static", InvalidPointerMath => "attempted to do invalid arithmetic on pointers that would leak base addresses, e.g. comparing pointers into different allocations", ReadUndefBytes => @@ -178,22 +321,14 @@ impl<'tcx> Error for EvalError<'tcx> { "tried to dereference a function pointer", ExecuteMemory => "tried to treat a memory pointer as a function pointer", - ArrayIndexOutOfBounds(..) => + BoundsCheck{..} => "array index out of bounds", - Math(..) => - "mathematical operation failed", Intrinsic(..) => "intrinsic failed", - OverflowingMath => - "attempted to do overflowing math", NoMirFor(..) => "mir not found", InvalidChar(..) 
=> "tried to interpret an invalid 32-bit value as a char", - OutOfMemory{..} => - "could not allocate more memory", - ExecutionTimeLimitReached => - "reached the configured maximum execution time", StackFrameLimitReached => "reached the configured maximum number of stack frames", OutOfTls => @@ -239,32 +374,51 @@ impl<'tcx> Error for EvalError<'tcx> { "the evaluated program panicked", ReadFromReturnPointer => "tried to read from the return pointer", - EvalErrorKind::PathNotFound(_) => + PathNotFound(_) => "a path could not be resolved, maybe the crate is not loaded", UnimplementedTraitSelection => "there were unresolved type arguments during trait selection", TypeckError => "encountered constants with type errors, stopping evaluation", - } - } - - fn cause(&self) -> Option<&Error> { - use self::EvalErrorKind::*; - match self.kind { - MachineError(ref inner) => Some(&**inner), - _ => None, + TooGeneric => + "encountered overly generic constant", + CheckMatchError => + "match checking failed", + ReferencedConstant(_) => + "referenced constant has errors", + Overflow(mir::BinOp::Add) => "attempt to add with overflow", + Overflow(mir::BinOp::Sub) => "attempt to subtract with overflow", + Overflow(mir::BinOp::Mul) => "attempt to multiply with overflow", + Overflow(mir::BinOp::Div) => "attempt to divide with overflow", + Overflow(mir::BinOp::Rem) => "attempt to calculate the remainder with overflow", + OverflowNeg => "attempt to negate with overflow", + Overflow(mir::BinOp::Shr) => "attempt to shift right with overflow", + Overflow(mir::BinOp::Shl) => "attempt to shift left with overflow", + Overflow(op) => bug!("{:?} cannot overflow", op), + DivisionByZero => "attempt to divide by zero", + RemainderByZero => "attempt to calculate the remainder with a divisor of zero", + GeneratorResumedAfterReturn => "generator resumed after completion", + GeneratorResumedAfterPanic => "generator resumed after panicking", + InfiniteLoop => + "duplicate interpreter state observed here, 
const evaluation will never terminate", } } } impl<'tcx> fmt::Display for EvalError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self.kind) + } +} + +impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::EvalErrorKind::*; - match self.kind { + match *self { PointerOutOfBounds { ptr, access, allocation_size } => { write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}", if access { "memory access" } else { "pointer computed" }, - ptr.offset, ptr.alloc_id, allocation_size) + ptr.offset.bytes(), ptr.alloc_id, allocation_size.bytes()) }, MemoryLockViolation { ptr, len, frame, access, ref lock } => { write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}", @@ -288,24 +442,19 @@ impl<'tcx> fmt::Display for EvalError<'tcx> { NoMirFor(ref func) => write!(f, "no mir for `{}`", func), FunctionPointerTyMismatch(sig, got) => write!(f, "tried to call a function with sig {} through a function pointer of type {}", sig, got), - ArrayIndexOutOfBounds(span, len, index) => - write!(f, "index out of bounds: the len is {} but the index is {} at {:?}", len, index, span), + BoundsCheck { ref len, ref index } => + write!(f, "index out of bounds: the len is {:?} but the index is {:?}", len, index), ReallocatedWrongMemoryKind(ref old, ref new) => write!(f, "tried to reallocate memory from {} to {}", old, new), DeallocatedWrongMemoryKind(ref old, ref new) => write!(f, "tried to deallocate {} memory but gave {} as the kind", old, new), - Math(span, ref err) => - write!(f, "{:?} at {:?}", err, span), Intrinsic(ref err) => write!(f, "{}", err), InvalidChar(c) => write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c), - OutOfMemory { allocation_size, memory_size, memory_usage } => - write!(f, "tried to allocate {} more bytes, but only {} bytes are free of the {} byte memory", - allocation_size, memory_size - 
memory_usage, memory_size), AlignmentCheckFailed { required, has } => write!(f, "tried to access memory with alignment {}, but alignment {} is required", - has, required), + has.abi(), required.abi()), TypeNotPrimitive(ty) => write!(f, "expected primitive type, got {}", ty), Layout(ref err) => @@ -313,9 +462,9 @@ impl<'tcx> fmt::Display for EvalError<'tcx> { PathNotFound(ref path) => write!(f, "Cannot find path {:?}", path), MachineError(ref inner) => - write!(f, "machine error: {}", inner), + write!(f, "{}", inner), IncorrectAllocationInformation(size, size2, align, align2) => - write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size, align, size2, align2), + write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()), _ => write!(f, "{}", self.description()), } } diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 8ffea62f6be5..a0980b06230c 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -8,19 +8,35 @@ macro_rules! 
err { mod error; mod value; -pub use self::error::{EvalError, EvalResult, EvalErrorKind}; +pub use self::error::{ + EvalError, EvalResult, EvalErrorKind, AssertMessage, ConstEvalErr, struct_error, + FrameInfo, ConstEvalResult, +}; -pub use self::value::{PrimVal, PrimValKind, Value, Pointer, bytes_to_f32, bytes_to_f64}; +pub use self::value::{Scalar, Value, ConstValue, ScalarMaybeUndef}; -use std::collections::BTreeMap; use std::fmt; use mir; -use ty; -use ty::layout::{self, Align, HasDataLayout}; +use hir::def_id::DefId; +use ty::{self, TyCtxt, Instance}; +use ty::layout::{self, Align, HasDataLayout, Size}; use middle::region; use std::iter; +use std::io; +use std::ops::{Deref, DerefMut}; +use std::hash::Hash; +use syntax::ast::Mutability; +use rustc_serialize::{Encoder, Decodable, Encodable}; +use rustc_data_structures::sorted_map::SortedMap; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::{Lock as Mutex, HashMapExt}; +use rustc_data_structures::tiny_list::TinyList; +use byteorder::{WriteBytesExt, ReadBytesExt, LittleEndian, BigEndian}; +use ty::codec::TyDecoder; +use std::sync::atomic::{AtomicU32, Ordering}; +use std::num::NonZeroU32; -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum Lock { NoLock, WriteLock(DynamicLifetime), @@ -28,20 +44,20 @@ pub enum Lock { ReadLock(Vec), } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct DynamicLifetime { pub frame: usize, pub region: Option, // "None" indicates "until the function ends" } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub enum AccessKind { Read, Write, } /// Uniquely identifies a specific constant or static. 
-#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)] pub struct GlobalId<'tcx> { /// For a constant or static, the `Instance` of the item itself. /// For a promoted global, the `Instance` of the function they belong to. @@ -56,7 +72,7 @@ pub struct GlobalId<'tcx> { //////////////////////////////////////////////////////////////////////////////// pub trait PointerArithmetic: layout::HasDataLayout { - // These are not supposed to be overriden. + // These are not supposed to be overridden. //// Trunace the given value to the pointer size; also return whether there was an overflow fn truncate_to_ptr(self, val: u128) -> (u64, bool) { @@ -85,12 +101,12 @@ pub trait PointerArithmetic: layout::HasDataLayout { fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> { let (res, over) = self.overflowing_signed_offset(val, i as i128); - if over { err!(OverflowingMath) } else { Ok(res) } + if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } } fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> { let (res, over) = self.overflowing_offset(val, i); - if over { err!(OverflowingMath) } else { Ok(res) } + if over { err!(Overflow(mir::BinOp::Add)) } else { Ok(res) } } fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 { @@ -101,84 +117,473 @@ pub trait PointerArithmetic: layout::HasDataLayout { impl PointerArithmetic for T {} -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub struct MemoryPointer { +#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub struct Pointer { pub alloc_id: AllocId, - pub offset: u64, + pub offset: Size, } -impl<'tcx> MemoryPointer { - pub fn new(alloc_id: AllocId, offset: u64) -> Self { - MemoryPointer { alloc_id, offset } +/// Produces a `Pointer` which points to the beginning of the Allocation +impl From for Pointer { + fn from(alloc_id: AllocId) -> Self { + Pointer::new(alloc_id, 
Size::ZERO) + } +} + +impl<'tcx> Pointer { + pub fn new(alloc_id: AllocId, offset: Size) -> Self { + Pointer { alloc_id, offset } } pub(crate) fn wrapping_signed_offset(self, i: i64, cx: C) -> Self { - MemoryPointer::new( + Pointer::new( self.alloc_id, - cx.data_layout().wrapping_signed_offset(self.offset, i), + Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)), ) } pub fn overflowing_signed_offset(self, i: i128, cx: C) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i); - (MemoryPointer::new(self.alloc_id, res), over) + let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); + (Pointer::new(self.alloc_id, Size::from_bytes(res)), over) } pub(crate) fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { - Ok(MemoryPointer::new( + Ok(Pointer::new( self.alloc_id, - cx.data_layout().signed_offset(self.offset, i)?, + Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), )) } - pub fn overflowing_offset(self, i: u64, cx: C) -> (Self, bool) { - let (res, over) = cx.data_layout().overflowing_offset(self.offset, i); - (MemoryPointer::new(self.alloc_id, res), over) + pub fn overflowing_offset(self, i: Size, cx: C) -> (Self, bool) { + let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); + (Pointer::new(self.alloc_id, Size::from_bytes(res)), over) } - pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { - Ok(MemoryPointer::new( + pub fn offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + Ok(Pointer::new( self.alloc_id, - cx.data_layout().offset(self.offset, i)?, + Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), )) } } -#[derive(Copy, Clone, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] +#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)] pub struct AllocId(pub u64); +impl ::rustc_serialize::UseSpecializedEncodable for AllocId {} +impl 
::rustc_serialize::UseSpecializedDecodable for AllocId {} + +#[derive(RustcDecodable, RustcEncodable)] +enum AllocKind { + Alloc, + Fn, + Static, +} + +pub fn specialized_encode_alloc_id< + 'a, 'tcx, + E: Encoder, +>( + encoder: &mut E, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + alloc_id: AllocId, +) -> Result<(), E::Error> { + let alloc_type: AllocType<'tcx, &'tcx Allocation> = + tcx.alloc_map.lock().get(alloc_id).expect("no value for AllocId"); + match alloc_type { + AllocType::Memory(alloc) => { + trace!("encoding {:?} with {:#?}", alloc_id, alloc); + AllocKind::Alloc.encode(encoder)?; + alloc.encode(encoder)?; + } + AllocType::Function(fn_instance) => { + trace!("encoding {:?} with {:#?}", alloc_id, fn_instance); + AllocKind::Fn.encode(encoder)?; + fn_instance.encode(encoder)?; + } + AllocType::Static(did) => { + // referring to statics doesn't need to know about their allocations, + // just about its DefId + AllocKind::Static.encode(encoder)?; + did.encode(encoder)?; + } + } + Ok(()) +} + +// Used to avoid infinite recursion when decoding cyclic allocations. +type DecodingSessionId = NonZeroU32; + +#[derive(Clone)] +enum State { + Empty, + InProgressNonAlloc(TinyList), + InProgress(TinyList, AllocId), + Done(AllocId), +} + +pub struct AllocDecodingState { + // For each AllocId we keep track of which decoding state it's currently in. + decoding_state: Vec>, + // The offsets of each allocation in the data stream. 
+ data_offsets: Vec, +} + +impl AllocDecodingState { + + pub fn new_decoding_session(&self) -> AllocDecodingSession { + static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0); + let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst); + + // Make sure this is never zero + let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap(); + + AllocDecodingSession { + state: self, + session_id, + } + } + + pub fn new(data_offsets: Vec) -> AllocDecodingState { + let decoding_state: Vec<_> = ::std::iter::repeat(Mutex::new(State::Empty)) + .take(data_offsets.len()) + .collect(); + + AllocDecodingState { + decoding_state: decoding_state, + data_offsets, + } + } +} + +#[derive(Copy, Clone)] +pub struct AllocDecodingSession<'s> { + state: &'s AllocDecodingState, + session_id: DecodingSessionId, +} + +impl<'s> AllocDecodingSession<'s> { + + // Decodes an AllocId in a thread-safe way. + pub fn decode_alloc_id<'a, 'tcx, D>(&self, + decoder: &mut D) + -> Result + where D: TyDecoder<'a, 'tcx>, + 'tcx: 'a, + { + // Read the index of the allocation + let idx = decoder.read_u32()? as usize; + let pos = self.state.data_offsets[idx] as usize; + + // Decode the AllocKind now so that we know if we have to reserve an + // AllocId. + let (alloc_kind, pos) = decoder.with_position(pos, |decoder| { + let alloc_kind = AllocKind::decode(decoder)?; + Ok((alloc_kind, decoder.position())) + })?; + + // Check the decoding state, see if it's already decoded or if we should + // decode it here. + let alloc_id = { + let mut entry = self.state.decoding_state[idx].lock(); + + match *entry { + State::Done(alloc_id) => { + return Ok(alloc_id); + } + ref mut entry @ State::Empty => { + // We are allowed to decode + match alloc_kind { + AllocKind::Alloc => { + // If this is an allocation, we need to reserve an + // AllocId so we can decode cyclic graphs. 
+ let alloc_id = decoder.tcx().alloc_map.lock().reserve(); + *entry = State::InProgress( + TinyList::new_single(self.session_id), + alloc_id); + Some(alloc_id) + }, + AllocKind::Fn | AllocKind::Static => { + // Fns and statics cannot be cyclic and their AllocId + // is determined later by interning + *entry = State::InProgressNonAlloc( + TinyList::new_single(self.session_id)); + None + } + } + } + State::InProgressNonAlloc(ref mut sessions) => { + if sessions.contains(&self.session_id) { + bug!("This should be unreachable") + } else { + // Start decoding concurrently + sessions.insert(self.session_id); + None + } + } + State::InProgress(ref mut sessions, alloc_id) => { + if sessions.contains(&self.session_id) { + // Don't recurse. + return Ok(alloc_id) + } else { + // Start decoding concurrently + sessions.insert(self.session_id); + Some(alloc_id) + } + } + } + }; + + // Now decode the actual data + let alloc_id = decoder.with_position(pos, |decoder| { + match alloc_kind { + AllocKind::Alloc => { + let allocation = <&'tcx Allocation as Decodable>::decode(decoder)?; + // We already have a reserved AllocId. 
+ let alloc_id = alloc_id.unwrap(); + trace!("decoded alloc {:?} {:#?}", alloc_id, allocation); + decoder.tcx().alloc_map.lock().set_id_same_memory(alloc_id, allocation); + Ok(alloc_id) + }, + AllocKind::Fn => { + assert!(alloc_id.is_none()); + trace!("creating fn alloc id"); + let instance = ty::Instance::decode(decoder)?; + trace!("decoded fn alloc instance: {:?}", instance); + let alloc_id = decoder.tcx().alloc_map.lock().create_fn_alloc(instance); + Ok(alloc_id) + }, + AllocKind::Static => { + assert!(alloc_id.is_none()); + trace!("creating extern static alloc id at"); + let did = DefId::decode(decoder)?; + let alloc_id = decoder.tcx().alloc_map.lock().intern_static(did); + Ok(alloc_id) + } + } + })?; + + self.state.decoding_state[idx].with_lock(|entry| { + *entry = State::Done(alloc_id); + }); + + Ok(alloc_id) + } +} + impl fmt::Display for AllocId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } -#[derive(Debug, Eq, PartialEq, Hash)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, RustcDecodable, RustcEncodable)] +pub enum AllocType<'tcx, M> { + /// The alloc id is used as a function pointer + Function(Instance<'tcx>), + /// The alloc id points to a static variable + Static(DefId), + /// The alloc id points to memory + Memory(M) +} + +pub struct AllocMap<'tcx, M> { + /// Lets you know what an AllocId refers to + id_to_type: FxHashMap>, + + /// Used to ensure that functions and statics only get one associated AllocId + type_interner: FxHashMap, AllocId>, + + /// The AllocId to assign to the next requested id. + /// Always incremented, never gets smaller. + next_id: AllocId, +} + +impl<'tcx, M: fmt::Debug + Eq + Hash + Clone> AllocMap<'tcx, M> { + pub fn new() -> Self { + AllocMap { + id_to_type: FxHashMap(), + type_interner: FxHashMap(), + next_id: AllocId(0), + } + } + + /// obtains a new allocation ID that can be referenced but does not + /// yet have an allocation backing it. 
+ pub fn reserve( + &mut self, + ) -> AllocId { + let next = self.next_id; + self.next_id.0 = self.next_id.0 + .checked_add(1) + .expect("You overflowed a u64 by incrementing by 1... \ + You've just earned yourself a free drink if we ever meet. \ + Seriously, how did you do that?!"); + next + } + + fn intern(&mut self, alloc_type: AllocType<'tcx, M>) -> AllocId { + if let Some(&alloc_id) = self.type_interner.get(&alloc_type) { + return alloc_id; + } + let id = self.reserve(); + debug!("creating alloc_type {:?} with id {}", alloc_type, id); + self.id_to_type.insert(id, alloc_type.clone()); + self.type_interner.insert(alloc_type, id); + id + } + + // FIXME: Check if functions have identity. If not, we should not intern these, + // but instead create a new id per use. + // Alternatively we could just make comparing function pointers an error. + pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> AllocId { + self.intern(AllocType::Function(instance)) + } + + pub fn get(&self, id: AllocId) -> Option> { + self.id_to_type.get(&id).cloned() + } + + pub fn unwrap_memory(&self, id: AllocId) -> M { + match self.get(id) { + Some(AllocType::Memory(mem)) => mem, + _ => bug!("expected allocation id {} to point to memory", id), + } + } + + pub fn intern_static(&mut self, static_id: DefId) -> AllocId { + self.intern(AllocType::Static(static_id)) + } + + pub fn allocate(&mut self, mem: M) -> AllocId { + let id = self.reserve(); + self.set_id_memory(id, mem); + id + } + + pub fn set_id_memory(&mut self, id: AllocId, mem: M) { + if let Some(old) = self.id_to_type.insert(id, AllocType::Memory(mem)) { + bug!("tried to set allocation id {}, but it was already existing as {:#?}", id, old); + } + } + + pub fn set_id_same_memory(&mut self, id: AllocId, mem: M) { + self.id_to_type.insert_same(id, AllocType::Memory(mem)); + } +} + +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct Allocation { /// The actual bytes of the 
allocation. /// Note that the bytes of a pointer represent the offset of the pointer pub bytes: Vec, /// Maps from byte addresses to allocations. /// Only the first byte of a pointer is inserted into the map. - pub relocations: BTreeMap, + pub relocations: Relocations, /// Denotes undefined memory. Reading from undefined memory is forbidden in miri pub undef_mask: UndefMask, /// The alignment of the allocation to detect unaligned reads. pub align: Align, + /// Whether the allocation (of a static) should be put into mutable memory when codegenning + /// + /// Only happens for `static mut` or `static` with interior mutability + pub runtime_mutability: Mutability, } impl Allocation { - pub fn from_bytes(slice: &[u8]) -> Self { - let mut undef_mask = UndefMask::new(0); - undef_mask.grow(slice.len() as u64, true); + pub fn from_bytes(slice: &[u8], align: Align) -> Self { + let mut undef_mask = UndefMask::new(Size::ZERO); + undef_mask.grow(Size::from_bytes(slice.len() as u64), true); Self { bytes: slice.to_owned(), - relocations: BTreeMap::new(), + relocations: Relocations::new(), undef_mask, - align: Align::from_bytes(1, 1).unwrap(), + align, + runtime_mutability: Mutability::Immutable, } } + + pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self { + Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap()) + } + + pub fn undef(size: Size, align: Align) -> Self { + assert_eq!(size.bytes() as usize as u64, size.bytes()); + Allocation { + bytes: vec![0; size.bytes() as usize], + relocations: Relocations::new(), + undef_mask: UndefMask::new(size), + align, + runtime_mutability: Mutability::Immutable, + } + } +} + +impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct Relocations(SortedMap); + +impl Relocations { + pub fn new() -> Relocations { + Relocations(SortedMap::new()) + } + + // The caller must guarantee that the given relocations are 
already sorted + // by address and contain no duplicates. + pub fn from_presorted(r: Vec<(Size, AllocId)>) -> Relocations { + Relocations(SortedMap::from_presorted_elements(r)) + } +} + +impl Deref for Relocations { + type Target = SortedMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Relocations { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Methods to access integers in the target endianness +//////////////////////////////////////////////////////////////////////////////// + +pub fn write_target_uint( + endianness: layout::Endian, + mut target: &mut [u8], + data: u128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianness { + layout::Endian::Little => target.write_uint128::(data, len), + layout::Endian::Big => target.write_uint128::(data, len), + } +} + +pub fn write_target_int( + endianness: layout::Endian, + mut target: &mut [u8], + data: i128, +) -> Result<(), io::Error> { + let len = target.len(); + match endianness { + layout::Endian::Little => target.write_int128::(data, len), + layout::Endian::Big => target.write_int128::(data, len), + } +} + +pub fn read_target_uint(endianness: layout::Endian, mut source: &[u8]) -> Result { + match endianness { + layout::Endian::Little => source.read_uint128::(source.len()), + layout::Endian::Big => source.read_uint128::(source.len()), + } } //////////////////////////////////////////////////////////////////////////////// @@ -188,36 +593,38 @@ impl Allocation { type Block = u64; const BLOCK_SIZE: u64 = 64; -#[derive(Clone, Debug, Eq, PartialEq, Hash)] +#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct UndefMask { blocks: Vec, - len: u64, + len: Size, } +impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len}); + impl UndefMask { - pub fn new(size: u64) -> Self { + pub fn new(size: Size) -> 
Self { let mut m = UndefMask { blocks: vec![], - len: 0, + len: Size::ZERO, }; m.grow(size, false); m } /// Check whether the range `start..end` (end-exclusive) is entirely defined. - pub fn is_range_defined(&self, start: u64, end: u64) -> bool { + pub fn is_range_defined(&self, start: Size, end: Size) -> bool { if end > self.len { return false; } - for i in start..end { - if !self.get(i) { + for i in start.bytes()..end.bytes() { + if !self.get(Size::from_bytes(i)) { return false; } } true } - pub fn set_range(&mut self, start: u64, end: u64, new_state: bool) { + pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) { let len = self.len; if end > len { self.grow(end - len, new_state); @@ -225,18 +632,20 @@ impl UndefMask { self.set_range_inbounds(start, end, new_state); } - pub fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) { - for i in start..end { - self.set(i, new_state); + pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) { + for i in start.bytes()..end.bytes() { + self.set(Size::from_bytes(i), new_state); } } - pub fn get(&self, i: u64) -> bool { + #[inline] + pub fn get(&self, i: Size) -> bool { let (block, bit) = bit_index(i); (self.blocks[block] & 1 << bit) != 0 } - pub fn set(&mut self, i: u64, new_state: bool) { + #[inline] + pub fn set(&mut self, i: Size, new_state: bool) { let (block, bit) = bit_index(i); if new_state { self.blocks[block] |= 1 << bit; @@ -245,10 +654,10 @@ impl UndefMask { } } - pub fn grow(&mut self, amount: u64, new_state: bool) { - let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len; - if amount > unused_trailing_bits { - let additional_blocks = amount / BLOCK_SIZE + 1; + pub fn grow(&mut self, amount: Size, new_state: bool) { + let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes(); + if amount.bytes() > unused_trailing_bits { + let additional_blocks = amount.bytes() / BLOCK_SIZE + 1; assert_eq!(additional_blocks 
as usize as u64, additional_blocks); self.blocks.extend( iter::repeat(0).take(additional_blocks as usize), @@ -260,7 +669,9 @@ impl UndefMask { } } -fn bit_index(bits: u64) -> (usize, usize) { +#[inline] +fn bit_index(bits: Size) -> (usize, usize) { + let bits = bits.bytes(); let a = bits / BLOCK_SIZE; let b = bits % BLOCK_SIZE; assert_eq!(a as usize as u64, a); diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 0bfff2a80e67..f569f4def143 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -1,324 +1,282 @@ #![allow(unknown_lints)] -use ty::layout::{Align, HasDataLayout}; +use ty::layout::{Align, HasDataLayout, Size}; +use ty; +use ty::subst::Substs; +use hir::def_id::DefId; -use super::{EvalResult, MemoryPointer, PointerArithmetic}; -use syntax::ast::FloatTy; -use rustc_const_math::ConstFloat; +use super::{EvalResult, Pointer, PointerArithmetic, Allocation}; -pub fn bytes_to_f32(bits: u128) -> ConstFloat { - ConstFloat { - bits, - ty: FloatTy::F32, - } +/// Represents a constant value in Rust. Scalar and ScalarPair are optimizations which +/// matches Value's optimizations for easy conversions between these two types +#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash)] +pub enum ConstValue<'tcx> { + /// Never returned from the `const_eval` query, but the HIR contains these frequently in order + /// to allow HIR creation to happen for everything before needing to be able to run constant + /// evaluation + Unevaluated(DefId, &'tcx Substs<'tcx>), + /// Used only for types with layout::abi::Scalar ABI and ZSTs + Scalar(Scalar), + /// Used only for types with layout::abi::ScalarPair + /// + /// The second field may be undef in case of `Option::None` + ScalarPair(Scalar, ScalarMaybeUndef), + /// Used only for the remaining cases. 
An allocation + offset into the allocation + ByRef(&'tcx Allocation, Size), } -pub fn bytes_to_f64(bits: u128) -> ConstFloat { - ConstFloat { - bits, - ty: FloatTy::F64, +impl<'tcx> ConstValue<'tcx> { + #[inline] + pub fn from_byval_value(val: Value) -> EvalResult<'static, Self> { + Ok(match val { + Value::ByRef(..) => bug!(), + Value::ScalarPair(a, b) => ConstValue::ScalarPair(a.unwrap_or_err()?, b), + Value::Scalar(val) => ConstValue::Scalar(val.unwrap_or_err()?), + }) + } + + #[inline] + pub fn to_byval_value(&self) -> Option { + match *self { + ConstValue::Unevaluated(..) | + ConstValue::ByRef(..) => None, + ConstValue::ScalarPair(a, b) => Some(Value::ScalarPair(a.into(), b)), + ConstValue::Scalar(val) => Some(Value::Scalar(val.into())), + } + } + + #[inline] + pub fn try_to_scalar(&self) -> Option { + match *self { + ConstValue::Unevaluated(..) | + ConstValue::ByRef(..) | + ConstValue::ScalarPair(..) => None, + ConstValue::Scalar(val) => Some(val), + } + } + + #[inline] + pub fn to_bits(&self, size: Size) -> Option { + self.try_to_scalar()?.to_bits(size).ok() + } + + #[inline] + pub fn to_ptr(&self) -> Option { + self.try_to_scalar()?.to_ptr().ok() } } /// A `Value` represents a single self-contained Rust value. /// /// A `Value` can either refer to a block of memory inside an allocation (`ByRef`) or to a primitve -/// value held directly, outside of any allocation (`ByVal`). For `ByRef`-values, we remember +/// value held directly, outside of any allocation (`Scalar`). For `ByRef`-values, we remember /// whether the pointer is supposed to be aligned or not (also see Place). /// /// For optimization of a few very common cases, there is also a representation for a pair of -/// primitive values (`ByValPair`). It allows Miri to avoid making allocations for checked binary -/// operations and fat pointers. This idea was taken from rustc's trans. -#[derive(Clone, Copy, Debug)] +/// primitive values (`ScalarPair`). 
It allows Miri to avoid making allocations for checked binary +/// operations and fat pointers. This idea was taken from rustc's codegen. +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] pub enum Value { - ByRef(Pointer, Align), - ByVal(PrimVal), - ByValPair(PrimVal, PrimVal), + ByRef(Scalar, Align), + Scalar(ScalarMaybeUndef), + ScalarPair(ScalarMaybeUndef, ScalarMaybeUndef), } -/// A wrapper type around `PrimVal` that cannot be turned back into a `PrimVal` accidentally. -/// This type clears up a few APIs where having a `PrimVal` argument for something that is -/// potentially an integer pointer or a pointer to an allocation was unclear. -/// -/// I (@oli-obk) believe it is less easy to mix up generic primvals and primvals that are just -/// the representation of pointers. Also all the sites that convert between primvals and pointers -/// are explicit now (and rare!) -#[derive(Clone, Copy, Debug)] -pub struct Pointer { - primval: PrimVal, +impl<'tcx> ty::TypeFoldable<'tcx> for Value { + fn super_fold_with<'gcx: 'tcx, F: ty::fold::TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self { + *self + } + fn super_visit_with>(&self, _: &mut V) -> bool { + false + } } -impl<'tcx> Pointer { - pub fn null() -> Self { - PrimVal::Bytes(0).into() - } - pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { - self.primval.to_ptr() - } - pub fn into_inner_primval(self) -> PrimVal { - self.primval +impl<'tcx> Scalar { + pub fn ptr_null(cx: C) -> Self { + Scalar::Bits { + bits: 0, + size: cx.data_layout().pointer_size.bytes() as u8, + } } - pub fn signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn to_value_with_len(self, len: u64, cx: C) -> Value { + ScalarMaybeUndef::Scalar(self).to_value_with_len(len, cx) + } + + pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { + ScalarMaybeUndef::Scalar(self).to_value_with_vtable(vtable) + } + + pub fn ptr_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, 
Self> { let layout = cx.data_layout(); - match self.primval { - PrimVal::Bytes(b) => { - assert_eq!(b as u64 as u128, b); - Ok(Pointer::from( - PrimVal::Bytes(layout.signed_offset(b as u64, i)? as u128), - )) + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.pointer_size.bytes()); + Ok(Scalar::Bits { + bits: layout.signed_offset(bits as u64, i)? as u128, + size, + }) } - PrimVal::Ptr(ptr) => ptr.signed_offset(i, layout).map(Pointer::from), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Ptr(ptr) => ptr.signed_offset(i, layout).map(Scalar::Ptr), } } - pub fn offset(self, i: u64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { let layout = cx.data_layout(); - match self.primval { - PrimVal::Bytes(b) => { - assert_eq!(b as u64 as u128, b); - Ok(Pointer::from( - PrimVal::Bytes(layout.offset(b as u64, i)? as u128), - )) + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.pointer_size.bytes()); + Ok(Scalar::Bits { + bits: layout.offset(bits as u64, i.bytes())? 
as u128, + size, + }) } - PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Ptr(ptr) => ptr.offset(i, layout).map(Scalar::Ptr), } } - pub fn wrapping_signed_offset(self, i: i64, cx: C) -> EvalResult<'tcx, Self> { + pub fn ptr_wrapping_signed_offset(self, i: i64, cx: C) -> Self { let layout = cx.data_layout(); - match self.primval { - PrimVal::Bytes(b) => { - assert_eq!(b as u64 as u128, b); - Ok(Pointer::from(PrimVal::Bytes( - layout.wrapping_signed_offset(b as u64, i) as u128, - ))) + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.pointer_size.bytes()); + Scalar::Bits { + bits: layout.wrapping_signed_offset(bits as u64, i) as u128, + size, + } } - PrimVal::Ptr(ptr) => Ok(Pointer::from(ptr.wrapping_signed_offset(i, layout))), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, layout)), } } - pub fn is_null(self) -> EvalResult<'tcx, bool> { - match self.primval { - PrimVal::Bytes(b) => Ok(b == 0), - PrimVal::Ptr(_) => Ok(false), - PrimVal::Undef => err!(ReadUndefBytes), + pub fn is_null_ptr(self, cx: C) -> bool { + match self { + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, cx.data_layout().pointer_size.bytes()); + bits == 0 + }, + Scalar::Ptr(_) => false, } } - pub fn to_value_with_len(self, len: u64) -> Value { - Value::ByValPair(self.primval, PrimVal::from_u128(len as u128)) - } - - pub fn to_value_with_vtable(self, vtable: MemoryPointer) -> Value { - Value::ByValPair(self.primval, PrimVal::Ptr(vtable)) - } - pub fn to_value(self) -> Value { - Value::ByVal(self.primval) + Value::Scalar(ScalarMaybeUndef::Scalar(self)) } } -impl ::std::convert::From for Pointer { - fn from(primval: PrimVal) -> Self { - Pointer { primval } +impl From for Scalar { + fn from(ptr: Pointer) -> Self { + Scalar::Ptr(ptr) } } -impl ::std::convert::From for Pointer { - fn from(ptr: MemoryPointer) -> Self { - 
PrimVal::Ptr(ptr).into() - } -} - -/// A `PrimVal` represents an immediate, primitive value existing outside of a +/// A `Scalar` represents an immediate, primitive value existing outside of a /// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in -/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes -/// of a simple value, a pointer into another `Allocation`, or be undefined. -#[derive(Clone, Copy, Debug)] -pub enum PrimVal { +/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes +/// of a simple value or a pointer into another `Allocation` +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum Scalar { /// The raw bytes of a simple value. - Bytes(u128), + Bits { + /// The first `size` bytes are the value. + /// Do not try to read less or more bytes that that + size: u8, + bits: u128, + }, /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of - /// relocations, but a `PrimVal` is only large enough to contain one, so we just represent the - /// relocation and its associated offset together as a `MemoryPointer` here. - Ptr(MemoryPointer), + /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the + /// relocation and its associated offset together as a `Pointer` here. + Ptr(Pointer), +} - /// An undefined `PrimVal`, for representing values that aren't safe to examine, but are safe - /// to copy around, just like undefined bytes in an `Allocation`. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, RustcEncodable, RustcDecodable, Hash)] +pub enum ScalarMaybeUndef { + Scalar(Scalar), Undef, } -#[derive(Clone, Copy, Debug, PartialEq)] -pub enum PrimValKind { - I8, I16, I32, I64, I128, - U8, U16, U32, U64, U128, - F32, F64, - Ptr, FnPtr, - Bool, - Char, +impl From for ScalarMaybeUndef { + fn from(s: Scalar) -> Self { + ScalarMaybeUndef::Scalar(s) + } } -impl<'tcx> PrimVal { - pub fn from_u128(n: u128) -> Self { - PrimVal::Bytes(n) +impl ScalarMaybeUndef { + pub fn unwrap_or_err(self) -> EvalResult<'static, Scalar> { + match self { + ScalarMaybeUndef::Scalar(scalar) => Ok(scalar), + ScalarMaybeUndef::Undef => err!(ReadUndefBytes), + } } - pub fn from_i128(n: i128) -> Self { - PrimVal::Bytes(n as u128) + pub fn to_value_with_len(self, len: u64, cx: C) -> Value { + Value::ScalarPair(self, Scalar::Bits { + bits: len as u128, + size: cx.data_layout().pointer_size.bytes() as u8, + }.into()) } - pub fn from_float(f: ConstFloat) -> Self { - PrimVal::Bytes(f.bits) + pub fn to_value_with_vtable(self, vtable: Pointer) -> Value { + Value::ScalarPair(self, Scalar::Ptr(vtable).into()) } + pub fn ptr_offset(self, i: Size, cx: C) -> EvalResult<'tcx, Self> { + match self { + ScalarMaybeUndef::Scalar(scalar) => { + scalar.ptr_offset(i, cx).map(ScalarMaybeUndef::Scalar) + }, + ScalarMaybeUndef::Undef => Ok(ScalarMaybeUndef::Undef) + } + } +} + +impl<'tcx> Scalar { pub fn from_bool(b: bool) -> Self { - PrimVal::Bytes(b as u128) + Scalar::Bits { bits: b as u128, size: 1 } } pub fn from_char(c: char) -> Self { - PrimVal::Bytes(c as u128) + Scalar::Bits { bits: c as u128, size: 4 } } - pub fn to_bytes(self) -> EvalResult<'tcx, u128> { + pub fn to_bits(self, target_size: Size) -> EvalResult<'tcx, u128> { match self { - PrimVal::Bytes(b) => Ok(b), - PrimVal::Ptr(_) => err!(ReadPointerAsBytes), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Bits { bits, size } => { + assert_eq!(target_size.bytes(), size as u64); + 
assert_ne!(size, 0, "to_bits cannot be used with zsts"); + Ok(bits) + } + Scalar::Ptr(_) => err!(ReadPointerAsBytes), } } - pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> { + pub fn to_ptr(self) -> EvalResult<'tcx, Pointer> { match self { - PrimVal::Bytes(_) => err!(ReadBytesAsPointer), - PrimVal::Ptr(p) => Ok(p), - PrimVal::Undef => err!(ReadUndefBytes), + Scalar::Bits {..} => err!(ReadBytesAsPointer), + Scalar::Ptr(p) => Ok(p), } } - pub fn is_bytes(self) -> bool { + pub fn is_bits(self) -> bool { match self { - PrimVal::Bytes(_) => true, + Scalar::Bits { .. } => true, _ => false, } } pub fn is_ptr(self) -> bool { match self { - PrimVal::Ptr(_) => true, + Scalar::Ptr(_) => true, _ => false, } } - pub fn is_undef(self) -> bool { - match self { - PrimVal::Undef => true, - _ => false, - } - } - - pub fn to_u128(self) -> EvalResult<'tcx, u128> { - self.to_bytes() - } - - pub fn to_u64(self) -> EvalResult<'tcx, u64> { - self.to_bytes().map(|b| { - assert_eq!(b as u64 as u128, b); - b as u64 - }) - } - - pub fn to_i32(self) -> EvalResult<'tcx, i32> { - self.to_bytes().map(|b| { - assert_eq!(b as i32 as u128, b); - b as i32 - }) - } - - pub fn to_i128(self) -> EvalResult<'tcx, i128> { - self.to_bytes().map(|b| b as i128) - } - - pub fn to_i64(self) -> EvalResult<'tcx, i64> { - self.to_bytes().map(|b| { - assert_eq!(b as i64 as u128, b); - b as i64 - }) - } - - pub fn to_f32(self) -> EvalResult<'tcx, ConstFloat> { - self.to_bytes().map(bytes_to_f32) - } - - pub fn to_f64(self) -> EvalResult<'tcx, ConstFloat> { - self.to_bytes().map(bytes_to_f64) - } - pub fn to_bool(self) -> EvalResult<'tcx, bool> { - match self.to_bytes()? 
{ - 0 => Ok(false), - 1 => Ok(true), + match self { + Scalar::Bits { bits: 0, size: 1 } => Ok(false), + Scalar::Bits { bits: 1, size: 1 } => Ok(true), _ => err!(InvalidBool), } } } - -impl PrimValKind { - pub fn is_int(self) -> bool { - use self::PrimValKind::*; - match self { - I8 | I16 | I32 | I64 | I128 | U8 | U16 | U32 | U64 | U128 => true, - _ => false, - } - } - - pub fn is_signed_int(self) -> bool { - use self::PrimValKind::*; - match self { - I8 | I16 | I32 | I64 | I128 => true, - _ => false, - } - } - - pub fn is_float(self) -> bool { - use self::PrimValKind::*; - match self { - F32 | F64 => true, - _ => false, - } - } - - pub fn from_uint_size(size: u64) -> Self { - match size { - 1 => PrimValKind::U8, - 2 => PrimValKind::U16, - 4 => PrimValKind::U32, - 8 => PrimValKind::U64, - 16 => PrimValKind::U128, - _ => bug!("can't make uint with size {}", size), - } - } - - pub fn from_int_size(size: u64) -> Self { - match size { - 1 => PrimValKind::I8, - 2 => PrimValKind::I16, - 4 => PrimValKind::I32, - 8 => PrimValKind::I64, - 16 => PrimValKind::I128, - _ => bug!("can't make int with size {}", size), - } - } - - pub fn is_ptr(self) -> bool { - use self::PrimValKind::*; - match self { - Ptr | FnPtr => true, - _ => false, - } - } -} diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs index 3aa94b346994..8ceff303774b 100644 --- a/src/librustc/mir/mod.rs +++ b/src/librustc/mir/mod.rs @@ -8,46 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! MIR datatypes and passes. See the module-level [README] for details. +//! MIR datatypes and passes. See the [rustc guide] for more info. //! -//! [README]: https://github.com/rust-lang/rust/blob/master/src/librustc/mir/README.md +//! 
[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/index.html use graphviz::IntoCow; -use middle::const_val::ConstVal; -use middle::region; -use rustc_const_math::{ConstUsize, ConstInt, ConstMathErr}; -use rustc_data_structures::indexed_vec::{IndexVec, Idx}; -use rustc_data_structures::control_flow_graph::dominators::{Dominators, dominators}; -use rustc_data_structures::control_flow_graph::{GraphPredecessors, GraphSuccessors}; -use rustc_data_structures::control_flow_graph::ControlFlowGraph; -use rustc_serialize as serialize; use hir::def::CtorKind; use hir::def_id::DefId; +use hir::{self, HirId, InlineAsm}; +use middle::region; +use mir::interpret::{EvalErrorKind, Scalar, Value, ScalarMaybeUndef}; use mir::visit::MirVisitable; -use ty::subst::{Subst, Substs}; -use ty::{self, AdtDef, ClosureSubsts, Region, Ty, TyCtxt, GeneratorInterior}; -use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; -use util::ppaux; -use std::slice; -use hir::{self, InlineAsm}; -use std::ascii; -use std::borrow::{Cow}; -use std::cell::Ref; +use rustc_apfloat::ieee::{Double, Single}; +use rustc_apfloat::Float; +use rustc_data_structures::graph::dominators::{dominators, Dominators}; +use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc_data_structures::small_vec::SmallVec; +use rustc_data_structures::sync::Lrc; +use rustc_data_structures::sync::ReadGuard; +use rustc_serialize as serialize; +use std::borrow::Cow; use std::fmt::{self, Debug, Formatter, Write}; -use std::{iter, u32}; use std::ops::{Index, IndexMut}; -use std::rc::Rc; +use std::slice; use std::vec::IntoIter; +use std::{iter, mem, option, u32}; use syntax::ast::{self, Name}; use syntax::symbol::InternedString; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use ty::subst::{Subst, Substs}; +use ty::{self, AdtDef, CanonicalTy, ClosureSubsts, 
GeneratorSubsts, Region, Ty, TyCtxt}; +use util::ppaux; + +pub use mir::interpret::AssertMessage; mod cache; -pub mod tcx; -pub mod visit; -pub mod traversal; pub mod interpret; pub mod mono; +pub mod tcx; +pub mod traversal; +pub mod visit; /// Types for locals type LocalDecls<'tcx> = IndexVec>; @@ -75,13 +77,13 @@ pub struct Mir<'tcx> { /// that indexes into this vector. basic_blocks: IndexVec>, - /// List of visibility (lexical) scopes; these are referenced by statements - /// and used (eventually) for debuginfo. Indexed by a `VisibilityScope`. - pub visibility_scopes: IndexVec, + /// List of source scopes; these are referenced by statements + /// and used for debuginfo. Indexed by a `SourceScope`. + pub source_scopes: IndexVec, - /// Crate-local information for each visibility scope, that can't (and + /// Crate-local information for each source scope, that can't (and /// needn't) be tracked across crates. - pub visibility_scope_info: ClearCrossCrate>, + pub source_scope_local_data: ClearCrossCrate>, /// Rvalues promoted from this function, such as borrows of constants. 
/// Each of them is the Mir of a constant with the fn's type parameters @@ -126,32 +128,36 @@ pub struct Mir<'tcx> { pub span: Span, /// A cache for various calculations - cache: cache::Cache + cache: cache::Cache, } /// where execution begins pub const START_BLOCK: BasicBlock = BasicBlock(0); impl<'tcx> Mir<'tcx> { - pub fn new(basic_blocks: IndexVec>, - visibility_scopes: IndexVec, - visibility_scope_info: ClearCrossCrate>, - promoted: IndexVec>, - yield_ty: Option>, - local_decls: IndexVec>, - arg_count: usize, - upvar_decls: Vec, - span: Span) -> Self - { + pub fn new( + basic_blocks: IndexVec>, + source_scopes: IndexVec, + source_scope_local_data: ClearCrossCrate>, + promoted: IndexVec>, + yield_ty: Option>, + local_decls: IndexVec>, + arg_count: usize, + upvar_decls: Vec, + span: Span, + ) -> Self { // We need `arg_count` locals, and one for the return place - assert!(local_decls.len() >= arg_count + 1, - "expected at least {} locals, got {}", arg_count + 1, local_decls.len()); + assert!( + local_decls.len() >= arg_count + 1, + "expected at least {} locals, got {}", + arg_count + 1, + local_decls.len() + ); Mir { basic_blocks, - visibility_scopes, - visibility_scope_info, + source_scopes, + source_scope_local_data, promoted, yield_ty, generator_drop: None, @@ -161,7 +167,7 @@ impl<'tcx> Mir<'tcx> { upvar_decls, spread_arg: None, span, - cache: cache::Cache::new() + cache: cache::Cache::new(), } } @@ -177,7 +183,9 @@ impl<'tcx> Mir<'tcx> { } #[inline] - pub fn basic_blocks_and_local_decls_mut(&mut self) -> ( + pub fn basic_blocks_and_local_decls_mut( + &mut self, + ) -> ( &mut IndexVec>, &mut LocalDecls<'tcx>, ) { @@ -186,13 +194,13 @@ impl<'tcx> Mir<'tcx> { } #[inline] - pub fn predecessors(&self) -> Ref>> { + pub fn predecessors(&self) -> ReadGuard>> { self.cache.predecessors(self) } #[inline] - pub fn predecessors_for(&self, bb: BasicBlock) -> Ref> { - Ref::map(self.predecessors(), |p| &p[bb]) + pub fn predecessors_for(&self, bb: BasicBlock) -> ReadGuard> { 
+ ReadGuard::map(self.predecessors(), |p| &p[bb]) } #[inline] @@ -204,8 +212,10 @@ impl<'tcx> Mir<'tcx> { pub fn local_kind(&self, local: Local) -> LocalKind { let index = local.0 as usize; if index == 0 { - debug_assert!(self.local_decls[local].mutability == Mutability::Mut, - "return place should be mutable"); + debug_assert!( + self.local_decls[local].mutability == Mutability::Mut, + "return place should be mutable" + ); LocalKind::ReturnPointer } else if index < self.arg_count + 1 { @@ -213,8 +223,10 @@ impl<'tcx> Mir<'tcx> { } else if self.local_decls[local].name.is_some() { LocalKind::Var } else { - debug_assert!(self.local_decls[local].mutability == Mutability::Mut, - "temp should be mutable"); + debug_assert!( + self.local_decls[local].mutability == Mutability::Mut, + "temp should be mutable" + ); LocalKind::Temp } @@ -222,10 +234,10 @@ impl<'tcx> Mir<'tcx> { /// Returns an iterator over all temporaries. #[inline] - pub fn temps_iter<'a>(&'a self) -> impl Iterator + 'a { - (self.arg_count+1..self.local_decls.len()).filter_map(move |index| { + pub fn temps_iter<'a>(&'a self) -> impl Iterator + 'a { + (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| { let local = Local::new(index); - if self.local_decls[local].is_user_variable { + if self.local_decls[local].is_user_variable.is_some() { None } else { Some(local) @@ -235,10 +247,26 @@ impl<'tcx> Mir<'tcx> { /// Returns an iterator over all user-declared locals. #[inline] - pub fn vars_iter<'a>(&'a self) -> impl Iterator + 'a { - (self.arg_count+1..self.local_decls.len()).filter_map(move |index| { + pub fn vars_iter<'a>(&'a self) -> impl Iterator + 'a { + (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| { let local = Local::new(index); - if self.local_decls[local].is_user_variable { + if self.local_decls[local].is_user_variable.is_some() { + Some(local) + } else { + None + } + }) + } + + /// Returns an iterator over all user-declared mutable arguments and locals. 
+ #[inline] + pub fn mut_vars_and_args_iter<'a>(&'a self) -> impl Iterator + 'a { + (1..self.local_decls.len()).filter_map(move |index| { + let local = Local::new(index); + let decl = &self.local_decls[local]; + if (decl.is_user_variable.is_some() || index < self.arg_count + 1) + && decl.mutability == Mutability::Mut + { Some(local) } else { None @@ -248,18 +276,18 @@ impl<'tcx> Mir<'tcx> { /// Returns an iterator over all function arguments. #[inline] - pub fn args_iter(&self) -> impl Iterator { + pub fn args_iter(&self) -> impl Iterator { let arg_count = self.arg_count; - (1..arg_count+1).map(Local::new) + (1..arg_count + 1).map(Local::new) } /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all /// locals that are neither arguments nor the return place). #[inline] - pub fn vars_and_temps_iter(&self) -> impl Iterator { + pub fn vars_and_temps_iter(&self) -> impl Iterator { let arg_count = self.arg_count; let local_count = self.local_decls.len(); - (arg_count+1..local_count).map(Local::new) + (arg_count + 1..local_count).map(Local::new) } /// Changes a statement to a nop. This is both faster than deleting instructions and avoids @@ -283,20 +311,25 @@ impl<'tcx> Mir<'tcx> { } } + /// Check if `sub` is a sub scope of `sup` + pub fn is_sub_scope(&self, mut sub: SourceScope, sup: SourceScope) -> bool { + loop { + if sub == sup { + return true; + } + match self.source_scopes[sub].parent_scope { + None => return false, + Some(p) => sub = p, + } + } + } + /// Return the return type, it always return first element from `local_decls` array pub fn return_ty(&self) -> Ty<'tcx> { self.local_decls[RETURN_PLACE].ty } } -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] -pub struct VisibilityScopeInfo { - /// A NodeId with lint levels equivalent to this scope's lint levels. - pub lint_root: ast::NodeId, - /// The unsafe block that contains this node. 
- pub safety: Safety, -} - #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] pub enum Safety { Safe, @@ -305,13 +338,13 @@ pub enum Safety { /// Unsafe because of an unsafe fn FnUnsafe, /// Unsafe because of an `unsafe` block - ExplicitUnsafe(ast::NodeId) + ExplicitUnsafe(ast::NodeId), } impl_stable_hash_for!(struct Mir<'tcx> { basic_blocks, - visibility_scopes, - visibility_scope_info, + source_scopes, + source_scope_local_data, promoted, yield_ty, generator_drop, @@ -340,10 +373,19 @@ impl<'tcx> IndexMut for Mir<'tcx> { } } -#[derive(Clone, Debug)] +#[derive(Copy, Clone, Debug)] pub enum ClearCrossCrate { Clear, - Set(T) + Set(T), +} + +impl ClearCrossCrate { + pub fn assert_crate_local(self) -> T { + match self { + ClearCrossCrate::Clear => bug!("unwrapping cross-crate data"), + ClearCrossCrate::Set(v) => v, + } + } } impl serialize::UseSpecializedEncodable for ClearCrossCrate {} @@ -357,8 +399,9 @@ pub struct SourceInfo { /// Source span for the AST pertaining to this MIR entity. pub span: Span, - /// The lexical visibility scope, i.e. which bindings can be seen. - pub scope: VisibilityScope + /// The source scope, keeping track of which bindings can be + /// seen by debuginfo, active lint levels, `unsafe {...}`, etc. + pub scope: SourceScope, } /////////////////////////////////////////////////////////////////////////// @@ -370,6 +413,15 @@ pub enum Mutability { Not, } +impl From for hir::Mutability { + fn from(m: Mutability) -> Self { + match m { + Mutability::Mut => hir::MutMutable, + Mutability::Not => hir::MutImmutable, + } + } +} + #[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub enum BorrowKind { /// Data must be immutable and is aliasable. @@ -377,8 +429,8 @@ pub enum BorrowKind { /// Data must be immutable but not aliasable. This kind of borrow /// cannot currently be expressed by the user and is used only in - /// implicit closure bindings. 
It is needed when you the closure - /// is borrowing or mutating a mutable referent, e.g.: + /// implicit closure bindings. It is needed when the closure is + /// borrowing or mutating a mutable referent, e.g.: /// /// let x: &mut isize = ...; /// let y = || *x += 5; @@ -391,7 +443,7 @@ pub enum BorrowKind { /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn /// fn fn_ptr(env: &mut Env) { **env.x += 5; } /// - /// This is then illegal because you cannot mutate a `&mut` found + /// This is then illegal because you cannot mutate an `&mut` found /// in an aliasable location. To solve, you'd have to translate with /// an `&mut` borrow: /// @@ -413,7 +465,22 @@ pub enum BorrowKind { Unique, /// Data is mutable and not aliasable. - Mut, + Mut { + /// True if this borrow arose from method-call auto-ref + /// (i.e. `adjustment::Adjust::Borrow`) + allow_two_phase_borrow: bool, + }, +} + +impl BorrowKind { + pub fn allows_two_phase_borrow(&self) -> bool { + match *self { + BorrowKind::Shared | BorrowKind::Unique => false, + BorrowKind::Mut { + allow_two_phase_borrow, + } => allow_two_phase_borrow, + } + } } /////////////////////////////////////////////////////////////////////////// @@ -438,6 +505,67 @@ pub enum LocalKind { ReturnPointer, } +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct VarBindingForm<'tcx> { + /// Is variable bound via `x`, `mut x`, `ref x`, or `ref mut x`? + pub binding_mode: ty::BindingMode, + /// If an explicit type was provided for this variable binding, + /// this holds the source Span of that type. + /// + /// NOTE: If you want to change this to a `HirId`, be wary that + /// doing so breaks incremental compilation (as of this writing), + /// while a `Span` does not cause our tests to fail. + pub opt_ty_info: Option, + /// Place of the RHS of the =, or the subject of the `match` where this + /// variable is initialized. None in the case of `let PATTERN;`. 
+ /// Some((None, ..)) in the case of and `let [mut] x = ...` because + /// (a) the right-hand side isn't evaluated as a place expression. + /// (b) it gives a way to separate this case from the remaining cases + /// for diagnostics. + pub opt_match_place: Option<(Option>, Span)>, + /// Span of the pattern in which this variable was bound. + pub pat_span: Span, +} + +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub enum BindingForm<'tcx> { + /// This is a binding for a non-`self` binding, or a `self` that has an explicit type. + Var(VarBindingForm<'tcx>), + /// Binding for a `self`/`&self`/`&mut self` binding where the type is implicit. + ImplicitSelf, + /// Reference used in a guard expression to ensure immutability. + RefForGuard, +} + +CloneTypeFoldableAndLiftImpls! { BindingForm<'tcx>, } + +impl_stable_hash_for!(struct self::VarBindingForm<'tcx> { + binding_mode, + opt_ty_info, + opt_match_place, + pat_span +}); + +mod binding_form_impl { + use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; + use ich::StableHashingContext; + + impl<'a, 'tcx> HashStable> for super::BindingForm<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use super::BindingForm::*; + ::std::mem::discriminant(self).hash_stable(hcx, hasher); + + match self { + Var(binding) => binding.hash_stable(hcx, hasher), + ImplicitSelf => (), + RefForGuard => (), + } + } + } +} + /// A MIR local. /// /// This can be a binding declared by the user, a temporary inserted by the compiler, a function @@ -449,8 +577,14 @@ pub struct LocalDecl<'tcx> { /// Temporaries and the return place are always mutable. pub mutability: Mutability, - /// True if this corresponds to a user-declared local variable. - pub is_user_variable: bool, + /// Some(binding_mode) if this corresponds to a user-declared local variable. 
+ /// + /// This is solely used for local diagnostics when generating + /// warnings/errors when compiling the current crate, and + /// therefore it need not be visible across crates. pnkfelix + /// currently hypothesized we *need* to wrap this in a + /// `ClearCrossCrate` as long as it carries as `HirId`. + pub is_user_variable: Option>>, /// True if this is an internal local /// @@ -480,16 +614,13 @@ pub struct LocalDecl<'tcx> { /// to generate better debuginfo. pub name: Option, - /// Source info of the local. - pub source_info: SourceInfo, - - /// The *syntactic* visibility scope the local is defined + /// The *syntactic* (i.e. not visibility) source scope the local is defined /// in. If the local was defined in a let-statement, this /// is *within* the let-statement, rather than outside /// of it. /// - /// This is needed because visibility scope of locals within a let-statement - /// is weird. + /// This is needed because the visibility source scope of locals within + /// a let-statement is weird. /// /// The reason is that we want the local to be *within* the let-statement /// for lint purposes, but we want the local to be *after* the let-statement @@ -534,9 +665,9 @@ pub struct LocalDecl<'tcx> { /// `drop(x)`, we want it to refer to `x: u32`. /// /// To allow both uses to work, we need to have more than a single scope - /// for a local. We have the `syntactic_scope` represent the + /// for a local. We have the `source_info.scope` represent the /// "syntactic" lint scope (with a variable being under its let - /// block) while the source-info scope represents the "local variable" + /// block) while the `visibility_scope` represents the "local variable" /// scope (where the "rest" of a block is under all prior let-statements). 
/// /// The end result looks like this: @@ -545,27 +676,72 @@ pub struct LocalDecl<'tcx> { /// ROOT SCOPE /// │{ argument x: &str } /// │ - /// │ │{ #[allow(unused_mut] } // this is actually split into 2 scopes + /// │ │{ #[allow(unused_mut)] } // this is actually split into 2 scopes /// │ │ // in practice because I'm lazy. /// │ │ - /// │ │← x.syntactic_scope + /// │ │← x.source_info.scope /// │ │← `x.parse().unwrap()` /// │ │ - /// │ │ │← y.syntactic_scope + /// │ │ │← y.source_info.scope /// │ │ /// │ │ │{ let y: u32 } /// │ │ │ - /// │ │ │← y.source_info.scope + /// │ │ │← y.visibility_scope /// │ │ │← `y + 2` /// │ /// │ │{ let x: u32 } - /// │ │← x.source_info.scope + /// │ │← x.visibility_scope /// │ │← `drop(x)` // this accesses `x: u32` /// ``` - pub syntactic_scope: VisibilityScope, + pub source_info: SourceInfo, + + /// Source scope within which the local is visible (for debuginfo) + /// (see `source_info` for more details). + pub visibility_scope: SourceScope, } impl<'tcx> LocalDecl<'tcx> { + /// Returns true only if local is a binding that can itself be + /// made mutable via the addition of the `mut` keyword, namely + /// something like the occurrences of `x` in: + /// - `fn foo(x: Type) { ... }`, + /// - `let x = ...`, + /// - or `match ... { C(x) => ... }` + pub fn can_be_made_mutable(&self) -> bool { + match self.is_user_variable { + Some(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm { + binding_mode: ty::BindingMode::BindByValue(_), + opt_ty_info: _, + opt_match_place: _, + pat_span: _, + }))) => true, + + // FIXME: might be able to thread the distinction between + // `self`/`mut self`/`&self`/`&mut self` into the + // `BindingForm::ImplicitSelf` variant, (and then return + // true here for solely the first case). + _ => false, + } + } + + /// Returns true if local is definitely not a `ref ident` or + /// `ref mut ident` binding. (Such bindings cannot be made into + /// mutable bindings, but the inverse does not necessarily hold). 
+ pub fn is_nonref_binding(&self) -> bool { + match self.is_user_variable { + Some(ClearCrossCrate::Set(BindingForm::Var(VarBindingForm { + binding_mode: ty::BindingMode::BindByValue(_), + opt_ty_info: _, + opt_match_place: _, + pat_span: _, + }))) => true, + + Some(ClearCrossCrate::Set(BindingForm::ImplicitSelf)) => true, + + _ => false, + } + } + /// Create a new `LocalDecl` for a temporary. #[inline] pub fn new_temp(ty: Ty<'tcx>, span: Span) -> Self { @@ -575,11 +751,11 @@ impl<'tcx> LocalDecl<'tcx> { name: None, source_info: SourceInfo { span, - scope: ARGUMENT_VISIBILITY_SCOPE + scope: OUTERMOST_SOURCE_SCOPE, }, - syntactic_scope: ARGUMENT_VISIBILITY_SCOPE, + visibility_scope: OUTERMOST_SOURCE_SCOPE, internal: false, - is_user_variable: false + is_user_variable: None, } } @@ -592,11 +768,11 @@ impl<'tcx> LocalDecl<'tcx> { name: None, source_info: SourceInfo { span, - scope: ARGUMENT_VISIBILITY_SCOPE + scope: OUTERMOST_SOURCE_SCOPE, }, - syntactic_scope: ARGUMENT_VISIBILITY_SCOPE, + visibility_scope: OUTERMOST_SOURCE_SCOPE, internal: true, - is_user_variable: false + is_user_variable: None, } } @@ -610,12 +786,12 @@ impl<'tcx> LocalDecl<'tcx> { ty: return_ty, source_info: SourceInfo { span, - scope: ARGUMENT_VISIBILITY_SCOPE + scope: OUTERMOST_SOURCE_SCOPE, }, - syntactic_scope: ARGUMENT_VISIBILITY_SCOPE, + visibility_scope: OUTERMOST_SOURCE_SCOPE, internal: false, - name: None, // FIXME maybe we do want some name here? - is_user_variable: false + name: None, // FIXME maybe we do want some name here? + is_user_variable: None, } } } @@ -625,6 +801,9 @@ impl<'tcx> LocalDecl<'tcx> { pub struct UpvarDecl { pub debug_name: Name, + /// `HirId` of the captured variable + pub var_hir_id: ClearCrossCrate, + /// If true, the capture is behind a reference. pub by_ref: bool, @@ -664,7 +843,7 @@ pub struct BasicBlockData<'tcx> { pub terminator: Option>, /// If true, this block lies on an unwind path. 
This is used - /// during trans where distinct kinds of basic blocks may be + /// during codegen where distinct kinds of basic blocks may be /// generated (particularly for MSVC cleanup). Unwind blocks must /// only branch to other unwind blocks. pub is_cleanup: bool, @@ -673,15 +852,13 @@ pub struct BasicBlockData<'tcx> { #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct Terminator<'tcx> { pub source_info: SourceInfo, - pub kind: TerminatorKind<'tcx> + pub kind: TerminatorKind<'tcx>, } #[derive(Clone, RustcEncodable, RustcDecodable)] pub enum TerminatorKind<'tcx> { /// block should have one successor in the graph; we jump there - Goto { - target: BasicBlock, - }, + Goto { target: BasicBlock }, /// operand evaluates to an integer; jump depending on its value /// to one of the targets, and otherwise fallback to `otherwise` @@ -694,7 +871,7 @@ pub enum TerminatorKind<'tcx> { /// Possible values. The locations to branch to in each case /// are found in the corresponding indices from the `targets` vector. - values: Cow<'tcx, [ConstInt]>, + values: Cow<'tcx, [u128]>, /// Possible branch sites. The last element of this vector is used /// for the otherwise branch, so targets.len() == values.len() + 1 @@ -729,17 +906,17 @@ pub enum TerminatorKind<'tcx> { Drop { location: Place<'tcx>, target: BasicBlock, - unwind: Option + unwind: Option, }, /// Drop the Place and assign the new value over it. This ensures - /// that the assignment to LV occurs *even if* the destructor for - /// place unwinds. Its semantics are best explained by by the + /// that the assignment to `P` occurs *even if* the destructor for + /// place unwinds. 
Its semantics are best explained by the /// elaboration: /// /// ``` /// BB0 { - /// DropAndReplace(LV <- RV, goto BB1, unwind BB2) + /// DropAndReplace(P <- V, goto BB1, unwind BB2) /// } /// ``` /// @@ -747,15 +924,15 @@ pub enum TerminatorKind<'tcx> { /// /// ``` /// BB0 { - /// Drop(LV, goto BB1, unwind BB2) + /// Drop(P, goto BB1, unwind BB2) /// } /// BB1 { - /// // LV is now unitialized - /// LV <- RV + /// // P is now unitialized + /// P <- V /// } /// BB2 { - /// // LV is now unitialized -- its dtor panicked - /// LV <- RV + /// // P is now unitialized -- its dtor panicked + /// P <- V /// } /// ``` DropAndReplace { @@ -777,7 +954,7 @@ pub enum TerminatorKind<'tcx> { /// Destination for the return value. If some, the call is converging. destination: Option<(Place<'tcx>, BasicBlock)>, /// Cleanups to be done if the call unwinds. - cleanup: Option + cleanup: Option, }, /// Jump to the target if the condition has the expected value, @@ -787,7 +964,7 @@ pub enum TerminatorKind<'tcx> { expected: bool, msg: AssertMessage<'tcx>, target: BasicBlock, - cleanup: Option + cleanup: Option, }, /// A suspend point @@ -803,30 +980,62 @@ pub enum TerminatorKind<'tcx> { /// Indicates the end of the dropping of a generator GeneratorDrop, + /// A block where control flow only ever takes one real path, but borrowck + /// needs to be more conservative. FalseEdges { + /// The target normal control flow will take real_target: BasicBlock, - imaginary_targets: Vec + /// The list of blocks control flow could conceptually take, but won't + /// in practice + imaginary_targets: Vec, + }, + /// A terminator for blocks that only take one path in reality, but where we + /// reserve the right to unwind in borrowck, even if it won't happen in practice. + /// This can arise in infinite loops with no function calls for example. + FalseUnwind { + /// The target normal control flow will take + real_target: BasicBlock, + /// The imaginary cleanup block link. 
This particular path will never be taken + /// in practice, but in order to avoid fragility we want to always + /// consider it in borrowck. We don't want to accept programs which + /// pass borrowck only when panic=abort or some assertions are disabled + /// due to release vs. debug mode builds. This needs to be an Option because + /// of the remove_noop_landing_pads and no_landing_pads passes + unwind: Option, }, } +pub type Successors<'a> = + iter::Chain, slice::Iter<'a, BasicBlock>>; +pub type SuccessorsMut<'a> = + iter::Chain, slice::IterMut<'a, BasicBlock>>; + impl<'tcx> Terminator<'tcx> { - pub fn successors(&self) -> Cow<[BasicBlock]> { + pub fn successors(&self) -> Successors { self.kind.successors() } - pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { + pub fn successors_mut(&mut self) -> SuccessorsMut { self.kind.successors_mut() } + pub fn unwind(&self) -> Option<&Option> { + self.kind.unwind() + } + pub fn unwind_mut(&mut self) -> Option<&mut Option> { self.kind.unwind_mut() } } impl<'tcx> TerminatorKind<'tcx> { - pub fn if_<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, cond: Operand<'tcx>, - t: BasicBlock, f: BasicBlock) -> TerminatorKind<'tcx> { - static BOOL_SWITCH_FALSE: &'static [ConstInt] = &[ConstInt::U8(0)]; + pub fn if_<'a, 'gcx>( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + cond: Operand<'tcx>, + t: BasicBlock, + f: BasicBlock, + ) -> TerminatorKind<'tcx> { + static BOOL_SWITCH_FALSE: &'static [u128] = &[0]; TerminatorKind::SwitchInt { discr: cond, switch_ty: tcx.types.bool, @@ -835,90 +1044,227 @@ impl<'tcx> TerminatorKind<'tcx> { } } - pub fn successors(&self) -> Cow<[BasicBlock]> { + pub fn successors(&self) -> Successors { use self::TerminatorKind::*; match *self { - Goto { target: ref b } => slice::from_ref(b).into_cow(), - SwitchInt { targets: ref b, .. 
} => b[..].into_cow(), - Resume | Abort | GeneratorDrop => (&[]).into_cow(), - Return => (&[]).into_cow(), - Unreachable => (&[]).into_cow(), - Call { destination: Some((_, t)), cleanup: Some(c), .. } => vec![t, c].into_cow(), - Call { destination: Some((_, ref t)), cleanup: None, .. } => - slice::from_ref(t).into_cow(), - Call { destination: None, cleanup: Some(ref c), .. } => slice::from_ref(c).into_cow(), - Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), - Yield { resume: t, drop: Some(c), .. } => vec![t, c].into_cow(), - Yield { resume: ref t, drop: None, .. } => slice::from_ref(t).into_cow(), - DropAndReplace { target, unwind: Some(unwind), .. } | - Drop { target, unwind: Some(unwind), .. } => { - vec![target, unwind].into_cow() + Resume + | Abort + | GeneratorDrop + | Return + | Unreachable + | Call { + destination: None, + cleanup: None, + .. + } => None.into_iter().chain(&[]), + Goto { target: ref t } + | Call { + destination: None, + cleanup: Some(ref t), + .. } - DropAndReplace { ref target, unwind: None, .. } | - Drop { ref target, unwind: None, .. } => { - slice::from_ref(target).into_cow() + | Call { + destination: Some((_, ref t)), + cleanup: None, + .. } - Assert { target, cleanup: Some(unwind), .. } => vec![target, unwind].into_cow(), - Assert { ref target, .. } => slice::from_ref(target).into_cow(), - FalseEdges { ref real_target, ref imaginary_targets } => { - let mut s = vec![*real_target]; - s.extend_from_slice(imaginary_targets); - s.into_cow() + | Yield { + resume: ref t, + drop: None, + .. } + | DropAndReplace { + target: ref t, + unwind: None, + .. + } + | Drop { + target: ref t, + unwind: None, + .. + } + | Assert { + target: ref t, + cleanup: None, + .. + } + | FalseUnwind { + real_target: ref t, + unwind: None, + } => Some(t).into_iter().chain(&[]), + Call { + destination: Some((_, ref t)), + cleanup: Some(ref u), + .. + } + | Yield { + resume: ref t, + drop: Some(ref u), + .. 
+ } + | DropAndReplace { + target: ref t, + unwind: Some(ref u), + .. + } + | Drop { + target: ref t, + unwind: Some(ref u), + .. + } + | Assert { + target: ref t, + cleanup: Some(ref u), + .. + } + | FalseUnwind { + real_target: ref t, + unwind: Some(ref u), + } => Some(t).into_iter().chain(slice::from_ref(u)), + SwitchInt { ref targets, .. } => None.into_iter().chain(&targets[..]), + FalseEdges { + ref real_target, + ref imaginary_targets, + } => Some(real_target).into_iter().chain(&imaginary_targets[..]), } } - // FIXME: no mootable cow. I’m honestly not sure what a “cow” between `&mut [BasicBlock]` and - // `Vec<&mut BasicBlock>` would look like in the first place. - pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { + pub fn successors_mut(&mut self) -> SuccessorsMut { use self::TerminatorKind::*; match *self { - Goto { target: ref mut b } => vec![b], - SwitchInt { targets: ref mut b, .. } => b.iter_mut().collect(), - Resume | Abort | GeneratorDrop => Vec::new(), - Return => Vec::new(), - Unreachable => Vec::new(), - Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut c), .. } => vec![t, c], - Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t], - Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], - Call { destination: None, cleanup: None, .. } => vec![], - Yield { resume: ref mut t, drop: Some(ref mut c), .. } => vec![t, c], - Yield { resume: ref mut t, drop: None, .. } => vec![t], - DropAndReplace { ref mut target, unwind: Some(ref mut unwind), .. } | - Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind], - DropAndReplace { ref mut target, unwind: None, .. } | - Drop { ref mut target, unwind: None, .. } => { - vec![target] + Resume + | Abort + | GeneratorDrop + | Return + | Unreachable + | Call { + destination: None, + cleanup: None, + .. + } => None.into_iter().chain(&mut []), + Goto { target: ref mut t } + | Call { + destination: None, + cleanup: Some(ref mut t), + .. 
} - Assert { ref mut target, cleanup: Some(ref mut unwind), .. } => vec![target, unwind], - Assert { ref mut target, .. } => vec![target], - FalseEdges { ref mut real_target, ref mut imaginary_targets } => { - let mut s = vec![real_target]; - s.extend(imaginary_targets.iter_mut()); - s + | Call { + destination: Some((_, ref mut t)), + cleanup: None, + .. } + | Yield { + resume: ref mut t, + drop: None, + .. + } + | DropAndReplace { + target: ref mut t, + unwind: None, + .. + } + | Drop { + target: ref mut t, + unwind: None, + .. + } + | Assert { + target: ref mut t, + cleanup: None, + .. + } + | FalseUnwind { + real_target: ref mut t, + unwind: None, + } => Some(t).into_iter().chain(&mut []), + Call { + destination: Some((_, ref mut t)), + cleanup: Some(ref mut u), + .. + } + | Yield { + resume: ref mut t, + drop: Some(ref mut u), + .. + } + | DropAndReplace { + target: ref mut t, + unwind: Some(ref mut u), + .. + } + | Drop { + target: ref mut t, + unwind: Some(ref mut u), + .. + } + | Assert { + target: ref mut t, + cleanup: Some(ref mut u), + .. + } + | FalseUnwind { + real_target: ref mut t, + unwind: Some(ref mut u), + } => Some(t).into_iter().chain(slice::from_mut(u)), + SwitchInt { + ref mut targets, .. + } => None.into_iter().chain(&mut targets[..]), + FalseEdges { + ref mut real_target, + ref mut imaginary_targets, + } => Some(real_target) + .into_iter() + .chain(&mut imaginary_targets[..]), + } + } + + pub fn unwind(&self) -> Option<&Option> { + match *self { + TerminatorKind::Goto { .. } + | TerminatorKind::Resume + | TerminatorKind::Abort + | TerminatorKind::Return + | TerminatorKind::Unreachable + | TerminatorKind::GeneratorDrop + | TerminatorKind::Yield { .. } + | TerminatorKind::SwitchInt { .. } + | TerminatorKind::FalseEdges { .. } => None, + TerminatorKind::Call { + cleanup: ref unwind, + .. + } + | TerminatorKind::Assert { + cleanup: ref unwind, + .. + } + | TerminatorKind::DropAndReplace { ref unwind, .. 
} + | TerminatorKind::Drop { ref unwind, .. } + | TerminatorKind::FalseUnwind { ref unwind, .. } => Some(unwind), } } pub fn unwind_mut(&mut self) -> Option<&mut Option> { match *self { - TerminatorKind::Goto { .. } | - TerminatorKind::Resume | - TerminatorKind::Abort | - TerminatorKind::Return | - TerminatorKind::Unreachable | - TerminatorKind::GeneratorDrop | - TerminatorKind::Yield { .. } | - TerminatorKind::SwitchInt { .. } | - TerminatorKind::FalseEdges { .. } => { - None - }, - TerminatorKind::Call { cleanup: ref mut unwind, .. } | - TerminatorKind::Assert { cleanup: ref mut unwind, .. } | - TerminatorKind::DropAndReplace { ref mut unwind, .. } | - TerminatorKind::Drop { ref mut unwind, .. } => { - Some(unwind) + TerminatorKind::Goto { .. } + | TerminatorKind::Resume + | TerminatorKind::Abort + | TerminatorKind::Return + | TerminatorKind::Unreachable + | TerminatorKind::GeneratorDrop + | TerminatorKind::Yield { .. } + | TerminatorKind::SwitchInt { .. } + | TerminatorKind::FalseEdges { .. } => None, + TerminatorKind::Call { + cleanup: ref mut unwind, + .. } + | TerminatorKind::Assert { + cleanup: ref mut unwind, + .. + } + | TerminatorKind::DropAndReplace { ref mut unwind, .. } + | TerminatorKind::Drop { ref mut unwind, .. } + | TerminatorKind::FalseUnwind { ref mut unwind, .. } => Some(unwind), } } } @@ -944,14 +1290,72 @@ impl<'tcx> BasicBlockData<'tcx> { self.terminator.as_mut().expect("invalid terminator state") } - pub fn retain_statements(&mut self, mut f: F) where F: FnMut(&mut Statement) -> bool { + pub fn retain_statements(&mut self, mut f: F) + where + F: FnMut(&mut Statement) -> bool, + { for s in &mut self.statements { if !f(s) { - s.kind = StatementKind::Nop; + s.make_nop(); } } } + pub fn expand_statements(&mut self, mut f: F) + where + F: FnMut(&mut Statement<'tcx>) -> Option, + I: iter::TrustedLen>, + { + // Gather all the iterators we'll need to splice in, and their positions. 
+ let mut splices: Vec<(usize, I)> = vec![]; + let mut extra_stmts = 0; + for (i, s) in self.statements.iter_mut().enumerate() { + if let Some(mut new_stmts) = f(s) { + if let Some(first) = new_stmts.next() { + // We can already store the first new statement. + *s = first; + + // Save the other statements for optimized splicing. + let remaining = new_stmts.size_hint().0; + if remaining > 0 { + splices.push((i + 1 + extra_stmts, new_stmts)); + extra_stmts += remaining; + } + } else { + s.make_nop(); + } + } + } + + // Splice in the new statements, from the end of the block. + // FIXME(eddyb) This could be more efficient with a "gap buffer" + // where a range of elements ("gap") is left uninitialized, with + // splicing adding new elements to the end of that gap and moving + // existing elements from before the gap to the end of the gap. + // For now, this is safe code, emulating a gap but initializing it. + let mut gap = self.statements.len()..self.statements.len() + extra_stmts; + self.statements.resize( + gap.end, + Statement { + source_info: SourceInfo { + span: DUMMY_SP, + scope: OUTERMOST_SOURCE_SCOPE, + }, + kind: StatementKind::Nop, + }, + ); + for (splice_start, new_stmts) in splices.into_iter().rev() { + let splice_end = splice_start + new_stmts.size_hint().0; + while gap.end > splice_end { + gap.start -= 1; + gap.end -= 1; + self.statements.swap(gap.start, gap.end); + } + self.statements.splice(splice_start..splice_end, new_stmts); + gap.end = splice_start; + } + } + pub fn visitable(&self, index: usize) -> &dyn MirVisitable<'tcx> { if index < self.statements.len() { &self.statements[index] @@ -964,18 +1368,18 @@ impl<'tcx> BasicBlockData<'tcx> { impl<'tcx> Debug for TerminatorKind<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { self.fmt_head(fmt)?; - let successors = self.successors(); + let successor_count = self.successors().count(); let labels = self.fmt_successor_labels(); - assert_eq!(successors.len(), labels.len()); + 
assert_eq!(successor_count, labels.len()); - match successors.len() { + match successor_count { 0 => Ok(()), - 1 => write!(fmt, " -> {:?}", successors[0]), + 1 => write!(fmt, " -> {:?}", self.successors().nth(0).unwrap()), _ => { write!(fmt, " -> [")?; - for (i, target) in successors.iter().enumerate() { + for (i, target) in self.successors().enumerate() { if i > 0 { write!(fmt, ", ")?; } @@ -983,7 +1387,6 @@ impl<'tcx> Debug for TerminatorKind<'tcx> { } write!(fmt, "]") } - } } } @@ -996,7 +1399,9 @@ impl<'tcx> TerminatorKind<'tcx> { use self::TerminatorKind::*; match *self { Goto { .. } => write!(fmt, "goto"), - SwitchInt { discr: ref place, .. } => write!(fmt, "switchInt({:?})", place), + SwitchInt { + discr: ref place, .. + } => write!(fmt, "switchInt({:?})", place), Return => write!(fmt, "return"), GeneratorDrop => write!(fmt, "generator_drop"), Resume => write!(fmt, "resume"), @@ -1004,9 +1409,17 @@ impl<'tcx> TerminatorKind<'tcx> { Yield { ref value, .. } => write!(fmt, "_1 = suspend({:?})", value), Unreachable => write!(fmt, "unreachable"), Drop { ref location, .. } => write!(fmt, "drop({:?})", location), - DropAndReplace { ref location, ref value, .. } => - write!(fmt, "replace({:?} <- {:?})", location, value), - Call { ref func, ref args, ref destination, .. } => { + DropAndReplace { + ref location, + ref value, + .. + } => write!(fmt, "replace({:?} <- {:?})", location, value), + Call { + ref func, + ref args, + ref destination, + .. + } => { if let Some((ref destination, _)) = *destination { write!(fmt, "{:?} = ", destination)?; } @@ -1019,33 +1432,20 @@ impl<'tcx> TerminatorKind<'tcx> { } write!(fmt, ")") } - Assert { ref cond, expected, ref msg, .. } => { + Assert { + ref cond, + expected, + ref msg, + .. 
+ } => { write!(fmt, "assert(")?; if !expected { write!(fmt, "!")?; } - write!(fmt, "{:?}, ", cond)?; - - match *msg { - AssertMessage::BoundsCheck { ref len, ref index } => { - write!(fmt, "{:?}, {:?}, {:?}", - "index out of bounds: the len is {} but the index is {}", - len, index)?; - } - AssertMessage::Math(ref err) => { - write!(fmt, "{:?}", err.description())?; - } - AssertMessage::GeneratorResumedAfterReturn => { - write!(fmt, "{:?}", "generator resumed after completion")?; - } - AssertMessage::GeneratorResumedAfterPanic => { - write!(fmt, "{:?}", "generator resumed after panicking")?; - } - } - - write!(fmt, ")") - }, - FalseEdges { .. } => write!(fmt, "falseEdges") + write!(fmt, "{:?}, \"{:?}\")", cond, msg) + } + FalseEdges { .. } => write!(fmt, "falseEdges"), + FalseUnwind { .. } => write!(fmt, "falseUnwind"), } } @@ -1055,53 +1455,82 @@ impl<'tcx> TerminatorKind<'tcx> { match *self { Return | Resume | Abort | Unreachable | GeneratorDrop => vec![], Goto { .. } => vec!["".into()], - SwitchInt { ref values, .. } => { - values.iter() - .map(|const_val| { - let mut buf = String::new(); - fmt_const_val(&mut buf, &ConstVal::Integral(*const_val)).unwrap(); - buf.into() - }) - .chain(iter::once(String::from("otherwise").into())) - .collect() + SwitchInt { + ref values, + switch_ty, + .. + } => { + let size = ty::tls::with(|tcx| { + let param_env = ty::ParamEnv::empty(); + let switch_ty = tcx.lift_to_global(&switch_ty).unwrap(); + tcx.layout_of(param_env.and(switch_ty)).unwrap().size + }); + values + .iter() + .map(|&u| { + let mut s = String::new(); + print_miri_value( + Scalar::Bits { + bits: u, + size: size.bytes() as u8, + }.to_value(), + switch_ty, + &mut s, + ).unwrap(); + s.into() + }) + .chain(iter::once(String::from("otherwise").into())) + .collect() } - Call { destination: Some(_), cleanup: Some(_), .. } => - vec!["return".into_cow(), "unwind".into_cow()], - Call { destination: Some(_), cleanup: None, .. 
} => vec!["return".into_cow()], - Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], - Call { destination: None, cleanup: None, .. } => vec![], - Yield { drop: Some(_), .. } => - vec!["resume".into_cow(), "drop".into_cow()], + Call { + destination: Some(_), + cleanup: Some(_), + .. + } => vec!["return".into_cow(), "unwind".into_cow()], + Call { + destination: Some(_), + cleanup: None, + .. + } => vec!["return".into_cow()], + Call { + destination: None, + cleanup: Some(_), + .. + } => vec!["unwind".into_cow()], + Call { + destination: None, + cleanup: None, + .. + } => vec![], + Yield { drop: Some(_), .. } => vec!["resume".into_cow(), "drop".into_cow()], Yield { drop: None, .. } => vec!["resume".into_cow()], - DropAndReplace { unwind: None, .. } | - Drop { unwind: None, .. } => vec!["return".into_cow()], - DropAndReplace { unwind: Some(_), .. } | - Drop { unwind: Some(_), .. } => { - vec!["return".into_cow(), "unwind".into_cow()] + DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => { + vec!["return".into_cow()] } + DropAndReplace { + unwind: Some(_), .. + } + | Drop { + unwind: Some(_), .. + } => vec!["return".into_cow(), "unwind".into_cow()], Assert { cleanup: None, .. } => vec!["".into()], - Assert { .. } => - vec!["success".into_cow(), "unwind".into_cow()], - FalseEdges { ref imaginary_targets, .. } => { + Assert { .. } => vec!["success".into_cow(), "unwind".into_cow()], + FalseEdges { + ref imaginary_targets, + .. + } => { let mut l = vec!["real".into()]; l.resize(imaginary_targets.len() + 1, "imaginary".into()); l } + FalseUnwind { + unwind: Some(_), .. + } => vec!["real".into(), "cleanup".into()], + FalseUnwind { unwind: None, .. 
} => vec!["real".into()], } } } -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] -pub enum AssertMessage<'tcx> { - BoundsCheck { - len: Operand<'tcx>, - index: Operand<'tcx> - }, - Math(ConstMathErr), - GeneratorResumedAfterReturn, - GeneratorResumedAfterPanic, -} - /////////////////////////////////////////////////////////////////////////// // Statements @@ -1117,6 +1546,14 @@ impl<'tcx> Statement<'tcx> { pub fn make_nop(&mut self) { self.kind = StatementKind::Nop } + + /// Changes a statement to a nop and returns the original statement. + pub fn replace_nop(&mut self) -> Self { + Statement { + source_info: self.source_info, + kind: mem::replace(&mut self.kind, StatementKind::Nop), + } + } } #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] @@ -1124,8 +1561,15 @@ pub enum StatementKind<'tcx> { /// Write the RHS Rvalue to the LHS Place. Assign(Place<'tcx>, Rvalue<'tcx>), + /// This represents all the reading that a pattern match may do + /// (e.g. inspecting constants and discriminant values). + ReadForMatch(Place<'tcx>), + /// Write the discriminant for a variant to the enum Place. - SetDiscriminant { place: Place<'tcx>, variant_index: usize }, + SetDiscriminant { + place: Place<'tcx>, + variant_index: usize, + }, /// Start a live range for the storage of the local. StorageLive(Local), @@ -1137,7 +1581,7 @@ pub enum StatementKind<'tcx> { InlineAsm { asm: Box, outputs: Vec>, - inputs: Vec> + inputs: Vec>, }, /// Assert the given places to be valid inhabitants of their type. These statements are @@ -1149,6 +1593,23 @@ pub enum StatementKind<'tcx> { /// (The starting point(s) arise implicitly from borrows.) EndRegion(region::Scope), + /// Encodes a user's type assertion. These need to be preserved intact so that NLL can respect + /// them. For example: + /// + /// let (a, b): (T, U) = y; + /// + /// Here we would insert a `UserAssertTy<(T, U)>(y)` instruction to check that the type of `y` + /// is the right thing. 
+ /// + /// `CanonicalTy` is used to capture "inference variables" from the user's types. For example: + /// + /// let x: Vec<_> = ...; + /// let y: &u32 = ...; + /// + /// would result in `Vec` and `&'?0 u32` respectively (where `?0` is a canonicalized + /// variable). + UserAssertTy(CanonicalTy<'tcx>, Local), + /// No-op. Useful for deleting instructions without affecting statement indices. Nop, } @@ -1182,7 +1643,7 @@ impl Debug for ValidationOp { } // This is generic so that it can be reused by miri -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, Hash, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub struct ValidationOperand<'tcx, T> { pub place: T, pub ty: Ty<'tcx>, @@ -1209,17 +1670,24 @@ impl<'tcx> Debug for Statement<'tcx> { use self::StatementKind::*; match self.kind { Assign(ref place, ref rv) => write!(fmt, "{:?} = {:?}", place, rv), + ReadForMatch(ref place) => write!(fmt, "ReadForMatch({:?})", place), // (reuse lifetime rendering policy from ppaux.) EndRegion(ref ce) => write!(fmt, "EndRegion({})", ty::ReScope(*ce)), Validate(ref op, ref places) => write!(fmt, "Validate({:?}, {:?})", op, places), StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place), StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place), - SetDiscriminant { ref place, variant_index } => { - write!(fmt, "discriminant({:?}) = {:?}", place, variant_index) - }, - InlineAsm { ref asm, ref outputs, ref inputs } => { - write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs) - }, + SetDiscriminant { + ref place, + variant_index, + } => write!(fmt, "discriminant({:?}) = {:?}", place, variant_index), + InlineAsm { + ref asm, + ref outputs, + ref inputs, + } => write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs), + UserAssertTy(ref c_ty, ref local) => { + write!(fmt, "UserAssertTy({:?}, {:?})", c_ty, local) + } Nop => write!(fmt, "nop"), } } @@ -1238,6 +1706,9 @@ pub enum Place<'tcx> { /// static or static mut variable Static(Box>), + 
/// Constant code promoted to an injected static + Promoted(Box<(Promoted, Ty<'tcx>)>), + /// projection out of a place (access a field, deref a pointer, etc) Projection(Box>), } @@ -1331,10 +1802,7 @@ impl<'tcx> Place<'tcx> { } pub fn elem(self, elem: PlaceElem<'tcx>) -> Place<'tcx> { - Place::Projection(Box::new(PlaceProjection { - base: self, - elem, - })) + Place::Projection(Box::new(PlaceProjection { base: self, elem })) } } @@ -1344,31 +1812,42 @@ impl<'tcx> Debug for Place<'tcx> { match *self { Local(id) => write!(fmt, "{:?}", id), - Static(box self::Static { def_id, ty }) => - write!(fmt, "({}: {:?})", ty::tls::with(|tcx| tcx.item_path_str(def_id)), ty), - Projection(ref data) => - match data.elem { - ProjectionElem::Downcast(ref adt_def, index) => - write!(fmt, "({:?} as {})", data.base, adt_def.variants[index].name), - ProjectionElem::Deref => - write!(fmt, "(*{:?})", data.base), - ProjectionElem::Field(field, ty) => - write!(fmt, "({:?}.{:?}: {:?})", data.base, field.index(), ty), - ProjectionElem::Index(ref index) => - write!(fmt, "{:?}[{:?}]", data.base, index), - ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => - write!(fmt, "{:?}[{:?} of {:?}]", data.base, offset, min_length), - ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => - write!(fmt, "{:?}[-{:?} of {:?}]", data.base, offset, min_length), - ProjectionElem::Subslice { from, to } if to == 0 => - write!(fmt, "{:?}[{:?}:]", data.base, from), - ProjectionElem::Subslice { from, to } if from == 0 => - write!(fmt, "{:?}[:-{:?}]", data.base, to), - ProjectionElem::Subslice { from, to } => - write!(fmt, "{:?}[{:?}:-{:?}]", data.base, - from, to), - - }, + Static(box self::Static { def_id, ty }) => write!( + fmt, + "({}: {:?})", + ty::tls::with(|tcx| tcx.item_path_str(def_id)), + ty + ), + Promoted(ref promoted) => write!(fmt, "({:?}: {:?})", promoted.0, promoted.1), + Projection(ref data) => match data.elem { + ProjectionElem::Downcast(ref adt_def, index) => 
{ + write!(fmt, "({:?} as {})", data.base, adt_def.variants[index].name) + } + ProjectionElem::Deref => write!(fmt, "(*{:?})", data.base), + ProjectionElem::Field(field, ty) => { + write!(fmt, "({:?}.{:?}: {:?})", data.base, field.index(), ty) + } + ProjectionElem::Index(ref index) => write!(fmt, "{:?}[{:?}]", data.base, index), + ProjectionElem::ConstantIndex { + offset, + min_length, + from_end: false, + } => write!(fmt, "{:?}[{:?} of {:?}]", data.base, offset, min_length), + ProjectionElem::ConstantIndex { + offset, + min_length, + from_end: true, + } => write!(fmt, "{:?}[-{:?} of {:?}]", data.base, offset, min_length), + ProjectionElem::Subslice { from, to } if to == 0 => { + write!(fmt, "{:?}[{:?}:]", data.base, from) + } + ProjectionElem::Subslice { from, to } if from == 0 => { + write!(fmt, "{:?}[:-{:?}]", data.base, to) + } + ProjectionElem::Subslice { from, to } => { + write!(fmt, "{:?}[{:?}:-{:?}]", data.base, from, to) + } + }, } } } @@ -1376,16 +1855,24 @@ impl<'tcx> Debug for Place<'tcx> { /////////////////////////////////////////////////////////////////////////// // Scopes -newtype_index!(VisibilityScope +newtype_index!(SourceScope { DEBUG_FORMAT = "scope[{}]", - const ARGUMENT_VISIBILITY_SCOPE = 0, + const OUTERMOST_SOURCE_SCOPE = 0, }); #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] -pub struct VisibilityScopeData { +pub struct SourceScopeData { pub span: Span, - pub parent_scope: Option, + pub parent_scope: Option, +} + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct SourceScopeLocalData { + /// A NodeId with lint levels equivalent to this scope's lint levels. + pub lint_root: ast::NodeId, + /// The unsafe block that contains this node. 
+ pub safety: Safety, } /////////////////////////////////////////////////////////////////////////// @@ -1432,19 +1919,14 @@ impl<'tcx> Operand<'tcx> { Operand::Constant(box Constant { span, ty, - literal: Literal::Value { - value: tcx.mk_const(ty::Const { - val: ConstVal::Function(def_id, substs), - ty - }) - }, + literal: ty::Const::zero_sized(tcx, ty), }) } pub fn to_copy(&self) -> Self { match *self { Operand::Copy(_) | Operand::Constant(_) => self.clone(), - Operand::Move(ref place) => Operand::Copy(place.clone()) + Operand::Move(ref place) => Operand::Copy(place.clone()), } } } @@ -1458,7 +1940,7 @@ pub enum Rvalue<'tcx> { Use(Operand<'tcx>), /// [x; 32] - Repeat(Operand<'tcx>, ConstUsize), + Repeat(Operand<'tcx>, u64), /// &x or &mut x Ref(Region<'tcx>, BorrowKind, Place<'tcx>), @@ -1502,7 +1984,7 @@ pub enum CastKind { UnsafeFnPointer, /// "Unsize" -- convert a thin-or-fat pointer to a fat pointer. - /// trans must figure out the details once full monomorphization + /// codegen must figure out the details once full monomorphization /// is known. For example, this could be used to cast from a /// `&[i32;N]` to a `&[i32]`, or a `Box` to a `Box` /// (presuming `T: Trait`). @@ -1515,15 +1997,15 @@ pub enum AggregateKind<'tcx> { Array(Ty<'tcx>), Tuple, - /// The second field is variant number (discriminant), it's equal - /// to 0 for struct and union expressions. The fourth field is + /// The second field is the variant index. It's equal to 0 for struct + /// and union expressions. The fourth field is /// active field number and is present only for union expressions /// -- e.g. for a union expression `SomeUnion { c: .. 
}`, the /// active field index would identity the field `c` Adt(&'tcx AdtDef, usize, &'tcx Substs<'tcx>, Option), Closure(DefId, ClosureSubsts<'tcx>), - Generator(DefId, ClosureSubsts<'tcx>, GeneratorInterior<'tcx>), + Generator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability), } #[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] @@ -1569,7 +2051,7 @@ impl BinOp { use self::BinOp::*; match self { Add | Sub | Mul | Shl | Shr => true, - _ => false + _ => false, } } } @@ -1611,13 +2093,15 @@ impl<'tcx> Debug for Rvalue<'tcx> { Ref(region, borrow_kind, ref place) => { let kind_str = match borrow_kind { BorrowKind::Shared => "", - BorrowKind::Mut | BorrowKind::Unique => "mut ", + BorrowKind::Mut { .. } | BorrowKind::Unique => "mut ", }; // When printing regions, add trailing space if necessary. let region = if ppaux::verbose() || ppaux::identify_regions() { - let mut region = format!("{}", region); - if region.len() > 0 { region.push(' '); } + let mut region = region.to_string(); + if region.len() > 0 { + region.push(' '); + } region } else { // Do not even print 'static @@ -1638,13 +2122,11 @@ impl<'tcx> Debug for Rvalue<'tcx> { match **kind { AggregateKind::Array(_) => write!(fmt, "{:?}", places), - AggregateKind::Tuple => { - match places.len() { - 0 => write!(fmt, "()"), - 1 => write!(fmt, "({:?},)", places[0]), - _ => fmt_tuple(fmt, places), - } - } + AggregateKind::Tuple => match places.len() { + 0 => write!(fmt, "()"), + 1 => write!(fmt, "({:?},)", places[0]), + _ => fmt_tuple(fmt, places), + }, AggregateKind::Adt(adt_def, variant, substs, _) => { let variant_def = &adt_def.variants[variant]; @@ -1657,7 +2139,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { CtorKind::Fictive => { let mut struct_fmt = fmt.debug_struct(""); for (field, place) in variant_def.fields.iter().zip(places) { - struct_fmt.field(&field.name.as_str(), place); + struct_fmt.field(&field.ident.as_str(), place); } struct_fmt.finish() } @@ -1698,8 +2180,8 @@ impl<'tcx> 
Debug for Rvalue<'tcx> { } struct_fmt.field("$state", &places[freevars.len()]); for i in (freevars.len() + 1)..places.len() { - struct_fmt.field(&format!("${}", i - freevars.len() - 1), - &places[i]); + struct_fmt + .field(&format!("${}", i - freevars.len() - 1), &places[i]); } }); @@ -1725,99 +2207,129 @@ impl<'tcx> Debug for Rvalue<'tcx> { pub struct Constant<'tcx> { pub span: Span, pub ty: Ty<'tcx>, - pub literal: Literal<'tcx>, + pub literal: &'tcx ty::Const<'tcx>, } newtype_index!(Promoted { DEBUG_FORMAT = "promoted[{}]" }); - -#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub enum Literal<'tcx> { - Value { - value: &'tcx ty::Const<'tcx>, - }, - Promoted { - // Index into the `promoted` vector of `Mir`. - index: Promoted - }, -} - impl<'tcx> Debug for Constant<'tcx> { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "{:?}", self.literal) + write!(fmt, "const ")?; + fmt_const_val(fmt, self.literal) } } -impl<'tcx> Debug for Literal<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - use self::Literal::*; - match *self { - Value { value } => { - write!(fmt, "const ")?; - fmt_const_val(fmt, &value.val) - } - Promoted { index } => { - write!(fmt, "{:?}", index) - } - } +/// Write a `ConstValue` in a way closer to the original source code than the `Debug` output. +pub fn fmt_const_val(fmt: &mut W, const_val: &ty::Const) -> fmt::Result { + if let Some(value) = const_val.to_byval_value() { + print_miri_value(value, const_val.ty, fmt) + } else { + write!(fmt, "{:?}:{}", const_val.val, const_val.ty) } } -/// Write a `ConstVal` in a way closer to the original source code than the `Debug` output. 
-fn fmt_const_val(fmt: &mut W, const_val: &ConstVal) -> fmt::Result { - use middle::const_val::ConstVal::*; - match *const_val { - Float(f) => write!(fmt, "{:?}", f), - Integral(n) => write!(fmt, "{}", n), - Str(s) => write!(fmt, "{:?}", s), - ByteStr(bytes) => { - let escaped: String = bytes.data - .iter() - .flat_map(|&ch| ascii::escape_default(ch).map(|c| c as char)) - .collect(); - write!(fmt, "b\"{}\"", escaped) +pub fn print_miri_value(value: Value, ty: Ty, f: &mut W) -> fmt::Result { + use ty::TypeVariants::*; + // print some primitives + if let Value::Scalar(ScalarMaybeUndef::Scalar(Scalar::Bits { bits, .. })) = value { + match ty.sty { + TyBool if bits == 0 => return write!(f, "false"), + TyBool if bits == 1 => return write!(f, "true"), + TyFloat(ast::FloatTy::F32) => return write!(f, "{}f32", Single::from_bits(bits)), + TyFloat(ast::FloatTy::F64) => return write!(f, "{}f64", Double::from_bits(bits)), + TyUint(ui) => return write!(f, "{:?}{}", bits, ui), + TyInt(i) => { + let bit_width = ty::tls::with(|tcx| { + let ty = tcx.lift_to_global(&ty).unwrap(); + tcx.layout_of(ty::ParamEnv::empty().and(ty)) + .unwrap() + .size + .bits() + }); + let shift = 128 - bit_width; + return write!(f, "{:?}{}", ((bits as i128) << shift) >> shift, i); + } + TyChar => return write!(f, "{:?}", ::std::char::from_u32(bits as u32).unwrap()), + _ => {}, } - Bool(b) => write!(fmt, "{:?}", b), - Char(c) => write!(fmt, "{:?}", c), - Variant(def_id) | - Function(def_id, _) => write!(fmt, "{}", item_path_str(def_id)), - Aggregate(_) => bug!("`ConstVal::{:?}` should not be in MIR", const_val), - Unevaluated(..) => write!(fmt, "{:?}", const_val) } + // print function definitons + if let TyFnDef(did, _) = ty.sty { + return write!(f, "{}", item_path_str(did)); + } + // print string literals + if let Value::ScalarPair(ptr, len) = value { + if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = ptr { + if let ScalarMaybeUndef::Scalar(Scalar::Bits { bits: len, .. 
}) = len { + if let TyRef(_, &ty::TyS { sty: TyStr, .. }, _) = ty.sty { + return ty::tls::with(|tcx| { + let alloc = tcx.alloc_map.lock().get(ptr.alloc_id); + if let Some(interpret::AllocType::Memory(alloc)) = alloc { + assert_eq!(len as usize as u128, len); + let slice = &alloc + .bytes + [(ptr.offset.bytes() as usize)..] + [..(len as usize)]; + let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri"); + write!(f, "{:?}", s) + } else { + write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len) + } + }); + } + } + } + } + // just raw dump everything else + write!(f, "{:?}:{}", value, ty) } fn item_path_str(def_id: DefId) -> String { ty::tls::with(|tcx| tcx.item_path_str(def_id)) } -impl<'tcx> ControlFlowGraph for Mir<'tcx> { - +impl<'tcx> graph::DirectedGraph for Mir<'tcx> { type Node = BasicBlock; +} - fn num_nodes(&self) -> usize { self.basic_blocks.len() } +impl<'tcx> graph::WithNumNodes for Mir<'tcx> { + fn num_nodes(&self) -> usize { + self.basic_blocks.len() + } +} - fn start_node(&self) -> Self::Node { START_BLOCK } +impl<'tcx> graph::WithStartNode for Mir<'tcx> { + fn start_node(&self) -> Self::Node { + START_BLOCK + } +} - fn predecessors<'graph>(&'graph self, node: Self::Node) - -> >::Iter - { +impl<'tcx> graph::WithPredecessors for Mir<'tcx> { + fn predecessors<'graph>( + &'graph self, + node: Self::Node, + ) -> >::Iter { self.predecessors_for(node).clone().into_iter() } - fn successors<'graph>(&'graph self, node: Self::Node) - -> >::Iter - { - self.basic_blocks[node].terminator().successors().into_owned().into_iter() +} + +impl<'tcx> graph::WithSuccessors for Mir<'tcx> { + fn successors<'graph>( + &'graph self, + node: Self::Node, + ) -> >::Iter { + self.basic_blocks[node].terminator().successors().cloned() } } -impl<'a, 'b> GraphPredecessors<'b> for Mir<'a> { +impl<'a, 'b> graph::GraphPredecessors<'b> for Mir<'a> { type Item = BasicBlock; type Iter = IntoIter; } -impl<'a, 'b> GraphSuccessors<'b> for Mir<'a> { +impl<'a, 'b> 
graph::GraphSuccessors<'b> for Mir<'a> { type Item = BasicBlock; - type Iter = IntoIter; + type Iter = iter::Cloned>; } #[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] @@ -1825,7 +2337,7 @@ pub struct Location { /// the location is within this block pub block: BasicBlock, - /// the location is the start of the this statement; or, if `statement_index` + /// the location is the start of the statement; or, if `statement_index` /// == num-statements, then the start of the terminator. pub statement_index: usize, } @@ -1837,15 +2349,23 @@ impl fmt::Debug for Location { } impl Location { + pub const START: Location = Location { + block: START_BLOCK, + statement_index: 0, + }; + /// Returns the location immediately after this one within the enclosing block. /// /// Note that if this location represents a terminator, then the /// resulting location would be out of bounds and invalid. pub fn successor_within_block(&self) -> Location { - Location { block: self.block, statement_index: self.statement_index + 1 } + Location { + block: self.block, + statement_index: self.statement_index + 1, + } } - pub fn dominates(&self, other: &Location, dominators: &Dominators) -> bool { + pub fn dominates(&self, other: Location, dominators: &Dominators) -> bool { if self.block == other.block { self.statement_index <= other.statement_index } else { @@ -1865,16 +2385,17 @@ pub enum UnsafetyViolationKind { pub struct UnsafetyViolation { pub source_info: SourceInfo, pub description: InternedString, + pub details: InternedString, pub kind: UnsafetyViolationKind, } #[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct UnsafetyCheckResult { /// Violations that are propagated *upwards* from this function - pub violations: Rc<[UnsafetyViolation]>, + pub violations: Lrc<[UnsafetyViolation]>, /// unsafe blocks in this function, along with whether they are used. This is /// used for the "unused_unsafe" lint. 
- pub unsafe_blocks: Rc<[(ast::NodeId, bool)]>, + pub unsafe_blocks: Lrc<[(ast::NodeId, bool)]>, } /// The layout of generator state @@ -1883,6 +2404,12 @@ pub struct GeneratorLayout<'tcx> { pub fields: Vec>, } +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct BorrowCheckResult<'gcx> { + pub closure_requirements: Option>, + pub used_mut_upvars: SmallVec<[Field; 8]>, +} + /// After we borrow check a closure, we are left with various /// requirements that we have inferred between the free regions that /// appear in the closure's signature or on its field types. These @@ -1910,7 +2437,7 @@ pub struct GeneratorLayout<'tcx> { /// ``` /// /// here, there is one unique free region (`'a`) but it appears -/// twice. We would "renumber" each occurence to a unique vid, as follows: +/// twice. We would "renumber" each occurrence to a unique vid, as follows: /// /// ```text /// ClosureSubsts = [ @@ -1982,182 +2509,149 @@ pub enum ClosureOutlivesSubject<'tcx> { * TypeFoldable implementations for MIR types */ -impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - Mir { - basic_blocks: self.basic_blocks.fold_with(folder), - visibility_scopes: self.visibility_scopes.clone(), - visibility_scope_info: self.visibility_scope_info.clone(), - promoted: self.promoted.fold_with(folder), - yield_ty: self.yield_ty.fold_with(folder), - generator_drop: self.generator_drop.fold_with(folder), - generator_layout: self.generator_layout.fold_with(folder), - local_decls: self.local_decls.fold_with(folder), - arg_count: self.arg_count, - upvar_decls: self.upvar_decls.clone(), - spread_arg: self.spread_arg, - span: self.span, - cache: cache::Cache::new() - } - } +CloneTypeFoldableAndLiftImpls! 
{ + Mutability, + SourceInfo, + UpvarDecl, + ValidationOp, + SourceScope, + SourceScopeData, + SourceScopeLocalData, +} - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.basic_blocks.visit_with(visitor) || - self.generator_drop.visit_with(visitor) || - self.generator_layout.visit_with(visitor) || - self.yield_ty.visit_with(visitor) || - self.promoted.visit_with(visitor) || - self.local_decls.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for Mir<'tcx> { + basic_blocks, + source_scopes, + source_scope_local_data, + promoted, + yield_ty, + generator_drop, + generator_layout, + local_decls, + arg_count, + upvar_decls, + spread_arg, + span, + cache, } } -impl<'tcx> TypeFoldable<'tcx> for GeneratorLayout<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - GeneratorLayout { - fields: self.fields.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.fields.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for GeneratorLayout<'tcx> { + fields } } -impl<'tcx> TypeFoldable<'tcx> for LocalDecl<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - LocalDecl { - ty: self.ty.fold_with(folder), - ..self.clone() - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.ty.visit_with(visitor) +BraceStructTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for LocalDecl<'tcx> { + mutability, + is_user_variable, + internal, + ty, + name, + source_info, + visibility_scope, } } -impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - BasicBlockData { - statements: self.statements.fold_with(folder), - terminator: self.terminator.fold_with(folder), - is_cleanup: self.is_cleanup - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.statements.visit_with(visitor) || self.terminator.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for BasicBlockData<'tcx> { + statements, + terminator, + is_cleanup, } } -impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ValidationOperand { - place: self.place.fold_with(folder), - ty: self.ty.fold_with(folder), - re: self.re, - mutbl: self.mutbl, - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.place.visit_with(visitor) || self.ty.visit_with(visitor) +BraceStructTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ValidationOperand<'tcx, Place<'tcx>> { + place, ty, re, mutbl } } -impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use mir::StatementKind::*; - - let kind = match self.kind { - Assign(ref place, ref rval) => Assign(place.fold_with(folder), rval.fold_with(folder)), - SetDiscriminant { ref place, variant_index } => SetDiscriminant { - place: place.fold_with(folder), - variant_index, - }, - StorageLive(ref local) => StorageLive(local.fold_with(folder)), - StorageDead(ref local) => StorageDead(local.fold_with(folder)), - InlineAsm { ref asm, ref outputs, ref inputs } => InlineAsm { - asm: asm.clone(), - outputs: outputs.fold_with(folder), - inputs: inputs.fold_with(folder) - }, - - // Note for future: If we want to expose the region scopes - // during the fold, we need to either generalize EndRegion - // to carry `[ty::Region]`, or extend the `TypeFolder` - // trait with a `fn fold_scope`. - EndRegion(ref region_scope) => EndRegion(region_scope.clone()), - - Validate(ref op, ref places) => - Validate(op.clone(), - places.iter().map(|operand| operand.fold_with(folder)).collect()), - - Nop => Nop, - }; - Statement { - source_info: self.source_info, - kind, - } +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for Statement<'tcx> { + source_info, kind } +} - fn super_visit_with>(&self, visitor: &mut V) -> bool { - use mir::StatementKind::*; - - match self.kind { - Assign(ref place, ref rval) => { place.visit_with(visitor) || rval.visit_with(visitor) } - SetDiscriminant { ref place, .. } => place.visit_with(visitor), - StorageLive(ref local) | - StorageDead(ref local) => local.visit_with(visitor), - InlineAsm { ref outputs, ref inputs, .. 
} => - outputs.visit_with(visitor) || inputs.visit_with(visitor), - - // Note for future: If we want to expose the region scopes - // during the visit, we need to either generalize EndRegion - // to carry `[ty::Region]`, or extend the `TypeVisitor` - // trait with a `fn visit_scope`. - EndRegion(ref _scope) => false, - - Validate(ref _op, ref places) => - places.iter().any(|ty_and_place| ty_and_place.visit_with(visitor)), - - Nop => false, - } +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for StatementKind<'tcx> { + (StatementKind::Assign)(a, b), + (StatementKind::ReadForMatch)(place), + (StatementKind::SetDiscriminant) { place, variant_index }, + (StatementKind::StorageLive)(a), + (StatementKind::StorageDead)(a), + (StatementKind::InlineAsm) { asm, outputs, inputs }, + (StatementKind::Validate)(a, b), + (StatementKind::EndRegion)(a), + (StatementKind::UserAssertTy)(a, b), + (StatementKind::Nop), } } +EnumTypeFoldableImpl! { + impl<'tcx, T> TypeFoldable<'tcx> for ClearCrossCrate { + (ClearCrossCrate::Clear), + (ClearCrossCrate::Set)(a), + } where T: TypeFoldable<'tcx> +} + impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { use mir::TerminatorKind::*; let kind = match self.kind { Goto { target } => Goto { target: target }, - SwitchInt { ref discr, switch_ty, ref values, ref targets } => SwitchInt { + SwitchInt { + ref discr, + switch_ty, + ref values, + ref targets, + } => SwitchInt { discr: discr.fold_with(folder), switch_ty: switch_ty.fold_with(folder), values: values.clone(), - targets: targets.clone() + targets: targets.clone(), }, - Drop { ref location, target, unwind } => Drop { + Drop { + ref location, + target, + unwind, + } => Drop { location: location.fold_with(folder), target, unwind, }, - DropAndReplace { ref location, ref value, target, unwind } => DropAndReplace { + DropAndReplace { + ref location, + ref value, + target, + unwind, + } => 
DropAndReplace { location: location.fold_with(folder), value: value.fold_with(folder), target, unwind, }, - Yield { ref value, resume, drop } => Yield { + Yield { + ref value, + resume, + drop, + } => Yield { value: value.fold_with(folder), resume: resume, drop: drop, }, - Call { ref func, ref args, ref destination, cleanup } => { - let dest = destination.as_ref().map(|&(ref loc, dest)| { - (loc.fold_with(folder), dest) - }); + Call { + ref func, + ref args, + ref destination, + cleanup, + } => { + let dest = destination + .as_ref() + .map(|&(ref loc, dest)| (loc.fold_with(folder), dest)); Call { func: func.fold_with(folder), @@ -2165,10 +2659,16 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { destination: dest, cleanup, } - }, - Assert { ref cond, expected, ref msg, target, cleanup } => { - let msg = if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { - AssertMessage::BoundsCheck { + } + Assert { + ref cond, + expected, + ref msg, + target, + cleanup, + } => { + let msg = if let EvalErrorKind::BoundsCheck { ref len, ref index } = *msg { + EvalErrorKind::BoundsCheck { len: len.fold_with(folder), index: index.fold_with(folder), } @@ -2182,14 +2682,26 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { target, cleanup, } - }, + } GeneratorDrop => GeneratorDrop, Resume => Resume, Abort => Abort, Return => Return, Unreachable => Unreachable, - FalseEdges { real_target, ref imaginary_targets } => - FalseEdges { real_target, imaginary_targets: imaginary_targets.clone() } + FalseEdges { + real_target, + ref imaginary_targets, + } => FalseEdges { + real_target, + imaginary_targets: imaginary_targets.clone(), + }, + FalseUnwind { + real_target, + unwind, + } => FalseUnwind { + real_target, + unwind, + }, }; Terminator { source_info: self.source_info, @@ -2201,22 +2713,36 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { use mir::TerminatorKind::*; match self.kind { - SwitchInt { ref discr, switch_ty, .. 
} => - discr.visit_with(visitor) || switch_ty.visit_with(visitor), - Drop { ref location, ..} => location.visit_with(visitor), - DropAndReplace { ref location, ref value, ..} => - location.visit_with(visitor) || value.visit_with(visitor), - Yield { ref value, ..} => - value.visit_with(visitor), - Call { ref func, ref args, ref destination, .. } => { + SwitchInt { + ref discr, + switch_ty, + .. + } => discr.visit_with(visitor) || switch_ty.visit_with(visitor), + Drop { ref location, .. } => location.visit_with(visitor), + DropAndReplace { + ref location, + ref value, + .. + } => location.visit_with(visitor) || value.visit_with(visitor), + Yield { ref value, .. } => value.visit_with(visitor), + Call { + ref func, + ref args, + ref destination, + .. + } => { let dest = if let Some((ref loc, _)) = *destination { loc.visit_with(visitor) - } else { false }; + } else { + false + }; dest || func.visit_with(visitor) || args.visit_with(visitor) - }, - Assert { ref cond, ref msg, .. } => { + } + Assert { + ref cond, ref msg, .. + } => { if cond.visit_with(visitor) { - if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + if let EvalErrorKind::BoundsCheck { ref len, ref index } = *msg { len.visit_with(visitor) || index.visit_with(visitor) } else { false @@ -2224,14 +2750,15 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { } else { false } - }, - Goto { .. } | - Resume | - Abort | - Return | - GeneratorDrop | - Unreachable | - FalseEdges { .. } => false + } + Goto { .. } + | Resume + | Abort + | Return + | GeneratorDrop + | Unreachable + | FalseEdges { .. } + | FalseUnwind { .. 
} => false, } } } @@ -2240,7 +2767,7 @@ impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { match self { &Place::Projection(ref p) => Place::Projection(p.fold_with(folder)), - _ => self.clone() + _ => self.clone(), } } @@ -2259,14 +2786,17 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { match *self { Use(ref op) => Use(op.fold_with(folder)), Repeat(ref op, len) => Repeat(op.fold_with(folder), len), - Ref(region, bk, ref place) => - Ref(region.fold_with(folder), bk, place.fold_with(folder)), + Ref(region, bk, ref place) => { + Ref(region.fold_with(folder), bk, place.fold_with(folder)) + } Len(ref place) => Len(place.fold_with(folder)), Cast(kind, ref op, ty) => Cast(kind, op.fold_with(folder), ty.fold_with(folder)), - BinaryOp(op, ref rhs, ref lhs) => - BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)), - CheckedBinaryOp(op, ref rhs, ref lhs) => - CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)), + BinaryOp(op, ref rhs, ref lhs) => { + BinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)) + } + CheckedBinaryOp(op, ref rhs, ref lhs) => { + CheckedBinaryOp(op, rhs.fold_with(folder), lhs.fold_with(folder)) + } UnaryOp(op, ref val) => UnaryOp(op, val.fold_with(folder)), Discriminant(ref place) => Discriminant(place.fold_with(folder)), NullaryOp(op, ty) => NullaryOp(op, ty.fold_with(folder)), @@ -2274,14 +2804,15 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { let kind = box match **kind { AggregateKind::Array(ty) => AggregateKind::Array(ty.fold_with(folder)), AggregateKind::Tuple => AggregateKind::Tuple, - AggregateKind::Adt(def, v, substs, n) => - AggregateKind::Adt(def, v, substs.fold_with(folder), n), - AggregateKind::Closure(id, substs) => - AggregateKind::Closure(id, substs.fold_with(folder)), - AggregateKind::Generator(id, substs, interior) => - AggregateKind::Generator(id, - substs.fold_with(folder), - interior.fold_with(folder)), + 
AggregateKind::Adt(def, v, substs, n) => { + AggregateKind::Adt(def, v, substs.fold_with(folder), n) + } + AggregateKind::Closure(id, substs) => { + AggregateKind::Closure(id, substs.fold_with(folder)) + } + AggregateKind::Generator(id, substs, movablity) => { + AggregateKind::Generator(id, substs.fold_with(folder), movablity) + } }; Aggregate(kind, fields.fold_with(folder)) } @@ -2296,9 +2827,9 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { Ref(region, _, ref place) => region.visit_with(visitor) || place.visit_with(visitor), Len(ref place) => place.visit_with(visitor), Cast(_, ref op, ty) => op.visit_with(visitor) || ty.visit_with(visitor), - BinaryOp(_, ref rhs, ref lhs) | - CheckedBinaryOp(_, ref rhs, ref lhs) => - rhs.visit_with(visitor) || lhs.visit_with(visitor), + BinaryOp(_, ref rhs, ref lhs) | CheckedBinaryOp(_, ref rhs, ref lhs) => { + rhs.visit_with(visitor) || lhs.visit_with(visitor) + } UnaryOp(_, ref val) => val.visit_with(visitor), Discriminant(ref place) => place.visit_with(visitor), NullaryOp(_, ty) => ty.visit_with(visitor), @@ -2308,8 +2839,7 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { AggregateKind::Tuple => false, AggregateKind::Adt(_, _, substs, _) => substs.visit_with(visitor), AggregateKind::Closure(_, substs) => substs.visit_with(visitor), - AggregateKind::Generator(_, substs, interior) => substs.visit_with(visitor) || - interior.visit_with(visitor), + AggregateKind::Generator(_, substs, _) => substs.visit_with(visitor), }) || fields.visit_with(visitor) } } @@ -2327,15 +2857,17 @@ impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { fn super_visit_with>(&self, visitor: &mut V) -> bool { match *self { - Operand::Copy(ref place) | - Operand::Move(ref place) => place.visit_with(visitor), - Operand::Constant(ref c) => c.visit_with(visitor) + Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor), + Operand::Constant(ref c) => c.visit_with(visitor), } } } impl<'tcx, B, V, T> TypeFoldable<'tcx> for 
Projection<'tcx, B, V, T> - where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>, T: TypeFoldable<'tcx> +where + B: TypeFoldable<'tcx>, + V: TypeFoldable<'tcx>, + T: TypeFoldable<'tcx>, { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { use mir::ProjectionElem::*; @@ -2345,24 +2877,29 @@ impl<'tcx, B, V, T> TypeFoldable<'tcx> for Projection<'tcx, B, V, T> Deref => Deref, Field(f, ref ty) => Field(f, ty.fold_with(folder)), Index(ref v) => Index(v.fold_with(folder)), - ref elem => elem.clone() + ref elem => elem.clone(), }; - Projection { - base, - elem, - } + Projection { base, elem } } fn super_visit_with>(&self, visitor: &mut Vs) -> bool { use mir::ProjectionElem::*; - self.base.visit_with(visitor) || - match self.elem { - Field(_, ref ty) => ty.visit_with(visitor), - Index(ref v) => v.visit_with(visitor), - _ => false - } + self.base.visit_with(visitor) || match self.elem { + Field(_, ref ty) => ty.visit_with(visitor), + Index(ref v) => v.visit_with(visitor), + _ => false, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Field { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> Self { + *self + } + fn super_visit_with>(&self, _: &mut V) -> bool { + false } } @@ -2371,27 +2908,10 @@ impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> { Constant { span: self.span.clone(), ty: self.ty.fold_with(folder), - literal: self.literal.fold_with(folder) + literal: self.literal.fold_with(folder), } } fn super_visit_with>(&self, visitor: &mut V) -> bool { self.ty.visit_with(visitor) || self.literal.visit_with(visitor) } } - -impl<'tcx> TypeFoldable<'tcx> for Literal<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - Literal::Value { value } => Literal::Value { - value: value.fold_with(folder) - }, - Literal::Promoted { index } => Literal::Promoted { index } - } - } - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - 
Literal::Value { value } => value.visit_with(visitor), - Literal::Promoted { .. } => false - } - } -} diff --git a/src/librustc/mir/mono.rs b/src/librustc/mir/mono.rs index efdf4066815f..9d2f62bd0304 100644 --- a/src/librustc/mir/mono.rs +++ b/src/librustc/mir/mono.rs @@ -8,26 +8,44 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use hir::def_id::{DefId, CrateNum}; use syntax::ast::NodeId; -use syntax::symbol::InternedString; -use ty::Instance; +use syntax::symbol::{Symbol, InternedString}; +use ty::{Instance, TyCtxt}; use util::nodemap::FxHashMap; use rustc_data_structures::base_n; use rustc_data_structures::stable_hasher::{HashStable, StableHasherResult, StableHasher}; use ich::{Fingerprint, StableHashingContext, NodeIdHashingMode}; +use std::fmt; use std::hash::Hash; #[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] pub enum MonoItem<'tcx> { Fn(Instance<'tcx>), - Static(NodeId), + Static(DefId), GlobalAsm(NodeId), } -impl<'tcx> HashStable> for MonoItem<'tcx> { +impl<'tcx> MonoItem<'tcx> { + pub fn size_estimate<'a>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>) -> usize { + match *self { + MonoItem::Fn(instance) => { + // Estimate the size of a function based on how many statements + // it contains. + tcx.instance_def_size_estimate(instance.def) + }, + // Conservatively estimate the size of a static declaration + // or assembly to be 1. 
+ MonoItem::Static(_) | + MonoItem::GlobalAsm(_) => 1, + } + } +} + +impl<'a, 'tcx> HashStable> for MonoItem<'tcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'tcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { ::std::mem::discriminant(self).hash_stable(hcx, hasher); @@ -35,7 +53,9 @@ impl<'tcx> HashStable> for MonoItem<'tcx> { MonoItem::Fn(ref instance) => { instance.hash_stable(hcx, hasher); } - MonoItem::Static(node_id) | + MonoItem::Static(def_id) => { + def_id.hash_stable(hcx, hasher); + } MonoItem::GlobalAsm(node_id) => { hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { node_id.hash_stable(hcx, hasher); @@ -52,9 +72,10 @@ pub struct CodegenUnit<'tcx> { /// as well as the crate name and disambiguator. name: InternedString, items: FxHashMap, (Linkage, Visibility)>, + size_estimate: Option, } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum Linkage { External, AvailableExternally, @@ -101,6 +122,7 @@ impl<'tcx> CodegenUnit<'tcx> { CodegenUnit { name: name, items: FxHashMap(), + size_estimate: None, } } @@ -131,24 +153,44 @@ impl<'tcx> CodegenUnit<'tcx> { let hash = hash & ((1u128 << 80) - 1); base_n::encode(hash, base_n::CASE_INSENSITIVE) } + + pub fn estimate_size<'a>(&mut self, tcx: &TyCtxt<'a, 'tcx, 'tcx>) { + // Estimate the size of a codegen unit as (approximately) the number of MIR + // statements it corresponds to. + self.size_estimate = Some(self.items.keys().map(|mi| mi.size_estimate(tcx)).sum()); + } + + pub fn size_estimate(&self) -> usize { + // Should only be called if `estimate_size` has previously been called. 
+ self.size_estimate.expect("estimate_size must be called before getting a size_estimate") + } + + pub fn modify_size_estimate(&mut self, delta: usize) { + assert!(self.size_estimate.is_some()); + if let Some(size_estimate) = self.size_estimate { + self.size_estimate = Some(size_estimate + delta); + } + } } -impl<'tcx> HashStable> for CodegenUnit<'tcx> { +impl<'a, 'tcx> HashStable> for CodegenUnit<'tcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'tcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let CodegenUnit { ref items, name, + // The size estimate is not relevant to the hash + size_estimate: _, } = *self; name.hash_stable(hcx, hasher); - let mut items: Vec<(Fingerprint, _)> = items.iter().map(|(trans_item, &attrs)| { + let mut items: Vec<(Fingerprint, _)> = items.iter().map(|(mono_item, &attrs)| { let mut hasher = StableHasher::new(); - trans_item.hash_stable(hcx, &mut hasher); - let trans_item_fingerprint = hasher.finish(); - (trans_item_fingerprint, attrs) + mono_item.hash_stable(hcx, &mut hasher); + let mono_item_fingerprint = hasher.finish(); + (mono_item_fingerprint, attrs) }).collect(); items.sort_unstable_by_key(|i| i.0); @@ -199,3 +241,94 @@ impl Stats { } } +pub struct CodegenUnitNameBuilder<'a, 'gcx: 'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + cache: FxHashMap, +} + +impl<'a, 'gcx: 'tcx, 'tcx: 'a> CodegenUnitNameBuilder<'a, 'gcx, 'tcx> { + + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self { + CodegenUnitNameBuilder { + tcx, + cache: FxHashMap(), + } + } + + /// CGU names should fulfill the following requirements: + /// - They should be able to act as a file name on any kind of file system + /// - They should not collide with other CGU names, even for different versions + /// of the same crate. + /// + /// Consequently, we don't use special characters except for '.' and '-' and we + /// prefix each name with the crate-name and crate-disambiguator. 
+ /// + /// This function will build CGU names of the form: + /// + /// ``` + /// .(-)*[.] + /// ``` + /// + /// The '.' before `` makes sure that names with a special + /// suffix can never collide with a name built out of regular Rust + /// identifiers (e.g. module paths). + pub fn build_cgu_name(&mut self, + cnum: CrateNum, + components: I, + special_suffix: Option) + -> InternedString + where I: IntoIterator, + C: fmt::Display, + S: fmt::Display, + { + let cgu_name = self.build_cgu_name_no_mangle(cnum, + components, + special_suffix); + + if self.tcx.sess.opts.debugging_opts.human_readable_cgu_names { + cgu_name + } else { + let cgu_name = &cgu_name.as_str()[..]; + Symbol::intern(&CodegenUnit::mangle_name(cgu_name)).as_interned_str() + } + } + + /// Same as `CodegenUnit::build_cgu_name()` but will never mangle the + /// resulting name. + pub fn build_cgu_name_no_mangle(&mut self, + cnum: CrateNum, + components: I, + special_suffix: Option) + -> InternedString + where I: IntoIterator, + C: fmt::Display, + S: fmt::Display, + { + use std::fmt::Write; + + let mut cgu_name = String::with_capacity(64); + + // Start out with the crate name and disambiguator + let tcx = self.tcx; + let crate_prefix = self.cache.entry(cnum).or_insert_with(|| { + let crate_disambiguator = format!("{}", tcx.crate_disambiguator(cnum)); + // Using a shortened disambiguator of about 40 bits + format!("{}.{}", tcx.crate_name(cnum), &crate_disambiguator[0 .. 
8]) + }); + + write!(cgu_name, "{}", crate_prefix).unwrap(); + + // Add the components + for component in components { + write!(cgu_name, "-{}", component).unwrap(); + } + + if let Some(special_suffix) = special_suffix { + // We add a dot in here so it cannot clash with anything in a regular + // Rust identifier + write!(cgu_name, ".{}", special_suffix).unwrap(); + } + + Symbol::intern(&cgu_name[..]).as_interned_str() + } +} diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index 23f360d5c392..5991845d265b 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -16,7 +16,6 @@ use mir::*; use ty::subst::{Subst, Substs}; use ty::{self, AdtDef, Ty, TyCtxt}; -use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use hir; use ty::util::IntTypeExt; @@ -52,7 +51,7 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { match *elem { ProjectionElem::Deref => { let ty = self.to_ty(tcx) - .builtin_deref(true, ty::LvaluePreference::NoPreference) + .builtin_deref(true) .unwrap_or_else(|| { bug!("deref projection of non-dereferencable ty {:?}", self) }) @@ -70,7 +69,7 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { PlaceTy::Ty { ty: match ty.sty { ty::TyArray(inner, size) => { - let size = size.val.to_const_int().unwrap().to_u64().unwrap(); + let size = size.unwrap_usize(tcx); let len = size - (from as u64) - (to as u64); tcx.mk_array(inner, len) } @@ -100,25 +99,10 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for PlaceTy<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - PlaceTy::Ty { ty } => PlaceTy::Ty { ty: ty.fold_with(folder) }, - PlaceTy::Downcast { adt_def, substs, variant_index } => { - PlaceTy::Downcast { - adt_def, - substs: substs.fold_with(folder), - variant_index, - } - } - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - PlaceTy::Ty { ty } => ty.visit_with(visitor), - PlaceTy::Downcast { substs, .. 
} => substs.visit_with(visitor) - } +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for PlaceTy<'tcx> { + (PlaceTy::Ty) { ty }, + (PlaceTy::Downcast) { adt_def, substs, variant_index }, } } @@ -129,12 +113,48 @@ impl<'tcx> Place<'tcx> { match *self { Place::Local(index) => PlaceTy::Ty { ty: local_decls.local_decls()[index].ty }, + Place::Promoted(ref data) => PlaceTy::Ty { ty: data.1 }, Place::Static(ref data) => PlaceTy::Ty { ty: data.ty }, Place::Projection(ref proj) => proj.base.ty(local_decls, tcx).projection_ty(tcx, &proj.elem), } } + + /// If this is a field projection, and the field is being projected from a closure type, + /// then returns the index of the field being projected. Note that this closure will always + /// be `self` in the current MIR, because that is the only time we directly access the fields + /// of a closure type. + pub fn is_upvar_field_projection<'cx, 'gcx>(&self, mir: &'cx Mir<'tcx>, + tcx: &TyCtxt<'cx, 'gcx, 'tcx>) -> Option { + let (place, by_ref) = if let Place::Projection(ref proj) = self { + if let ProjectionElem::Deref = proj.elem { + (&proj.base, true) + } else { + (self, false) + } + } else { + (self, false) + }; + + match place { + Place::Projection(ref proj) => match proj.elem { + ProjectionElem::Field(field, _ty) => { + let base_ty = proj.base.ty(mir, *tcx).to_ty(*tcx); + + if (base_ty.is_closure() || base_ty.is_generator()) && + (!by_ref || mir.upvar_decls[field.index()].by_ref) + { + Some(field) + } else { + None + } + }, + _ => None, + } + _ => None, + } + } } pub enum RvalueInitializationState { @@ -149,7 +169,7 @@ impl<'tcx> Rvalue<'tcx> { match *self { Rvalue::Use(ref operand) => operand.ty(local_decls, tcx), Rvalue::Repeat(ref operand, count) => { - tcx.mk_array_const_usize(operand.ty(local_decls, tcx), count) + tcx.mk_array(operand.ty(local_decls, tcx), count) } Rvalue::Ref(reg, bk, ref place) => { let place_ty = place.ty(local_decls, tcx).to_ty(tcx); @@ -171,7 +191,7 @@ impl<'tcx> Rvalue<'tcx> { let lhs_ty = 
lhs.ty(local_decls, tcx); let rhs_ty = rhs.ty(local_decls, tcx); let ty = op.ty(tcx, lhs_ty, rhs_ty); - tcx.intern_tup(&[ty, tcx.types.bool], false) + tcx.intern_tup(&[ty, tcx.types.bool]) } Rvalue::UnaryOp(UnOp::Not, ref operand) | Rvalue::UnaryOp(UnOp::Neg, ref operand) => { @@ -182,9 +202,8 @@ impl<'tcx> Rvalue<'tcx> { if let ty::TyAdt(adt_def, _) = ty.sty { adt_def.repr.discr_type().to_ty(tcx) } else { - // Undefined behaviour, bug for now; may want to return something for - // the `discriminant` intrinsic later. - bug!("Rvalue::Discriminant on Place of type {:?}", ty); + // This can only be `0`, for now, so `u8` will suffice. + tcx.types.u8 } } Rvalue::NullaryOp(NullOp::Box, t) => tcx.mk_box(t), @@ -195,19 +214,16 @@ impl<'tcx> Rvalue<'tcx> { tcx.mk_array(ty, ops.len() as u64) } AggregateKind::Tuple => { - tcx.mk_tup( - ops.iter().map(|op| op.ty(local_decls, tcx)), - false - ) + tcx.mk_tup(ops.iter().map(|op| op.ty(local_decls, tcx))) } AggregateKind::Adt(def, _, substs, _) => { tcx.type_of(def.did).subst(tcx, substs) } AggregateKind::Closure(did, substs) => { - tcx.mk_closure_from_closure_substs(did, substs) + tcx.mk_closure(did, substs) } - AggregateKind::Generator(did, substs, interior) => { - tcx.mk_generator(did, substs, interior) + AggregateKind::Generator(did, substs, movability) => { + tcx.mk_generator(did, substs, movability) } } } @@ -264,7 +280,7 @@ impl<'tcx> BinOp { impl BorrowKind { pub fn to_mutbl_lossy(self) -> hir::Mutability { match self { - BorrowKind::Mut => hir::MutMutable, + BorrowKind::Mut { .. 
} => hir::MutMutable, BorrowKind::Shared => hir::MutImmutable, // We have no type corresponding to a unique imm borrow, so @@ -276,24 +292,24 @@ impl BorrowKind { } impl BinOp { - pub fn to_hir_binop(self) -> hir::BinOp_ { + pub fn to_hir_binop(self) -> hir::BinOpKind { match self { - BinOp::Add => hir::BinOp_::BiAdd, - BinOp::Sub => hir::BinOp_::BiSub, - BinOp::Mul => hir::BinOp_::BiMul, - BinOp::Div => hir::BinOp_::BiDiv, - BinOp::Rem => hir::BinOp_::BiRem, - BinOp::BitXor => hir::BinOp_::BiBitXor, - BinOp::BitAnd => hir::BinOp_::BiBitAnd, - BinOp::BitOr => hir::BinOp_::BiBitOr, - BinOp::Shl => hir::BinOp_::BiShl, - BinOp::Shr => hir::BinOp_::BiShr, - BinOp::Eq => hir::BinOp_::BiEq, - BinOp::Ne => hir::BinOp_::BiNe, - BinOp::Lt => hir::BinOp_::BiLt, - BinOp::Gt => hir::BinOp_::BiGt, - BinOp::Le => hir::BinOp_::BiLe, - BinOp::Ge => hir::BinOp_::BiGe, + BinOp::Add => hir::BinOpKind::Add, + BinOp::Sub => hir::BinOpKind::Sub, + BinOp::Mul => hir::BinOpKind::Mul, + BinOp::Div => hir::BinOpKind::Div, + BinOp::Rem => hir::BinOpKind::Rem, + BinOp::BitXor => hir::BinOpKind::BitXor, + BinOp::BitAnd => hir::BinOpKind::BitAnd, + BinOp::BitOr => hir::BinOpKind::BitOr, + BinOp::Shl => hir::BinOpKind::Shl, + BinOp::Shr => hir::BinOpKind::Shr, + BinOp::Eq => hir::BinOpKind::Eq, + BinOp::Ne => hir::BinOpKind::Ne, + BinOp::Lt => hir::BinOpKind::Lt, + BinOp::Gt => hir::BinOpKind::Gt, + BinOp::Le => hir::BinOpKind::Le, + BinOp::Ge => hir::BinOpKind::Ge, BinOp::Offset => unreachable!() } } diff --git a/src/librustc/mir/traversal.rs b/src/librustc/mir/traversal.rs index 74c3408c4c2f..c178a9063c9a 100644 --- a/src/librustc/mir/traversal.rs +++ b/src/librustc/mir/traversal.rs @@ -8,10 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::vec; - -use rustc_data_structures::bitvec::BitVector; -use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::bitvec::BitArray; use super::*; @@ -35,7 +32,7 @@ use super::*; #[derive(Clone)] pub struct Preorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, - visited: BitVector, + visited: BitArray, worklist: Vec, } @@ -45,7 +42,7 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> { Preorder { mir, - visited: BitVector::new(mir.basic_blocks().len()), + visited: BitArray::new(mir.basic_blocks().len()), worklist, } } @@ -60,16 +57,14 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { while let Some(idx) = self.worklist.pop() { - if !self.visited.insert(idx.index()) { + if !self.visited.insert(idx) { continue; } let data = &self.mir[idx]; if let Some(ref term) = data.terminator { - for &succ in term.successors().iter() { - self.worklist.push(succ); - } + self.worklist.extend(term.successors()); } return Some((idx, data)); @@ -77,8 +72,18 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { None } + + fn size_hint(&self) -> (usize, Option) { + // All the blocks, minus the number of blocks we've visited. + let remaining = self.mir.basic_blocks().len() - self.visited.count(); + + // We will visit all remaining blocks exactly once. + (remaining, Some(remaining)) + } } +impl<'a, 'tcx> ExactSizeIterator for Preorder<'a, 'tcx> {} + /// Postorder traversal of a graph. 
/// /// Postorder traversal is when each node is visited after all of it's @@ -99,15 +104,15 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { /// A Postorder traversal of this graph is `D B C A` or `D C B A` pub struct Postorder<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, - visited: BitVector, - visit_stack: Vec<(BasicBlock, vec::IntoIter)> + visited: BitArray, + visit_stack: Vec<(BasicBlock, Successors<'a>)> } impl<'a, 'tcx> Postorder<'a, 'tcx> { pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> { let mut po = Postorder { mir, - visited: BitVector::new(mir.basic_blocks().len()), + visited: BitArray::new(mir.basic_blocks().len()), visit_stack: Vec::new() }; @@ -115,11 +120,8 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { let data = &po.mir[root]; if let Some(ref term) = data.terminator { - po.visited.insert(root.index()); - - let succs = term.successors().into_owned().into_iter(); - - po.visit_stack.push((root, succs)); + po.visited.insert(root); + po.visit_stack.push((root, term.successors())); po.traverse_successor(); } @@ -176,7 +178,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A] loop { let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() { - if let Some(bb) = iter.next() { + if let Some(&bb) = iter.next() { bb } else { break; @@ -185,10 +187,9 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { break; }; - if self.visited.insert(bb.index()) { - if let Some(ref term) = self.mir[bb].terminator { - let succs = term.successors().into_owned().into_iter(); - self.visit_stack.push((bb, succs)); + if self.visited.insert(bb) { + if let Some(term) = &self.mir[bb].terminator { + self.visit_stack.push((bb, term.successors())); } } } @@ -210,8 +211,18 @@ impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { next.map(|(bb, _)| (bb, &self.mir[bb])) } + + fn size_hint(&self) -> (usize, Option) { + // All the blocks, minus the number of blocks we've visited. 
+ let remaining = self.mir.basic_blocks().len() - self.visited.count(); + + // We will visit all remaining blocks exactly once. + (remaining, Some(remaining)) + } } +impl<'a, 'tcx> ExactSizeIterator for Postorder<'a, 'tcx> {} + /// Reverse postorder traversal of a graph /// /// Reverse postorder is the reverse order of a postorder traversal. @@ -276,4 +287,10 @@ impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> { self.blocks.get(self.idx).map(|&bb| (bb, &self.mir[bb])) } + + fn size_hint(&self) -> (usize, Option) { + (self.idx, Some(self.idx)) + } } + +impl<'a, 'tcx> ExactSizeIterator for ReversePostorder<'a, 'tcx> {} diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 58067931d562..cab6ed0c122c 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -10,9 +10,8 @@ use hir::def_id::DefId; use ty::subst::Substs; -use ty::{ClosureSubsts, Region, Ty, GeneratorInterior}; +use ty::{CanonicalTy, ClosureSubsts, GeneratorSubsts, Region, Ty}; use mir::*; -use rustc_const_math::ConstUsize; use syntax_pos::Span; // # The MIR Visitor @@ -93,9 +92,9 @@ macro_rules! make_mir_visitor { self.super_basic_block_data(block, data); } - fn visit_visibility_scope_data(&mut self, - scope_data: & $($mutability)* VisibilityScopeData) { - self.super_visibility_scope_data(scope_data); + fn visit_source_scope_data(&mut self, + scope_data: & $($mutability)* SourceScopeData) { + self.super_source_scope_data(scope_data); } fn visit_statement(&mut self, @@ -145,6 +144,13 @@ macro_rules! make_mir_visitor { self.super_operand(operand, location); } + fn visit_user_assert_ty(&mut self, + c_ty: & $($mutability)* CanonicalTy<'tcx>, + local: & $($mutability)* Local, + location: Location) { + self.super_user_assert_ty(c_ty, local, location); + } + fn visit_place(&mut self, place: & $($mutability)* Place<'tcx>, context: PlaceContext<'tcx>, @@ -185,12 +191,6 @@ macro_rules! 
make_mir_visitor { self.super_constant(constant, location); } - fn visit_literal(&mut self, - literal: & $($mutability)* Literal<'tcx>, - location: Location) { - self.super_literal(literal, location); - } - fn visit_def_id(&mut self, def_id: & $($mutability)* DefId, _: Location) { @@ -237,22 +237,10 @@ macro_rules! make_mir_visitor { self.super_closure_substs(substs); } - fn visit_generator_interior(&mut self, - interior: & $($mutability)* GeneratorInterior<'tcx>, + fn visit_generator_substs(&mut self, + substs: & $($mutability)* GeneratorSubsts<'tcx>, _: Location) { - self.super_generator_interior(interior); - } - - fn visit_const_int(&mut self, - const_int: &ConstInt, - _: Location) { - self.super_const_int(const_int); - } - - fn visit_const_usize(&mut self, - const_usize: & $($mutability)* ConstUsize, - _: Location) { - self.super_const_usize(const_usize); + self.super_generator_substs(substs); } fn visit_local_decl(&mut self, @@ -267,9 +255,9 @@ macro_rules! make_mir_visitor { _location: Location) { } - fn visit_visibility_scope(&mut self, - scope: & $($mutability)* VisibilityScope) { - self.super_visibility_scope(scope); + fn visit_source_scope(&mut self, + scope: & $($mutability)* SourceScope) { + self.super_source_scope(scope); } // The `super_xxx` methods comprise the default behavior and are @@ -277,6 +265,13 @@ macro_rules! make_mir_visitor { fn super_mir(&mut self, mir: & $($mutability)* Mir<'tcx>) { + if let Some(yield_ty) = &$($mutability)* mir.yield_ty { + self.visit_ty(yield_ty, TyContext::YieldTy(SourceInfo { + span: mir.span, + scope: OUTERMOST_SOURCE_SCOPE, + })); + } + // for best performance, we want to use an iterator rather // than a for-loop, to avoid calling Mir::invalidate for // each basic block. @@ -288,13 +283,13 @@ macro_rules! 
make_mir_visitor { self.visit_basic_block_data(bb, data); } - for scope in &$($mutability)* mir.visibility_scopes { - self.visit_visibility_scope_data(scope); + for scope in &$($mutability)* mir.source_scopes { + self.visit_source_scope_data(scope); } self.visit_ty(&$($mutability)* mir.return_ty(), TyContext::ReturnTy(SourceInfo { span: mir.span, - scope: ARGUMENT_VISIBILITY_SCOPE, + scope: OUTERMOST_SOURCE_SCOPE, })); for local in mir.local_decls.indices() { @@ -326,16 +321,16 @@ macro_rules! make_mir_visitor { } } - fn super_visibility_scope_data(&mut self, - scope_data: & $($mutability)* VisibilityScopeData) { - let VisibilityScopeData { + fn super_source_scope_data(&mut self, + scope_data: & $($mutability)* SourceScopeData) { + let SourceScopeData { ref $($mutability)* span, ref $($mutability)* parent_scope, } = *scope_data; self.visit_span(span); if let Some(ref $($mutability)* parent_scope) = *parent_scope { - self.visit_visibility_scope(parent_scope); + self.visit_source_scope(parent_scope); } } @@ -354,6 +349,11 @@ macro_rules! make_mir_visitor { ref $($mutability)* rvalue) => { self.visit_assign(block, place, rvalue, location); } + StatementKind::ReadForMatch(ref $($mutability)* place) => { + self.visit_place(place, + PlaceContext::Inspect, + location); + } StatementKind::EndRegion(_) => {} StatementKind::Validate(_, ref $($mutability)* places) => { for operand in places { @@ -382,6 +382,10 @@ macro_rules! make_mir_visitor { self.visit_operand(input, location); } } + StatementKind::UserAssertTy(ref $($mutability)* c_ty, + ref $($mutability)* local) => { + self.visit_user_assert_ty(c_ty, local, location); + } StatementKind::Nop => {} } } @@ -419,13 +423,10 @@ macro_rules! make_mir_visitor { TerminatorKind::SwitchInt { ref $($mutability)* discr, ref $($mutability)* switch_ty, - ref values, + values: _, ref targets } => { self.visit_operand(discr, source_location); self.visit_ty(switch_ty, TyContext::Location(source_location)); - for value in &values[..] 
{ - self.visit_const_int(value, source_location); - } for &target in targets { self.visit_branch(block, target); } @@ -488,32 +489,34 @@ macro_rules! make_mir_visitor { self.visit_operand(value, source_location); self.visit_branch(block, resume); drop.map(|t| self.visit_branch(block, t)); - } - TerminatorKind::FalseEdges { real_target, ref imaginary_targets } => { + TerminatorKind::FalseEdges { real_target, ref imaginary_targets} => { self.visit_branch(block, real_target); for target in imaginary_targets { self.visit_branch(block, *target); } } + + TerminatorKind::FalseUnwind { real_target, unwind } => { + self.visit_branch(block, real_target); + if let Some(unwind) = unwind { + self.visit_branch(block, unwind); + } + } } } fn super_assert_message(&mut self, msg: & $($mutability)* AssertMessage<'tcx>, location: Location) { - match *msg { - AssertMessage::BoundsCheck { + use mir::interpret::EvalErrorKind::*; + if let BoundsCheck { ref $($mutability)* len, ref $($mutability)* index - } => { - self.visit_operand(len, location); - self.visit_operand(index, location); - } - AssertMessage::Math(_) => {}, - AssertMessage::GeneratorResumedAfterReturn => {}, - AssertMessage::GeneratorResumedAfterPanic => {}, + } = *msg { + self.visit_operand(len, location); + self.visit_operand(index, location); } } @@ -525,10 +528,8 @@ macro_rules! make_mir_visitor { self.visit_operand(operand, location); } - Rvalue::Repeat(ref $($mutability)* value, - ref $($mutability)* length) => { + Rvalue::Repeat(ref $($mutability)* value, _) => { self.visit_operand(value, location); - self.visit_const_usize(length, location); } Rvalue::Ref(ref $($mutability)* r, bk, ref $($mutability)* path) => { @@ -593,11 +594,10 @@ macro_rules! 
make_mir_visitor { self.visit_closure_substs(closure_substs, location); } AggregateKind::Generator(ref $($mutability)* def_id, - ref $($mutability)* closure_substs, - ref $($mutability)* interior) => { + ref $($mutability)* generator_substs, + _movability) => { self.visit_def_id(def_id, location); - self.visit_closure_substs(closure_substs, location); - self.visit_generator_interior(interior, location); + self.visit_generator_substs(generator_substs, location); } } @@ -624,6 +624,13 @@ macro_rules! make_mir_visitor { } } + fn super_user_assert_ty(&mut self, + _c_ty: & $($mutability)* CanonicalTy<'tcx>, + local: & $($mutability)* Local, + location: Location) { + self.visit_local(local, PlaceContext::Validate, location); + } + fn super_place(&mut self, place: & $($mutability)* Place<'tcx>, context: PlaceContext<'tcx>, @@ -635,6 +642,9 @@ macro_rules! make_mir_visitor { Place::Static(ref $($mutability)* static_) => { self.visit_static(static_, context, location); } + Place::Promoted(ref $($mutability)* promoted) => { + self.visit_ty(& $($mutability)* promoted.1, TyContext::Location(location)); + }, Place::Projection(ref $($mutability)* proj) => { self.visit_projection(proj, context, location); } @@ -702,8 +712,8 @@ macro_rules! make_mir_visitor { ref $($mutability)* ty, name: _, ref $($mutability)* source_info, + ref $($mutability)* visibility_scope, internal: _, - ref $($mutability)* syntactic_scope, is_user_variable: _, } = *local_decl; @@ -712,11 +722,11 @@ macro_rules! make_mir_visitor { source_info: *source_info, }); self.visit_source_info(source_info); - self.visit_visibility_scope(syntactic_scope); + self.visit_source_scope(visibility_scope); } - fn super_visibility_scope(&mut self, - _scope: & $($mutability)* VisibilityScope) { + fn super_source_scope(&mut self, + _scope: & $($mutability)* SourceScope) { } fn super_branch(&mut self, @@ -735,18 +745,7 @@ macro_rules! 
make_mir_visitor { self.visit_span(span); self.visit_ty(ty, TyContext::Location(location)); - self.visit_literal(literal, location); - } - - fn super_literal(&mut self, - literal: & $($mutability)* Literal<'tcx>, - location: Location) { - match *literal { - Literal::Value { ref $($mutability)* value } => { - self.visit_const(value, location); - } - Literal::Promoted { index: _ } => {} - } + self.visit_const(literal, location); } fn super_def_id(&mut self, _def_id: & $($mutability)* DefId) { @@ -762,7 +761,7 @@ macro_rules! make_mir_visitor { } = *source_info; self.visit_span(span); - self.visit_visibility_scope(scope); + self.visit_source_scope(scope); } fn super_ty(&mut self, _ty: & $($mutability)* Ty<'tcx>) { @@ -777,20 +776,14 @@ macro_rules! make_mir_visitor { fn super_substs(&mut self, _substs: & $($mutability)* &'tcx Substs<'tcx>) { } - fn super_generator_interior(&mut self, - _interior: & $($mutability)* GeneratorInterior<'tcx>) { + fn super_generator_substs(&mut self, + _substs: & $($mutability)* GeneratorSubsts<'tcx>) { } fn super_closure_substs(&mut self, _substs: & $($mutability)* ClosureSubsts<'tcx>) { } - fn super_const_int(&mut self, _const_int: &ConstInt) { - } - - fn super_const_usize(&mut self, _const_usize: & $($mutability)* ConstUsize) { - } - // Convenience methods fn visit_location(&mut self, mir: & $($mutability)* Mir<'tcx>, location: Location) { @@ -852,6 +845,8 @@ pub enum TyContext { /// The return type of the function. ReturnTy(SourceInfo), + YieldTy(SourceInfo), + /// A type found at some location. Location(Location), } @@ -942,9 +937,10 @@ impl<'tcx> PlaceContext<'tcx> { pub fn is_mutating_use(&self) -> bool { match *self { PlaceContext::Store | PlaceContext::AsmOutput | PlaceContext::Call | - PlaceContext::Borrow { kind: BorrowKind::Mut, .. } | + PlaceContext::Borrow { kind: BorrowKind::Mut { .. }, .. 
} | PlaceContext::Projection(Mutability::Mut) | PlaceContext::Drop => true, + PlaceContext::Inspect | PlaceContext::Borrow { kind: BorrowKind::Shared, .. } | PlaceContext::Borrow { kind: BorrowKind::Unique, .. } | @@ -962,7 +958,8 @@ impl<'tcx> PlaceContext<'tcx> { PlaceContext::Borrow { kind: BorrowKind::Unique, .. } | PlaceContext::Projection(Mutability::Not) | PlaceContext::Copy | PlaceContext::Move => true, - PlaceContext::Borrow { kind: BorrowKind::Mut, .. } | PlaceContext::Store | + + PlaceContext::Borrow { kind: BorrowKind::Mut { .. }, .. } | PlaceContext::Store | PlaceContext::AsmOutput | PlaceContext::Call | PlaceContext::Projection(Mutability::Mut) | PlaceContext::Drop | PlaceContext::StorageLive | PlaceContext::StorageDead | diff --git a/src/librustc/session/code_stats.rs b/src/librustc/session/code_stats.rs index 64f405e0f24b..32865acb298f 100644 --- a/src/librustc/session/code_stats.rs +++ b/src/librustc/session/code_stats.rs @@ -8,11 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use ty::AdtKind; -use ty::layout::{Align, Size}; - +use rustc_target::abi::{Align, Size}; use rustc_data_structures::fx::{FxHashSet}; - use std::cmp::{self, Ordering}; #[derive(Clone, PartialEq, Eq, Hash, Debug)] @@ -38,16 +35,6 @@ pub struct FieldInfo { pub align: u64, } -impl From for DataTypeKind { - fn from(kind: AdtKind) -> Self { - match kind { - AdtKind::Struct => DataTypeKind::Struct, - AdtKind::Enum => DataTypeKind::Enum, - AdtKind::Union => DataTypeKind::Union, - } - } -} - #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum DataTypeKind { Struct, @@ -62,6 +49,7 @@ pub struct TypeSizeInfo { pub type_description: String, pub align: u64, pub overall_size: u64, + pub packed: bool, pub opt_discr_size: Option, pub variants: Vec, } @@ -79,6 +67,7 @@ impl CodeStats { type_desc: S, align: Align, overall_size: Size, + packed: bool, opt_discr_size: Option, variants: Vec) { let info = TypeSizeInfo { @@ -86,6 +75,7 @@ impl CodeStats { type_description: type_desc.to_string(), align: align.abi(), overall_size: overall_size.bytes(), + packed: packed, opt_discr_size: opt_discr_size.map(|s| s.bytes()), variants, }; @@ -132,8 +122,8 @@ impl CodeStats { let VariantInfo { ref name, kind: _, align: _, size, ref fields } = *variant_info; let indent = if !struct_like { let name = match name.as_ref() { - Some(name) => format!("{}", name), - None => format!("{}", i), + Some(name) => name.to_owned(), + None => i.to_string(), }; println!("print-type-size {}variant `{}`: {} bytes", indent, name, size - discr_size); @@ -153,24 +143,26 @@ impl CodeStats { for field in fields.iter() { let FieldInfo { ref name, offset, size, align } = *field; - // Include field alignment in output only if it caused padding injection - if min_offset != offset { - if offset > min_offset { - let pad = offset - min_offset; - println!("print-type-size {}padding: {} bytes", - indent, pad); - println!("print-type-size {}field `.{}`: {} bytes, \ - alignment: {} bytes", - indent, name, size, align); - } 
else { - println!("print-type-size {}field `.{}`: {} bytes, \ - offset: {} bytes, \ - alignment: {} bytes", - indent, name, size, offset, align); - } - } else { + if offset > min_offset { + let pad = offset - min_offset; + println!("print-type-size {}padding: {} bytes", + indent, pad); + } + + if offset < min_offset { + // if this happens something is very wrong + println!("print-type-size {}field `.{}`: {} bytes, \ + offset: {} bytes, \ + alignment: {} bytes", + indent, name, size, offset, align); + } else if info.packed || offset == min_offset { println!("print-type-size {}field `.{}`: {} bytes", indent, name, size); + } else { + // Include field alignment in output only if it caused padding injection + println!("print-type-size {}field `.{}`: {} bytes, \ + alignment: {} bytes", + indent, name, size, align); } min_offset = offset + size; diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 69b86416afab..3926ebedd371 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -11,23 +11,19 @@ //! Contains infrastructure for configuring the compiler, including parsing //! command line options. 
-pub use self::EntryFnType::*; -pub use self::CrateType::*; -pub use self::Passes::*; -pub use self::DebugInfoLevel::*; +use std::str::FromStr; use session::{early_error, early_warn, Session}; use session::search_paths::SearchPaths; -use ich::StableHashingContext; -use rustc_back::{LinkerFlavor, PanicStrategy, RelroLevel}; -use rustc_back::target::Target; -use rustc_data_structures::stable_hasher::ToStableHashKey; +use rustc_target::spec::{LinkerFlavor, PanicStrategy, RelroLevel}; +use rustc_target::spec::{Target, TargetTriple}; use lint; use middle::cstore; use syntax::ast::{self, IntTy, UintTy}; -use syntax::codemap::{FilePathMapping, FileName}; +use syntax::codemap::{FileName, FilePathMapping}; +use syntax::edition::{Edition, EDITION_NAME_LIST, DEFAULT_EDITION}; use syntax::parse::token; use syntax::parse; use syntax::symbol::Symbol; @@ -41,12 +37,12 @@ use std::collections::btree_map::Iter as BTreeMapIter; use std::collections::btree_map::Keys as BTreeMapKeysIter; use std::collections::btree_map::Values as BTreeMapValuesIter; -use std::fmt; +use std::{fmt, str}; use std::hash::Hasher; use std::collections::hash_map::DefaultHasher; use std::collections::HashSet; use std::iter::FromIterator; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; pub struct Config { pub target: Target, @@ -62,25 +58,61 @@ pub enum Sanitizer { Thread, } -#[derive(Clone, Copy, PartialEq, Hash)] +#[derive(Clone, Copy, Debug, PartialEq, Hash)] pub enum OptLevel { - No, // -O0 - Less, // -O1 - Default, // -O2 + No, // -O0 + Less, // -O1 + Default, // -O2 Aggressive, // -O3 - Size, // -Os - SizeMin, // -Oz + Size, // -Os + SizeMin, // -Oz } #[derive(Clone, Copy, PartialEq, Hash)] -pub enum DebugInfoLevel { - NoDebugInfo, - LimitedDebugInfo, - FullDebugInfo, +pub enum Lto { + /// Don't do any LTO whatsoever + No, + + /// Do a full crate graph LTO. The flavor is determined by the compiler + /// (currently the default is "fat"). 
+ Yes, + + /// Do a full crate graph LTO with ThinLTO + Thin, + + /// Do a local graph LTO with ThinLTO (only relevant for multiple codegen + /// units). + ThinLocal, + + /// Do a full crate graph LTO with "fat" LTO + Fat, } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, - RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Hash)] +pub enum CrossLangLto { + LinkerPlugin(PathBuf), + LinkerPluginAuto, + Disabled +} + +impl CrossLangLto { + pub fn enabled(&self) -> bool { + match *self { + CrossLangLto::LinkerPlugin(_) | + CrossLangLto::LinkerPluginAuto => true, + CrossLangLto::Disabled => false, + } + } +} + +#[derive(Clone, Copy, PartialEq, Hash)] +pub enum DebugInfo { + None, + Limited, + Full, +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, RustcEncodable, RustcDecodable)] pub enum OutputType { Bitcode, Assembly, @@ -92,36 +124,18 @@ pub enum OutputType { DepInfo, } -impl_stable_hash_for!(enum self::OutputType { - Bitcode, - Assembly, - LlvmAssembly, - Mir, - Metadata, - Object, - Exe, - DepInfo -}); - -impl<'tcx> ToStableHashKey> for OutputType { - type KeyType = OutputType; - #[inline] - fn to_stable_hash_key(&self, _: &StableHashingContext<'tcx>) -> Self::KeyType { - *self - } -} +impl_stable_hash_via_hash!(OutputType); impl OutputType { fn is_compatible_with_codegen_units_and_single_output_file(&self) -> bool { match *self { - OutputType::Exe | - OutputType::DepInfo => true, - OutputType::Bitcode | - OutputType::Assembly | - OutputType::LlvmAssembly | - OutputType::Mir | - OutputType::Object | - OutputType::Metadata => false, + OutputType::Exe | OutputType::DepInfo => true, + OutputType::Bitcode + | OutputType::Assembly + | OutputType::LlvmAssembly + | OutputType::Mir + | OutputType::Object + | OutputType::Metadata => false, } } @@ -140,14 +154,14 @@ impl OutputType { fn from_shorthand(shorthand: &str) -> Option { Some(match shorthand { - "asm" => OutputType::Assembly, - "llvm-ir" => 
OutputType::LlvmAssembly, - "mir" => OutputType::Mir, - "llvm-bc" => OutputType::Bitcode, - "obj" => OutputType::Object, - "metadata" => OutputType::Metadata, - "link" => OutputType::Exe, - "dep-info" => OutputType::DepInfo, + "asm" => OutputType::Assembly, + "llvm-ir" => OutputType::LlvmAssembly, + "mir" => OutputType::Mir, + "llvm-bc" => OutputType::Bitcode, + "obj" => OutputType::Object, + "metadata" => OutputType::Metadata, + "link" => OutputType::Exe, + "dep-info" => OutputType::DepInfo, _ => return None, }) } @@ -199,14 +213,13 @@ impl Default for ErrorOutputType { #[derive(Clone, Hash)] pub struct OutputTypes(BTreeMap>); -impl_stable_hash_for!(tuple_struct self::OutputTypes { - map -}); +impl_stable_hash_via_hash!(OutputTypes); impl OutputTypes { pub fn new(entries: &[(OutputType, Option)]) -> OutputTypes { - OutputTypes(BTreeMap::from_iter(entries.iter() - .map(|&(k, ref v)| (k, v.clone())))) + OutputTypes(BTreeMap::from_iter( + entries.iter().map(|&(k, ref v)| (k, v.clone())), + )) } pub fn get(&self, key: &OutputType) -> Option<&Option> { @@ -225,22 +238,24 @@ impl OutputTypes { self.0.values() } + pub fn len(&self) -> usize { + self.0.len() + } + // True if any of the output types require codegen or linking. - pub fn should_trans(&self) -> bool { + pub fn should_codegen(&self) -> bool { self.0.keys().any(|k| match *k { - OutputType::Bitcode | - OutputType::Assembly | - OutputType::LlvmAssembly | - OutputType::Mir | - OutputType::Object | - OutputType::Exe => true, - OutputType::Metadata | - OutputType::DepInfo => false, + OutputType::Bitcode + | OutputType::Assembly + | OutputType::LlvmAssembly + | OutputType::Mir + | OutputType::Object + | OutputType::Exe => true, + OutputType::Metadata | OutputType::DepInfo => false, }) } } - // Use tree-based collections to cheaply get a deterministic Hash implementation. // DO NOT switch BTreeMap or BTreeSet out for an unsorted container type! That // would break dependency tracking for commandline arguments. 
@@ -265,7 +280,7 @@ macro_rules! hash_option { ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, [UNTRACKED]) => ({}); ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, [TRACKED]) => ({ if $sub_hashes.insert(stringify!($opt_name), - $opt_expr as &dep_tracking::DepTrackingHash).is_some() { + $opt_expr as &dyn dep_tracking::DepTrackingHash).is_some() { bug!("Duplicate key in CLI DepTrackingHash: {}", stringify!($opt_name)) } }); @@ -339,19 +354,16 @@ top_level_options!( // Include the debug_assertions flag into dependency tracking, since it // can influence whether overflow checks are done or not. debug_assertions: bool [TRACKED], - debuginfo: DebugInfoLevel [TRACKED], + debuginfo: DebugInfo [TRACKED], lint_opts: Vec<(String, lint::Level)> [TRACKED], lint_cap: Option [TRACKED], describe_lints: bool [UNTRACKED], output_types: OutputTypes [TRACKED], - // FIXME(mw): We track this for now but it actually doesn't make too - // much sense: The search path can stay the same while the - // things discovered there might have changed on disk. - search_paths: SearchPaths [TRACKED], + search_paths: SearchPaths [UNTRACKED], libs: Vec<(String, Option, Option)> [TRACKED], maybe_sysroot: Option [TRACKED], - target_triple: String [TRACKED], + target_triple: TargetTriple [TRACKED], test: bool [TRACKED], error_format: ErrorOutputType [UNTRACKED], @@ -366,13 +378,10 @@ top_level_options!( // version of `debugging_opts.borrowck`, which is just a plain string. borrowck_mode: BorrowckMode [UNTRACKED], cg: CodegenOptions [TRACKED], - // FIXME(mw): We track this for now but it actually doesn't make too - // much sense: The value of this option can stay the same - // while the files they refer to might have changed on disk. - externs: Externs [TRACKED], + externs: Externs [UNTRACKED], crate_name: Option [TRACKED], // An optional name to use as the crate for std during std injection, - // written `extern crate std = "name"`. Default to "std". 
Used by + // written `extern crate name as std`. Defaults to `std`. Used by // out-of-tree drivers. alt_std_name: Option [TRACKED], // Indicates how the compiler should treat unstable features @@ -389,7 +398,12 @@ top_level_options!( // commands like `--emit llvm-ir` which they're often incompatible with // if we otherwise use the defaults of rustc. cli_forced_codegen_units: Option [UNTRACKED], - cli_forced_thinlto: Option [UNTRACKED], + cli_forced_thinlto_off: bool [UNTRACKED], + + // Remap source path prefixes in all output (messages, object files, debug, etc) + remap_path_prefix: Vec<(PathBuf, PathBuf)> [UNTRACKED], + + edition: Edition [TRACKED], } ); @@ -414,15 +428,28 @@ pub enum BorrowckMode { Ast, Mir, Compare, + Migrate, } impl BorrowckMode { + /// Should we run the MIR-based borrow check, but also fall back + /// on the AST borrow check if the MIR-based one errors. + pub fn migrate(self) -> bool { + match self { + BorrowckMode::Ast => false, + BorrowckMode::Compare => false, + BorrowckMode::Mir => false, + BorrowckMode::Migrate => true, + } + } + /// Should we emit the AST-based borrow checker errors? pub fn use_ast(self) -> bool { match self { BorrowckMode::Ast => true, BorrowckMode::Compare => true, BorrowckMode::Mir => false, + BorrowckMode::Migrate => false, } } /// Should we emit the MIR-based borrow checker errors? @@ -431,6 +458,7 @@ impl BorrowckMode { BorrowckMode::Ast => false, BorrowckMode::Compare => true, BorrowckMode::Mir => true, + BorrowckMode::Migrate => true, } } } @@ -449,14 +477,20 @@ pub enum Input { impl Input { pub fn filestem(&self) -> String { match *self { - Input::File(ref ifile) => ifile.file_stem().unwrap() - .to_str().unwrap().to_string(), + Input::File(ref ifile) => ifile.file_stem().unwrap().to_str().unwrap().to_string(), Input::Str { .. } => "rust_out".to_string(), } } + + pub fn get_input(&mut self) -> Option<&mut String> { + match *self { + Input::File(_) => None, + Input::Str { ref mut input, .. 
} => Some(input), + } + } } -#[derive(Clone)] +#[derive(Clone, Hash)] pub struct OutputFilenames { pub out_directory: PathBuf, pub out_filestem: String, @@ -465,19 +499,15 @@ pub struct OutputFilenames { pub outputs: OutputTypes, } -impl_stable_hash_for!(struct self::OutputFilenames { - out_directory, - out_filestem, - single_output_file, - extra, - outputs -}); +impl_stable_hash_via_hash!(OutputFilenames); pub const RUST_CGU_EXT: &str = "rcgu"; impl OutputFilenames { pub fn path(&self, flavor: OutputType) -> PathBuf { - self.outputs.get(&flavor).and_then(|p| p.to_owned()) + self.outputs + .get(&flavor) + .and_then(|p| p.to_owned()) .or_else(|| self.single_output_file.clone()) .unwrap_or_else(|| self.temp_path(flavor, None)) } @@ -485,20 +515,14 @@ impl OutputFilenames { /// Get the path where a compilation artifact of the given type for the /// given codegen unit should be placed on disk. If codegen_unit_name is /// None, a path distinct from those of any codegen unit will be generated. - pub fn temp_path(&self, - flavor: OutputType, - codegen_unit_name: Option<&str>) - -> PathBuf { + pub fn temp_path(&self, flavor: OutputType, codegen_unit_name: Option<&str>) -> PathBuf { let extension = flavor.extension(); self.temp_path_ext(extension, codegen_unit_name) } /// Like temp_path, but also supports things where there is no corresponding /// OutputType, like no-opt-bitcode or lto-bitcode. 
- pub fn temp_path_ext(&self, - ext: &str, - codegen_unit_name: Option<&str>) - -> PathBuf { + pub fn temp_path_ext(&self, ext: &str, codegen_unit_name: Option<&str>) -> PathBuf { let base = self.out_directory.join(&self.filestem()); let mut extension = String::new(); @@ -522,31 +546,14 @@ impl OutputFilenames { } pub fn with_extension(&self, extension: &str) -> PathBuf { - self.out_directory.join(&self.filestem()).with_extension(extension) + self.out_directory + .join(&self.filestem()) + .with_extension(extension) } pub fn filestem(&self) -> String { format!("{}{}", self.out_filestem, self.extra) } - - pub fn contains_path(&self, input_path: &PathBuf) -> bool { - let input_path = input_path.canonicalize().ok(); - if input_path.is_none() { - return false - } - match self.single_output_file { - Some(ref output_path) => output_path.canonicalize().ok() == input_path, - None => { - for k in self.outputs.keys() { - let output_path = self.path(k.to_owned()); - if output_path.canonicalize().ok() == input_path { - return true; - } - } - false - } - } - } } pub fn host_triple() -> &'static str { @@ -558,62 +565,59 @@ pub fn host_triple() -> &'static str { // Instead of grabbing the host triple (for the current host), we grab (at // compile time) the target triple that this rustc is built with and // calling that (at runtime) the host triple. - (option_env!("CFG_COMPILER_HOST_TRIPLE")). 
- expect("CFG_COMPILER_HOST_TRIPLE") + (option_env!("CFG_COMPILER_HOST_TRIPLE")).expect("CFG_COMPILER_HOST_TRIPLE") } -/// Some reasonable defaults -pub fn basic_options() -> Options { - Options { - crate_types: Vec::new(), - optimize: OptLevel::No, - debuginfo: NoDebugInfo, - lint_opts: Vec::new(), - lint_cap: None, - describe_lints: false, - output_types: OutputTypes(BTreeMap::new()), - search_paths: SearchPaths::new(), - maybe_sysroot: None, - target_triple: host_triple().to_string(), - test: false, - incremental: None, - debugging_opts: basic_debugging_options(), - prints: Vec::new(), - borrowck_mode: BorrowckMode::Ast, - cg: basic_codegen_options(), - error_format: ErrorOutputType::default(), - externs: Externs(BTreeMap::new()), - crate_name: None, - alt_std_name: None, - libs: Vec::new(), - unstable_features: UnstableFeatures::Disallow, - debug_assertions: true, - actually_rustdoc: false, - cli_forced_codegen_units: None, - cli_forced_thinlto: None, +impl Default for Options { + fn default() -> Options { + Options { + crate_types: Vec::new(), + optimize: OptLevel::No, + debuginfo: DebugInfo::None, + lint_opts: Vec::new(), + lint_cap: None, + describe_lints: false, + output_types: OutputTypes(BTreeMap::new()), + search_paths: SearchPaths::new(), + maybe_sysroot: None, + target_triple: TargetTriple::from_triple(host_triple()), + test: false, + incremental: None, + debugging_opts: basic_debugging_options(), + prints: Vec::new(), + borrowck_mode: BorrowckMode::Ast, + cg: basic_codegen_options(), + error_format: ErrorOutputType::default(), + externs: Externs(BTreeMap::new()), + crate_name: None, + alt_std_name: None, + libs: Vec::new(), + unstable_features: UnstableFeatures::Disallow, + debug_assertions: true, + actually_rustdoc: false, + cli_forced_codegen_units: None, + cli_forced_thinlto_off: false, + remap_path_prefix: Vec::new(), + edition: DEFAULT_EDITION, + } } } impl Options { /// True if there is a reason to build the dep graph. 
pub fn build_dep_graph(&self) -> bool { - self.incremental.is_some() || - self.debugging_opts.dump_dep_graph || - self.debugging_opts.query_dep_graph + self.incremental.is_some() || self.debugging_opts.dump_dep_graph + || self.debugging_opts.query_dep_graph } #[inline(always)] pub fn enable_dep_node_debug_strs(&self) -> bool { - cfg!(debug_assertions) && - (self.debugging_opts.query_dep_graph || self.debugging_opts.incremental_info) + cfg!(debug_assertions) + && (self.debugging_opts.query_dep_graph || self.debugging_opts.incremental_info) } pub fn file_path_mapping(&self) -> FilePathMapping { - FilePathMapping::new( - self.debugging_opts.remap_path_prefix_from.iter().zip( - self.debugging_opts.remap_path_prefix_to.iter() - ).map(|(src, dst)| (src.clone(), dst.clone())).collect() - ) + FilePathMapping::new(self.remap_path_prefix.clone()) } /// True if there will be an output file generated @@ -621,40 +625,56 @@ impl Options { !self.debugging_opts.parse_only && // The file is just being parsed !self.debugging_opts.ls // The file is just being queried } + + #[inline] + pub fn share_generics(&self) -> bool { + match self.debugging_opts.share_generics { + Some(setting) => setting, + None => { + self.incremental.is_some() || + match self.optimize { + OptLevel::No | + OptLevel::Less | + OptLevel::Size | + OptLevel::SizeMin => true, + OptLevel::Default | + OptLevel::Aggressive => false, + } + } + } + } } // The type of entry function, so // users can have their own entry -// functions that don't start a -// scheduler +// functions #[derive(Copy, Clone, PartialEq)] pub enum EntryFnType { - EntryMain, - EntryStart, - EntryNone, + Main, + Start, } #[derive(Copy, PartialEq, PartialOrd, Clone, Ord, Eq, Hash, Debug)] pub enum CrateType { - CrateTypeExecutable, - CrateTypeDylib, - CrateTypeRlib, - CrateTypeStaticlib, - CrateTypeCdylib, - CrateTypeProcMacro, + Executable, + Dylib, + Rlib, + Staticlib, + Cdylib, + ProcMacro, } #[derive(Clone, Hash)] pub enum Passes { - 
SomePasses(Vec), - AllPasses, + Some(Vec), + All, } impl Passes { pub fn is_empty(&self) -> bool { match *self { - SomePasses(ref v) => v.is_empty(), - AllPasses => false, + Passes::Some(ref v) => v.is_empty(), + Passes::All => false, } } } @@ -775,15 +795,22 @@ macro_rules! options { pub const parse_sanitizer: Option<&'static str> = Some("one of: `address`, `leak`, `memory` or `thread`"); pub const parse_linker_flavor: Option<&'static str> = - Some(::rustc_back::LinkerFlavor::one_of()); + Some(::rustc_target::spec::LinkerFlavor::one_of()); pub const parse_optimization_fuel: Option<&'static str> = Some("crate=integer"); + pub const parse_unpretty: Option<&'static str> = + Some("`string` or `string=string`"); + pub const parse_lto: Option<&'static str> = + Some("one of `thin`, `fat`, or omitted"); + pub const parse_cross_lang_lto: Option<&'static str> = + Some("either a boolean (`yes`, `no`, `on`, `off`, etc), \ + or the path to the linker plugin"); } #[allow(dead_code)] mod $mod_set { - use super::{$struct_name, Passes, SomePasses, AllPasses, Sanitizer}; - use rustc_back::{LinkerFlavor, PanicStrategy, RelroLevel}; + use super::{$struct_name, Passes, Sanitizer, Lto, CrossLangLto}; + use rustc_target::spec::{LinkerFlavor, PanicStrategy, RelroLevel}; use std::path::PathBuf; $( @@ -857,9 +884,7 @@ macro_rules! options { -> bool { match v { Some(s) => { - for s in s.split_whitespace() { - slot.push(s.to_string()); - } + slot.extend(s.split_whitespace().map(|s| s.to_string())); true }, None => false, @@ -895,13 +920,13 @@ macro_rules! options { fn parse_passes(slot: &mut Passes, v: Option<&str>) -> bool { match v { Some("all") => { - *slot = AllPasses; + *slot = Passes::All; true } v => { let mut passes = vec![]; if parse_list(&mut passes, v) { - *slot = SomePasses(passes); + *slot = Passes::Some(passes); true } else { false @@ -965,6 +990,47 @@ macro_rules! 
options { } } } + + fn parse_unpretty(slot: &mut Option, v: Option<&str>) -> bool { + match v { + None => false, + Some(s) if s.split('=').count() <= 2 => { + *slot = Some(s.to_string()); + true + } + _ => false, + } + } + + fn parse_lto(slot: &mut Lto, v: Option<&str>) -> bool { + *slot = match v { + None => Lto::Yes, + Some("thin") => Lto::Thin, + Some("fat") => Lto::Fat, + Some(_) => return false, + }; + true + } + + fn parse_cross_lang_lto(slot: &mut CrossLangLto, v: Option<&str>) -> bool { + if v.is_some() { + let mut bool_arg = None; + if parse_opt_bool(&mut bool_arg, v) { + *slot = if bool_arg.unwrap() { + CrossLangLto::LinkerPluginAuto + } else { + CrossLangLto::Disabled + }; + return true + } + } + + *slot = match v { + None => CrossLangLto::LinkerPluginAuto, + Some(path) => CrossLangLto::LinkerPlugin(PathBuf::from(path)), + }; + true + } } ) } @@ -981,7 +1047,7 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, "extra arguments to append to the linker invocation (space separated)"), link_dead_code: bool = (false, parse_bool, [UNTRACKED], "don't let linker strip dead code (turning it on can be used for code coverage)"), - lto: bool = (false, parse_bool, [TRACKED], + lto: Lto = (Lto::No, parse_lto, [TRACKED], "perform LLVM link-time optimizations"), target_cpu: Option = (None, parse_opt_string, [TRACKED], "select target processor (rustc --print target-cpus for details)"), @@ -1023,7 +1089,7 @@ options! 
{CodegenOptions, CodegenSetter, basic_codegen_options, "extra data to put in each output filename"), codegen_units: Option = (None, parse_opt_uint, [UNTRACKED], "divide crate into N units to optimize in parallel"), - remark: Passes = (SomePasses(Vec::new()), parse_passes, [UNTRACKED], + remark: Passes = (Passes::Some(Vec::new()), parse_passes, [UNTRACKED], "print remarks for these optimization passes (space separated, or \"all\")"), no_stack_check: bool = (false, parse_bool, [UNTRACKED], "the --no-stack-check flag is deprecated and does nothing"), @@ -1032,6 +1098,8 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, 2 = full debug info with variable and type information"), opt_level: Option = (None, parse_opt_string, [TRACKED], "optimize with possible levels 0-3, s, or z"), + force_frame_pointers: Option = (None, parse_opt_bool, [TRACKED], + "force use of the frame pointers"), debug_assertions: Option = (None, parse_opt_bool, [TRACKED], "explicitly enable the cfg(debug_assertions) directive"), inline_threshold: Option = (None, parse_opt_uint, [TRACKED], @@ -1045,6 +1113,8 @@ options! {CodegenOptions, CodegenSetter, basic_codegen_options, options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, build_debugging_options, "Z", "debugging", DB_OPTIONS, db_type_desc, dbsetters, + codegen_backend: Option = (None, parse_opt_string, [TRACKED], + "the backend to use"), verbose: bool = (false, parse_bool, [UNTRACKED], "in general, enable more debug printouts"), span_free_formats: bool = (false, parse_bool, [UNTRACKED], @@ -1054,9 +1124,11 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, emit_end_regions: bool = (false, parse_bool, [UNTRACKED], "emit EndRegion as part of MIR; enable transforms that solely process EndRegion"), borrowck: Option = (None, parse_opt_string, [UNTRACKED], - "select which borrowck is used (`ast`, `mir`, or `compare`)"), + "select which borrowck is used (`ast`, `mir`, `migrate`, or `compare`)"), two_phase_borrows: bool = (false, parse_bool, [UNTRACKED], "use two-phase reserved/active distinction for `&mut` borrows in MIR borrowck"), + two_phase_beyond_autoref: bool = (false, parse_bool, [UNTRACKED], + "when using two-phase-borrows, allow two phases even for non-autoref `&mut` borrows"), time_passes: bool = (false, parse_bool, [UNTRACKED], "measure time of each rustc pass"), count_llvm_insns: bool = (false, parse_bool, @@ -1066,18 +1138,18 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "count where LLVM instrs originate"), time_llvm_passes: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, "The output of `-Z time-llvm-passes` will only reflect timings of \ - re-translated modules when used with incremental compilation" )], + re-codegened modules when used with incremental compilation" )], "measure time of each LLVM pass"), input_stats: bool = (false, parse_bool, [UNTRACKED], "gather statistics about the input"), - trans_stats: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, - "The output of `-Z trans-stats` might not be accurate when incremental \ + codegen_stats: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, + "The output of `-Z codegen-stats` might not be accurate when incremental \ compilation is enabled")], - "gather trans statistics"), + "gather codegen statistics"), asm_comments: bool = (false, parse_bool, [TRACKED], "generate comments into the assembly (may change behavior)"), - no_verify: bool = (false, parse_bool, [TRACKED], - "skip LLVM verification"), + verify_llvm_ir: bool = (false, parse_bool, 
[TRACKED], + "verify LLVM IR"), borrowck_stats: bool = (false, parse_bool, [UNTRACKED], "gather borrowck statistics"), no_landing_pads: bool = (false, parse_bool, [TRACKED], @@ -1102,24 +1174,28 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "write syntax and type analysis (in JSON format) information, in \ addition to normal output"), flowgraph_print_loans: bool = (false, parse_bool, [UNTRACKED], - "include loan analysis data in --unpretty flowgraph output"), + "include loan analysis data in -Z unpretty flowgraph output"), flowgraph_print_moves: bool = (false, parse_bool, [UNTRACKED], - "include move analysis data in --unpretty flowgraph output"), + "include move analysis data in -Z unpretty flowgraph output"), flowgraph_print_assigns: bool = (false, parse_bool, [UNTRACKED], - "include assignment analysis data in --unpretty flowgraph output"), + "include assignment analysis data in -Z unpretty flowgraph output"), flowgraph_print_all: bool = (false, parse_bool, [UNTRACKED], - "include all dataflow analysis data in --unpretty flowgraph output"), + "include all dataflow analysis data in -Z unpretty flowgraph output"), print_region_graph: bool = (false, parse_bool, [UNTRACKED], "prints region inference graph. 
\ Use with RUST_REGION_GRAPH=help for more info"), parse_only: bool = (false, parse_bool, [UNTRACKED], "parse only; do not compile, assemble, or link"), - no_trans: bool = (false, parse_bool, [TRACKED], - "run all passes except translation; no output"), + no_codegen: bool = (false, parse_bool, [TRACKED], + "run all passes except codegen; no output"), treat_err_as_bug: bool = (false, parse_bool, [TRACKED], "treat all errors that occur as bugs"), + report_delayed_bugs: bool = (false, parse_bool, [TRACKED], + "immediately print bugs registered with `delay_span_bug`"), external_macro_backtrace: bool = (false, parse_bool, [UNTRACKED], "show macro backtraces even for non-local macros"), + teach: bool = (false, parse_bool, [TRACKED], + "show extended diagnostic help"), continue_parse_after_error: bool = (false, parse_bool, [TRACKED], "attempt to recover from parse errors (experimental)"), incremental: Option = (None, parse_opt_string, [UNTRACKED], @@ -1154,8 +1230,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "for every macro invocation, print its name and arguments"), debug_macros: bool = (false, parse_bool, [TRACKED], "emit line numbers debug info inside macros"), - enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED], - "force nonzeroing move optimization on"), keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED], "don't clear the hygiene data after analysis"), keep_ast: bool = (false, parse_bool, [UNTRACKED], @@ -1164,14 +1238,16 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "show spans for compiler debugging (expr|pat|ty)"), print_type_sizes: bool = (false, parse_bool, [UNTRACKED], "print layout information for each type encountered"), - print_trans_items: Option = (None, parse_opt_string, [UNTRACKED], - "print the result of the translation item collection pass"), + print_mono_items: Option = (None, parse_opt_string, [UNTRACKED], + "print the result of the monomorphization collection pass"), mir_opt_level: usize = (1, parse_uint, [TRACKED], "set the MIR optimization level (0-3, default: 1)"), - mutable_noalias: bool = (false, parse_bool, [UNTRACKED], - "emit noalias metadata for mutable references"), + mutable_noalias: Option = (None, parse_opt_bool, [TRACKED], + "emit noalias metadata for mutable references (default: yes on LLVM >= 6)"), + arg_align_attributes: bool = (false, parse_bool, [TRACKED], + "emit align metadata for reference arguments"), dump_mir: Option = (None, parse_opt_string, [UNTRACKED], - "dump MIR state at various points in translation"), + "dump MIR state at various points in transforms"), dump_mir_dir: String = (String::from("mir_dump"), parse_string, [UNTRACKED], "the directory the MIR is dumped into"), dump_mir_graphviz: bool = (false, parse_bool, [UNTRACKED], @@ -1189,8 +1265,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "print some statistics about MIR"), always_encode_mir: bool = (false, parse_bool, [TRACKED], "encode MIR of all functions into the crate metadata"), - miri: bool = (false, parse_bool, [TRACKED], - "check the miri const evaluator against the old ctfe"), osx_rpath_install_name: bool = (false, parse_bool, [TRACKED], "pass `-install_name @rpath/...` to the macOS linker"), sanitizer: Option = (None, parse_sanitizer, [TRACKED], @@ -1201,10 +1275,6 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, "set the optimization fuel quota for a crate"), print_fuel: Option = (None, parse_opt_string, [TRACKED], "make Rustc print the total optimization fuel used by a crate"), - remap_path_prefix_from: Vec = (vec![], parse_pathbuf_push, [TRACKED], - "add a source pattern to the file path remapping config"), - remap_path_prefix_to: Vec = (vec![], parse_pathbuf_push, [TRACKED], - "add a mapping target to the file path remapping config"), force_unstable_if_unmarked: bool = (false, parse_bool, [TRACKED], "force all crates to be `rustc_private` unstable"), pre_link_arg: Vec = (vec![], parse_string_push, [UNTRACKED], @@ -1213,14 +1283,30 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "extra arguments to prepend to the linker invocation (space separated)"), profile: bool = (false, parse_bool, [TRACKED], "insert profiling code"), + pgo_gen: Option = (None, parse_opt_string, [TRACKED], + "Generate PGO profile data, to a given file, or to the default \ + location if it's empty."), + pgo_use: String = (String::new(), parse_string, [TRACKED], + "Use PGO profile data from the given profile file."), + disable_instrumentation_preinliner: bool = + (false, parse_bool, [TRACKED], "Disable the instrumentation pre-inliner, \ + useful for profiling / PGO."), relro_level: Option = (None, parse_relro_level, [TRACKED], "choose which RELRO level to use"), - nll: bool = (false, parse_bool, [UNTRACKED], - "run the non-lexical lifetimes MIR pass"), - nll_dump_cause: bool = (false, parse_bool, [UNTRACKED], - "dump cause information when reporting errors from NLL"), - trans_time_graph: bool = (false, parse_bool, [UNTRACKED], - "generate a graphical HTML report of time spent in trans and LLVM"), + disable_ast_check_for_mutation_in_guard: bool = (false, parse_bool, [UNTRACKED], + "skip AST-based mutation-in-guard check (mir-borrowck provides more precise check)"), + nll_subminimal_causes: bool = (false, parse_bool, 
[UNTRACKED], + "when tracking region error causes, accept subminimal results for faster execution."), + nll_facts: bool = (false, parse_bool, [UNTRACKED], + "dump facts from NLL analysis into side files"), + disable_nll_user_type_assert: bool = (false, parse_bool, [UNTRACKED], + "disable user provided type assertion in NLL"), + nll_dont_emit_read_for_match: bool = (false, parse_bool, [UNTRACKED], + "in match codegen, do not include ReadForMatch statements (used by mir-borrowck)"), + polonius: bool = (false, parse_bool, [UNTRACKED], + "enable polonius-based borrow-checker"), + codegen_time_graph: bool = (false, parse_bool, [UNTRACKED], + "generate a graphical HTML report of time spent in codegen and LLVM"), thinlto: Option = (None, parse_opt_bool, [TRACKED], "enable ThinLTO when possible"), inline_in_all_cgus: Option = (None, parse_opt_bool, [TRACKED], @@ -1232,17 +1318,48 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, the max/min integer respectively, and NaN is mapped to 0"), lower_128bit_ops: Option = (None, parse_opt_bool, [TRACKED], "rewrite operators on i128 and u128 into lang item calls (typically provided \ - by compiler-builtins) so translation doesn't need to support them, + by compiler-builtins) so codegen doesn't need to support them, overriding the default for the current target"), human_readable_cgu_names: bool = (false, parse_bool, [TRACKED], "generate human-readable, predictable names for codegen units"), dep_info_omit_d_target: bool = (false, parse_bool, [TRACKED], "in dep-info output, omit targets for tracking dependencies of the dep-info files \ themselves"), + unpretty: Option = (None, parse_unpretty, [UNTRACKED], + "Present the input source, unstable (and less-pretty) variants; + valid types are any of the types for `--pretty`, as well as: + `flowgraph=` (graphviz formatted flowgraph for node), + `everybody_loops` (all function bodies replaced with `loop {}`), + `hir` (the HIR), `hir,identified`, or + `hir,typed` (HIR 
with types for each node)."), + run_dsymutil: Option = (None, parse_opt_bool, [TRACKED], + "run `dsymutil` and delete intermediate object files"), + ui_testing: bool = (false, parse_bool, [UNTRACKED], + "format compiler diagnostics in a way that's better suitable for UI testing"), + embed_bitcode: bool = (false, parse_bool, [TRACKED], + "embed LLVM bitcode in object files"), + strip_debuginfo_if_disabled: Option = (None, parse_opt_bool, [TRACKED], + "tell the linker to strip debuginfo when building without debuginfo enabled."), + share_generics: Option = (None, parse_opt_bool, [TRACKED], + "make the current crate share its generic instantiations"), + chalk: bool = (false, parse_bool, [TRACKED], + "enable the experimental Chalk-based trait solving engine"), + cross_lang_lto: CrossLangLto = (CrossLangLto::Disabled, parse_cross_lang_lto, [TRACKED], + "generate build artifacts that are compatible with linker-based LTO."), + no_parallel_llvm: bool = (false, parse_bool, [UNTRACKED], + "don't run LLVM in parallel (while keeping codegen-units and ThinLTO)"), + no_leak_check: bool = (false, parse_bool, [UNTRACKED], + "disables the 'leak check' for subtyping; unsound, but useful for tests"), + crate_attr: Vec = (Vec::new(), parse_string_push, [TRACKED], + "inject the given attribute in the crate"), + self_profile: bool = (false, parse_bool, [UNTRACKED], + "run the self profiler"), + profile_json: bool = (false, parse_bool, [UNTRACKED], + "output a json file with profiler results"), } pub fn default_lib_output() -> CrateType { - CrateTypeRlib + CrateType::Rlib } pub fn default_configuration(sess: &Session) -> ast::CrateConfig { @@ -1254,6 +1371,7 @@ pub fn default_configuration(sess: &Session) -> ast::CrateConfig { let vendor = &sess.target.target.target_vendor; let min_atomic_width = sess.target.target.min_atomic_width(); let max_atomic_width = sess.target.target.max_atomic_width(); + let atomic_cas = sess.target.target.options.atomic_cas; let mut ret = HashSet::new(); // 
Target bindings. @@ -1266,33 +1384,46 @@ pub fn default_configuration(sess: &Session) -> ast::CrateConfig { } ret.insert((Symbol::intern("target_arch"), Some(Symbol::intern(arch)))); ret.insert((Symbol::intern("target_endian"), Some(Symbol::intern(end)))); - ret.insert((Symbol::intern("target_pointer_width"), Some(Symbol::intern(wordsz)))); + ret.insert(( + Symbol::intern("target_pointer_width"), + Some(Symbol::intern(wordsz)), + )); ret.insert((Symbol::intern("target_env"), Some(Symbol::intern(env)))); - ret.insert((Symbol::intern("target_vendor"), Some(Symbol::intern(vendor)))); + ret.insert(( + Symbol::intern("target_vendor"), + Some(Symbol::intern(vendor)), + )); if sess.target.target.options.has_elf_tls { ret.insert((Symbol::intern("target_thread_local"), None)); } for &i in &[8, 16, 32, 64, 128] { if i >= min_atomic_width && i <= max_atomic_width { let s = i.to_string(); - ret.insert((Symbol::intern("target_has_atomic"), Some(Symbol::intern(&s)))); + ret.insert(( + Symbol::intern("target_has_atomic"), + Some(Symbol::intern(&s)), + )); if &s == wordsz { - ret.insert((Symbol::intern("target_has_atomic"), Some(Symbol::intern("ptr")))); + ret.insert(( + Symbol::intern("target_has_atomic"), + Some(Symbol::intern("ptr")), + )); } } } + if atomic_cas { + ret.insert((Symbol::intern("target_has_atomic"), Some(Symbol::intern("cas")))); + } if sess.opts.debug_assertions { ret.insert((Symbol::intern("debug_assertions"), None)); } - if sess.opts.crate_types.contains(&CrateTypeProcMacro) { + if sess.opts.crate_types.contains(&CrateType::ProcMacro) { ret.insert((Symbol::intern("proc_macro"), None)); } return ret; } -pub fn build_configuration(sess: &Session, - mut user_cfg: ast::CrateConfig) - -> ast::CrateConfig { +pub fn build_configuration(sess: &Session, mut user_cfg: ast::CrateConfig) -> ast::CrateConfig { // Combine the configuration requested by the session (command line) with // some default and generated configuration items let default_cfg = 
default_configuration(sess); @@ -1311,7 +1442,7 @@ pub fn build_target_config(opts: &Options, sp: &Handler) -> Config { sp.struct_fatal(&format!("Error loading target specification: {}", e)) .help("Use `--print target-list` for a list of built-in targets") .emit(); - panic!(FatalError); + FatalError.raise(); } }; @@ -1319,8 +1450,11 @@ pub fn build_target_config(opts: &Options, sp: &Handler) -> Config { "16" => (ast::IntTy::I16, ast::UintTy::U16), "32" => (ast::IntTy::I32, ast::UintTy::U32), "64" => (ast::IntTy::I64, ast::UintTy::U64), - w => panic!(sp.fatal(&format!("target specification was invalid: \ - unrecognized target-pointer-width {}", w))), + w => sp.fatal(&format!( + "target specification was invalid: \ + unrecognized target-pointer-width {}", + w + )).raise(), }; Config { @@ -1338,7 +1472,7 @@ pub enum OptionStability { } pub struct RustcOptGroup { - pub apply: Box &mut getopts::Options>, + pub apply: Box &mut getopts::Options>, pub name: &'static str, pub stability: OptionStability, } @@ -1349,7 +1483,8 @@ impl RustcOptGroup { } pub fn stable(name: &'static str, f: F) -> RustcOptGroup - where F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static, + where + F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static, { RustcOptGroup { name, @@ -1359,7 +1494,8 @@ impl RustcOptGroup { } pub fn unstable(name: &'static str, f: F) -> RustcOptGroup - where F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static, + where + F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static, { RustcOptGroup { name, @@ -1387,13 +1523,15 @@ mod opt { pub type S = &'static str; fn stable(name: S, f: F) -> R - where F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static + where + F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static, { RustcOptGroup::stable(name, f) } fn unstable(name: S, f: F) -> R - where F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static + where + F: Fn(&mut getopts::Options) -> &mut 
getopts::Options + 'static, { RustcOptGroup::unstable(name, f) } @@ -1446,42 +1584,88 @@ pub fn rustc_short_optgroups() -> Vec { vec![ opt::flag_s("h", "help", "Display this message"), opt::multi_s("", "cfg", "Configure the compilation environment", "SPEC"), - opt::multi_s("L", "", "Add a directory to the library search path. The + opt::multi_s( + "L", + "", + "Add a directory to the library search path. The optional KIND can be one of dependency, crate, native, - framework or all (the default).", "[KIND=]PATH"), - opt::multi_s("l", "", "Link the generated crate(s) to the specified native + framework or all (the default).", + "[KIND=]PATH", + ), + opt::multi_s( + "l", + "", + "Link the generated crate(s) to the specified native library NAME. The optional KIND can be one of static, dylib, or framework. If omitted, dylib is - assumed.", "[KIND=]NAME"), - opt::multi_s("", "crate-type", "Comma separated list of types of crates + assumed.", + "[KIND=]NAME", + ), + opt::multi_s( + "", + "crate-type", + "Comma separated list of types of crates for the compiler to emit", - "[bin|lib|rlib|dylib|cdylib|staticlib|proc-macro]"), - opt::opt_s("", "crate-name", "Specify the name of the crate being built", - "NAME"), - opt::multi_s("", "emit", "Comma separated list of types of output for \ - the compiler to emit", - "[asm|llvm-bc|llvm-ir|obj|metadata|link|dep-info|mir]"), - opt::multi_s("", "print", "Comma separated list of compiler information to \ - print on stdout", - "[crate-name|file-names|sysroot|cfg|target-list|\ - target-cpus|target-features|relocation-models|\ - code-models|tls-models|target-spec-json|native-static-libs]"), - opt::flagmulti_s("g", "", "Equivalent to -C debuginfo=2"), + "[bin|lib|rlib|dylib|cdylib|staticlib|proc-macro]", + ), + opt::opt_s( + "", + "crate-name", + "Specify the name of the crate being built", + "NAME", + ), + opt::multi_s( + "", + "emit", + "Comma separated list of types of output for \ + the compiler to emit", + 
"[asm|llvm-bc|llvm-ir|obj|metadata|link|dep-info|mir]", + ), + opt::multi_s( + "", + "print", + "Comma separated list of compiler information to \ + print on stdout", + "[crate-name|file-names|sysroot|cfg|target-list|\ + target-cpus|target-features|relocation-models|\ + code-models|tls-models|target-spec-json|native-static-libs]", + ), + opt::flagmulti_s("g", "", "Equivalent to -C debuginfo=2"), opt::flagmulti_s("O", "", "Equivalent to -C opt-level=2"), opt::opt_s("o", "", "Write output to ", "FILENAME"), - opt::opt_s("", "out-dir", "Write output to compiler-chosen filename \ - in

", "DIR"), - opt::opt_s("", "explain", "Provide a detailed explanation of an error \ - message", "OPT"), + opt::opt_s( + "", + "out-dir", + "Write output to compiler-chosen filename \ + in ", + "DIR", + ), + opt::opt_s( + "", + "explain", + "Provide a detailed explanation of an error \ + message", + "OPT", + ), opt::flag_s("", "test", "Build a test harness"), - opt::opt_s("", "target", "Target triple for which the code is compiled", "TARGET"), + opt::opt_s( + "", + "target", + "Target triple for which the code is compiled", + "TARGET", + ), opt::multi_s("W", "warn", "Set lint warnings", "OPT"), opt::multi_s("A", "allow", "Set lint allowed", "OPT"), opt::multi_s("D", "deny", "Set lint denied", "OPT"), opt::multi_s("F", "forbid", "Set lint forbidden", "OPT"), - opt::multi_s("", "cap-lints", "Set the most restrictive lint level. \ - More restrictive lints are capped at this \ - level", "LEVEL"), + opt::multi_s( + "", + "cap-lints", + "Set the most restrictive lint level. \ + More restrictive lints are capped at this \ + level", + "LEVEL", + ), opt::multi_s("C", "codegen", "Set a codegen option", "OPT[=VALUE]"), opt::flag_s("V", "version", "Print version info and exit"), opt::flag_s("v", "verbose", "Use verbose output"), @@ -1494,108 +1678,86 @@ pub fn rustc_short_optgroups() -> Vec { pub fn rustc_optgroups() -> Vec { let mut opts = rustc_short_optgroups(); opts.extend(vec![ - opt::multi_s("", "extern", "Specify where an external rust library is located", - "NAME=PATH"), + opt::multi_s( + "", + "extern", + "Specify where an external rust library is located", + "NAME=PATH", + ), opt::opt_s("", "sysroot", "Override the system root", "PATH"), opt::multi("Z", "", "Set internal debugging options", "FLAG"), - opt::opt_s("", "error-format", - "How errors and other messages are produced", - "human|json|short"), - opt::opt_s("", "color", "Configure coloring of output: + opt::opt_s( + "", + "error-format", + "How errors and other messages are produced", + "human|json|short", + 
), + opt::opt_s( + "", + "color", + "Configure coloring of output: auto = colorize, if output goes to a tty (default); always = always colorize output; - never = never colorize output", "auto|always|never"), - - opt::opt("", "pretty", - "Pretty-print the input instead of compiling; + never = never colorize output", + "auto|always|never", + ), + opt::opt( + "", + "pretty", + "Pretty-print the input instead of compiling; valid types are: `normal` (un-annotated source), `expanded` (crates expanded), or `expanded,identified` (fully parenthesized, AST nodes with IDs).", - "TYPE"), - opt::opt("", "unpretty", - "Present the input source, unstable (and less-pretty) variants; - valid types are any of the types for `--pretty`, as well as: - `flowgraph=` (graphviz formatted flowgraph for node), - `everybody_loops` (all function bodies replaced with `loop {}`), - `hir` (the HIR), `hir,identified`, or - `hir,typed` (HIR with types for each node).", - "TYPE"), + "TYPE", + ), + opt::opt_s( + "", + "edition", + "Specify which edition of the compiler to use when compiling code.", + EDITION_NAME_LIST, + ), + opt::multi_s( + "", + "remap-path-prefix", + "Remap source names in all output (compiler messages and output files)", + "FROM=TO", + ), ]); opts } // Convert strings provided as --cfg [cfgspec] into a crate_cfg -pub fn parse_cfgspecs(cfgspecs: Vec ) -> ast::CrateConfig { - cfgspecs.into_iter().map(|s| { - let sess = parse::ParseSess::new(FilePathMapping::empty()); - let mut parser = - parse::new_parser_from_source_str(&sess, FileName::CfgSpec, s.to_string()); +pub fn parse_cfgspecs(cfgspecs: Vec) -> ast::CrateConfig { + cfgspecs + .into_iter() + .map(|s| { + let sess = parse::ParseSess::new(FilePathMapping::empty()); + let mut parser = + parse::new_parser_from_source_str(&sess, FileName::CfgSpec, s.to_string()); - let meta_item = panictry!(parser.parse_meta_item()); + let meta_item = panictry!(parser.parse_meta_item()); - if parser.token != token::Eof { - 
early_error(ErrorOutputType::default(), &format!("invalid --cfg argument: {}", s)) - } else if meta_item.is_meta_item_list() { - let msg = - format!("invalid predicate in --cfg command line argument: `{}`", meta_item.name()); - early_error(ErrorOutputType::default(), &msg) - } + if parser.token != token::Eof { + early_error( + ErrorOutputType::default(), + &format!("invalid --cfg argument: {}", s), + ) + } else if meta_item.is_meta_item_list() { + let msg = format!( + "invalid predicate in --cfg command line argument: `{}`", + meta_item.ident + ); + early_error(ErrorOutputType::default(), &msg) + } - (meta_item.name(), meta_item.value_str()) - }).collect::() + (meta_item.name(), meta_item.value_str()) + }) + .collect::() } -pub fn build_session_options_and_crate_config(matches: &getopts::Matches) - -> (Options, ast::CrateConfig) { - let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) { - Some("auto") => ColorConfig::Auto, - Some("always") => ColorConfig::Always, - Some("never") => ColorConfig::Never, - - None => ColorConfig::Auto, - - Some(arg) => { - early_error(ErrorOutputType::default(), &format!("argument for --color must be auto, \ - always or never (instead was `{}`)", - arg)) - } - }; - - // We need the opts_present check because the driver will send us Matches - // with only stable options if no unstable options are used. Since error-format - // is unstable, it will not be present. We have to use opts_present not - // opt_present because the latter will panic. 
- let error_format = if matches.opts_present(&["error-format".to_owned()]) { - match matches.opt_str("error-format").as_ref().map(|s| &s[..]) { - Some("human") => ErrorOutputType::HumanReadable(color), - Some("json") => ErrorOutputType::Json(false), - Some("pretty-json") => ErrorOutputType::Json(true), - Some("short") => { - if nightly_options::is_unstable_enabled(matches) { - ErrorOutputType::Short(color) - } else { - early_error(ErrorOutputType::default(), - &format!("the `-Z unstable-options` flag must also be passed to \ - enable the short error message option")); - } - } - None => ErrorOutputType::HumanReadable(color), - - Some(arg) => { - early_error(ErrorOutputType::HumanReadable(color), - &format!("argument for --error-format must be `human`, `json` or \ - `short` (instead was `{}`)", - arg)) - } - } - } else { - ErrorOutputType::HumanReadable(color) - }; - - let unparsed_crate_types = matches.opt_strs("crate-type"); - let crate_types = parse_crate_types_from_list(unparsed_crate_types) - .unwrap_or_else(|e| early_error(error_format, &e[..])); - +pub fn get_cmd_lint_options(matches: &getopts::Matches, + error_format: ErrorOutputType) + -> (Vec<(String, lint::Level)>, bool, Option) { let mut lint_opts = vec![]; let mut describe_lints = false; @@ -1610,16 +1772,105 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) } let lint_cap = matches.opt_str("cap-lints").map(|cap| { - lint::Level::from_str(&cap).unwrap_or_else(|| { - early_error(error_format, &format!("unknown lint level: `{}`", cap)) - }) + lint::Level::from_str(&cap) + .unwrap_or_else(|| early_error(error_format, &format!("unknown lint level: `{}`", cap))) }); + (lint_opts, describe_lints, lint_cap) +} + +pub fn build_session_options_and_crate_config( + matches: &getopts::Matches, +) -> (Options, ast::CrateConfig) { + let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) { + Some("auto") => ColorConfig::Auto, + Some("always") => ColorConfig::Always, + 
Some("never") => ColorConfig::Never, + + None => ColorConfig::Auto, + + Some(arg) => early_error( + ErrorOutputType::default(), + &format!( + "argument for --color must be auto, \ + always or never (instead was `{}`)", + arg + ), + ), + }; + + let edition = match matches.opt_str("edition") { + Some(arg) => match Edition::from_str(&arg){ + Ok(edition) => edition, + Err(_) => early_error( + ErrorOutputType::default(), + &format!( + "argument for --edition must be one of: \ + {}. (instead was `{}`)", + EDITION_NAME_LIST, + arg + ), + ), + } + None => DEFAULT_EDITION, + }; + + if !edition.is_stable() && !nightly_options::is_nightly_build() { + early_error( + ErrorOutputType::default(), + &format!( + "Edition {} is unstable and only \ + available for nightly builds of rustc.", + edition, + ) + ) + } + + + // We need the opts_present check because the driver will send us Matches + // with only stable options if no unstable options are used. Since error-format + // is unstable, it will not be present. We have to use opts_present not + // opt_present because the latter will panic. 
+ let error_format = if matches.opts_present(&["error-format".to_owned()]) { + match matches.opt_str("error-format").as_ref().map(|s| &s[..]) { + Some("human") => ErrorOutputType::HumanReadable(color), + Some("json") => ErrorOutputType::Json(false), + Some("pretty-json") => ErrorOutputType::Json(true), + Some("short") => ErrorOutputType::Short(color), + None => ErrorOutputType::HumanReadable(color), + + Some(arg) => early_error( + ErrorOutputType::HumanReadable(color), + &format!( + "argument for --error-format must be `human`, `json` or \ + `short` (instead was `{}`)", + arg + ), + ), + } + } else { + ErrorOutputType::HumanReadable(color) + }; + + let unparsed_crate_types = matches.opt_strs("crate-type"); + let crate_types = parse_crate_types_from_list(unparsed_crate_types) + .unwrap_or_else(|e| early_error(error_format, &e[..])); + + let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format); let mut debugging_opts = build_debugging_options(matches, error_format); if !debugging_opts.unstable_options && error_format == ErrorOutputType::Json(true) { - early_error(ErrorOutputType::Json(false), - "--error-format=pretty-json is unstable"); + early_error( + ErrorOutputType::Json(false), + "--error-format=pretty-json is unstable", + ); + } + + if debugging_opts.pgo_gen.is_some() && !debugging_opts.pgo_use.is_empty() { + early_error( + error_format, + "options `-Z pgo-gen` and `-Z pgo-use` are exclusive", + ); } let mut output_types = BTreeMap::new(); @@ -1630,10 +1881,14 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) let shorthand = parts.next().unwrap(); let output_type = match OutputType::from_shorthand(shorthand) { Some(output_type) => output_type, - None => early_error(error_format, &format!( - "unknown emission type: `{}` - expected one of: {}", - shorthand, OutputType::shorthands_display(), - )), + None => early_error( + error_format, + &format!( + "unknown emission type: `{}` - expected one of: {}", + 
shorthand, + OutputType::shorthands_display(), + ), + ), }; let path = parts.next().map(PathBuf::from); output_types.insert(output_type, path); @@ -1644,34 +1899,16 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) output_types.insert(OutputType::Exe, None); } - let remap_path_prefix_sources = debugging_opts.remap_path_prefix_from.len(); - let remap_path_prefix_targets = debugging_opts.remap_path_prefix_from.len(); - - if remap_path_prefix_targets < remap_path_prefix_sources { - for source in &debugging_opts.remap_path_prefix_from[remap_path_prefix_targets..] { - early_error(error_format, - &format!("option `-Zremap-path-prefix-from='{}'` does not have \ - a corresponding `-Zremap-path-prefix-to`", source.display())) - } - } else if remap_path_prefix_targets > remap_path_prefix_sources { - for target in &debugging_opts.remap_path_prefix_to[remap_path_prefix_sources..] { - early_error(error_format, - &format!("option `-Zremap-path-prefix-to='{}'` does not have \ - a corresponding `-Zremap-path-prefix-from`", target.display())) - } - } - let mut cg = build_codegen_options(matches, error_format); let mut codegen_units = cg.codegen_units; - let mut thinlto = None; + let mut disable_thinlto = false; // Issue #30063: if user requests llvm-related output to one // particular path, disable codegen-units. 
- let incompatible: Vec<_> = output_types.iter() + let incompatible: Vec<_> = output_types + .iter() .map(|ot_path| ot_path.0) - .filter(|ot| { - !ot.is_compatible_with_codegen_units_and_single_output_file() - }) + .filter(|ot| !ot.is_compatible_with_codegen_units_and_single_output_file()) .map(|ot| ot.shorthand()) .collect(); if !incompatible.is_empty() { @@ -1679,38 +1916,59 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) Some(n) if n > 1 => { if matches.opt_present("o") { for ot in &incompatible { - early_warn(error_format, &format!("--emit={} with -o incompatible with \ - -C codegen-units=N for N > 1", - ot)); + early_warn( + error_format, + &format!( + "--emit={} with -o incompatible with \ + -C codegen-units=N for N > 1", + ot + ), + ); } early_warn(error_format, "resetting to default -C codegen-units=1"); codegen_units = Some(1); - thinlto = Some(false); + disable_thinlto = true; } } _ => { codegen_units = Some(1); - thinlto = Some(false); + disable_thinlto = true; } } } if debugging_opts.query_threads == Some(0) { - early_error(error_format, "Value for query threads must be a positive nonzero integer"); + early_error( + error_format, + "Value for query threads must be a positive nonzero integer", + ); + } + + if debugging_opts.query_threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() { + early_error( + error_format, + "Optimization fuel is incompatible with multiple query threads", + ); } if codegen_units == Some(0) { - early_error(error_format, "Value for codegen units must be a positive nonzero integer"); + early_error( + error_format, + "Value for codegen units must be a positive nonzero integer", + ); } let incremental = match (&debugging_opts.incremental, &cg.incremental) { (&Some(ref path1), &Some(ref path2)) => { if path1 != path2 { - early_error(error_format, - &format!("conflicting paths for `-Z incremental` and \ - `-C incremental` specified: {} versus {}", - path1, - path2)); + early_error( + error_format, + 
&format!( + "conflicting paths for `-Z incremental` and \ + `-C incremental` specified: {} versus {}", + path1, path2 + ), + ); } else { Some(path1) } @@ -1720,8 +1978,11 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) (&None, &None) => None, }.map(|m| PathBuf::from(m)); - if cg.lto && incremental.is_some() { - early_error(error_format, "can't perform LTO when compiling incrementally"); + if debugging_opts.profile && incremental.is_some() { + early_error( + error_format, + "can't instrument with gcov profiling when compiling incrementally", + ); } let mut prints = Vec::::new(); @@ -1741,7 +2002,11 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) prints.push(PrintRequest::CodeModels); cg.code_model = None; } - if debugging_opts.tls_model.as_ref().map_or(false, |s| s == "help") { + if debugging_opts + .tls_model + .as_ref() + .map_or(false, |s| s == "help") + { prints.push(PrintRequest::TlsModels); debugging_opts.tls_model = None; } @@ -1749,8 +2014,21 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) let cg = cg; let sysroot_opt = matches.opt_str("sysroot").map(|m| PathBuf::from(&m)); - let target = matches.opt_str("target").unwrap_or( - host_triple().to_string()); + let target_triple = if let Some(target) = matches.opt_str("target") { + if target.ends_with(".json") { + let path = Path::new(&target); + match TargetTriple::from_path(&path) { + Ok(triple) => triple, + Err(_) => { + early_error(error_format, &format!("target file {:?} does not exist", path)) + } + } + } else { + TargetTriple::TargetTriple(target) + } + } else { + TargetTriple::from_triple(host_triple()) + }; let opt_level = { if matches.opt_present("O") { if cg.opt_level.is_some() { @@ -1758,23 +2036,23 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) } OptLevel::Default } else { - match (cg.opt_level.as_ref().map(String::as_ref), - nightly_options::is_nightly_build()) { - (None, _) => 
OptLevel::No, - (Some("0"), _) => OptLevel::No, - (Some("1"), _) => OptLevel::Less, - (Some("2"), _) => OptLevel::Default, - (Some("3"), _) => OptLevel::Aggressive, - (Some("s"), true) => OptLevel::Size, - (Some("z"), true) => OptLevel::SizeMin, - (Some("s"), false) | (Some("z"), false) => { - early_error(error_format, &format!("the optimizations s or z are only \ - accepted on the nightly compiler")); - }, - (Some(arg), _) => { - early_error(error_format, &format!("optimization level needs to be \ - between 0-3 (instead was `{}`)", - arg)); + match cg.opt_level.as_ref().map(String::as_ref) { + None => OptLevel::No, + Some("0") => OptLevel::No, + Some("1") => OptLevel::Less, + Some("2") => OptLevel::Default, + Some("3") => OptLevel::Aggressive, + Some("s") => OptLevel::Size, + Some("z") => OptLevel::SizeMin, + Some(arg) => { + early_error( + error_format, + &format!( + "optimization level needs to be \ + between 0-3 (instead was `{}`)", + arg + ), + ); } } } @@ -1784,16 +2062,21 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) if cg.debuginfo.is_some() { early_error(error_format, "-g and -C debuginfo both provided"); } - FullDebugInfo + DebugInfo::Full } else { match cg.debuginfo { - None | Some(0) => NoDebugInfo, - Some(1) => LimitedDebugInfo, - Some(2) => FullDebugInfo, + None | Some(0) => DebugInfo::None, + Some(1) => DebugInfo::Limited, + Some(2) => DebugInfo::Full, Some(arg) => { - early_error(error_format, &format!("debug info level needs to be between \ - 0-2 (instead was `{}`)", - arg)); + early_error( + error_format, + &format!( + "debug info level needs to be between \ + 0-2 (instead was `{}`)", + arg + ), + ); } } }; @@ -1803,79 +2086,95 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) search_paths.add_path(&s[..], error_format); } - let libs = matches.opt_strs("l").into_iter().map(|s| { - // Parse string of the form "[KIND=]lib[:new_name]", - // where KIND is one of "dylib", "framework", "static". 
- let mut parts = s.splitn(2, '='); - let kind = parts.next().unwrap(); - let (name, kind) = match (parts.next(), kind) { - (None, name) => (name, None), - (Some(name), "dylib") => (name, Some(cstore::NativeUnknown)), - (Some(name), "framework") => (name, Some(cstore::NativeFramework)), - (Some(name), "static") => (name, Some(cstore::NativeStatic)), - (Some(name), "static-nobundle") => (name, Some(cstore::NativeStaticNobundle)), - (_, s) => { - early_error(error_format, &format!("unknown library kind `{}`, expected \ - one of dylib, framework, or static", - s)); + let libs = matches + .opt_strs("l") + .into_iter() + .map(|s| { + // Parse string of the form "[KIND=]lib[:new_name]", + // where KIND is one of "dylib", "framework", "static". + let mut parts = s.splitn(2, '='); + let kind = parts.next().unwrap(); + let (name, kind) = match (parts.next(), kind) { + (None, name) => (name, None), + (Some(name), "dylib") => (name, Some(cstore::NativeUnknown)), + (Some(name), "framework") => (name, Some(cstore::NativeFramework)), + (Some(name), "static") => (name, Some(cstore::NativeStatic)), + (Some(name), "static-nobundle") => (name, Some(cstore::NativeStaticNobundle)), + (_, s) => { + early_error( + error_format, + &format!( + "unknown library kind `{}`, expected \ + one of dylib, framework, or static", + s + ), + ); + } + }; + if kind == Some(cstore::NativeStaticNobundle) && !nightly_options::is_nightly_build() { + early_error( + error_format, + &format!( + "the library kind 'static-nobundle' is only \ + accepted on the nightly compiler" + ), + ); } - }; - if kind == Some(cstore::NativeStaticNobundle) && !nightly_options::is_nightly_build() { - early_error(error_format, &format!("the library kind 'static-nobundle' is only \ - accepted on the nightly compiler")); - } - let mut name_parts = name.splitn(2, ':'); - let name = name_parts.next().unwrap(); - let new_name = name_parts.next(); - (name.to_string(), new_name.map(|n| n.to_string()), kind) - }).collect(); + let mut 
name_parts = name.splitn(2, ':'); + let name = name_parts.next().unwrap(); + let new_name = name_parts.next(); + (name.to_string(), new_name.map(|n| n.to_string()), kind) + }) + .collect(); let cfg = parse_cfgspecs(matches.opt_strs("cfg")); let test = matches.opt_present("test"); - prints.extend(matches.opt_strs("print").into_iter().map(|s| { - match &*s { - "crate-name" => PrintRequest::CrateName, - "file-names" => PrintRequest::FileNames, - "sysroot" => PrintRequest::Sysroot, - "cfg" => PrintRequest::Cfg, - "target-list" => PrintRequest::TargetList, - "target-cpus" => PrintRequest::TargetCPUs, - "target-features" => PrintRequest::TargetFeatures, - "relocation-models" => PrintRequest::RelocationModels, - "code-models" => PrintRequest::CodeModels, - "tls-models" => PrintRequest::TlsModels, - "native-static-libs" => PrintRequest::NativeStaticLibs, - "target-spec-json" => { - if nightly_options::is_unstable_enabled(matches) { - PrintRequest::TargetSpec - } else { - early_error(error_format, - &format!("the `-Z unstable-options` flag must also be passed to \ - enable the target-spec-json print option")); - } - }, - req => { - early_error(error_format, &format!("unknown print request `{}`", req)) + prints.extend(matches.opt_strs("print").into_iter().map(|s| match &*s { + "crate-name" => PrintRequest::CrateName, + "file-names" => PrintRequest::FileNames, + "sysroot" => PrintRequest::Sysroot, + "cfg" => PrintRequest::Cfg, + "target-list" => PrintRequest::TargetList, + "target-cpus" => PrintRequest::TargetCPUs, + "target-features" => PrintRequest::TargetFeatures, + "relocation-models" => PrintRequest::RelocationModels, + "code-models" => PrintRequest::CodeModels, + "tls-models" => PrintRequest::TlsModels, + "native-static-libs" => PrintRequest::NativeStaticLibs, + "target-spec-json" => { + if nightly_options::is_unstable_enabled(matches) { + PrintRequest::TargetSpec + } else { + early_error( + error_format, + &format!( + "the `-Z unstable-options` flag must also be passed 
to \ + enable the target-spec-json print option" + ), + ); } } + req => early_error(error_format, &format!("unknown print request `{}`", req)), })); let borrowck_mode = match debugging_opts.borrowck.as_ref().map(|s| &s[..]) { None | Some("ast") => BorrowckMode::Ast, Some("mir") => BorrowckMode::Mir, Some("compare") => BorrowckMode::Compare, - Some(m) => { - early_error(error_format, &format!("unknown borrowck mode `{}`", m)) - }, + Some("migrate") => BorrowckMode::Migrate, + Some(m) => early_error(error_format, &format!("unknown borrowck mode `{}`", m)), }; - if !cg.remark.is_empty() && debuginfo == NoDebugInfo { - early_warn(error_format, "-C remark will not show source locations without \ - --debuginfo"); + if !cg.remark.is_empty() && debuginfo == DebugInfo::None { + early_warn( + error_format, + "-C remark will not show source locations without \ + --debuginfo", + ); } - let mut externs = BTreeMap::new(); + let mut externs: BTreeMap<_, BTreeSet<_>> = BTreeMap::new(); for arg in &matches.opt_strs("extern") { let mut parts = arg.splitn(2, '='); let name = match parts.next() { @@ -1884,63 +2183,86 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches) }; let location = match parts.next() { Some(s) => s, - None => early_error(error_format, "--extern value must be of the format `foo=bar`"), + None => early_error( + error_format, + "--extern value must be of the format `foo=bar`", + ), }; - externs.entry(name.to_string()) - .or_insert_with(BTreeSet::new) - .insert(location.to_string()); + externs + .entry(name.to_string()) + .or_default() + .insert(location.to_string()); } let crate_name = matches.opt_str("crate-name"); - (Options { - crate_types, - optimize: opt_level, - debuginfo, - lint_opts, - lint_cap, - describe_lints, - output_types: OutputTypes(output_types), - search_paths, - maybe_sysroot: sysroot_opt, - target_triple: target, - test, - incremental, - debugging_opts, - prints, - borrowck_mode, - cg, - error_format, - externs: 
Externs(externs), - crate_name, - alt_std_name: None, - libs, - unstable_features: UnstableFeatures::from_environment(), - debug_assertions, - actually_rustdoc: false, - cli_forced_codegen_units: codegen_units, - cli_forced_thinlto: thinlto, - }, - cfg) + let remap_path_prefix = matches + .opt_strs("remap-path-prefix") + .into_iter() + .map(|remap| { + let mut parts = remap.rsplitn(2, '='); // reverse iterator + let to = parts.next(); + let from = parts.next(); + match (from, to) { + (Some(from), Some(to)) => (PathBuf::from(from), PathBuf::from(to)), + _ => early_error( + error_format, + "--remap-path-prefix must contain '=' between FROM and TO", + ), + } + }) + .collect(); + + ( + Options { + crate_types, + optimize: opt_level, + debuginfo, + lint_opts, + lint_cap, + describe_lints, + output_types: OutputTypes(output_types), + search_paths, + maybe_sysroot: sysroot_opt, + target_triple, + test, + incremental, + debugging_opts, + prints, + borrowck_mode, + cg, + error_format, + externs: Externs(externs), + crate_name, + alt_std_name: None, + libs, + unstable_features: UnstableFeatures::from_environment(), + debug_assertions, + actually_rustdoc: false, + cli_forced_codegen_units: codegen_units, + cli_forced_thinlto_off: disable_thinlto, + remap_path_prefix, + edition, + }, + cfg, + ) } -pub fn parse_crate_types_from_list(list_list: Vec) - -> Result, String> { +pub fn parse_crate_types_from_list(list_list: Vec) -> Result, String> { let mut crate_types: Vec = Vec::new(); for unparsed_crate_type in &list_list { for part in unparsed_crate_type.split(',') { let new_part = match part { - "lib" => default_lib_output(), - "rlib" => CrateTypeRlib, - "staticlib" => CrateTypeStaticlib, - "dylib" => CrateTypeDylib, - "cdylib" => CrateTypeCdylib, - "bin" => CrateTypeExecutable, - "proc-macro" => CrateTypeProcMacro, + "lib" => default_lib_output(), + "rlib" => CrateType::Rlib, + "staticlib" => CrateType::Staticlib, + "dylib" => CrateType::Dylib, + "cdylib" => CrateType::Cdylib, + 
"bin" => CrateType::Executable, + "proc-macro" => CrateType::ProcMacro, _ => { - return Err(format!("unknown crate type: `{}`", - part)); + return Err(format!("unknown crate type: `{}`", part)); } }; if !crate_types.contains(&new_part) { @@ -1959,7 +2281,11 @@ pub mod nightly_options { use session::early_error; pub fn is_unstable_enabled(matches: &getopts::Matches) -> bool { - is_nightly_build() && matches.opt_strs("Z").iter().any(|x| *x == "unstable-options") + is_nightly_build() + && matches + .opt_strs("Z") + .iter() + .any(|x| *x == "unstable-options") } pub fn is_nightly_build() -> bool { @@ -1967,30 +2293,40 @@ pub mod nightly_options { } pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) { - let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options"); - let really_allows_unstable_options = UnstableFeatures::from_environment() - .is_nightly_build(); + let has_z_unstable_option = matches + .opt_strs("Z") + .iter() + .any(|x| *x == "unstable-options"); + let really_allows_unstable_options = + UnstableFeatures::from_environment().is_nightly_build(); for opt in flags.iter() { if opt.stability == OptionStability::Stable { - continue + continue; } if !matches.opt_present(opt.name) { - continue + continue; } if opt.name != "Z" && !has_z_unstable_option { - early_error(ErrorOutputType::default(), - &format!("the `-Z unstable-options` flag must also be passed to enable \ - the flag `{}`", - opt.name)); + early_error( + ErrorOutputType::default(), + &format!( + "the `-Z unstable-options` flag must also be passed to enable \ + the flag `{}`", + opt.name + ), + ); } if really_allows_unstable_options { - continue + continue; } match opt.stability { OptionStability::Unstable => { - let msg = format!("the option `{}` is only accepted on the \ - nightly compiler", opt.name); + let msg = format!( + "the option `{}` is only accepted on the \ + nightly compiler", + opt.name + ); 
early_error(ErrorOutputType::default(), &msg); } OptionStability::Stable => {} @@ -2002,12 +2338,12 @@ pub mod nightly_options { impl fmt::Display for CrateType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - CrateTypeExecutable => "bin".fmt(f), - CrateTypeDylib => "dylib".fmt(f), - CrateTypeRlib => "rlib".fmt(f), - CrateTypeStaticlib => "staticlib".fmt(f), - CrateTypeCdylib => "cdylib".fmt(f), - CrateTypeProcMacro => "proc-macro".fmt(f), + CrateType::Executable => "bin".fmt(f), + CrateType::Dylib => "dylib".fmt(f), + CrateType::Rlib => "rlib".fmt(f), + CrateType::Staticlib => "staticlib".fmt(f), + CrateType::Cdylib => "cdylib".fmt(f), + CrateType::ProcMacro => "proc-macro".fmt(f), } } } @@ -2033,15 +2369,15 @@ impl fmt::Display for CrateType { mod dep_tracking { use lint; use middle::cstore; - use session::search_paths::{PathKind, SearchPaths}; use std::collections::BTreeMap; use std::hash::Hash; use std::path::PathBuf; use std::collections::hash_map::DefaultHasher; - use super::{Passes, CrateType, OptLevel, DebugInfoLevel, - OutputTypes, Externs, ErrorOutputType, Sanitizer}; + use super::{CrateType, DebugInfo, ErrorOutputType, Lto, OptLevel, OutputTypes, + Passes, Sanitizer, CrossLangLto}; use syntax::feature_gate::UnstableFeatures; - use rustc_back::{PanicStrategy, RelroLevel}; + use rustc_target::spec::{PanicStrategy, RelroLevel, TargetTriple}; + use syntax::edition::Edition; pub trait DepTrackingHash { fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType); @@ -2093,34 +2429,32 @@ mod dep_tracking { impl_dep_tracking_hash_via_hash!(RelroLevel); impl_dep_tracking_hash_via_hash!(Passes); impl_dep_tracking_hash_via_hash!(OptLevel); - impl_dep_tracking_hash_via_hash!(DebugInfoLevel); + impl_dep_tracking_hash_via_hash!(Lto); + impl_dep_tracking_hash_via_hash!(DebugInfo); impl_dep_tracking_hash_via_hash!(UnstableFeatures); - impl_dep_tracking_hash_via_hash!(Externs); impl_dep_tracking_hash_via_hash!(OutputTypes); 
impl_dep_tracking_hash_via_hash!(cstore::NativeLibraryKind); impl_dep_tracking_hash_via_hash!(Sanitizer); impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(TargetTriple); + impl_dep_tracking_hash_via_hash!(Edition); + impl_dep_tracking_hash_via_hash!(CrossLangLto); impl_dep_tracking_hash_for_sortable_vec_of!(String); impl_dep_tracking_hash_for_sortable_vec_of!(PathBuf); impl_dep_tracking_hash_for_sortable_vec_of!(CrateType); impl_dep_tracking_hash_for_sortable_vec_of!((String, lint::Level)); - impl_dep_tracking_hash_for_sortable_vec_of!((String, Option, - Option)); + impl_dep_tracking_hash_for_sortable_vec_of!(( + String, + Option, + Option + )); impl_dep_tracking_hash_for_sortable_vec_of!((String, u64)); - impl DepTrackingHash for SearchPaths { - fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) { - let mut elems: Vec<_> = self - .iter(PathKind::All) - .collect(); - elems.sort(); - Hash::hash(&elems, hasher); - } - } impl DepTrackingHash for (T1, T2) - where T1: DepTrackingHash, - T2: DepTrackingHash + where + T1: DepTrackingHash, + T2: DepTrackingHash, { fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { Hash::hash(&0, hasher); @@ -2131,9 +2465,10 @@ mod dep_tracking { } impl DepTrackingHash for (T1, T2, T3) - where T1: DepTrackingHash, - T2: DepTrackingHash, - T3: DepTrackingHash + where + T1: DepTrackingHash, + T2: DepTrackingHash, + T3: DepTrackingHash, { fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { Hash::hash(&0, hasher); @@ -2146,9 +2481,11 @@ mod dep_tracking { } // This is a stable hash because BTreeMap is a sorted container - pub fn stable_hash(sub_hashes: BTreeMap<&'static str, &DepTrackingHash>, - hasher: &mut DefaultHasher, - error_format: ErrorOutputType) { + pub fn stable_hash( + sub_hashes: BTreeMap<&'static str, &dyn DepTrackingHash>, + hasher: &mut DefaultHasher, + error_format: ErrorOutputType, + ) { for (key, sub_hash) in sub_hashes { // Using 
Hash::hash() instead of DepTrackingHash::hash() is fine for // the keys, as they are just plain strings @@ -2166,20 +2503,24 @@ mod tests { use lint; use middle::cstore; use session::config::{build_configuration, build_session_options_and_crate_config}; + use session::config::{Lto, CrossLangLto}; use session::build_session; use std::collections::{BTreeMap, BTreeSet}; use std::iter::FromIterator; use std::path::PathBuf; - use super::{OutputType, OutputTypes, Externs}; - use rustc_back::{PanicStrategy, RelroLevel}; + use super::{Externs, OutputType, OutputTypes}; + use rustc_target::spec::{PanicStrategy, RelroLevel}; use syntax::symbol::Symbol; + use syntax::edition::{Edition, DEFAULT_EDITION}; + use syntax; + use super::Options; fn optgroups() -> getopts::Options { let mut opts = getopts::Options::new(); for group in super::rustc_optgroups() { (group.apply)(&mut opts); } - return opts + return opts; } fn mk_map(entries: Vec<(K, V)>) -> BTreeMap { @@ -2193,82 +2534,78 @@ mod tests { // When the user supplies --test we should implicitly supply --cfg test #[test] fn test_switch_implies_cfg_test() { - let matches = - &match optgroups().parse(&["--test".to_string()]) { - Ok(m) => m, - Err(f) => panic!("test_switch_implies_cfg_test: {}", f) + syntax::with_globals(|| { + let matches = &match optgroups().parse(&["--test".to_string()]) { + Ok(m) => m, + Err(f) => panic!("test_switch_implies_cfg_test: {}", f), }; - let registry = errors::registry::Registry::new(&[]); - let (sessopts, cfg) = build_session_options_and_crate_config(matches); - let sess = build_session(sessopts, None, registry); - let cfg = build_configuration(&sess, cfg); - assert!(cfg.contains(&(Symbol::intern("test"), None))); + let registry = errors::registry::Registry::new(&[]); + let (sessopts, cfg) = build_session_options_and_crate_config(matches); + let sess = build_session(sessopts, None, registry); + let cfg = build_configuration(&sess, cfg); + assert!(cfg.contains(&(Symbol::intern("test"), None))); + 
}); } // When the user supplies --test and --cfg test, don't implicitly add // another --cfg test #[test] fn test_switch_implies_cfg_test_unless_cfg_test() { - let matches = - &match optgroups().parse(&["--test".to_string(), "--cfg=test".to_string()]) { - Ok(m) => m, - Err(f) => { - panic!("test_switch_implies_cfg_test_unless_cfg_test: {}", f) - } + syntax::with_globals(|| { + let matches = &match optgroups().parse(&["--test".to_string(), + "--cfg=test".to_string()]) { + Ok(m) => m, + Err(f) => panic!("test_switch_implies_cfg_test_unless_cfg_test: {}", f), }; - let registry = errors::registry::Registry::new(&[]); - let (sessopts, cfg) = build_session_options_and_crate_config(matches); - let sess = build_session(sessopts, None, registry); - let cfg = build_configuration(&sess, cfg); - let mut test_items = cfg.iter().filter(|&&(name, _)| name == "test"); - assert!(test_items.next().is_some()); - assert!(test_items.next().is_none()); + let registry = errors::registry::Registry::new(&[]); + let (sessopts, cfg) = build_session_options_and_crate_config(matches); + let sess = build_session(sessopts, None, registry); + let cfg = build_configuration(&sess, cfg); + let mut test_items = cfg.iter().filter(|&&(name, _)| name == "test"); + assert!(test_items.next().is_some()); + assert!(test_items.next().is_none()); + }); } #[test] fn test_can_print_warnings() { - { - let matches = optgroups().parse(&[ - "-Awarnings".to_string() - ]).unwrap(); + syntax::with_globals(|| { + let matches = optgroups().parse(&["-Awarnings".to_string()]).unwrap(); let registry = errors::registry::Registry::new(&[]); let (sessopts, _) = build_session_options_and_crate_config(&matches); let sess = build_session(sessopts, None, registry); assert!(!sess.diagnostic().flags.can_emit_warnings); - } + }); - { - let matches = optgroups().parse(&[ - "-Awarnings".to_string(), - "-Dwarnings".to_string() - ]).unwrap(); + syntax::with_globals(|| { + let matches = optgroups() + .parse(&["-Awarnings".to_string(), 
"-Dwarnings".to_string()]) + .unwrap(); let registry = errors::registry::Registry::new(&[]); let (sessopts, _) = build_session_options_and_crate_config(&matches); let sess = build_session(sessopts, None, registry); assert!(sess.diagnostic().flags.can_emit_warnings); - } + }); - { - let matches = optgroups().parse(&[ - "-Adead_code".to_string() - ]).unwrap(); + syntax::with_globals(|| { + let matches = optgroups().parse(&["-Adead_code".to_string()]).unwrap(); let registry = errors::registry::Registry::new(&[]); let (sessopts, _) = build_session_options_and_crate_config(&matches); let sess = build_session(sessopts, None, registry); assert!(sess.diagnostic().flags.can_emit_warnings); - } + }); } #[test] fn test_output_types_tracking_hash_different_paths() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); + let mut v3 = Options::default(); - v1.output_types = OutputTypes::new(&[(OutputType::Exe, - Some(PathBuf::from("./some/thing")))]); - v2.output_types = OutputTypes::new(&[(OutputType::Exe, - Some(PathBuf::from("/some/thing")))]); + v1.output_types = + OutputTypes::new(&[(OutputType::Exe, Some(PathBuf::from("./some/thing")))]); + v2.output_types = + OutputTypes::new(&[(OutputType::Exe, Some(PathBuf::from("/some/thing")))]); v3.output_types = OutputTypes::new(&[(OutputType::Exe, None)]); assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); @@ -2283,8 +2620,8 @@ mod tests { #[test] fn test_output_types_tracking_hash_different_construction_order() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); v1.output_types = OutputTypes::new(&[ (OutputType::Exe, Some(PathBuf::from("./some/thing"))), @@ -2302,68 +2639,43 @@ mod tests { assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); } - #[test] - fn 
test_externs_tracking_hash_different_values() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); - - v1.externs = Externs::new(mk_map(vec![ - (String::from("a"), mk_set(vec![String::from("b"), - String::from("c")])), - (String::from("d"), mk_set(vec![String::from("e"), - String::from("f")])), - ])); - - v2.externs = Externs::new(mk_map(vec![ - (String::from("a"), mk_set(vec![String::from("b"), - String::from("c")])), - (String::from("X"), mk_set(vec![String::from("e"), - String::from("f")])), - ])); - - v3.externs = Externs::new(mk_map(vec![ - (String::from("a"), mk_set(vec![String::from("b"), - String::from("c")])), - (String::from("d"), mk_set(vec![String::from("X"), - String::from("f")])), - ])); - - assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); - assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); - assert!(v2.dep_tracking_hash() != v3.dep_tracking_hash()); - - // Check clone - assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); - assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); - assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); - } - #[test] fn test_externs_tracking_hash_different_construction_order() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); + let mut v3 = Options::default(); v1.externs = Externs::new(mk_map(vec![ - (String::from("a"), mk_set(vec![String::from("b"), - String::from("c")])), - (String::from("d"), mk_set(vec![String::from("e"), - String::from("f")])), + ( + String::from("a"), + mk_set(vec![String::from("b"), String::from("c")]), + ), + ( + String::from("d"), + mk_set(vec![String::from("e"), String::from("f")]), + ), ])); v2.externs = Externs::new(mk_map(vec![ - (String::from("d"), mk_set(vec![String::from("e"), - String::from("f")])), - (String::from("a"), 
mk_set(vec![String::from("b"), - String::from("c")])), + ( + String::from("d"), + mk_set(vec![String::from("e"), String::from("f")]), + ), + ( + String::from("a"), + mk_set(vec![String::from("b"), String::from("c")]), + ), ])); v3.externs = Externs::new(mk_map(vec![ - (String::from("a"), mk_set(vec![String::from("b"), - String::from("c")])), - (String::from("d"), mk_set(vec![String::from("f"), - String::from("e")])), + ( + String::from("a"), + mk_set(vec![String::from("b"), String::from("c")]), + ), + ( + String::from("d"), + mk_set(vec![String::from("f"), String::from("e")]), + ), ])); assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash()); @@ -2378,24 +2690,30 @@ mod tests { #[test] fn test_lints_tracking_hash_different_values() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); + let mut v3 = Options::default(); - v1.lint_opts = vec![(String::from("a"), lint::Allow), - (String::from("b"), lint::Warn), - (String::from("c"), lint::Deny), - (String::from("d"), lint::Forbid)]; + v1.lint_opts = vec![ + (String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("c"), lint::Deny), + (String::from("d"), lint::Forbid), + ]; - v2.lint_opts = vec![(String::from("a"), lint::Allow), - (String::from("b"), lint::Warn), - (String::from("X"), lint::Deny), - (String::from("d"), lint::Forbid)]; + v2.lint_opts = vec![ + (String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("X"), lint::Deny), + (String::from("d"), lint::Forbid), + ]; - v3.lint_opts = vec![(String::from("a"), lint::Allow), - (String::from("b"), lint::Warn), - (String::from("c"), lint::Forbid), - (String::from("d"), lint::Deny)]; + v3.lint_opts = vec![ + (String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("c"), lint::Forbid), + (String::from("d"), lint::Deny), + ]; 
assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); @@ -2409,18 +2727,22 @@ mod tests { #[test] fn test_lints_tracking_hash_different_construction_order() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); - v1.lint_opts = vec![(String::from("a"), lint::Allow), - (String::from("b"), lint::Warn), - (String::from("c"), lint::Deny), - (String::from("d"), lint::Forbid)]; + v1.lint_opts = vec![ + (String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("c"), lint::Deny), + (String::from("d"), lint::Forbid), + ]; - v2.lint_opts = vec![(String::from("a"), lint::Allow), - (String::from("c"), lint::Deny), - (String::from("b"), lint::Warn), - (String::from("d"), lint::Forbid)]; + v2.lint_opts = vec![ + (String::from("a"), lint::Allow), + (String::from("c"), lint::Deny), + (String::from("b"), lint::Warn), + (String::from("d"), lint::Forbid), + ]; assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash()); @@ -2429,100 +2751,57 @@ mod tests { assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); } - #[test] - fn test_search_paths_tracking_hash_different_values() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); - let mut v4 = super::basic_options(); - let mut v5 = super::basic_options(); - - // Reference - v1.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); - - // Native changed - v2.search_paths.add_path("native=XXX", super::ErrorOutputType::Json(false)); - 
v2.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); - - // Crate changed - v2.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("crate=XXX", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); - - // Dependency changed - v3.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("dependency=XXX", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); - - // Framework changed - v4.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("framework=XXX", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); - - // All changed - v5.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v5.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v5.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v5.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v5.search_paths.add_path("all=XXX", super::ErrorOutputType::Json(false)); 
- - assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); - assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); - assert!(v1.dep_tracking_hash() != v4.dep_tracking_hash()); - assert!(v1.dep_tracking_hash() != v5.dep_tracking_hash()); - - // Check clone - assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); - assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); - assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); - assert_eq!(v4.dep_tracking_hash(), v4.clone().dep_tracking_hash()); - assert_eq!(v5.dep_tracking_hash(), v5.clone().dep_tracking_hash()); - } - #[test] fn test_search_paths_tracking_hash_different_order() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); - let mut v4 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); + let mut v3 = Options::default(); + let mut v4 = Options::default(); // Reference - v1.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v1.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); + v1.search_paths + .add_path("native=abc", super::ErrorOutputType::Json(false)); + v1.search_paths + .add_path("crate=def", super::ErrorOutputType::Json(false)); + v1.search_paths + .add_path("dependency=ghi", super::ErrorOutputType::Json(false)); + v1.search_paths + .add_path("framework=jkl", super::ErrorOutputType::Json(false)); + v1.search_paths + .add_path("all=mno", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - 
v2.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v2.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); + v2.search_paths + .add_path("native=abc", super::ErrorOutputType::Json(false)); + v2.search_paths + .add_path("dependency=ghi", super::ErrorOutputType::Json(false)); + v2.search_paths + .add_path("crate=def", super::ErrorOutputType::Json(false)); + v2.search_paths + .add_path("framework=jkl", super::ErrorOutputType::Json(false)); + v2.search_paths + .add_path("all=mno", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v3.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); + v3.search_paths + .add_path("crate=def", super::ErrorOutputType::Json(false)); + v3.search_paths + .add_path("framework=jkl", super::ErrorOutputType::Json(false)); + v3.search_paths + .add_path("native=abc", super::ErrorOutputType::Json(false)); + v3.search_paths + .add_path("dependency=ghi", super::ErrorOutputType::Json(false)); + v3.search_paths + .add_path("all=mno", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("all=mno", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("native=abc", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("crate=def", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json(false)); - v4.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json(false)); + v4.search_paths + .add_path("all=mno", super::ErrorOutputType::Json(false)); + v4.search_paths + .add_path("native=abc", 
super::ErrorOutputType::Json(false)); + v4.search_paths + .add_path("crate=def", super::ErrorOutputType::Json(false)); + v4.search_paths + .add_path("dependency=ghi", super::ErrorOutputType::Json(false)); + v4.search_paths + .add_path("framework=jkl", super::ErrorOutputType::Json(false)); assert!(v1.dep_tracking_hash() == v2.dep_tracking_hash()); assert!(v1.dep_tracking_hash() == v3.dep_tracking_hash()); @@ -2537,30 +2816,42 @@ mod tests { #[test] fn test_native_libs_tracking_hash_different_values() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); - let mut v4 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); + let mut v3 = Options::default(); + let mut v4 = Options::default(); // Reference - v1.libs = vec![(String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("b"), None, Some(cstore::NativeFramework)), - (String::from("c"), None, Some(cstore::NativeUnknown))]; + v1.libs = vec![ + (String::from("a"), None, Some(cstore::NativeStatic)), + (String::from("b"), None, Some(cstore::NativeFramework)), + (String::from("c"), None, Some(cstore::NativeUnknown)), + ]; // Change label - v2.libs = vec![(String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("X"), None, Some(cstore::NativeFramework)), - (String::from("c"), None, Some(cstore::NativeUnknown))]; + v2.libs = vec![ + (String::from("a"), None, Some(cstore::NativeStatic)), + (String::from("X"), None, Some(cstore::NativeFramework)), + (String::from("c"), None, Some(cstore::NativeUnknown)), + ]; // Change kind - v3.libs = vec![(String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("b"), None, Some(cstore::NativeStatic)), - (String::from("c"), None, Some(cstore::NativeUnknown))]; + v3.libs = vec![ + (String::from("a"), None, Some(cstore::NativeStatic)), + (String::from("b"), None, Some(cstore::NativeStatic)), + (String::from("c"), None, 
Some(cstore::NativeUnknown)), + ]; // Change new-name - v4.libs = vec![(String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("b"), Some(String::from("X")), Some(cstore::NativeFramework)), - (String::from("c"), None, Some(cstore::NativeUnknown))]; + v4.libs = vec![ + (String::from("a"), None, Some(cstore::NativeStatic)), + ( + String::from("b"), + Some(String::from("X")), + Some(cstore::NativeFramework), + ), + (String::from("c"), None, Some(cstore::NativeUnknown)), + ]; assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); @@ -2575,22 +2866,28 @@ mod tests { #[test] fn test_native_libs_tracking_hash_different_order() { - let mut v1 = super::basic_options(); - let mut v2 = super::basic_options(); - let mut v3 = super::basic_options(); + let mut v1 = Options::default(); + let mut v2 = Options::default(); + let mut v3 = Options::default(); // Reference - v1.libs = vec![(String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("b"), None, Some(cstore::NativeFramework)), - (String::from("c"), None, Some(cstore::NativeUnknown))]; + v1.libs = vec![ + (String::from("a"), None, Some(cstore::NativeStatic)), + (String::from("b"), None, Some(cstore::NativeFramework)), + (String::from("c"), None, Some(cstore::NativeUnknown)), + ]; - v2.libs = vec![(String::from("b"), None, Some(cstore::NativeFramework)), - (String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("c"), None, Some(cstore::NativeUnknown))]; + v2.libs = vec![ + (String::from("b"), None, Some(cstore::NativeFramework)), + (String::from("a"), None, Some(cstore::NativeStatic)), + (String::from("c"), None, Some(cstore::NativeUnknown)), + ]; - v3.libs = vec![(String::from("c"), None, Some(cstore::NativeUnknown)), - (String::from("a"), None, Some(cstore::NativeStatic)), - (String::from("b"), None, Some(cstore::NativeFramework))]; + v3.libs = vec![ + (String::from("c"), None, Some(cstore::NativeUnknown)), + 
(String::from("a"), None, Some(cstore::NativeStatic)), + (String::from("b"), None, Some(cstore::NativeFramework)), + ]; assert!(v1.dep_tracking_hash() == v2.dep_tracking_hash()); assert!(v1.dep_tracking_hash() == v3.dep_tracking_hash()); @@ -2604,8 +2901,8 @@ mod tests { #[test] fn test_codegen_options_tracking_hash() { - let reference = super::basic_options(); - let mut opts = super::basic_options(); + let reference = Options::default(); + let mut opts = Options::default(); // Make sure the changing an [UNTRACKED] option leaves the hash unchanged opts.cg.ar = Some(String::from("abc")); @@ -2629,8 +2926,7 @@ mod tests { opts.cg.codegen_units = Some(42); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - opts.cg.remark = super::SomePasses(vec![String::from("pass1"), - String::from("pass2")]); + opts.cg.remark = super::Passes::Some(vec![String::from("pass1"), String::from("pass2")]); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.cg.save_temps = true; @@ -2639,10 +2935,9 @@ mod tests { opts.cg.incremental = Some(String::from("abc")); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - // Make sure changing a [TRACKED] option changes the hash opts = reference.clone(); - opts.cg.lto = true; + opts.cg.lto = Lto::Fat; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); @@ -2705,6 +3000,14 @@ mod tests { opts.debugging_opts.tls_model = Some(String::from("tls model")); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + opts = reference.clone(); + opts.debugging_opts.pgo_gen = Some(String::from("abc")); + assert_ne!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.pgo_use = String::from("abc"); + assert_ne!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts = reference.clone(); opts.cg.metadata = vec![String::from("A"), String::from("B")]; 
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); @@ -2717,6 +3020,10 @@ mod tests { opts.cg.debuginfo = Some(0xba5eba11); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + opts = reference.clone(); + opts.cg.force_frame_pointers = Some(false); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + opts = reference.clone(); opts.cg.debug_assertions = Some(true); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); @@ -2732,8 +3039,8 @@ mod tests { #[test] fn test_debugging_options_tracking_hash() { - let reference = super::basic_options(); - let mut opts = super::basic_options(); + let reference = Options::default(); + let mut opts = Options::default(); // Make sure the changing an [UNTRACKED] option leaves the hash unchanged opts.debugging_opts.verbose = true; @@ -2746,7 +3053,7 @@ mod tests { assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.input_stats = true; assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - opts.debugging_opts.trans_stats = true; + opts.debugging_opts.codegen_stats = true; assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.borrowck_stats = true; assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); @@ -2792,7 +3099,7 @@ mod tests { assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.keep_ast = true; assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); - opts.debugging_opts.print_trans_items = Some(String::from("abc")); + opts.debugging_opts.print_mono_items = Some(String::from("abc")); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); opts.debugging_opts.dump_mir = Some(String::from("abc")); assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); @@ -2807,7 +3114,7 @@ mod tests { assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); - 
opts.debugging_opts.no_verify = true; + opts.debugging_opts.verify_llvm_ir = true; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); @@ -2819,13 +3126,17 @@ mod tests { assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); - opts.debugging_opts.no_trans = true; + opts.debugging_opts.no_codegen = true; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); opts.debugging_opts.treat_err_as_bug = true; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + opts = reference.clone(); + opts.debugging_opts.report_delayed_bugs = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + opts = reference.clone(); opts.debugging_opts.continue_parse_after_error = true; assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); @@ -2838,10 +3149,6 @@ mod tests { opts.debugging_opts.force_overflow_checks = Some(true); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); - opts = reference.clone(); - opts.debugging_opts.enable_nonzeroing_move_hints = true; - assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); - opts = reference.clone(); opts.debugging_opts.show_span = Some(String::from("abc")); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); @@ -2853,5 +3160,22 @@ mod tests { opts = reference.clone(); opts.debugging_opts.relro_level = Some(RelroLevel::Full); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.cross_lang_lto = CrossLangLto::LinkerPluginAuto; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + } + + #[test] + fn test_edition_parsing() { + // test default edition + let options = Options::default(); + assert!(options.edition == DEFAULT_EDITION); + + let matches = optgroups() + .parse(&["--edition=2018".to_string()]) + .unwrap(); + let (sessopts, _) = 
build_session_options_and_crate_config(&matches); + assert!(sessopts.edition == Edition::Edition2018) } } diff --git a/src/librustc/session/filesearch.rs b/src/librustc/session/filesearch.rs index b636fc6c9950..32044fdf2a8c 100644 --- a/src/librustc/session/filesearch.rs +++ b/src/librustc/session/filesearch.rs @@ -19,7 +19,7 @@ use std::fs; use std::path::{Path, PathBuf}; use session::search_paths::{SearchPaths, PathKind}; -use util::fs as rustcfs; +use rustc_fs_util::fix_windows_verbatim_for_gcc; #[derive(Copy, Clone)] pub enum FileMatch { @@ -151,7 +151,7 @@ pub fn get_or_default_sysroot() -> PathBuf { // See comments on this target function, but the gist is that // gcc chokes on verbatim paths which fs::canonicalize generates // so we try to avoid those kinds of paths. - Ok(canon) => Some(rustcfs::fix_windows_verbatim_for_gcc(&canon)), + Ok(canon) => Some(fix_windows_verbatim_for_gcc(&canon)), Err(e) => bug!("failed to get realpath: {}", e), } }) diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index 3ae7d01823c8..3dc697e6adb5 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -8,46 +8,53 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-pub use self::code_stats::{CodeStats, DataTypeKind, FieldInfo}; -pub use self::code_stats::{SizeKind, TypeSizeInfo, VariantInfo}; +pub use self::code_stats::{DataTypeKind, SizeKind, FieldInfo, VariantInfo}; +use self::code_stats::CodeStats; -use hir::def_id::{CrateNum, DefIndex}; -use ich::Fingerprint; +use hir::def_id::CrateNum; +use rustc_data_structures::fingerprint::Fingerprint; use lint; +use lint::builtin::BuiltinLintDiagnostics; use middle::allocator::AllocatorKind; use middle::dependency_format; use session::search_paths::PathKind; -use session::config::{BorrowckMode, DebugInfoLevel, OutputType}; -use ty::tls; +use session::config::{OutputType, Lto}; use util::nodemap::{FxHashMap, FxHashSet}; use util::common::{duration_to_secs_str, ErrorReported}; +use util::common::ProfileQueriesMsg; + +use rustc_data_structures::base_n; +use rustc_data_structures::sync::{self, Lrc, Lock, LockCell, OneThread, Once, RwLock}; use syntax::ast::NodeId; use errors::{self, DiagnosticBuilder, DiagnosticId}; use errors::emitter::{Emitter, EmitterWriter}; +use syntax::edition::Edition; use syntax::json::JsonEmitter; use syntax::feature_gate; use syntax::parse; use syntax::parse::ParseSess; use syntax::{ast, codemap}; use syntax::feature_gate::AttributeType; -use syntax_pos::{Span, MultiSpan}; +use syntax_pos::{MultiSpan, Span}; +use util::profiling::SelfProfiler; -use rustc_back::{LinkerFlavor, PanicStrategy}; -use rustc_back::target::Target; +use rustc_target::spec::{LinkerFlavor, PanicStrategy}; +use rustc_target::spec::{Target, TargetTriple}; use rustc_data_structures::flock; use jobserver::Client; +use std; use std::cell::{self, Cell, RefCell}; use std::collections::HashMap; use std::env; use std::fmt; use std::io::Write; use std::path::{Path, PathBuf}; -use std::rc::Rc; -use std::sync::{Once, ONCE_INIT}; use std::time::Duration; +use std::sync::mpsc; +use std::sync::atomic::{AtomicUsize, Ordering}; mod code_stats; pub mod config; @@ -62,10 +69,9 @@ pub struct Session { pub 
opts: config::Options, pub parse_sess: ParseSess, /// For a library crate, this is always none - pub entry_fn: RefCell>, - pub entry_type: Cell>, - pub plugin_registrar_fn: Cell>, - pub derive_registrar_fn: Cell>, + pub entry_fn: Once>, + pub plugin_registrar_fn: Once>, + pub derive_registrar_fn: Once>, pub default_sysroot: Option, /// The name of the root source file of the crate, in the local file system. /// `None` means that there is no source file. @@ -73,88 +79,102 @@ pub struct Session { /// The directory the compiler has been executed in plus a flag indicating /// if the value stored here has been affected by path remapping. pub working_dir: (PathBuf, bool), - pub lint_store: RefCell, - pub buffered_lints: RefCell>, + + // FIXME: lint_store and buffered_lints are not thread-safe, + // but are only used in a single thread + pub lint_store: RwLock, + pub buffered_lints: Lock>, + /// Set of (DiagnosticId, Option, message) tuples tracking /// (sub)diagnostics that have been set once, but should not be set again, /// in order to avoid redundantly verbose output (Issue #24690, #44953). - pub one_time_diagnostics: RefCell, String)>>, - pub plugin_llvm_passes: RefCell>, - pub plugin_attributes: RefCell>, - pub crate_types: RefCell>, - pub dependency_formats: RefCell, - /// The crate_disambiguator is constructed out of all the `-C metadata` + pub one_time_diagnostics: Lock, String)>>, + pub plugin_llvm_passes: OneThread>>, + pub plugin_attributes: OneThread>>, + pub crate_types: Once>, + pub dependency_formats: Once, + /// The crate_disambiguator is constructed out of all the `-C metadata` /// arguments passed to the compiler. Its value together with the crate-name /// forms a unique global identifier for the crate. It is used to allow /// multiple crates with the same name to coexist. See the - /// trans::back::symbol_names module for more information. 
- pub crate_disambiguator: RefCell>, - pub features: RefCell, + /// rustc_codegen_llvm::back::symbol_names module for more information. + pub crate_disambiguator: Once, + + features: Once, /// The maximum recursion limit for potentially infinitely recursive /// operations such as auto-dereference and monomorphization. - pub recursion_limit: Cell, + pub recursion_limit: Once, /// The maximum length of types during monomorphization. - pub type_length_limit: Cell, + pub type_length_limit: Once, + + /// The maximum number of stackframes allowed in const eval + pub const_eval_stack_frame_limit: usize, /// The metadata::creader module may inject an allocator/panic_runtime /// dependency if it didn't already find one, and this tracks what was /// injected. - pub injected_allocator: Cell>, - pub allocator_kind: Cell>, - pub injected_panic_runtime: Cell>, + pub injected_allocator: Once>, + pub allocator_kind: Once>, + pub injected_panic_runtime: Once>, /// Map from imported macro spans (which consist of /// the localized span for the macro body) to the /// macro name and definition span in the source crate. - pub imported_macro_spans: RefCell>, + pub imported_macro_spans: OneThread>>, - incr_comp_session: RefCell, + incr_comp_session: OneThread>, + + /// Used by -Z profile-queries in util::common + pub profile_channel: Lock>>, + + /// Used by -Z self-profile + pub self_profiling: Lock, /// Some measurements that are being gathered during compilation. pub perf_stats: PerfStats, /// Data about code being compiled, gathered during compilation. - pub code_stats: RefCell, + pub code_stats: Lock, - next_node_id: Cell, + next_node_id: OneThread>, /// If -zfuel=crate=n is specified, Some(crate). optimization_fuel_crate: Option, /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0. - optimization_fuel_limit: Cell, + optimization_fuel_limit: LockCell, /// We're rejecting all further optimizations. 
- out_of_fuel: Cell, + out_of_fuel: LockCell, // The next two are public because the driver needs to read them. - /// If -zprint-fuel=crate, Some(crate). pub print_fuel_crate: Option, /// Always set to zero and incremented so that we can print fuel expended by a crate. - pub print_fuel: Cell, + pub print_fuel: LockCell, /// Loaded up early on in the initialization of this `Session` to avoid /// false positives about a job server in our environment. - pub jobserver_from_env: Option, + pub jobserver: Client, /// Metadata about the allocators for the current crate being compiled - pub has_global_allocator: Cell, + pub has_global_allocator: Once, + + /// Cap lint level specified by a driver specifically. + pub driver_lint_caps: FxHashMap, } pub struct PerfStats { - /// The accumulated time needed for computing the SVH of the crate - pub svh_time: Cell, - /// The accumulated time spent on computing incr. comp. hashes - pub incr_comp_hashes_time: Cell, - /// The number of incr. comp. hash computations performed - pub incr_comp_hashes_count: Cell, - /// The number of bytes hashed when computing ICH values - pub incr_comp_bytes_hashed: Cell, /// The accumulated time spent on computing symbol hashes - pub symbol_hash_time: Cell, + pub symbol_hash_time: Lock, /// The accumulated time spent decoding def path tables from metadata - pub decode_def_path_tables_time: Cell, + pub decode_def_path_tables_time: Lock, + /// Total number of values canonicalized queries constructed. + pub queries_canonicalized: AtomicUsize, + /// Number of times this query is invoked. + pub normalize_ty_after_erasing_regions: AtomicUsize, + /// Number of times this query is invoked. 
+ pub normalize_projection_ty: AtomicUsize, } /// Enum to support dispatch of one-time diagnostics (in Session.diag_once) @@ -162,7 +182,7 @@ enum DiagnosticBuilderMethod { Note, SpanNote, SpanSuggestion(String), // suggestion - // add more variants as needed to support one-time diagnostics + // add more variants as needed to support one-time diagnostics } /// Diagnostic message ID—used by `Session.one_time_diagnostics` to avoid @@ -171,7 +191,7 @@ enum DiagnosticBuilderMethod { pub enum DiagnosticMessageId { ErrorId(u16), // EXXXX error code as integer LintId(lint::LintId), - StabilityId(u32) // issue number + StabilityId(u32), // issue number } impl From<&'static lint::Lint> for DiagnosticMessageId { @@ -182,38 +202,40 @@ impl From<&'static lint::Lint> for DiagnosticMessageId { impl Session { pub fn local_crate_disambiguator(&self) -> CrateDisambiguator { - match *self.crate_disambiguator.borrow() { - Some(value) => value, - None => bug!("accessing disambiguator before initialization"), - } + *self.crate_disambiguator.get() } - pub fn struct_span_warn<'a, S: Into>(&'a self, - sp: S, - msg: &str) - -> DiagnosticBuilder<'a> { + + pub fn struct_span_warn<'a, S: Into>( + &'a self, + sp: S, + msg: &str, + ) -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_warn(sp, msg) } - pub fn struct_span_warn_with_code<'a, S: Into>(&'a self, - sp: S, - msg: &str, - code: DiagnosticId) - -> DiagnosticBuilder<'a> { + pub fn struct_span_warn_with_code<'a, S: Into>( + &'a self, + sp: S, + msg: &str, + code: DiagnosticId, + ) -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_warn_with_code(sp, msg, code) } - pub fn struct_warn<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { + pub fn struct_warn<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { self.diagnostic().struct_warn(msg) } - pub fn struct_span_err<'a, S: Into>(&'a self, - sp: S, - msg: &str) - -> DiagnosticBuilder<'a> { + pub fn struct_span_err<'a, S: Into>( + &'a self, + sp: S, + msg: &str, + ) -> 
DiagnosticBuilder<'a> { self.diagnostic().struct_span_err(sp, msg) } - pub fn struct_span_err_with_code<'a, S: Into>(&'a self, - sp: S, - msg: &str, - code: DiagnosticId) - -> DiagnosticBuilder<'a> { + pub fn struct_span_err_with_code<'a, S: Into>( + &'a self, + sp: S, + msg: &str, + code: DiagnosticId, + ) -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_err_with_code(sp, msg, code) } // FIXME: This method should be removed (every error should have an associated error code). @@ -227,25 +249,27 @@ impl Session { ) -> DiagnosticBuilder<'a> { self.diagnostic().struct_err_with_code(msg, code) } - pub fn struct_span_fatal<'a, S: Into>(&'a self, - sp: S, - msg: &str) - -> DiagnosticBuilder<'a> { + pub fn struct_span_fatal<'a, S: Into>( + &'a self, + sp: S, + msg: &str, + ) -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_fatal(sp, msg) } - pub fn struct_span_fatal_with_code<'a, S: Into>(&'a self, - sp: S, - msg: &str, - code: DiagnosticId) - -> DiagnosticBuilder<'a> { + pub fn struct_span_fatal_with_code<'a, S: Into>( + &'a self, + sp: S, + msg: &str, + code: DiagnosticId, + ) -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_fatal_with_code(sp, msg, code) } - pub fn struct_fatal<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { + pub fn struct_fatal<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { self.diagnostic().struct_fatal(msg) } pub fn span_fatal>(&self, sp: S, msg: &str) -> ! { - panic!(self.diagnostic().span_fatal(sp, msg)) + self.diagnostic().span_fatal(sp, msg).raise() } pub fn span_fatal_with_code>( &self, @@ -253,10 +277,12 @@ impl Session { msg: &str, code: DiagnosticId, ) -> ! { - panic!(self.diagnostic().span_fatal_with_code(sp, msg, code)) + self.diagnostic() + .span_fatal_with_code(sp, msg, code) + .raise() } pub fn fatal(&self, msg: &str) -> ! 
{ - panic!(self.diagnostic().fatal(msg)) + self.diagnostic().fatal(msg).raise() } pub fn span_err_or_warn>(&self, is_warning: bool, sp: S, msg: &str) { if is_warning { @@ -287,7 +313,8 @@ impl Session { compile_result_from_err_count(self.err_count()) } pub fn track_errors(&self, f: F) -> Result - where F: FnOnce() -> T + where + F: FnOnce() -> T, { let old_count = self.err_count(); let result = f(); @@ -330,13 +357,31 @@ impl Session { self.diagnostic().unimpl(msg) } - pub fn buffer_lint>(&self, - lint: &'static lint::Lint, - id: ast::NodeId, - sp: S, - msg: &str) { + pub fn buffer_lint>( + &self, + lint: &'static lint::Lint, + id: ast::NodeId, + sp: S, + msg: &str, + ) { match *self.buffered_lints.borrow_mut() { - Some(ref mut buffer) => buffer.add_lint(lint, id, sp.into(), msg), + Some(ref mut buffer) => { + buffer.add_lint(lint, id, sp.into(), msg, BuiltinLintDiagnostics::Normal) + } + None => bug!("can't buffer lints after HIR lowering"), + } + } + + pub fn buffer_lint_with_diagnostic>( + &self, + lint: &'static lint::Lint, + id: ast::NodeId, + sp: S, + msg: &str, + diagnostic: BuiltinLintDiagnostics, + ) { + match *self.buffered_lints.borrow_mut() { + Some(ref mut buffer) => buffer.add_lint(lint, id, sp.into(), msg, diagnostic), None => bug!("can't buffer lints after HIR lowering"), } } @@ -348,7 +393,7 @@ impl Session { Some(next) => { self.next_node_id.set(ast::NodeId::new(next)); } - None => bug!("Input too large, ran out of node ids!") + None => bug!("Input too large, ran out of node ids!"), } id @@ -362,24 +407,27 @@ impl Session { /// Analogous to calling methods on the given `DiagnosticBuilder`, but /// deduplicates on lint ID, span (if any), and message for this `Session` - fn diag_once<'a, 'b>(&'a self, - diag_builder: &'b mut DiagnosticBuilder<'a>, - method: DiagnosticBuilderMethod, - msg_id: DiagnosticMessageId, - message: &str, - span_maybe: Option) { - + fn diag_once<'a, 'b>( + &'a self, + diag_builder: &'b mut DiagnosticBuilder<'a>, + method: 
DiagnosticBuilderMethod, + msg_id: DiagnosticMessageId, + message: &str, + span_maybe: Option, + ) { let id_span_message = (msg_id, span_maybe, message.to_owned()); - let fresh = self.one_time_diagnostics.borrow_mut().insert(id_span_message); + let fresh = self.one_time_diagnostics + .borrow_mut() + .insert(id_span_message); if fresh { match method { DiagnosticBuilderMethod::Note => { diag_builder.note(message); - }, + } DiagnosticBuilderMethod::SpanNote => { let span = span_maybe.expect("span_note needs a span"); diag_builder.span_note(span, message); - }, + } DiagnosticBuilderMethod::SpanSuggestion(suggestion) => { let span = span_maybe.expect("span_suggestion needs a span"); diag_builder.span_suggestion(span, message, suggestion); @@ -388,37 +436,66 @@ impl Session { } } - pub fn diag_span_note_once<'a, 'b>(&'a self, - diag_builder: &'b mut DiagnosticBuilder<'a>, - msg_id: DiagnosticMessageId, span: Span, message: &str) { - self.diag_once(diag_builder, DiagnosticBuilderMethod::SpanNote, - msg_id, message, Some(span)); + pub fn diag_span_note_once<'a, 'b>( + &'a self, + diag_builder: &'b mut DiagnosticBuilder<'a>, + msg_id: DiagnosticMessageId, + span: Span, + message: &str, + ) { + self.diag_once( + diag_builder, + DiagnosticBuilderMethod::SpanNote, + msg_id, + message, + Some(span), + ); } - pub fn diag_note_once<'a, 'b>(&'a self, - diag_builder: &'b mut DiagnosticBuilder<'a>, - msg_id: DiagnosticMessageId, message: &str) { - self.diag_once(diag_builder, DiagnosticBuilderMethod::Note, msg_id, message, None); + pub fn diag_note_once<'a, 'b>( + &'a self, + diag_builder: &'b mut DiagnosticBuilder<'a>, + msg_id: DiagnosticMessageId, + message: &str, + ) { + self.diag_once( + diag_builder, + DiagnosticBuilderMethod::Note, + msg_id, + message, + None, + ); } - pub fn diag_span_suggestion_once<'a, 'b>(&'a self, - diag_builder: &'b mut DiagnosticBuilder<'a>, - msg_id: DiagnosticMessageId, - span: Span, - message: &str, - suggestion: String) { - 
self.diag_once(diag_builder, DiagnosticBuilderMethod::SpanSuggestion(suggestion), - msg_id, message, Some(span)); + pub fn diag_span_suggestion_once<'a, 'b>( + &'a self, + diag_builder: &'b mut DiagnosticBuilder<'a>, + msg_id: DiagnosticMessageId, + span: Span, + message: &str, + suggestion: String, + ) { + self.diag_once( + diag_builder, + DiagnosticBuilderMethod::SpanSuggestion(suggestion), + msg_id, + message, + Some(span), + ); } pub fn codemap<'a>(&'a self) -> &'a codemap::CodeMap { self.parse_sess.codemap() } - pub fn verbose(&self) -> bool { self.opts.debugging_opts.verbose } - pub fn time_passes(&self) -> bool { self.opts.debugging_opts.time_passes } + pub fn verbose(&self) -> bool { + self.opts.debugging_opts.verbose + } + pub fn time_passes(&self) -> bool { + self.opts.debugging_opts.time_passes + } pub fn profile_queries(&self) -> bool { - self.opts.debugging_opts.profile_queries || - self.opts.debugging_opts.profile_queries_and_keys + self.opts.debugging_opts.profile_queries + || self.opts.debugging_opts.profile_queries_and_keys } pub fn profile_queries_and_keys(&self) -> bool { self.opts.debugging_opts.profile_queries_and_keys @@ -429,85 +506,114 @@ impl Session { pub fn time_llvm_passes(&self) -> bool { self.opts.debugging_opts.time_llvm_passes } - pub fn trans_stats(&self) -> bool { self.opts.debugging_opts.trans_stats } - pub fn meta_stats(&self) -> bool { self.opts.debugging_opts.meta_stats } - pub fn asm_comments(&self) -> bool { self.opts.debugging_opts.asm_comments } - pub fn no_verify(&self) -> bool { self.opts.debugging_opts.no_verify } - pub fn borrowck_stats(&self) -> bool { self.opts.debugging_opts.borrowck_stats } + pub fn codegen_stats(&self) -> bool { + self.opts.debugging_opts.codegen_stats + } + pub fn meta_stats(&self) -> bool { + self.opts.debugging_opts.meta_stats + } + pub fn asm_comments(&self) -> bool { + self.opts.debugging_opts.asm_comments + } + pub fn verify_llvm_ir(&self) -> bool { + self.opts.debugging_opts.verify_llvm_ir + 
} + pub fn borrowck_stats(&self) -> bool { + self.opts.debugging_opts.borrowck_stats + } pub fn print_llvm_passes(&self) -> bool { self.opts.debugging_opts.print_llvm_passes } - /// If true, we should use NLL-style region checking instead of - /// lexical style. - pub fn nll(&self) -> bool { - self.features.borrow().nll || self.opts.debugging_opts.nll + /// Get the features enabled for the current compilation session. + /// DO NOT USE THIS METHOD if there is a TyCtxt available, as it circumvents + /// dependency tracking. Use tcx.features() instead. + #[inline] + pub fn features_untracked(&self) -> &feature_gate::Features { + self.features.get() } - /// If true, we should use the MIR-based borrowck (we may *also* use - /// the AST-based borrowck). - pub fn use_mir(&self) -> bool { - self.borrowck_mode().use_mir() + pub fn init_features(&self, features: feature_gate::Features) { + self.features.set(features); } - /// If true, we should gather causal information during NLL - /// checking. This will eventually be the normal thing, but right - /// now it is too unoptimized. - pub fn nll_dump_cause(&self) -> bool { - self.opts.debugging_opts.nll_dump_cause - } + /// Calculates the flavor of LTO to use for this compilation. + pub fn lto(&self) -> config::Lto { + // If our target has codegen requirements ignore the command line + if self.target.target.options.requires_lto { + return config::Lto::Fat; + } - /// If true, we should enable two-phase borrows checks. This is - /// done with either `-Ztwo-phase-borrows` or with - /// `#![feature(nll)]`. - pub fn two_phase_borrows(&self) -> bool { - self.features.borrow().nll || self.opts.debugging_opts.two_phase_borrows - } + // If the user specified something, return that. If they only said `-C + // lto` and we've for whatever reason forced off ThinLTO via the CLI, + // then ensure we can't use a ThinLTO. 
+ match self.opts.cg.lto { + config::Lto::No => {} + config::Lto::Yes if self.opts.cli_forced_thinlto_off => return config::Lto::Fat, + other => return other, + } - /// What mode(s) of borrowck should we run? AST? MIR? both? - /// (Also considers the `#![feature(nll)]` setting.) - pub fn borrowck_mode(&self) -> BorrowckMode { - match self.opts.borrowck_mode { - mode @ BorrowckMode::Mir | - mode @ BorrowckMode::Compare => mode, + // Ok at this point the target doesn't require anything and the user + // hasn't asked for anything. Our next decision is whether or not + // we enable "auto" ThinLTO where we use multiple codegen units and + // then do ThinLTO over those codegen units. The logic below will + // either return `No` or `ThinLocal`. - mode @ BorrowckMode::Ast => { - if self.nll() { - BorrowckMode::Mir - } else { - mode - } + // If processing command line options determined that we're incompatible + // with ThinLTO (e.g. `-C lto --emit llvm-ir`) then return that option. + if self.opts.cli_forced_thinlto_off { + return config::Lto::No; + } + + // If `-Z thinlto` specified process that, but note that this is mostly + // a deprecated option now that `-C lto=thin` exists. + if let Some(enabled) = self.opts.debugging_opts.thinlto { + if enabled { + return config::Lto::ThinLocal; + } else { + return config::Lto::No; } + } + // If there's only one codegen unit and LTO isn't enabled then there's + // no need for ThinLTO so just return false. + if self.codegen_units() == 1 { + return config::Lto::No; + } + + // Right now ThinLTO isn't compatible with incremental compilation. + if self.opts.incremental.is_some() { + return config::Lto::No; + } + + // Now we're in "defaults" territory. By default we enable ThinLTO for + // optimized compiles (anything greater than O0). + match self.opts.optimize { + config::OptLevel::No => config::Lto::No, + _ => config::Lto::ThinLocal, } } - /// Should we emit EndRegion MIR statements? 
These are consumed by - /// MIR borrowck, but not when NLL is used. They are also consumed - /// by the validation stuff. - pub fn emit_end_regions(&self) -> bool { - // FIXME(#46875) -- we should not emit end regions when NLL is enabled, - // but for now we can't stop doing so because it causes false positives - self.opts.debugging_opts.emit_end_regions || - self.opts.debugging_opts.mir_emit_validate > 0 || - self.use_mir() - } - - pub fn lto(&self) -> bool { - self.opts.cg.lto || self.target.target.options.requires_lto - } /// Returns the panic strategy for this compile session. If the user explicitly selected one /// using '-C panic', use that, otherwise use the panic strategy defined by the target. pub fn panic_strategy(&self) -> PanicStrategy { - self.opts.cg.panic.unwrap_or(self.target.target.options.panic_strategy) + self.opts + .cg + .panic + .unwrap_or(self.target.target.options.panic_strategy) } pub fn linker_flavor(&self) -> LinkerFlavor { - self.opts.debugging_opts.linker_flavor.unwrap_or(self.target.target.linker_flavor) + self.opts + .debugging_opts + .linker_flavor + .unwrap_or(self.target.target.linker_flavor) } pub fn fewer_names(&self) -> bool { - let more_names = self.opts.output_types.contains_key(&OutputType::LlvmAssembly) || - self.opts.output_types.contains_key(&OutputType::Bitcode); + let more_names = self.opts + .output_types + .contains_key(&OutputType::LlvmAssembly) + || self.opts.output_types.contains_key(&OutputType::Bitcode); self.opts.debugging_opts.fewer_names || !more_names } @@ -517,11 +623,10 @@ impl Session { pub fn unstable_options(&self) -> bool { self.opts.debugging_opts.unstable_options } - pub fn nonzeroing_move_hints(&self) -> bool { - self.opts.debugging_opts.enable_nonzeroing_move_hints - } pub fn overflow_checks(&self) -> bool { - self.opts.cg.overflow_checks + self.opts + .cg + .overflow_checks .or(self.opts.debugging_opts.force_overflow_checks) .unwrap_or(self.opts.debug_assertions) } @@ -551,53 +656,70 @@ impl Session 
{ } } + pub fn target_cpu(&self) -> &str { + match self.opts.cg.target_cpu { + Some(ref s) => &**s, + None => &*self.target.target.options.cpu + } + } + pub fn must_not_eliminate_frame_pointers(&self) -> bool { - self.opts.debuginfo != DebugInfoLevel::NoDebugInfo || - !self.target.target.options.eliminate_frame_pointer + if let Some(x) = self.opts.cg.force_frame_pointers { + x + } else { + !self.target.target.options.eliminate_frame_pointer + } } /// Returns the symbol name for the registrar function, /// given the crate Svh and the function DefIndex. - pub fn generate_plugin_registrar_symbol(&self, disambiguator: CrateDisambiguator, - index: DefIndex) - -> String { - format!("__rustc_plugin_registrar__{}_{}", disambiguator.to_fingerprint().to_hex(), - index.to_proc_macro_index()) + pub fn generate_plugin_registrar_symbol(&self, disambiguator: CrateDisambiguator) -> String { + format!( + "__rustc_plugin_registrar_{}__", + disambiguator.to_fingerprint().to_hex() + ) } - pub fn generate_derive_registrar_symbol(&self, disambiguator: CrateDisambiguator, - index: DefIndex) - -> String { - format!("__rustc_derive_registrar__{}_{}", disambiguator.to_fingerprint().to_hex(), - index.to_proc_macro_index()) + pub fn generate_derive_registrar_symbol(&self, disambiguator: CrateDisambiguator) -> String { + format!( + "__rustc_derive_registrar_{}__", + disambiguator.to_fingerprint().to_hex() + ) } pub fn sysroot<'a>(&'a self) -> &'a Path { match self.opts.maybe_sysroot { - Some (ref sysroot) => sysroot, - None => self.default_sysroot.as_ref() - .expect("missing sysroot and default_sysroot in Session") + Some(ref sysroot) => sysroot, + None => self.default_sysroot + .as_ref() + .expect("missing sysroot and default_sysroot in Session"), } } pub fn target_filesearch(&self, kind: PathKind) -> filesearch::FileSearch { - filesearch::FileSearch::new(self.sysroot(), - &self.opts.target_triple, - &self.opts.search_paths, - kind) + filesearch::FileSearch::new( + self.sysroot(), + 
self.opts.target_triple.triple(), + &self.opts.search_paths, + kind, + ) } pub fn host_filesearch(&self, kind: PathKind) -> filesearch::FileSearch { filesearch::FileSearch::new( self.sysroot(), config::host_triple(), &self.opts.search_paths, - kind) + kind, + ) } pub fn set_incr_session_load_dep_graph(&self, load: bool) { let mut incr_comp_session = self.incr_comp_session.borrow_mut(); match *incr_comp_session { - IncrCompSession::Active { ref mut load_dep_graph, .. } => { + IncrCompSession::Active { + ref mut load_dep_graph, + .. + } => { *load_dep_graph = load; } _ => {} @@ -612,14 +734,20 @@ impl Session { } } - pub fn init_incr_comp_session(&self, - session_dir: PathBuf, - lock_file: flock::Lock, - load_dep_graph: bool) { + pub fn init_incr_comp_session( + &self, + session_dir: PathBuf, + lock_file: flock::Lock, + load_dep_graph: bool, + ) { let mut incr_comp_session = self.incr_comp_session.borrow_mut(); - if let IncrCompSession::NotInitialized = *incr_comp_session { } else { - bug!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session) + if let IncrCompSession::NotInitialized = *incr_comp_session { + } else { + bug!( + "Trying to initialize IncrCompSession `{:?}`", + *incr_comp_session + ) } *incr_comp_session = IncrCompSession::Active { @@ -632,8 +760,12 @@ impl Session { pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) { let mut incr_comp_session = self.incr_comp_session.borrow_mut(); - if let IncrCompSession::Active { .. } = *incr_comp_session { } else { - bug!("Trying to finalize IncrCompSession `{:?}`", *incr_comp_session) + if let IncrCompSession::Active { .. 
} = *incr_comp_session { + } else { + bug!( + "Trying to finalize IncrCompSession `{:?}`", + *incr_comp_session + ) } // Note: This will also drop the lock file, thus unlocking the directory @@ -646,35 +778,42 @@ impl Session { let mut incr_comp_session = self.incr_comp_session.borrow_mut(); let session_directory = match *incr_comp_session { - IncrCompSession::Active { ref session_directory, .. } => { - session_directory.clone() - } + IncrCompSession::Active { + ref session_directory, + .. + } => session_directory.clone(), IncrCompSession::InvalidBecauseOfErrors { .. } => return, - _ => bug!("Trying to invalidate IncrCompSession `{:?}`", - *incr_comp_session), + _ => bug!( + "Trying to invalidate IncrCompSession `{:?}`", + *incr_comp_session + ), }; // Note: This will also drop the lock file, thus unlocking the directory - *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { - session_directory, - }; + *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { session_directory }; } pub fn incr_comp_session_dir(&self) -> cell::Ref { let incr_comp_session = self.incr_comp_session.borrow(); - cell::Ref::map(incr_comp_session, |incr_comp_session| { - match *incr_comp_session { - IncrCompSession::NotInitialized => { - bug!("Trying to get session directory from IncrCompSession `{:?}`", - *incr_comp_session) + cell::Ref::map( + incr_comp_session, + |incr_comp_session| match *incr_comp_session { + IncrCompSession::NotInitialized => bug!( + "Trying to get session directory from IncrCompSession `{:?}`", + *incr_comp_session + ), + IncrCompSession::Active { + ref session_directory, + .. } - IncrCompSession::Active { ref session_directory, .. 
} | - IncrCompSession::Finalized { ref session_directory } | - IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => { - session_directory + | IncrCompSession::Finalized { + ref session_directory, } - } - }) + | IncrCompSession::InvalidBecauseOfErrors { + ref session_directory, + } => session_directory, + }, + ) } pub fn incr_comp_session_dir_opt(&self) -> Option> { @@ -685,26 +824,36 @@ impl Session { } } + pub fn profiler ()>(&self, f: F) { + let mut profiler = self.self_profiling.borrow_mut(); + f(&mut profiler); + } + + pub fn print_profiler_results(&self) { + let mut profiler = self.self_profiling.borrow_mut(); + profiler.print_results(&self.opts); + } + + pub fn save_json_results(&self) { + let profiler = self.self_profiling.borrow(); + profiler.save_results(&self.opts); + } + pub fn print_perf_stats(&self) { - println!("Total time spent computing SVHs: {}", - duration_to_secs_str(self.perf_stats.svh_time.get())); - println!("Total time spent computing incr. comp. hashes: {}", - duration_to_secs_str(self.perf_stats.incr_comp_hashes_time.get())); - println!("Total number of incr. comp. hashes computed: {}", - self.perf_stats.incr_comp_hashes_count.get()); - println!("Total number of bytes hashed for incr. comp.: {}", - self.perf_stats.incr_comp_bytes_hashed.get()); - if self.perf_stats.incr_comp_hashes_count.get() != 0 { - println!("Average bytes hashed per incr. comp. HIR node: {}", - self.perf_stats.incr_comp_bytes_hashed.get() / - self.perf_stats.incr_comp_hashes_count.get()); - } else { - println!("Average bytes hashed per incr. comp. 
HIR node: N/A"); - } - println!("Total time spent computing symbol hashes: {}", - duration_to_secs_str(self.perf_stats.symbol_hash_time.get())); - println!("Total time spent decoding DefPath tables: {}", - duration_to_secs_str(self.perf_stats.decode_def_path_tables_time.get())); + println!( + "Total time spent computing symbol hashes: {}", + duration_to_secs_str(*self.perf_stats.symbol_hash_time.lock()) + ); + println!( + "Total time spent decoding DefPath tables: {}", + duration_to_secs_str(*self.perf_stats.decode_def_path_tables_time.lock()) + ); + println!("Total queries canonicalized: {}", + self.perf_stats.queries_canonicalized.load(Ordering::Relaxed)); + println!("normalize_ty_after_erasing_regions: {}", + self.perf_stats.normalize_ty_after_erasing_regions.load(Ordering::Relaxed)); + println!("normalize_projection_ty: {}", + self.perf_stats.normalize_projection_ty.load(Ordering::Relaxed)); } /// We want to know if we're allowed to do an optimization for crate foo from -z fuel=foo=n. 
@@ -713,50 +862,58 @@ impl Session { let mut ret = true; match self.optimization_fuel_crate { Some(ref c) if c == crate_name => { + assert!(self.query_threads() == 1); let fuel = self.optimization_fuel_limit.get(); ret = fuel != 0; if fuel == 0 && !self.out_of_fuel.get() { println!("optimization-fuel-exhausted: {}", msg()); self.out_of_fuel.set(true); } else if fuel > 0 { - self.optimization_fuel_limit.set(fuel-1); + self.optimization_fuel_limit.set(fuel - 1); } } _ => {} } match self.print_fuel_crate { - Some(ref c) if c == crate_name=> { - self.print_fuel.set(self.print_fuel.get()+1); - }, + Some(ref c) if c == crate_name => { + assert!(self.query_threads() == 1); + self.print_fuel.set(self.print_fuel.get() + 1); + } _ => {} } ret } + /// Returns the number of query threads that should be used for this + /// compilation + pub fn query_threads_from_opts(opts: &config::Options) -> usize { + opts.debugging_opts.query_threads.unwrap_or(1) + } + /// Returns the number of query threads that should be used for this /// compilation pub fn query_threads(&self) -> usize { - self.opts.debugging_opts.query_threads.unwrap_or(1) + Self::query_threads_from_opts(&self.opts) } /// Returns the number of codegen units that should be used for this /// compilation pub fn codegen_units(&self) -> usize { if let Some(n) = self.opts.cli_forced_codegen_units { - return n + return n; } if let Some(n) = self.target.target.options.default_codegen_units { - return n as usize + return n as usize; } // Why is 16 codegen units the default all the time? // // The main reason for enabling multiple codegen units by default is to - // leverage the ability for the trans backend to do translation and - // codegen in parallel. This allows us, especially for large crates, to + // leverage the ability for the codegen backend to do codegen and + // optimization in parallel. 
This allows us, especially for large crates, to // make good use of all available resources on the machine once we've // hit that stage of compilation. Large crates especially then often - // take a long time in trans/codegen and this helps us amortize that + // take a long time in codegen/optimization and this helps us amortize that // cost. // // Note that a high number here doesn't mean that we'll be spawning a @@ -802,62 +959,48 @@ impl Session { 16 } - /// Returns whether ThinLTO is enabled for this compilation - pub fn thinlto(&self) -> bool { - // If processing command line options determined that we're incompatible - // with ThinLTO (e.g. `-C lto --emit llvm-ir`) then return that option. - if let Some(enabled) = self.opts.cli_forced_thinlto { - return enabled - } + pub fn teach(&self, code: &DiagnosticId) -> bool { + self.opts.debugging_opts.teach && self.diagnostic().must_teach(code) + } - // If explicitly specified, use that with the next highest priority - if let Some(enabled) = self.opts.debugging_opts.thinlto { - return enabled - } + /// Are we allowed to use features from the Rust 2018 edition? + pub fn rust_2018(&self) -> bool { + self.opts.edition >= Edition::Edition2018 + } - // If there's only one codegen unit and LTO isn't enabled then there's - // no need for ThinLTO so just return false. - if self.codegen_units() == 1 && !self.lto() { - return false - } - - // Right now ThinLTO isn't compatible with incremental compilation. - if self.opts.incremental.is_some() { - return false - } - - // Now we're in "defaults" territory. By default we enable ThinLTO for - // optimized compiles (anything greater than O0). 
- match self.opts.optimize { - config::OptLevel::No => false, - _ => true, - } + pub fn edition(&self) -> Edition { + self.opts.edition } } -pub fn build_session(sopts: config::Options, - local_crate_source_file: Option, - registry: errors::registry::Registry) - -> Session { +pub fn build_session( + sopts: config::Options, + local_crate_source_file: Option, + registry: errors::registry::Registry, +) -> Session { let file_path_mapping = sopts.file_path_mapping(); - build_session_with_codemap(sopts, - local_crate_source_file, - registry, - Rc::new(codemap::CodeMap::new(file_path_mapping)), - None) + build_session_with_codemap( + sopts, + local_crate_source_file, + registry, + Lrc::new(codemap::CodeMap::new(file_path_mapping)), + None, + ) } -pub fn build_session_with_codemap(sopts: config::Options, - local_crate_source_file: Option, - registry: errors::registry::Registry, - codemap: Rc, - emitter_dest: Option>) - -> Session { +pub fn build_session_with_codemap( + sopts: config::Options, + local_crate_source_file: Option, + registry: errors::registry::Registry, + codemap: Lrc, + emitter_dest: Option>, +) -> Session { // FIXME: This is not general enough to make the warning lint completely override // normal diagnostic warnings, since the warning lint can also be denied and changed // later via the source code. 
- let warnings_allow = sopts.lint_opts + let warnings_allow = sopts + .lint_opts .iter() .filter(|&&(ref key, _)| *key == "warnings") .map(|&(_, ref level)| *level == lint::Allow) @@ -868,55 +1011,74 @@ pub fn build_session_with_codemap(sopts: config::Options, let can_emit_warnings = !(warnings_allow || cap_lints_allow); let treat_err_as_bug = sopts.debugging_opts.treat_err_as_bug; + let report_delayed_bugs = sopts.debugging_opts.report_delayed_bugs; let external_macro_backtrace = sopts.debugging_opts.external_macro_backtrace; - let emitter: Box = match (sopts.error_format, emitter_dest) { - (config::ErrorOutputType::HumanReadable(color_config), None) => { - Box::new(EmitterWriter::stderr(color_config, Some(codemap.clone()), false)) - } - (config::ErrorOutputType::HumanReadable(_), Some(dst)) => { - Box::new(EmitterWriter::new(dst, Some(codemap.clone()), false)) - } - (config::ErrorOutputType::Json(pretty), None) => { - Box::new(JsonEmitter::stderr(Some(registry), codemap.clone(), pretty)) - } - (config::ErrorOutputType::Json(pretty), Some(dst)) => { - Box::new(JsonEmitter::new(dst, Some(registry), codemap.clone(), pretty)) - } - (config::ErrorOutputType::Short(color_config), None) => { - Box::new(EmitterWriter::stderr(color_config, Some(codemap.clone()), true)) - } - (config::ErrorOutputType::Short(_), Some(dst)) => { - Box::new(EmitterWriter::new(dst, Some(codemap.clone()), true)) - } - }; + let emitter: Box = + match (sopts.error_format, emitter_dest) { + (config::ErrorOutputType::HumanReadable(color_config), None) => Box::new( + EmitterWriter::stderr( + color_config, + Some(codemap.clone()), + false, + sopts.debugging_opts.teach, + ).ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::HumanReadable(_), Some(dst)) => Box::new( + EmitterWriter::new(dst, Some(codemap.clone()), false, false) + .ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::Json(pretty), None) => Box::new( + JsonEmitter::stderr( + 
Some(registry), + codemap.clone(), + pretty, + ).ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::Json(pretty), Some(dst)) => Box::new( + JsonEmitter::new( + dst, + Some(registry), + codemap.clone(), + pretty, + ).ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::Short(color_config), None) => Box::new( + EmitterWriter::stderr(color_config, Some(codemap.clone()), true, false), + ), + (config::ErrorOutputType::Short(_), Some(dst)) => { + Box::new(EmitterWriter::new(dst, Some(codemap.clone()), true, false)) + } + }; - let diagnostic_handler = - errors::Handler::with_emitter_and_flags( - emitter, - errors::HandlerFlags { - can_emit_warnings, - treat_err_as_bug, - external_macro_backtrace, - .. Default::default() - }); + let diagnostic_handler = errors::Handler::with_emitter_and_flags( + emitter, + errors::HandlerFlags { + can_emit_warnings, + treat_err_as_bug, + report_delayed_bugs, + external_macro_backtrace, + ..Default::default() + }, + ); - build_session_(sopts, - local_crate_source_file, - diagnostic_handler, - codemap) + build_session_(sopts, local_crate_source_file, diagnostic_handler, codemap) } -pub fn build_session_(sopts: config::Options, - local_crate_source_file: Option, - span_diagnostic: errors::Handler, - codemap: Rc) - -> Session { - let host = match Target::search(config::host_triple()) { +pub fn build_session_( + sopts: config::Options, + local_crate_source_file: Option, + span_diagnostic: errors::Handler, + codemap: Lrc, +) -> Session { + let host_triple = TargetTriple::from_triple(config::host_triple()); + let host = match Target::search(&host_triple) { Ok(t) => t, Err(e) => { - panic!(span_diagnostic.fatal(&format!("Error loading host specification: {}", e))); + span_diagnostic + .fatal(&format!("Error loading host specification: {}", e)) + .raise(); } }; let target_cfg = config::build_target_config(&sopts, &span_diagnostic); @@ -924,26 +1086,25 @@ pub fn build_session_(sopts: 
config::Options, let p_s = parse::ParseSess::with_span_handler(span_diagnostic, codemap); let default_sysroot = match sopts.maybe_sysroot { Some(_) => None, - None => Some(filesearch::get_or_default_sysroot()) + None => Some(filesearch::get_or_default_sysroot()), }; let file_path_mapping = sopts.file_path_mapping(); - let local_crate_source_file = local_crate_source_file.map(|path| { - file_path_mapping.map_prefix(path).0 - }); + let local_crate_source_file = + local_crate_source_file.map(|path| file_path_mapping.map_prefix(path).0); let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone()); - let optimization_fuel_limit = Cell::new(sopts.debugging_opts.fuel.as_ref() - .map(|i| i.1).unwrap_or(0)); + let optimization_fuel_limit = + LockCell::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0)); let print_fuel_crate = sopts.debugging_opts.print_fuel.clone(); - let print_fuel = Cell::new(0); + let print_fuel = LockCell::new(0); let working_dir = match env::current_dir() { Ok(dir) => dir, - Err(e) => { - panic!(p_s.span_diagnostic.fatal(&format!("Current directory is invalid: {}", e))) - } + Err(e) => p_s.span_diagnostic + .fatal(&format!("Current directory is invalid: {}", e)) + .raise(), }; let working_dir = file_path_mapping.map_prefix(working_dir); @@ -953,67 +1114,103 @@ pub fn build_session_(sopts: config::Options, opts: sopts, parse_sess: p_s, // For a library crate, this is always none - entry_fn: RefCell::new(None), - entry_type: Cell::new(None), - plugin_registrar_fn: Cell::new(None), - derive_registrar_fn: Cell::new(None), + entry_fn: Once::new(), + plugin_registrar_fn: Once::new(), + derive_registrar_fn: Once::new(), default_sysroot, local_crate_source_file, working_dir, - lint_store: RefCell::new(lint::LintStore::new()), - buffered_lints: RefCell::new(Some(lint::LintBuffer::new())), - one_time_diagnostics: RefCell::new(FxHashSet()), - plugin_llvm_passes: RefCell::new(Vec::new()), - plugin_attributes: 
RefCell::new(Vec::new()), - crate_types: RefCell::new(Vec::new()), - dependency_formats: RefCell::new(FxHashMap()), - crate_disambiguator: RefCell::new(None), - features: RefCell::new(feature_gate::Features::new()), - recursion_limit: Cell::new(64), - type_length_limit: Cell::new(1048576), - next_node_id: Cell::new(NodeId::new(1)), - injected_allocator: Cell::new(None), - allocator_kind: Cell::new(None), - injected_panic_runtime: Cell::new(None), - imported_macro_spans: RefCell::new(HashMap::new()), - incr_comp_session: RefCell::new(IncrCompSession::NotInitialized), + lint_store: RwLock::new(lint::LintStore::new()), + buffered_lints: Lock::new(Some(lint::LintBuffer::new())), + one_time_diagnostics: Lock::new(FxHashSet()), + plugin_llvm_passes: OneThread::new(RefCell::new(Vec::new())), + plugin_attributes: OneThread::new(RefCell::new(Vec::new())), + crate_types: Once::new(), + dependency_formats: Once::new(), + crate_disambiguator: Once::new(), + features: Once::new(), + recursion_limit: Once::new(), + type_length_limit: Once::new(), + const_eval_stack_frame_limit: 100, + next_node_id: OneThread::new(Cell::new(NodeId::new(1))), + injected_allocator: Once::new(), + allocator_kind: Once::new(), + injected_panic_runtime: Once::new(), + imported_macro_spans: OneThread::new(RefCell::new(HashMap::new())), + incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)), + self_profiling: Lock::new(SelfProfiler::new()), + profile_channel: Lock::new(None), perf_stats: PerfStats { - svh_time: Cell::new(Duration::from_secs(0)), - incr_comp_hashes_time: Cell::new(Duration::from_secs(0)), - incr_comp_hashes_count: Cell::new(0), - incr_comp_bytes_hashed: Cell::new(0), - symbol_hash_time: Cell::new(Duration::from_secs(0)), - decode_def_path_tables_time: Cell::new(Duration::from_secs(0)), + symbol_hash_time: Lock::new(Duration::from_secs(0)), + decode_def_path_tables_time: Lock::new(Duration::from_secs(0)), + queries_canonicalized: AtomicUsize::new(0), + 
normalize_ty_after_erasing_regions: AtomicUsize::new(0), + normalize_projection_ty: AtomicUsize::new(0), }, - code_stats: RefCell::new(CodeStats::new()), + code_stats: Lock::new(CodeStats::new()), optimization_fuel_crate, optimization_fuel_limit, print_fuel_crate, print_fuel, - out_of_fuel: Cell::new(false), + out_of_fuel: LockCell::new(false), // Note that this is unsafe because it may misinterpret file descriptors // on Unix as jobserver file descriptors. We hopefully execute this near // the beginning of the process though to ensure we don't get false // positives, or in other words we try to execute this before we open // any file descriptors ourselves. // + // Pick a "reasonable maximum" if we don't otherwise have + // a jobserver in our environment, capping out at 32 so we + // don't take everything down by hogging the process run queue. + // The fixed number is used to have deterministic compilation + // across machines. + // // Also note that we stick this in a global because there could be // multiple `Session` instances in this process, and the jobserver is // per-process. - jobserver_from_env: unsafe { - static mut GLOBAL_JOBSERVER: *mut Option = 0 as *mut _; - static INIT: Once = ONCE_INIT; + jobserver: unsafe { + static mut GLOBAL_JOBSERVER: *mut Client = 0 as *mut _; + static INIT: std::sync::Once = std::sync::ONCE_INIT; INIT.call_once(|| { - GLOBAL_JOBSERVER = Box::into_raw(Box::new(Client::from_env())); + let client = Client::from_env().unwrap_or_else(|| { + Client::new(32).expect("failed to create jobserver") + }); + GLOBAL_JOBSERVER = Box::into_raw(Box::new(client)); }); (*GLOBAL_JOBSERVER).clone() }, - has_global_allocator: Cell::new(false), + has_global_allocator: Once::new(), + driver_lint_caps: FxHashMap(), }; + validate_commandline_args_with_session_available(&sess); + sess } +// If it is useful to have a Session available already for validating a +// commandline argument, you can do so here. 
+fn validate_commandline_args_with_session_available(sess: &Session) { + + if sess.lto() != Lto::No && sess.opts.incremental.is_some() { + sess.err("can't perform LTO when compiling incrementally"); + } + + // Since we don't know if code in an rlib will be linked to statically or + // dynamically downstream, rustc generates `__imp_` symbols that help the + // MSVC linker deal with this lack of knowledge (#27438). Unfortunately, + // these manually generated symbols confuse LLD when it tries to merge + // bitcode during ThinLTO. Therefore we disallow dynamic linking on MSVC + // when compiling for LLD ThinLTO. This way we can validly just not generate + // the `dllimport` attributes and `__imp_` symbols in that case. + if sess.opts.debugging_opts.cross_lang_lto.enabled() && + sess.opts.cg.prefer_dynamic && + sess.target.target.options.is_like_msvc { + sess.err("Linker plugin based LTO is not supported together with \ + `-C prefer-dynamic` when targeting MSVC"); + } +} + /// Hash value constructed out of all the `-C metadata` arguments passed to the /// compiler. Together with the crate-name forms a unique global identifier for /// the crate. @@ -1026,13 +1223,21 @@ impl CrateDisambiguator { } } +impl fmt::Display for CrateDisambiguator { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let (a, b) = self.0.as_value(); + let as_u128 = a as u128 | ((b as u128) << 64); + f.write_str(&base_n::encode(as_u128, base_n::CASE_INSENSITIVE)) + } +} + impl From for CrateDisambiguator { fn from(fingerprint: Fingerprint) -> CrateDisambiguator { CrateDisambiguator(fingerprint) } } -impl_stable_hash_for!(tuple_struct CrateDisambiguator { fingerprint }); +impl_stable_hash_via_hash!(CrateDisambiguator); /// Holds data on the current incremental compilation session, if there is one. #[derive(Debug)] @@ -1049,40 +1254,36 @@ pub enum IncrCompSession { }, /// This is the state after the session directory has been finalized. 
In this /// state, the contents of the directory must not be modified any more. - Finalized { - session_directory: PathBuf, - }, + Finalized { session_directory: PathBuf }, /// This is an error state that is reached when some compilation error has /// occurred. It indicates that the contents of the session directory must /// not be used, since they might be invalid. - InvalidBecauseOfErrors { - session_directory: PathBuf, - } + InvalidBecauseOfErrors { session_directory: PathBuf }, } pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! { - let emitter: Box = match output { + let emitter: Box = match output { config::ErrorOutputType::HumanReadable(color_config) => { - Box::new(EmitterWriter::stderr(color_config, None, false)) + Box::new(EmitterWriter::stderr(color_config, None, false, false)) } config::ErrorOutputType::Json(pretty) => Box::new(JsonEmitter::basic(pretty)), config::ErrorOutputType::Short(color_config) => { - Box::new(EmitterWriter::stderr(color_config, None, true)) + Box::new(EmitterWriter::stderr(color_config, None, true, false)) } }; let handler = errors::Handler::with_emitter(true, false, emitter); handler.emit(&MultiSpan::new(), msg, errors::Level::Fatal); - panic!(errors::FatalError); + errors::FatalError.raise(); } pub fn early_warn(output: config::ErrorOutputType, msg: &str) { - let emitter: Box = match output { + let emitter: Box = match output { config::ErrorOutputType::HumanReadable(color_config) => { - Box::new(EmitterWriter::stderr(color_config, None, false)) + Box::new(EmitterWriter::stderr(color_config, None, false, false)) } config::ErrorOutputType::Json(pretty) => Box::new(JsonEmitter::basic(pretty)), config::ErrorOutputType::Short(color_config) => { - Box::new(EmitterWriter::stderr(color_config, None, true)) + Box::new(EmitterWriter::stderr(color_config, None, true, false)) } }; let handler = errors::Handler::with_emitter(true, false, emitter); @@ -1092,7 +1293,7 @@ pub fn early_warn(output: config::ErrorOutputType, 
msg: &str) { #[derive(Copy, Clone, Debug)] pub enum CompileIncomplete { Stopped, - Errored(ErrorReported) + Errored(ErrorReported), } impl From for CompileIncomplete { fn from(err: ErrorReported) -> CompileIncomplete { @@ -1108,35 +1309,3 @@ pub fn compile_result_from_err_count(err_count: usize) -> CompileResult { Err(CompileIncomplete::Errored(ErrorReported)) } } - -#[cold] -#[inline(never)] -pub fn bug_fmt(file: &'static str, line: u32, args: fmt::Arguments) -> ! { - // this wrapper mostly exists so I don't have to write a fully - // qualified path of None:: inside the bug!() macro definition - opt_span_bug_fmt(file, line, None::, args); -} - -#[cold] -#[inline(never)] -pub fn span_bug_fmt>(file: &'static str, - line: u32, - span: S, - args: fmt::Arguments) -> ! { - opt_span_bug_fmt(file, line, Some(span), args); -} - -fn opt_span_bug_fmt>(file: &'static str, - line: u32, - span: Option, - args: fmt::Arguments) -> ! { - tls::with_opt(move |tcx| { - let msg = format!("{}:{}: {}", file, line, args); - match (tcx, span) { - (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg), - (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg), - (None, _) => panic!(msg) - } - }); - unreachable!(); -} diff --git a/src/librustc/session/search_paths.rs b/src/librustc/session/search_paths.rs index 5bbc6841693e..d2dca9f60845 100644 --- a/src/librustc/session/search_paths.rs +++ b/src/librustc/session/search_paths.rs @@ -78,4 +78,11 @@ impl<'a> Iterator for Iter<'a> { } } } + + fn size_hint(&self) -> (usize, Option) { + // This iterator will never return more elements than the base iterator; + // but it can ignore all the remaining elements. 
+ let (_, upper) = self.iter.size_hint(); + (0, upper) + } } diff --git a/src/librustc/traits/README.md b/src/librustc/traits/README.md deleted file mode 100644 index fa7f5c08608b..000000000000 --- a/src/librustc/traits/README.md +++ /dev/null @@ -1,482 +0,0 @@ -# TRAIT RESOLUTION - -This document describes the general process and points out some non-obvious -things. - -## Major concepts - -Trait resolution is the process of pairing up an impl with each -reference to a trait. So, for example, if there is a generic function like: - -```rust -fn clone_slice(x: &[T]) -> Vec { /*...*/ } -``` - -and then a call to that function: - -```rust -let v: Vec = clone_slice(&[1, 2, 3]) -``` - -it is the job of trait resolution to figure out (in which case) -whether there exists an impl of `isize : Clone` - -Note that in some cases, like generic functions, we may not be able to -find a specific impl, but we can figure out that the caller must -provide an impl. To see what I mean, consider the body of `clone_slice`: - -```rust -fn clone_slice(x: &[T]) -> Vec { - let mut v = Vec::new(); - for e in &x { - v.push((*e).clone()); // (*) - } -} -``` - -The line marked `(*)` is only legal if `T` (the type of `*e`) -implements the `Clone` trait. Naturally, since we don't know what `T` -is, we can't find the specific impl; but based on the bound `T:Clone`, -we can say that there exists an impl which the caller must provide. - -We use the term *obligation* to refer to a trait reference in need of -an impl. - -## Overview - -Trait resolution consists of three major parts: - -- SELECTION: Deciding how to resolve a specific obligation. For - example, selection might decide that a specific obligation can be - resolved by employing an impl which matches the self type, or by - using a parameter bound. In the case of an impl, Selecting one - obligation can create *nested obligations* because of where clauses - on the impl itself. 
It may also require evaluating those nested - obligations to resolve ambiguities. - -- FULFILLMENT: The fulfillment code is what tracks that obligations - are completely fulfilled. Basically it is a worklist of obligations - to be selected: once selection is successful, the obligation is - removed from the worklist and any nested obligations are enqueued. - -- COHERENCE: The coherence checks are intended to ensure that there - are never overlapping impls, where two impls could be used with - equal precedence. - -## Selection - -Selection is the process of deciding whether an obligation can be -resolved and, if so, how it is to be resolved (via impl, where clause, etc). -The main interface is the `select()` function, which takes an obligation -and returns a `SelectionResult`. There are three possible outcomes: - -- `Ok(Some(selection))` -- yes, the obligation can be resolved, and - `selection` indicates how. If the impl was resolved via an impl, - then `selection` may also indicate nested obligations that are required - by the impl. - -- `Ok(None)` -- we are not yet sure whether the obligation can be - resolved or not. This happens most commonly when the obligation - contains unbound type variables. - -- `Err(err)` -- the obligation definitely cannot be resolved due to a - type error, or because there are no impls that could possibly apply, - etc. - -The basic algorithm for selection is broken into two big phases: -candidate assembly and confirmation. - -### Candidate assembly - -Searches for impls/where-clauses/etc that might -possibly be used to satisfy the obligation. Each of those is called -a candidate. To avoid ambiguity, we want to find exactly one -candidate that is definitively applicable. In some cases, we may not -know whether an impl/where-clause applies or not -- this occurs when -the obligation contains unbound inference variables. - -The basic idea for candidate assembly is to do a first pass in which -we identify all possible candidates. 
During this pass, all that we do -is try and unify the type parameters. (In particular, we ignore any -nested where clauses.) Presuming that this unification succeeds, the -impl is added as a candidate. - -Once this first pass is done, we can examine the set of candidates. If -it is a singleton set, then we are done: this is the only impl in -scope that could possibly apply. Otherwise, we can winnow down the set -of candidates by using where clauses and other conditions. If this -reduced set yields a single, unambiguous entry, we're good to go, -otherwise the result is considered ambiguous. - -#### The basic process: Inferring based on the impls we see - -This process is easier if we work through some examples. Consider -the following trait: - -```rust -trait Convert { - fn convert(&self) -> Target; -} -``` - -This trait just has one method. It's about as simple as it gets. It -converts from the (implicit) `Self` type to the `Target` type. If we -wanted to permit conversion between `isize` and `usize`, we might -implement `Convert` like so: - -```rust -impl Convert for isize { /*...*/ } // isize -> usize -impl Convert for usize { /*...*/ } // usize -> isize -``` - -Now imagine there is some code like the following: - -```rust -let x: isize = ...; -let y = x.convert(); -``` - -The call to convert will generate a trait reference `Convert<$Y> for -isize`, where `$Y` is the type variable representing the type of -`y`. When we match this against the two impls we can see, we will find -that only one remains: `Convert for isize`. Therefore, we can -select this impl, which will cause the type of `$Y` to be unified to -`usize`. (Note that while assembling candidates, we do the initial -unifications in a transaction, so that they don't affect one another.) 
- -There are tests to this effect in src/test/run-pass: - - traits-multidispatch-infer-convert-source-and-target.rs - traits-multidispatch-infer-convert-target.rs - -#### Winnowing: Resolving ambiguities - -But what happens if there are multiple impls where all the types -unify? Consider this example: - -```rust -trait Get { - fn get(&self) -> Self; -} - -impl Get for T { - fn get(&self) -> T { *self } -} - -impl Get for Box { - fn get(&self) -> Box { box get_it(&**self) } -} -``` - -What happens when we invoke `get_it(&box 1_u16)`, for example? In this -case, the `Self` type is `Box` -- that unifies with both impls, -because the first applies to all types, and the second to all -boxes. In the olden days we'd have called this ambiguous. But what we -do now is do a second *winnowing* pass that considers where clauses -and attempts to remove candidates -- in this case, the first impl only -applies if `Box : Copy`, which doesn't hold. After winnowing, -then, we are left with just one candidate, so we can proceed. There is -a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`. - -#### Matching - -The subroutines that decide whether a particular impl/where-clause/etc -applies to a particular obligation. At the moment, this amounts to -unifying the self types, but in the future we may also recursively -consider some of the nested obligations, in the case of an impl. - -#### Lifetimes and selection - -Because of how that lifetime inference works, it is not possible to -give back immediate feedback as to whether a unification or subtype -relationship between lifetimes holds or not. Therefore, lifetime -matching is *not* considered during selection. This is reflected in -the fact that subregion assignment is infallible. 
This may yield -lifetime constraints that will later be found to be in error (in -contrast, the non-lifetime-constraints have already been checked -during selection and can never cause an error, though naturally they -may lead to other errors downstream). - -#### Where clauses - -Besides an impl, the other major way to resolve an obligation is via a -where clause. The selection process is always given a *parameter -environment* which contains a list of where clauses, which are -basically obligations that can assume are satisfiable. We will iterate -over that list and check whether our current obligation can be found -in that list, and if so it is considered satisfied. More precisely, we -want to check whether there is a where-clause obligation that is for -the same trait (or some subtrait) and for which the self types match, -using the definition of *matching* given above. - -Consider this simple example: - -```rust -trait A1 { /*...*/ } -trait A2 : A1 { /*...*/ } - -trait B { /*...*/ } - -fn foo { /*...*/ } -``` - -Clearly we can use methods offered by `A1`, `A2`, or `B` within the -body of `foo`. In each case, that will incur an obligation like `X : -A1` or `X : A2`. The parameter environment will contain two -where-clauses, `X : A2` and `X : B`. For each obligation, then, we -search this list of where-clauses. To resolve an obligation `X:A1`, -we would note that `X:A2` implies that `X:A1`. - -### Confirmation - -Confirmation unifies the output type parameters of the trait with the -values found in the obligation, possibly yielding a type error. If we -return to our example of the `Convert` trait from the previous -section, confirmation is where an error would be reported, because the -impl specified that `T` would be `usize`, but the obligation reported -`char`. Hence the result of selection would be an error. - -### Selection during translation - -During type checking, we do not store the results of trait selection. 
-We simply wish to verify that trait selection will succeed. Then -later, at trans time, when we have all concrete types available, we -can repeat the trait selection. In this case, we do not consider any -where-clauses to be in scope. We know that therefore each resolution -will resolve to a particular impl. - -One interesting twist has to do with nested obligations. In general, in trans, -we only need to do a "shallow" selection for an obligation. That is, we wish to -identify which impl applies, but we do not (yet) need to decide how to select -any nested obligations. Nonetheless, we *do* currently do a complete resolution, -and that is because it can sometimes inform the results of type inference. That is, -we do not have the full substitutions in terms of the type variables of the impl available -to us, so we must run trait selection to figure everything out. - -Here is an example: - -```rust -trait Foo { /*...*/ } -impl> Foo for Vec { /*...*/ } - -impl Bar for isize { /*...*/ } -``` - -After one shallow round of selection for an obligation like `Vec -: Foo`, we would know which impl we want, and we would know that -`T=isize`, but we do not know the type of `U`. We must select the -nested obligation `isize : Bar` to find out that `U=usize`. - -It would be good to only do *just as much* nested resolution as -necessary. Currently, though, we just do a full resolution. - -# Higher-ranked trait bounds - -One of the more subtle concepts at work are *higher-ranked trait -bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`. -Let's walk through how selection on higher-ranked trait references -works. - -## Basic matching and skolemization leaks - -Let's walk through the test `compile-fail/hrtb-just-for-static.rs` to see -how it works. 
The test starts with the trait `Foo`: - -```rust -trait Foo { - fn foo(&self, x: X) { } -} -``` - -Let's say we have a function `want_hrtb` that wants a type which -implements `Foo<&'a isize>` for any `'a`: - -```rust -fn want_hrtb() where T : for<'a> Foo<&'a isize> { ... } -``` - -Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any -`'a`: - -```rust -struct AnyInt; -impl<'a> Foo<&'a isize> for AnyInt { } -``` - -And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the -answer to be yes. The algorithm for figuring it out is closely related -to the subtyping for higher-ranked types (which is described in -`middle::infer::higher_ranked::doc`, but also in a [paper by SPJ] that -I recommend you read). - -1. Skolemize the obligation. -2. Match the impl against the skolemized obligation. -3. Check for skolemization leaks. - -[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/ - -So let's work through our example. The first thing we would do is to -skolemize the obligation, yielding `AnyInt : Foo<&'0 isize>` (here `'0` -represents skolemized region #0). Note that now have no quantifiers; -in terms of the compiler type, this changes from a `ty::PolyTraitRef` -to a `TraitRef`. We would then create the `TraitRef` from the impl, -using fresh variables for it's bound regions (and thus getting -`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). Next -we relate the two trait refs, yielding a graph with the constraint -that `'0 == '$a`. Finally, we check for skolemization "leaks" -- a -leak is basically any attempt to relate a skolemized region to another -skolemized region, or to any region that pre-existed the impl match. -The leak check is done by searching from the skolemized region to find -the set of regions that it is related to in any way. This is called -the "taint" set. To pass the check, that set must consist *solely* of -itself and region variables from the impl. 
If the taint set includes -any other region, then the match is a failure. In this case, the taint -set for `'0` is `{'0, '$a}`, and hence the check will succeed. - -Let's consider a failure case. Imagine we also have a struct - -```rust -struct StaticInt; -impl Foo<&'static isize> for StaticInt; -``` - -We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be -considered unsatisfied. The check begins just as before. `'a` is -skolemized to `'0` and the impl trait reference is instantiated to -`Foo<&'static isize>`. When we relate those two, we get a constraint -like `'static == '0`. This means that the taint set for `'0` is `{'0, -'static}`, which fails the leak check. - -## Higher-ranked trait obligations - -Once the basic matching is done, we get to another interesting topic: -how to deal with impl obligations. I'll work through a simple example -here. Imagine we have the traits `Foo` and `Bar` and an associated impl: - -```rust -trait Foo { - fn foo(&self, x: X) { } -} - -trait Bar { - fn bar(&self, x: X) { } -} - -impl Foo for F - where F : Bar -{ -} -``` - -Now let's say we have a obligation `for<'a> Foo<&'a isize>` and we match -this impl. What obligation is generated as a result? We want to get -`for<'a> Bar<&'a isize>`, but how does that happen? - -After the matching, we are in a position where we have a skolemized -substitution like `X => &'0 isize`. If we apply this substitution to the -impl obligations, we get `F : Bar<&'0 isize>`. Obviously this is not -directly usable because the skolemized region `'0` cannot leak out of -our computation. - -What we do is to create an inverse mapping from the taint set of `'0` -back to the original bound region (`'a`, here) that `'0` resulted -from. (This is done in `higher_ranked::plug_leaks`). We know that the -leak check passed, so this taint set consists solely of the skolemized -region itself plus various intermediate region variables. 
We then walk -the trait-reference and convert every region in that taint set back to -a late-bound region, so in this case we'd wind up with `for<'a> F : -Bar<&'a isize>`. - -# Caching and subtle considerations therewith - -In general we attempt to cache the results of trait selection. This -is a somewhat complex process. Part of the reason for this is that we -want to be able to cache results even when all the types in the trait -reference are not fully known. In that case, it may happen that the -trait selection process is also influencing type variables, so we have -to be able to not only cache the *result* of the selection process, -but *replay* its effects on the type variables. - -## An example - -The high-level idea of how the cache works is that we first replace -all unbound inference variables with skolemized versions. Therefore, -if we had a trait reference `usize : Foo<$1>`, where `$n` is an unbound -inference variable, we might replace it with `usize : Foo<%0>`, where -`%n` is a skolemized type. We would then look this up in the cache. -If we found a hit, the hit would tell us the immediate next step to -take in the selection process: i.e., apply impl #22, or apply where -clause `X : Foo`. Let's say in this case there is no hit. -Therefore, we search through impls and where clauses and so forth, and -we come to the conclusion that the only possible impl is this one, -with def-id 22: - -```rust -impl Foo for usize { ... } // Impl #22 -``` - -We would then record in the cache `usize : Foo<%0> ==> -ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which -would (as a side-effect) unify `$1` with `isize`. - -Now, at some later time, we might come along and see a `usize : -Foo<$3>`. When skolemized, this would yield `usize : Foo<%0>`, just as -before, and hence the cache lookup would succeed, yielding -`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would -(as a side-effect) unify `$3` with `isize`. 
- -## Where clauses and the local vs global cache - -One subtle interaction is that the results of trait lookup will vary -depending on what where clauses are in scope. Therefore, we actually -have *two* caches, a local and a global cache. The local cache is -attached to the `ParamEnv` and the global cache attached to the -`tcx`. We use the local cache whenever the result might depend on the -where clauses that are in scope. The determination of which cache to -use is done by the method `pick_candidate_cache` in `select.rs`. At -the moment, we use a very simple, conservative rule: if there are any -where-clauses in scope, then we use the local cache. We used to try -and draw finer-grained distinctions, but that led to a serious of -annoying and weird bugs like #22019 and #18290. This simple rule seems -to be pretty clearly safe and also still retains a very high hit rate -(~95% when compiling rustc). - -# Specialization - -Defined in the `specialize` module. - -The basic strategy is to build up a *specialization graph* during -coherence checking. Insertion into the graph locates the right place -to put an impl in the specialization hierarchy; if there is no right -place (due to partial overlap but no containment), you get an overlap -error. Specialization is consulted when selecting an impl (of course), -and the graph is consulted when propagating defaults down the -specialization hierarchy. - -You might expect that the specialization graph would be used during -selection -- i.e., when actually performing specialization. This is -not done for two reasons: - -- It's merely an optimization: given a set of candidates that apply, - we can determine the most specialized one by comparing them directly - for specialization, rather than consulting the graph. Given that we - also cache the results of selection, the benefit of this - optimization is questionable. 
- -- To build the specialization graph in the first place, we need to use - selection (because we need to determine whether one impl specializes - another). Dealing with this reentrancy would require some additional - mode switch for selection. Given that there seems to be no strong - reason to use the graph anyway, we stick with a simpler approach in - selection, and use the graph only for propagating default - implementations. - -Trait impl selection can succeed even when multiple impls can apply, -as long as they are part of the same specialization family. In that -case, it returns a *single* impl on success -- this is the most -specialized impl *known* to apply. However, if there are any inference -variables in play, the returned impl may not be the actual impl we -will use at trans time. Thus, we take special care to avoid projecting -associated types unless either (1) the associated type does not use -`default` and thus cannot be overridden or (2) all input types are -known concretely. diff --git a/src/librustc/traits/auto_trait.rs b/src/librustc/traits/auto_trait.rs new file mode 100644 index 000000000000..fd8c2d45e644 --- /dev/null +++ b/src/librustc/traits/auto_trait.rs @@ -0,0 +1,748 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Support code for rustdoc and external tools . You really don't +//! want to be using this unless you need to. 
+ +use super::*; + +use std::collections::hash_map::Entry; +use std::collections::VecDeque; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; + +use infer::region_constraints::{Constraint, RegionConstraintData}; +use infer::{InferCtxt, RegionObligation}; + +use ty::fold::TypeFolder; +use ty::{Region, RegionVid}; + +// FIXME(twk): this is obviously not nice to duplicate like that +#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug)] +pub enum RegionTarget<'tcx> { + Region(Region<'tcx>), + RegionVid(RegionVid), +} + +#[derive(Default, Debug, Clone)] +pub struct RegionDeps<'tcx> { + larger: FxHashSet>, + smaller: FxHashSet>, +} + +pub enum AutoTraitResult { + ExplicitImpl, + PositiveImpl(A), + NegativeImpl, +} + +impl AutoTraitResult { + fn is_auto(&self) -> bool { + match *self { + AutoTraitResult::PositiveImpl(_) | AutoTraitResult::NegativeImpl => true, + _ => false, + } + } +} + +pub struct AutoTraitInfo<'cx> { + pub full_user_env: ty::ParamEnv<'cx>, + pub region_data: RegionConstraintData<'cx>, + pub names_map: FxHashSet, + pub vid_to_region: FxHashMap>, +} + +pub struct AutoTraitFinder<'a, 'tcx: 'a> { + tcx: &'a TyCtxt<'a, 'tcx, 'tcx>, +} + +impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { + pub fn new(tcx: &'a TyCtxt<'a, 'tcx, 'tcx>) -> Self { + AutoTraitFinder { tcx } + } + + /// Make a best effort to determine whether and under which conditions an auto trait is + /// implemented for a type. For example, if you have + /// + /// ``` + /// struct Foo { data: Box } + /// ``` + + /// then this might return that Foo: Send if T: Send (encoded in the AutoTraitResult type). + /// The analysis attempts to account for custom impls as well as other complex cases. This + /// result is intended for use by rustdoc and other such consumers. + + /// (Note that due to the coinductive nature of Send, the full and correct result is actually + /// quite simple to generate. That is, when a type has no custom impl, it is Send iff its field + /// types are all Send. 
So, in our example, we might have that Foo: Send if Box: Send. + /// But this is often not the best way to present to the user.) + + /// Warning: The API should be considered highly unstable, and it may be refactored or removed + /// in the future. + pub fn find_auto_trait_generics( + &self, + did: DefId, + trait_did: DefId, + generics: &ty::Generics, + auto_trait_callback: impl for<'i> Fn(&InferCtxt<'_, 'tcx, 'i>, AutoTraitInfo<'i>) -> A, + ) -> AutoTraitResult { + let tcx = self.tcx; + let ty = self.tcx.type_of(did); + + let orig_params = tcx.param_env(did); + + let trait_ref = ty::TraitRef { + def_id: trait_did, + substs: tcx.mk_substs_trait(ty, &[]), + }; + + let trait_pred = ty::Binder::bind(trait_ref); + + let bail_out = tcx.infer_ctxt().enter(|infcx| { + let mut selcx = SelectionContext::with_negative(&infcx, true); + let result = selcx.select(&Obligation::new( + ObligationCause::dummy(), + orig_params, + trait_pred.to_poly_trait_predicate(), + )); + match result { + Ok(Some(Vtable::VtableImpl(_))) => { + debug!( + "find_auto_trait_generics(did={:?}, trait_did={:?}, generics={:?}): \ + manual impl found, bailing out", + did, trait_did, generics + ); + return true; + } + _ => return false, + }; + }); + + // If an explicit impl exists, it always takes priority over an auto impl + if bail_out { + return AutoTraitResult::ExplicitImpl; + } + + return tcx.infer_ctxt().enter(|mut infcx| { + let mut fresh_preds = FxHashSet(); + + // Due to the way projections are handled by SelectionContext, we need to run + // evaluate_predicates twice: once on the original param env, and once on the result of + // the first evaluate_predicates call. + // + // The problem is this: most of rustc, including SelectionContext and traits::project, + // are designed to work with a concrete usage of a type (e.g. Vec + // fn() { Vec }. This information will generally never change - given + // the 'T' in fn() { ... }, we'll never know anything else about 'T'. 
+ // If we're unable to prove that 'T' implements a particular trait, we're done - + // there's nothing left to do but error out. + // + // However, synthesizing an auto trait impl works differently. Here, we start out with + // a set of initial conditions - the ParamEnv of the struct/enum/union we're dealing + // with - and progressively discover the conditions we need to fulfill for it to + // implement a certain auto trait. This ends up breaking two assumptions made by trait + // selection and projection: + // + // * We can always cache the result of a particular trait selection for the lifetime of + // an InfCtxt + // * Given a projection bound such as '::SomeItem = K', if 'T: + // SomeTrait' doesn't hold, then we don't need to care about the 'SomeItem = K' + // + // We fix the first assumption by manually clearing out all of the InferCtxt's caches + // in between calls to SelectionContext.select. This allows us to keep all of the + // intermediate types we create bound to the 'tcx lifetime, rather than needing to lift + // them between calls. + // + // We fix the second assumption by reprocessing the result of our first call to + // evaluate_predicates. Using the example of '::SomeItem = K', our first + // pass will pick up 'T: SomeTrait', but not 'SomeItem = K'. On our second pass, + // traits::project will see that 'T: SomeTrait' is in our ParamEnv, allowing + // SelectionContext to return it back to us. 
+ + let (new_env, user_env) = match self.evaluate_predicates( + &mut infcx, + did, + trait_did, + ty, + orig_params.clone(), + orig_params, + &mut fresh_preds, + false, + ) { + Some(e) => e, + None => return AutoTraitResult::NegativeImpl, + }; + + let (full_env, full_user_env) = self.evaluate_predicates( + &mut infcx, + did, + trait_did, + ty, + new_env.clone(), + user_env, + &mut fresh_preds, + true, + ).unwrap_or_else(|| { + panic!( + "Failed to fully process: {:?} {:?} {:?}", + ty, trait_did, orig_params + ) + }); + + debug!( + "find_auto_trait_generics(did={:?}, trait_did={:?}, generics={:?}): fulfilling \ + with {:?}", + did, trait_did, generics, full_env + ); + infcx.clear_caches(); + + // At this point, we already have all of the bounds we need. FulfillmentContext is used + // to store all of the necessary region/lifetime bounds in the InferContext, as well as + // an additional sanity check. + let mut fulfill = FulfillmentContext::new(); + fulfill.register_bound( + &infcx, + full_env, + ty, + trait_did, + ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID), + ); + fulfill.select_all_or_error(&infcx).unwrap_or_else(|e| { + panic!( + "Unable to fulfill trait {:?} for '{:?}': {:?}", + trait_did, ty, e + ) + }); + + let names_map: FxHashSet = generics + .params + .iter() + .filter_map(|param| match param.kind { + ty::GenericParamDefKind::Lifetime => Some(param.name.to_string()), + _ => None + }) + .collect(); + + let body_ids: FxHashSet<_> = infcx + .region_obligations + .borrow() + .iter() + .map(|&(id, _)| id) + .collect(); + + for id in body_ids { + infcx.process_registered_region_obligations(&[], None, full_env.clone(), id); + } + + let region_data = infcx + .borrow_region_constraints() + .region_constraint_data() + .clone(); + + let vid_to_region = self.map_vid_to_region(®ion_data); + + let info = AutoTraitInfo { + full_user_env, + region_data, + names_map, + vid_to_region, + }; + + return AutoTraitResult::PositiveImpl(auto_trait_callback(&infcx, info)); + 
}); + } +} + +impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { + // The core logic responsible for computing the bounds for our synthesized impl. + // + // To calculate the bounds, we call SelectionContext.select in a loop. Like FulfillmentContext, + // we recursively select the nested obligations of predicates we encounter. However, whenver we + // encounter an UnimplementedError involving a type parameter, we add it to our ParamEnv. Since + // our goal is to determine when a particular type implements an auto trait, Unimplemented + // errors tell us what conditions need to be met. + // + // This method ends up working somewhat similary to FulfillmentContext, but with a few key + // differences. FulfillmentContext works under the assumption that it's dealing with concrete + // user code. According, it considers all possible ways that a Predicate could be met - which + // isn't always what we want for a synthesized impl. For example, given the predicate 'T: + // Iterator', FulfillmentContext can end up reporting an Unimplemented error for T: + // IntoIterator - since there's an implementation of Iteratpr where T: IntoIterator, + // FulfillmentContext will drive SelectionContext to consider that impl before giving up. If we + // were to rely on FulfillmentContext's decision, we might end up synthesizing an impl like + // this: + // 'impl Send for Foo where T: IntoIterator' + // + // While it might be technically true that Foo implements Send where T: IntoIterator, + // the bound is overly restrictive - it's really only necessary that T: Iterator. + // + // For this reason, evaluate_predicates handles predicates with type variables specially. When + // we encounter an Unimplemented error for a bound such as 'T: Iterator', we immediately add it + // to our ParamEnv, and add it to our stack for recursive evaluation. When we later select it, + // we'll pick up any nested bounds, without ever inferring that 'T: IntoIterator' needs to + // hold. 
+ // + // One additonal consideration is supertrait bounds. Normally, a ParamEnv is only ever + // consutrcted once for a given type. As part of the construction process, the ParamEnv will + // have any supertrait bounds normalized - e.g. if we have a type 'struct Foo', the + // ParamEnv will contain 'T: Copy' and 'T: Clone', since 'Copy: Clone'. When we construct our + // own ParamEnv, we need to do this outselves, through traits::elaborate_predicates, or else + // SelectionContext will choke on the missing predicates. However, this should never show up in + // the final synthesized generics: we don't want our generated docs page to contain something + // like 'T: Copy + Clone', as that's redundant. Therefore, we keep track of a separate + // 'user_env', which only holds the predicates that will actually be displayed to the user. + pub fn evaluate_predicates<'b, 'gcx, 'c>( + &self, + infcx: &InferCtxt<'b, 'tcx, 'c>, + ty_did: DefId, + trait_did: DefId, + ty: ty::Ty<'c>, + param_env: ty::ParamEnv<'c>, + user_env: ty::ParamEnv<'c>, + fresh_preds: &mut FxHashSet>, + only_projections: bool, + ) -> Option<(ty::ParamEnv<'c>, ty::ParamEnv<'c>)> { + let tcx = infcx.tcx; + + let mut select = SelectionContext::new(&infcx); + + let mut already_visited = FxHashSet(); + let mut predicates = VecDeque::new(); + predicates.push_back(ty::Binder::bind(ty::TraitPredicate { + trait_ref: ty::TraitRef { + def_id: trait_did, + substs: infcx.tcx.mk_substs_trait(ty, &[]), + }, + })); + + let mut computed_preds: FxHashSet<_> = param_env.caller_bounds.iter().cloned().collect(); + let mut user_computed_preds: FxHashSet<_> = + user_env.caller_bounds.iter().cloned().collect(); + + let mut new_env = param_env.clone(); + let dummy_cause = ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID); + + while let Some(pred) = predicates.pop_front() { + infcx.clear_caches(); + + if !already_visited.insert(pred.clone()) { + continue; + } + + let result = select.select(&Obligation::new(dummy_cause.clone(), 
new_env, pred)); + + match &result { + &Ok(Some(ref vtable)) => { + let obligations = vtable.clone().nested_obligations().into_iter(); + + if !self.evaluate_nested_obligations( + ty, + obligations, + &mut user_computed_preds, + fresh_preds, + &mut predicates, + &mut select, + only_projections, + ) { + return None; + } + } + &Ok(None) => {} + &Err(SelectionError::Unimplemented) => { + if self.is_of_param(pred.skip_binder().trait_ref.substs) { + already_visited.remove(&pred); + self.add_user_pred(&mut user_computed_preds, + ty::Predicate::Trait(pred.clone())); + predicates.push_back(pred); + } else { + debug!( + "evaluate_nested_obligations: Unimplemented found, bailing: \ + {:?} {:?} {:?}", + ty, + pred, + pred.skip_binder().trait_ref.substs + ); + return None; + } + } + _ => panic!("Unexpected error for '{:?}': {:?}", ty, result), + }; + + computed_preds.extend(user_computed_preds.iter().cloned()); + let normalized_preds = + elaborate_predicates(tcx, computed_preds.clone().into_iter().collect()); + new_env = ty::ParamEnv::new(tcx.mk_predicates(normalized_preds), param_env.reveal); + } + + let final_user_env = ty::ParamEnv::new( + tcx.mk_predicates(user_computed_preds.into_iter()), + user_env.reveal, + ); + debug!( + "evaluate_nested_obligations(ty_did={:?}, trait_did={:?}): succeeded with '{:?}' \ + '{:?}'", + ty_did, trait_did, new_env, final_user_env + ); + + return Some((new_env, final_user_env)); + } + + // This method is designed to work around the following issue: + // When we compute auto trait bounds, we repeatedly call SelectionContext.select, + // progressively building a ParamEnv based on the results we get. + // However, our usage of SelectionContext differs from its normal use within the compiler, + // in that we capture and re-reprocess predicates from Unimplemented errors. + // + // This can lead to a corner case when dealing with region parameters. 
+ // During our selection loop in evaluate_predicates, we might end up with + // two trait predicates that differ only in their region parameters: + // one containing a HRTB lifetime parameter, and one containing a 'normal' + // lifetime parameter. For example: + // + // T as MyTrait<'a> + // T as MyTrait<'static> + // + // If we put both of these predicates in our computed ParamEnv, we'll + // confuse SelectionContext, since it will (correctly) view both as being applicable. + // + // To solve this, we pick the 'more strict' lifetime bound - i.e. the HRTB + // Our end goal is to generate a user-visible description of the conditions + // under which a type implements an auto trait. A trait predicate involving + // a HRTB means that the type needs to work with any choice of lifetime, + // not just one specific lifetime (e.g. 'static). + fn add_user_pred<'c>(&self, user_computed_preds: &mut FxHashSet>, + new_pred: ty::Predicate<'c>) { + let mut should_add_new = true; + user_computed_preds.retain(|&old_pred| { + match (&new_pred, old_pred) { + (&ty::Predicate::Trait(new_trait), ty::Predicate::Trait(old_trait)) => { + if new_trait.def_id() == old_trait.def_id() { + let new_substs = new_trait.skip_binder().trait_ref.substs; + let old_substs = old_trait.skip_binder().trait_ref.substs; + if !new_substs.types().eq(old_substs.types()) { + // We can't compare lifetimes if the types are different, + // so skip checking old_pred + return true + } + + for (new_region, old_region) in new_substs + .regions() + .zip(old_substs.regions()) { + + match (new_region, old_region) { + // If both predicates have an 'ReLateBound' (a HRTB) in the + // same spot, we do nothing + ( + ty::RegionKind::ReLateBound(_, _), + ty::RegionKind::ReLateBound(_, _) + ) => {}, + + (ty::RegionKind::ReLateBound(_, _), _) => { + // The new predicate has a HRTB in a spot where the old + // predicate does not (if they both had a HRTB, the previous + // match arm would have executed). 
+ // + // The means we want to remove the older predicate from + // user_computed_preds, since having both it and the new + // predicate in a ParamEnv would confuse SelectionContext + // We're currently in the predicate passed to 'retain', + // so we return 'false' to remove the old predicate from + // user_computed_preds + return false; + }, + (_, ty::RegionKind::ReLateBound(_, _)) => { + // This is the opposite situation as the previous arm - the + // old predicate has a HRTB lifetime in a place where the + // new predicate does not. We want to leave the old + // predicate in user_computed_preds, and skip adding + // new_pred to user_computed_params. + should_add_new = false + } + _ => {} + } + } + } + }, + _ => {} + } + return true + }); + + if should_add_new { + user_computed_preds.insert(new_pred); + } + } + + pub fn region_name(&self, region: Region) -> Option { + match region { + &ty::ReEarlyBound(r) => Some(r.name.to_string()), + _ => None, + } + } + + pub fn get_lifetime(&self, region: Region, names_map: &FxHashMap) -> String { + self.region_name(region) + .map(|name| { + names_map.get(&name).unwrap_or_else(|| { + panic!("Missing lifetime with name {:?} for {:?}", name, region) + }) + }) + .unwrap_or(&"'static".to_string()) + .clone() + } + + // This is very similar to handle_lifetimes. 
However, instead of matching ty::Region's + // to each other, we match ty::RegionVid's to ty::Region's + pub fn map_vid_to_region<'cx>( + &self, + regions: &RegionConstraintData<'cx>, + ) -> FxHashMap> { + let mut vid_map: FxHashMap, RegionDeps<'cx>> = FxHashMap(); + let mut finished_map = FxHashMap(); + + for constraint in regions.constraints.keys() { + match constraint { + &Constraint::VarSubVar(r1, r2) => { + { + let deps1 = vid_map + .entry(RegionTarget::RegionVid(r1)) + .or_default(); + deps1.larger.insert(RegionTarget::RegionVid(r2)); + } + + let deps2 = vid_map + .entry(RegionTarget::RegionVid(r2)) + .or_default(); + deps2.smaller.insert(RegionTarget::RegionVid(r1)); + } + &Constraint::RegSubVar(region, vid) => { + { + let deps1 = vid_map + .entry(RegionTarget::Region(region)) + .or_default(); + deps1.larger.insert(RegionTarget::RegionVid(vid)); + } + + let deps2 = vid_map + .entry(RegionTarget::RegionVid(vid)) + .or_default(); + deps2.smaller.insert(RegionTarget::Region(region)); + } + &Constraint::VarSubReg(vid, region) => { + finished_map.insert(vid, region); + } + &Constraint::RegSubReg(r1, r2) => { + { + let deps1 = vid_map + .entry(RegionTarget::Region(r1)) + .or_default(); + deps1.larger.insert(RegionTarget::Region(r2)); + } + + let deps2 = vid_map + .entry(RegionTarget::Region(r2)) + .or_default(); + deps2.smaller.insert(RegionTarget::Region(r1)); + } + } + } + + while !vid_map.is_empty() { + let target = vid_map.keys().next().expect("Keys somehow empty").clone(); + let deps = vid_map.remove(&target).expect("Entry somehow missing"); + + for smaller in deps.smaller.iter() { + for larger in deps.larger.iter() { + match (smaller, larger) { + (&RegionTarget::Region(_), &RegionTarget::Region(_)) => { + if let Entry::Occupied(v) = vid_map.entry(*smaller) { + let smaller_deps = v.into_mut(); + smaller_deps.larger.insert(*larger); + smaller_deps.larger.remove(&target); + } + + if let Entry::Occupied(v) = vid_map.entry(*larger) { + let larger_deps = 
v.into_mut(); + larger_deps.smaller.insert(*smaller); + larger_deps.smaller.remove(&target); + } + } + (&RegionTarget::RegionVid(v1), &RegionTarget::Region(r1)) => { + finished_map.insert(v1, r1); + } + (&RegionTarget::Region(_), &RegionTarget::RegionVid(_)) => { + // Do nothing - we don't care about regions that are smaller than vids + } + (&RegionTarget::RegionVid(_), &RegionTarget::RegionVid(_)) => { + if let Entry::Occupied(v) = vid_map.entry(*smaller) { + let smaller_deps = v.into_mut(); + smaller_deps.larger.insert(*larger); + smaller_deps.larger.remove(&target); + } + + if let Entry::Occupied(v) = vid_map.entry(*larger) { + let larger_deps = v.into_mut(); + larger_deps.smaller.insert(*smaller); + larger_deps.smaller.remove(&target); + } + } + } + } + } + } + finished_map + } + + pub fn is_of_param(&self, substs: &Substs) -> bool { + if substs.is_noop() { + return false; + } + + return match substs.type_at(0).sty { + ty::TyParam(_) => true, + ty::TyProjection(p) => self.is_of_param(p.substs), + _ => false, + }; + } + + pub fn evaluate_nested_obligations< + 'b, + 'c, + 'd, + 'cx, + T: Iterator>>, + >( + &self, + ty: ty::Ty, + nested: T, + computed_preds: &'b mut FxHashSet>, + fresh_preds: &'b mut FxHashSet>, + predicates: &'b mut VecDeque>, + select: &mut SelectionContext<'c, 'd, 'cx>, + only_projections: bool, + ) -> bool { + let dummy_cause = ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID); + + for (obligation, predicate) in nested + .filter(|o| o.recursion_depth == 1) + .map(|o| (o.clone(), o.predicate.clone())) + { + let is_new_pred = + fresh_preds.insert(self.clean_pred(select.infcx(), predicate.clone())); + + match &predicate { + &ty::Predicate::Trait(ref p) => { + let substs = &p.skip_binder().trait_ref.substs; + + if self.is_of_param(substs) && !only_projections && is_new_pred { + self.add_user_pred(computed_preds, predicate); + } + predicates.push_back(p.clone()); + } + &ty::Predicate::Projection(p) => { + // If the projection isn't all type vars, 
then + // we don't want to add it as a bound + if self.is_of_param(p.skip_binder().projection_ty.substs) && is_new_pred { + self.add_user_pred(computed_preds, predicate); + } else { + match poly_project_and_unify_type(select, &obligation.with(p.clone())) { + Err(e) => { + debug!( + "evaluate_nested_obligations: Unable to unify predicate \ + '{:?}' '{:?}', bailing out", + ty, e + ); + return false; + } + Ok(Some(v)) => { + if !self.evaluate_nested_obligations( + ty, + v.clone().iter().cloned(), + computed_preds, + fresh_preds, + predicates, + select, + only_projections, + ) { + return false; + } + } + Ok(None) => { + panic!("Unexpected result when selecting {:?} {:?}", ty, obligation) + } + } + } + } + &ty::Predicate::RegionOutlives(ref binder) => { + if select.infcx().region_outlives_predicate(&dummy_cause, binder).is_err() { + return false; + } + } + &ty::Predicate::TypeOutlives(ref binder) => { + match ( + binder.no_late_bound_regions(), + binder.map_bound_ref(|pred| pred.0).no_late_bound_regions(), + ) { + (None, Some(t_a)) => { + select.infcx().register_region_obligation( + ast::DUMMY_NODE_ID, + RegionObligation { + sup_type: t_a, + sub_region: select.infcx().tcx.types.re_static, + cause: dummy_cause.clone(), + }, + ); + } + (Some(ty::OutlivesPredicate(t_a, r_b)), _) => { + select.infcx().register_region_obligation( + ast::DUMMY_NODE_ID, + RegionObligation { + sup_type: t_a, + sub_region: r_b, + cause: dummy_cause.clone(), + }, + ); + } + _ => {} + }; + } + _ => panic!("Unexpected predicate {:?} {:?}", ty, predicate), + }; + } + return true; + } + + pub fn clean_pred<'c, 'd, 'cx>( + &self, + infcx: &InferCtxt<'c, 'd, 'cx>, + p: ty::Predicate<'cx>, + ) -> ty::Predicate<'cx> { + infcx.freshen(p) + } +} + +// Replaces all ReVars in a type with ty::Region's, using the provided map +pub struct RegionReplacer<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + vid_to_region: &'a FxHashMap>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for 
RegionReplacer<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { + self.tcx + } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + (match r { + &ty::ReVar(vid) => self.vid_to_region.get(&vid).cloned(), + _ => None, + }).unwrap_or_else(|| r.super_fold_with(self)) + } +} diff --git a/src/librustc/traits/codegen/mod.rs b/src/librustc/traits/codegen/mod.rs new file mode 100644 index 000000000000..cf404202ac12 --- /dev/null +++ b/src/librustc/traits/codegen/mod.rs @@ -0,0 +1,184 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This file contains various trait resolution methods used by codegen. +// They all assume regions can be erased and monomorphic types. It +// seems likely that they should eventually be merged into more +// general routines. + +use dep_graph::{DepKind, DepTrackingMapConfig}; +use std::marker::PhantomData; +use syntax_pos::DUMMY_SP; +use infer::InferCtxt; +use syntax_pos::Span; +use traits::{FulfillmentContext, Obligation, ObligationCause, SelectionContext, + TraitEngine, Vtable}; +use ty::{self, Ty, TyCtxt}; +use ty::subst::{Subst, Substs}; +use ty::fold::TypeFoldable; + +/// Attempts to resolve an obligation to a vtable.. The result is +/// a shallow vtable resolution -- meaning that we do not +/// (necessarily) resolve all nested obligations on the impl. Note +/// that type check should guarantee to us that all nested +/// obligations *could be* resolved if we wanted to. +/// Assumes that this is run after the entire crate has been successfully type-checked. 
+pub fn codegen_fulfill_obligation<'a, 'tcx>(ty: TyCtxt<'a, 'tcx, 'tcx>, + (param_env, trait_ref): + (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) + -> Vtable<'tcx, ()> +{ + // Remove any references to regions; this helps improve caching. + let trait_ref = ty.erase_regions(&trait_ref); + + debug!("codegen_fulfill_obligation(trait_ref={:?}, def_id={:?})", + (param_env, trait_ref), trait_ref.def_id()); + + // Do the initial selection for the obligation. This yields the + // shallow result we are looking for -- that is, what specific impl. + ty.infer_ctxt().enter(|infcx| { + let mut selcx = SelectionContext::new(&infcx); + + let obligation_cause = ObligationCause::dummy(); + let obligation = Obligation::new(obligation_cause, + param_env, + trait_ref.to_poly_trait_predicate()); + + let selection = match selcx.select(&obligation) { + Ok(Some(selection)) => selection, + Ok(None) => { + // Ambiguity can happen when monomorphizing during trans + // expands to some humongo type that never occurred + // statically -- this humongo type can then overflow, + // leading to an ambiguous result. So report this as an + // overflow bug, since I believe this is the only case + // where ambiguity can result. + bug!("Encountered ambiguity selecting `{:?}` during codegen, \ + presuming due to overflow", + trait_ref) + } + Err(e) => { + bug!("Encountered error `{:?}` selecting `{:?}` during codegen", + e, trait_ref) + } + }; + + debug!("fulfill_obligation: selection={:?}", selection); + + // Currently, we use a fulfillment context to completely resolve + // all nested obligations. This is because they can inform the + // inference of the impl's type parameters. 
+ let mut fulfill_cx = FulfillmentContext::new(); + let vtable = selection.map(|predicate| { + debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate); + fulfill_cx.register_predicate_obligation(&infcx, predicate); + }); + let vtable = infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable); + + info!("Cache miss: {:?} => {:?}", trait_ref, vtable); + vtable + }) +} + +impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { + /// Monomorphizes a type from the AST by first applying the + /// in-scope substitutions and then normalizing any associated + /// types. + pub fn subst_and_normalize_erasing_regions( + self, + param_substs: &Substs<'tcx>, + param_env: ty::ParamEnv<'tcx>, + value: &T + ) -> T + where + T: TypeFoldable<'tcx>, + { + debug!( + "subst_and_normalize_erasing_regions(\ + param_substs={:?}, \ + value={:?}, \ + param_env={:?})", + param_substs, + value, + param_env, + ); + let substituted = value.subst(self, param_substs); + self.normalize_erasing_regions(param_env, substituted) + } +} + +// Implement DepTrackingMapConfig for `trait_cache` +pub struct TraitSelectionCache<'tcx> { + data: PhantomData<&'tcx ()> +} + +impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> { + type Key = (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>); + type Value = Vtable<'tcx, ()>; + fn to_dep_kind() -> DepKind { + DepKind::TraitSelect + } +} + +// # Global Cache + +pub struct ProjectionCache<'gcx> { + data: PhantomData<&'gcx ()> +} + +impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> { + type Key = Ty<'gcx>; + type Value = Ty<'gcx>; + fn to_dep_kind() -> DepKind { + DepKind::TraitSelect + } +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + /// Finishes processes any obligations that remain in the + /// fulfillment context, and then returns the result with all type + /// variables removed and regions erased. Because this is intended + /// for use after type-check has completed, if any errors occur, + /// it will panic. 
It is used during normalization and other cases + /// where processing the obligations in `fulfill_cx` may cause + /// type inference variables that appear in `result` to be + /// unified, and hence we need to process those obligations to get + /// the complete picture of the type. + fn drain_fulfillment_cx_or_panic(&self, + span: Span, + fulfill_cx: &mut FulfillmentContext<'tcx>, + result: &T) + -> T::Lifted + where T: TypeFoldable<'tcx> + ty::Lift<'gcx> + { + debug!("drain_fulfillment_cx_or_panic()"); + + // In principle, we only need to do this so long as `result` + // contains unbound type parameters. It could be a slight + // optimization to stop iterating early. + match fulfill_cx.select_all_or_error(self) { + Ok(()) => { } + Err(errors) => { + span_bug!(span, "Encountered errors `{:?}` resolving bounds after type-checking", + errors); + } + } + + let result = self.resolve_type_vars_if_possible(result); + let result = self.tcx.erase_regions(&result); + + match self.tcx.lift_to_global(&result) { + Some(result) => result, + None => { + span_bug!(span, "Uninferred types/regions in `{:?}`", result); + } + } + } +} diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs index 7d1f3b31bfc2..02bfab033efc 100644 --- a/src/librustc/traits/coherence.rs +++ b/src/librustc/traits/coherence.rs @@ -8,18 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! See `README.md` for high-level documentation +//! See rustc guide chapters on [trait-resolution] and [trait-specialization] for more info on how +//! this works. +//! +//! [trait-resolution]: https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html +//! 
[trait-specialization]: https://rust-lang-nursery.github.io/rustc-guide/traits/specialization.html use hir::def_id::{DefId, LOCAL_CRATE}; use syntax_pos::DUMMY_SP; -use traits::{self, Normalized, SelectionContext, Obligation, ObligationCause, Reveal}; +use traits::{self, Normalized, SelectionContext, Obligation, ObligationCause}; use traits::IntercrateMode; use traits::select::IntercrateAmbiguityCause; use ty::{self, Ty, TyCtxt}; use ty::fold::TypeFoldable; use ty::subst::Subst; -use infer::{InferCtxt, InferOk}; +use infer::{InferOk}; /// Whether we do the orphan check relative to this crate or /// to some remote crate. @@ -40,15 +44,22 @@ pub struct OverlapResult<'tcx> { pub intercrate_ambiguity_causes: Vec, } -/// If there are types that satisfy both impls, returns a suitably-freshened -/// `ImplHeader` with those types substituted -pub fn overlapping_impls<'cx, 'gcx, 'tcx>(infcx: &InferCtxt<'cx, 'gcx, 'tcx>, - impl1_def_id: DefId, - impl2_def_id: DefId, - intercrate_mode: IntercrateMode) - -> Option> +/// If there are types that satisfy both impls, invokes `on_overlap` +/// with a suitably-freshened `ImplHeader` with those types +/// substituted. Otherwise, invokes `no_overlap`. 
+pub fn overlapping_impls<'gcx, F1, F2, R>( + tcx: TyCtxt<'_, 'gcx, 'gcx>, + impl1_def_id: DefId, + impl2_def_id: DefId, + intercrate_mode: IntercrateMode, + on_overlap: F1, + no_overlap: F2, +) -> R +where + F1: FnOnce(OverlapResult<'_>) -> R, + F2: FnOnce() -> R, { - debug!("impl_can_satisfy(\ + debug!("overlapping_impls(\ impl1_def_id={:?}, \ impl2_def_id={:?}, intercrate_mode={:?})", @@ -56,8 +67,23 @@ pub fn overlapping_impls<'cx, 'gcx, 'tcx>(infcx: &InferCtxt<'cx, 'gcx, 'tcx>, impl2_def_id, intercrate_mode); - let selcx = &mut SelectionContext::intercrate(infcx, intercrate_mode); - overlap(selcx, impl1_def_id, impl2_def_id) + let overlaps = tcx.infer_ctxt().enter(|infcx| { + let selcx = &mut SelectionContext::intercrate(&infcx, intercrate_mode); + overlap(selcx, impl1_def_id, impl2_def_id).is_some() + }); + + if !overlaps { + return no_overlap(); + } + + // In the case where we detect an error, run the check again, but + // this time tracking intercrate ambuiguity causes for better + // diagnostics. (These take time and can lead to false errors.) + tcx.infer_ctxt().enter(|infcx| { + let selcx = &mut SelectionContext::intercrate(&infcx, intercrate_mode); + selcx.enable_tracking_intercrate_ambiguity_causes(); + on_overlap(overlap(selcx, impl1_def_id, impl2_def_id).unwrap()) + }) } fn with_fresh_ty_vars<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, @@ -97,7 +123,7 @@ fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, // types into scope; instead, we replace the generic types with // fresh type variables, and hence we do our evaluations in an // empty environment. 
- let param_env = ty::ParamEnv::empty(Reveal::UserFacing); + let param_env = ty::ParamEnv::empty(); let a_impl_header = with_fresh_ty_vars(selcx, param_env, a_def_id); let b_impl_header = with_fresh_ty_vars(selcx, param_env, b_def_id); @@ -128,17 +154,20 @@ fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, recursion_depth: 0, predicate: p }) .chain(obligations) - .find(|o| !selcx.evaluate_obligation(o)); + .find(|o| !selcx.predicate_may_hold_fatal(o)); + // FIXME: the call to `selcx.predicate_may_hold_fatal` above should be ported + // to the canonical trait query form, `infcx.predicate_may_hold`, once + // the new system supports intercrate mode (which coherence needs). if let Some(failing_obligation) = opt_failing_obligation { debug!("overlap: obligation unsatisfiable {:?}", failing_obligation); return None } - Some(OverlapResult { - impl_header: selcx.infcx().resolve_type_vars_if_possible(&a_impl_header), - intercrate_ambiguity_causes: selcx.intercrate_ambiguity_causes().to_vec(), - }) + let impl_header = selcx.infcx().resolve_type_vars_if_possible(&a_impl_header); + let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes(); + debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes); + Some(OverlapResult { impl_header, intercrate_ambiguity_causes }) } pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, @@ -255,7 +284,7 @@ pub fn orphan_check<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, /// is bad, because the only local type with `T` as a subtree is /// `LocalType`, and `Vec<->` is between it and the type parameter. /// - similarly, `FundamentalPair, T>` is bad, because -/// the second occurence of `T` is not a subtree of *any* local type. +/// the second occurrence of `T` is not a subtree of *any* local type. /// - however, `LocalType>` is OK, because `T` is a subtree of /// `LocalType>`, which is local and has no types between it and /// the type parameter. 
@@ -451,7 +480,10 @@ fn ty_is_local_constructor(ty: Ty, in_crate: InCrate) -> bool { true } - ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAnon(..) => { + ty::TyClosure(..) | + ty::TyGenerator(..) | + ty::TyGeneratorWitness(..) | + ty::TyAnon(..) => { bug!("ty_is_local invoked on unexpected type: {:?}", ty) } } diff --git a/src/librustc/traits/engine.rs b/src/librustc/traits/engine.rs new file mode 100644 index 000000000000..acbf5392cf54 --- /dev/null +++ b/src/librustc/traits/engine.rs @@ -0,0 +1,79 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use infer::InferCtxt; +use ty::{self, Ty, TyCtxt}; +use hir::def_id::DefId; + +use super::{FulfillmentContext, FulfillmentError}; +use super::{ObligationCause, PredicateObligation}; + +pub trait TraitEngine<'tcx>: 'tcx { + fn normalize_projection_type( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>, + ) -> Ty<'tcx>; + + fn register_bound( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>, + def_id: DefId, + cause: ObligationCause<'tcx>, + ); + + fn register_predicate_obligation( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + obligation: PredicateObligation<'tcx>, + ); + + fn select_all_or_error( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Result<(), Vec>>; + + fn select_where_possible( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Result<(), Vec>>; + + fn pending_obligations(&self) -> Vec>; +} + +pub trait TraitEngineExt<'tcx> { + fn register_predicate_obligations( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + obligations: 
impl IntoIterator>, + ); +} + +impl> TraitEngineExt<'tcx> for T { + fn register_predicate_obligations( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + obligations: impl IntoIterator>, + ) { + for obligation in obligations { + self.register_predicate_obligation(infcx, obligation); + } + } +} + +impl dyn TraitEngine<'tcx> { + pub fn new(_tcx: TyCtxt<'_, '_, 'tcx>) -> Box { + Box::new(FulfillmentContext::new()) + } +} diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index 54da23821f5d..92c66ef39eaa 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -21,22 +21,22 @@ use super::{ TraitNotObjectSafe, ConstEvalFailure, PredicateObligation, - Reveal, SelectionContext, SelectionError, ObjectSafetyViolation, + Overflow, }; -use errors::DiagnosticBuilder; +use errors::{Applicability, DiagnosticBuilder}; use hir; use hir::def_id::DefId; use infer::{self, InferCtxt}; use infer::type_variable::TypeVariableOrigin; -use middle::const_val; use std::fmt; use syntax::ast; use session::DiagnosticMessageId; use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; +use ty::GenericParamDefKind; use ty::error::ExpectedFound; use ty::fast_reject; use ty::fold::TypeFolder; @@ -48,15 +48,16 @@ use syntax_pos::{DUMMY_SP, Span}; impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn report_fulfillment_errors(&self, - errors: &Vec>, - body_id: Option) { + errors: &[FulfillmentError<'tcx>], + body_id: Option, + fallback_has_occurred: bool) { #[derive(Debug)] struct ErrorDescriptor<'tcx> { predicate: ty::Predicate<'tcx>, index: Option, // None if this is an old error } - let mut error_map : FxHashMap<_, _> = + let mut error_map : FxHashMap<_, Vec<_>> = self.reported_trait_errors.borrow().iter().map(|(&span, predicates)| { (span, predicates.iter().map(|predicate| ErrorDescriptor { predicate: predicate.clone(), @@ -65,14 +66,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { 
}).collect(); for (index, error) in errors.iter().enumerate() { - error_map.entry(error.obligation.cause.span).or_insert(Vec::new()).push( + error_map.entry(error.obligation.cause.span).or_default().push( ErrorDescriptor { predicate: error.obligation.predicate.clone(), index: Some(index) }); self.reported_trait_errors.borrow_mut() - .entry(error.obligation.cause.span).or_insert(Vec::new()) + .entry(error.obligation.cause.span).or_default() .push(error.obligation.predicate.clone()); } @@ -109,7 +110,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { for (error, suppressed) in errors.iter().zip(is_suppressed) { if !suppressed { - self.report_fulfillment_error(error, body_id); + self.report_fulfillment_error(error, body_id, fallback_has_occurred); } } } @@ -141,8 +142,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // FIXME: I'm just not taking associated types at all here. // Eventually I'll need to implement param-env-aware // `Γ₁ ⊦ φ₁ => Γ₂ ⊦ φ₂` logic. - let param_env = ty::ParamEnv::empty(Reveal::UserFacing); - if let Ok(_) = self.can_sub(param_env, error, implication) { + let param_env = ty::ParamEnv::empty(); + if self.can_sub(param_env, error, implication).is_ok() { debug!("error_implies: {:?} -> {:?} -> {:?}", cond, error, implication); return true } @@ -153,11 +154,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn report_fulfillment_error(&self, error: &FulfillmentError<'tcx>, - body_id: Option) { + body_id: Option, + fallback_has_occurred: bool) { debug!("report_fulfillment_errors({:?})", error); match error.code { FulfillmentErrorCode::CodeSelectionError(ref e) => { - self.report_selection_error(&error.obligation, e); + self.report_selection_error(&error.obligation, e, fallback_has_occurred); } FulfillmentErrorCode::CodeProjectionError(ref e) => { self.report_projection_error(&error.obligation, e); @@ -201,17 +203,19 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { obligation.cause.span, 
infer::LateBoundRegionConversionTime::HigherRankedType, data); - let normalized = super::normalize_projection_type( + let mut obligations = vec![]; + let normalized_ty = super::normalize_projection_type( &mut selcx, obligation.param_env, data.projection_ty, obligation.cause.clone(), - 0 + 0, + &mut obligations ); if let Err(error) = self.at(&obligation.cause, obligation.param_env) - .eq(normalized.value, data.ty) { + .eq(normalized_ty, data.ty) { values = Some(infer::ValuePairs::Types(ExpectedFound { - expected: normalized.value, + expected: normalized_ty, found: data.ty, })); err_buf = error; @@ -262,6 +266,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { }, ty::TyGenerator(..) => Some(18), ty::TyForeign(..) => Some(19), + ty::TyGeneratorWitness(..) => Some(20), ty::TyInfer(..) | ty::TyError => None } } @@ -336,18 +341,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { .unwrap_or(trait_ref.def_id()); let trait_ref = *trait_ref.skip_binder(); - let desugaring; - let method; let mut flags = vec![]; - let direct = match obligation.cause.code { + match obligation.cause.code { ObligationCauseCode::BuiltinDerivedObligation(..) | - ObligationCauseCode::ImplDerivedObligation(..) => false, - _ => true - }; - if direct { - // this is a "direct", user-specified, rather than derived, - // obligation. - flags.push(("direct", None)); + ObligationCauseCode::ImplDerivedObligation(..) => {} + _ => { + // this is a "direct", user-specified, rather than derived, + // obligation. + flags.push(("direct".to_string(), None)); + } } if let ObligationCauseCode::ItemObligation(item) = obligation.cause.code { @@ -357,22 +359,46 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // // Currently I'm leaving it for what I need for `try`. 
if self.tcx.trait_of_item(item) == Some(trait_ref.def_id) { - method = self.tcx.item_name(item); - flags.push(("from_method", None)); - flags.push(("from_method", Some(&*method))); + let method = self.tcx.item_name(item); + flags.push(("from_method".to_string(), None)); + flags.push(("from_method".to_string(), Some(method.to_string()))); } } if let Some(k) = obligation.cause.span.compiler_desugaring_kind() { - desugaring = k.as_symbol().as_str(); - flags.push(("from_desugaring", None)); - flags.push(("from_desugaring", Some(&*desugaring))); + flags.push(("from_desugaring".to_string(), None)); + flags.push(("from_desugaring".to_string(), Some(k.name().to_string()))); + } + let generics = self.tcx.generics_of(def_id); + let self_ty = trait_ref.self_ty(); + // This is also included through the generics list as `Self`, + // but the parser won't allow you to use it + flags.push(("_Self".to_string(), Some(self_ty.to_string()))); + if let Some(def) = self_ty.ty_adt_def() { + // We also want to be able to select self's original + // signature with no type arguments resolved + flags.push(("_Self".to_string(), Some(self.tcx.type_of(def.did).to_string()))); + } + + for param in generics.params.iter() { + let value = match param.kind { + GenericParamDefKind::Type {..} => { + trait_ref.substs[param.index as usize].to_string() + }, + GenericParamDefKind::Lifetime => continue, + }; + let name = param.name.to_string(); + flags.push((name, Some(value))); + } + + if let Some(true) = self_ty.ty_adt_def().map(|def| def.did.is_local()) { + flags.push(("crate_local".to_string(), None)); } if let Ok(Some(command)) = OnUnimplementedDirective::of_item( self.tcx, trait_ref.def_id, def_id ) { - command.evaluate(self.tcx, trait_ref, &flags) + command.evaluate(self.tcx, trait_ref, &flags[..]) } else { OnUnimplementedNote::empty() } @@ -409,24 +435,42 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn report_similar_impl_candidates(&self, - impl_candidates: Vec>, + mut impl_candidates: 
Vec>, err: &mut DiagnosticBuilder) { if impl_candidates.is_empty() { return; } + let len = impl_candidates.len(); let end = if impl_candidates.len() <= 5 { impl_candidates.len() } else { 4 }; + + let normalize = |candidate| self.tcx.global_tcx().infer_ctxt().enter(|ref infcx| { + let normalized = infcx + .at(&ObligationCause::dummy(), ty::ParamEnv::empty()) + .normalize(candidate) + .ok(); + match normalized { + Some(normalized) => format!("\n {:?}", normalized.value), + None => format!("\n {:?}", candidate), + } + }); + + // Sort impl candidates so that ordering is consistent for UI tests. + let normalized_impl_candidates = &mut impl_candidates[0..end] + .iter() + .map(normalize) + .collect::>(); + normalized_impl_candidates.sort(); + err.help(&format!("the following implementations were found:{}{}", - &impl_candidates[0..end].iter().map(|candidate| { - format!("\n {:?}", candidate) - }).collect::(), - if impl_candidates.len() > 5 { - format!("\nand {} others", impl_candidates.len() - 4) + normalized_impl_candidates.join(""), + if len > 5 { + format!("\nand {} others", len - 4) } else { "".to_owned() } @@ -480,7 +524,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { item_name: ast::Name, _impl_item_def_id: DefId, trait_item_def_id: DefId, - requirement: &fmt::Display) + requirement: &dyn fmt::Display) -> DiagnosticBuilder<'tcx> { let msg = "impl has stricter requirements than trait"; @@ -507,7 +551,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { &data.parent_trait_ref); match self.get_parent_trait_ref(&data.parent_code) { Some(t) => Some(t), - None => Some(format!("{}", parent_trait_ref.0.self_ty())), + None => Some(parent_trait_ref.skip_binder().self_ty().to_string()), } } _ => None, @@ -516,7 +560,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { pub fn report_selection_error(&self, obligation: &PredicateObligation<'tcx>, - error: &SelectionError<'tcx>) + error: &SelectionError<'tcx>, + fallback_has_occurred: bool) { let span = 
obligation.cause.span; @@ -548,7 +593,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { .map(|t| (format!(" in `{}`", t), format!("within `{}`, ", t))) .unwrap_or((String::new(), String::new())); - let OnUnimplementedNote { message, label } + let OnUnimplementedNote { message, label, note } = self.on_unimplemented_note(trait_ref, obligation); let have_alt_message = message.is_some() || label.is_some(); @@ -562,23 +607,31 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { trait_ref.to_predicate(), post_message) })); + let explanation = + if obligation.cause.code == ObligationCauseCode::MainFunctionType { + "consider using `()`, or a `Result`".to_owned() + } else { + format!("{}the trait `{}` is not implemented for `{}`", + pre_message, + trait_ref, + trait_ref.self_ty()) + }; + if let Some(ref s) = label { // If it has a custom "#[rustc_on_unimplemented]" // error message, let's display it as the label! err.span_label(span, s.as_str()); - err.help(&format!("{}the trait `{}` is not implemented for `{}`", - pre_message, - trait_ref, - trait_ref.self_ty())); + err.help(&explanation); } else { - err.span_label(span, - &*format!("{}the trait `{}` is not implemented for `{}`", - pre_message, - trait_ref, - trait_ref.self_ty())); + err.span_label(span, explanation); + } + if let Some(ref s) = note { + // If it has a custom "#[rustc_on_unimplemented]" note, let's display it + err.note(s.as_str()); } self.suggest_borrow_on_unsized_slice(&obligation.cause.code, &mut err); + self.suggest_remove_reference(&obligation, &mut err, &trait_ref); // Try to report a help message if !trait_ref.has_infer_types() && @@ -598,6 +651,34 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.report_similar_impl_candidates(impl_candidates, &mut err); } + // If this error is due to `!: Trait` not implemented but `(): Trait` is + // implemented, and fallback has occured, then it could be due to a + // variable that used to fallback to `()` now falling back to `!`. 
Issue a + // note informing about the change in behaviour. + if trait_predicate.skip_binder().self_ty().is_never() + && fallback_has_occurred + { + let predicate = trait_predicate.map_bound(|mut trait_pred| { + trait_pred.trait_ref.substs = self.tcx.mk_substs_trait( + self.tcx.mk_nil(), + &trait_pred.trait_ref.substs[1..], + ); + trait_pred + }); + let unit_obligation = Obligation { + predicate: ty::Predicate::Trait(predicate), + .. obligation.clone() + }; + if self.predicate_may_hold(&unit_obligation) { + err.note("the trait is implemented for `()`. \ + Possibly this error has been caused by changes to \ + Rust's type-inference algorithm \ + (see: https://github.com/rust-lang/rust/issues/48950 \ + for more info). Consider whether you meant to use the \ + type `()` here instead."); + } + } + err } @@ -608,16 +689,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { span_bug!(span, "subtype requirement gave wrong error: `{:?}`", predicate) } - ty::Predicate::Equate(ref predicate) => { - let predicate = self.resolve_type_vars_if_possible(predicate); - let err = self.equality_predicate(&obligation.cause, - obligation.param_env, - &predicate).err().unwrap(); - struct_span_err!(self.tcx.sess, span, E0278, - "the requirement `{}` is not satisfied (`{}`)", - predicate, err) - } - ty::Predicate::RegionOutlives(ref predicate) => { let predicate = self.resolve_type_vars_if_possible(predicate); let err = self.region_outlives_predicate(&obligation.cause, @@ -712,98 +783,53 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } let found_trait_ty = found_trait_ref.self_ty(); - let found_did = found_trait_ty.ty_to_def_id(); + let found_did = match found_trait_ty.sty { + ty::TyClosure(did, _) | + ty::TyForeign(did) | + ty::TyFnDef(did, _) => Some(did), + ty::TyAdt(def, _) => Some(def.did), + _ => None, + }; let found_span = found_did.and_then(|did| { self.tcx.hir.span_if_local(did) }).map(|sp| self.tcx.sess.codemap().def_span(sp)); // the sp could be an fn def - let 
found_ty_count = - match found_trait_ref.skip_binder().substs.type_at(1).sty { - ty::TyTuple(ref tys, _) => tys.len(), - _ => 1, - }; - let (expected_tys, expected_ty_count) = - match expected_trait_ref.skip_binder().substs.type_at(1).sty { - ty::TyTuple(ref tys, _) => - (tys.iter().map(|t| &t.sty).collect(), tys.len()), - ref sty => (vec![sty], 1), - }; - if found_ty_count == expected_ty_count { + let found = match found_trait_ref.skip_binder().substs.type_at(1).sty { + ty::TyTuple(ref tys) => tys.iter() + .map(|_| ArgKind::empty()).collect::>(), + _ => vec![ArgKind::empty()], + }; + let expected = match expected_trait_ref.skip_binder().substs.type_at(1).sty { + ty::TyTuple(ref tys) => tys.iter() + .map(|t| match t.sty { + ty::TypeVariants::TyTuple(ref tys) => ArgKind::Tuple( + Some(span), + tys.iter() + .map(|ty| ("_".to_owned(), ty.sty.to_string())) + .collect::>() + ), + _ => ArgKind::Arg("_".to_owned(), t.sty.to_string()), + }).collect(), + ref sty => vec![ArgKind::Arg("_".to_owned(), sty.to_string())], + }; + if found.len() == expected.len() { self.report_closure_arg_mismatch(span, found_span, found_trait_ref, expected_trait_ref) } else { - let expected_tuple = if expected_ty_count == 1 { - expected_tys.first().and_then(|t| { - if let &&ty::TyTuple(ref tuptys, _) = t { - Some(tuptys.len()) - } else { - None - } - }) - } else { - None - }; - - // FIXME(#44150): Expand this to "N args expected but a N-tuple found." - // Type of the 1st expected argument is somehow provided as type of a - // found one in that case. - // - // ``` - // [1i32, 2, 3].sort_by(|(a, b)| ..) - // // ^^^^^^^ -------- - // // expected_trait_ref: std::ops::FnMut<(&i32, &i32)> - // // found_trait_ref: std::ops::FnMut<(&i32,)> - // ``` - - let (closure_span, closure_args) = found_did + let (closure_span, found) = found_did .and_then(|did| self.tcx.hir.get_if_local(did)) - .and_then(|node| { - if let hir::map::NodeExpr( - &hir::Expr { - node: hir::ExprClosure(_, ref decl, id, span, _), - .. 
- }) = node - { - let ty_snips = decl.inputs.iter() - .map(|ty| { - self.tcx.sess.codemap().span_to_snippet(ty.span).ok() - .and_then(|snip| { - // filter out dummy spans - if snip == "," || snip == "|" { - None - } else { - Some(snip) - } - }) - }) - .collect::>>(); + .map(|node| { + let (found_span, found) = self.get_fn_like_arguments(node); + (Some(found_span), found) + }).unwrap_or((found_span, found)); - let body = self.tcx.hir.body(id); - let pat_snips = body.arguments.iter() - .map(|arg| - self.tcx.sess.codemap().span_to_snippet(arg.pat.span).ok()) - .collect::>>(); - - Some((span, pat_snips, ty_snips)) - } else { - None - } - }) - .map(|(span, pat, ty)| (Some(span), Some((pat, ty)))) - .unwrap_or((None, None)); - let closure_args = closure_args.and_then(|(pat, ty)| Some((pat?, ty))); - - self.report_arg_count_mismatch( - span, - closure_span.or(found_span), - expected_ty_count, - expected_tuple, - found_ty_count, - closure_args, - found_trait_ty.is_closure() - ) + self.report_arg_count_mismatch(span, + closure_span, + expected, + found, + found_trait_ty.is_closure()) } } @@ -814,10 +840,17 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ConstEvalFailure(ref err) => { - if let const_val::ErrKind::TypeckError = err.kind { - return; + match err.struct_error( + self.tcx.at(span), + "could not evaluate constant expression", + ) { + Some(err) => err, + None => return, } - err.struct_error(self.tcx, span, "constant expression") + } + + Overflow => { + bug!("overflow should be handled before the `report_selection_error` path"); } }; self.note_obligation_cause(&mut err, obligation); @@ -833,11 +866,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let parent_node = self.tcx.hir.get_parent_node(node_id); if let Some(hir::map::NodeLocal(ref local)) = self.tcx.hir.find(parent_node) { if let Some(ref expr) = local.init { - if let hir::ExprIndex(_, _) = expr.node { + if let hir::ExprKind::Index(_, _) = expr.node { if let Ok(snippet) = 
self.tcx.sess.codemap().span_to_snippet(expr.span) { - err.span_suggestion(expr.span, - "consider borrowing here", - format!("&{}", snippet)); + err.span_suggestion_with_applicability( + expr.span, + "consider borrowing here", + format!("&{}", snippet), + Applicability::MachineApplicable + ); } } } @@ -845,94 +881,252 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - fn report_arg_count_mismatch( + /// Whenever references are used by mistake, like `for (i, e) in &vec.iter().enumerate()`, + /// suggest removing these references until we reach a type that implements the trait. + fn suggest_remove_reference(&self, + obligation: &PredicateObligation<'tcx>, + err: &mut DiagnosticBuilder<'tcx>, + trait_ref: &ty::Binder>) { + let trait_ref = trait_ref.skip_binder(); + let span = obligation.cause.span; + + if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(span) { + let refs_number = snippet.chars() + .filter(|c| !c.is_whitespace()) + .take_while(|c| *c == '&') + .count(); + + let mut trait_type = trait_ref.self_ty(); + + for refs_remaining in 0..refs_number { + if let ty::TypeVariants::TyRef(_, t_type, _) = trait_type.sty { + trait_type = t_type; + + let substs = self.tcx.mk_substs_trait(trait_type, &[]); + let new_trait_ref = ty::TraitRef::new(trait_ref.def_id, substs); + let new_obligation = Obligation::new(ObligationCause::dummy(), + obligation.param_env, + new_trait_ref.to_predicate()); + + if self.predicate_may_hold(&new_obligation) { + let sp = self.tcx.sess.codemap() + .span_take_while(span, |c| c.is_whitespace() || *c == '&'); + + let remove_refs = refs_remaining + 1; + let format_str = format!("consider removing {} leading `&`-references", + remove_refs); + + err.span_suggestion_short_with_applicability( + sp, &format_str, String::from(""), Applicability::MachineApplicable + ); + break; + } + } else { + break; + } + } + } + } + + /// Given some node representing a fn-like thing in the HIR map, + /// returns a span and `ArgKind` information that 
describes the + /// arguments it expects. This can be supplied to + /// `report_arg_count_mismatch`. + pub fn get_fn_like_arguments(&self, node: hir::map::Node) -> (Span, Vec) { + match node { + hir::map::NodeExpr(&hir::Expr { + node: hir::ExprKind::Closure(_, ref _decl, id, span, _), + .. + }) => { + (self.tcx.sess.codemap().def_span(span), self.tcx.hir.body(id).arguments.iter() + .map(|arg| { + if let hir::Pat { + node: hir::PatKind::Tuple(args, _), + span, + .. + } = arg.pat.clone().into_inner() { + ArgKind::Tuple( + Some(span), + args.iter().map(|pat| { + let snippet = self.tcx.sess.codemap() + .span_to_snippet(pat.span).unwrap(); + (snippet, "_".to_owned()) + }).collect::>(), + ) + } else { + let name = self.tcx.sess.codemap() + .span_to_snippet(arg.pat.span).unwrap(); + ArgKind::Arg(name, "_".to_owned()) + } + }) + .collect::>()) + } + hir::map::NodeItem(&hir::Item { + span, + node: hir::ItemKind::Fn(ref decl, ..), + .. + }) | + hir::map::NodeImplItem(&hir::ImplItem { + span, + node: hir::ImplItemKind::Method(hir::MethodSig { ref decl, .. }, _), + .. + }) | + hir::map::NodeTraitItem(&hir::TraitItem { + span, + node: hir::TraitItemKind::Method(hir::MethodSig { ref decl, .. }, _), + .. + }) => { + (self.tcx.sess.codemap().def_span(span), decl.inputs.iter() + .map(|arg| match arg.clone().node { + hir::TyKind::Tup(ref tys) => ArgKind::Tuple( + Some(arg.span), + tys.iter() + .map(|_| ("_".to_owned(), "_".to_owned())) + .collect::>(), + ), + _ => ArgKind::Arg("_".to_owned(), "_".to_owned()) + }).collect::>()) + } + hir::map::NodeVariant(&hir::Variant { + span, + node: hir::VariantKind { + data: hir::VariantData::Tuple(ref fields, _), + .. + }, + .. 
+ }) => { + (self.tcx.sess.codemap().def_span(span), + fields.iter().map(|field| { + ArgKind::Arg(field.ident.to_string(), "_".to_string()) + }).collect::>()) + } + hir::map::NodeStructCtor(ref variant_data) => { + (self.tcx.sess.codemap().def_span(self.tcx.hir.span(variant_data.id())), + variant_data.fields() + .iter().map(|_| ArgKind::Arg("_".to_owned(), "_".to_owned())) + .collect()) + } + _ => panic!("non-FnLike node found: {:?}", node), + } + } + + /// Reports an error when the number of arguments needed by a + /// trait match doesn't match the number that the expression + /// provides. + pub fn report_arg_count_mismatch( &self, span: Span, found_span: Option, - expected: usize, - expected_tuple: Option, - found: usize, - closure_args: Option<(Vec, Vec>)>, - is_closure: bool + expected_args: Vec, + found_args: Vec, + is_closure: bool, ) -> DiagnosticBuilder<'tcx> { - use std::borrow::Cow; - let kind = if is_closure { "closure" } else { "function" }; - let args_str = |n, distinct| format!( - "{} {}argument{}", - n, - if distinct && n >= 2 { "distinct " } else { "" }, - if n == 1 { "" } else { "s" }, - ); - - let expected_str = if let Some(n) = expected_tuple { - assert!(expected == 1); - if closure_args.as_ref().map(|&(ref pats, _)| pats.len()) == Some(n) { - Cow::from("a single tuple as argument") - } else { - // be verbose when numbers differ - Cow::from(format!("a single {}-tuple as argument", n)) + let args_str = |arguments: &[ArgKind], other: &[ArgKind]| { + let arg_length = arguments.len(); + let distinct = match &other[..] 
{ + &[ArgKind::Tuple(..)] => true, + _ => false, + }; + match (arg_length, arguments.get(0)) { + (1, Some(&ArgKind::Tuple(_, ref fields))) => { + format!("a single {}-tuple as argument", fields.len()) + } + _ => format!("{} {}argument{}", + arg_length, + if distinct && arg_length > 1 { "distinct " } else { "" }, + if arg_length == 1 { "" } else { "s" }), } - } else { - Cow::from(args_str(expected, false)) }; - let found_str = if expected_tuple.is_some() { - args_str(found, true) - } else { - args_str(found, false) - }; + let expected_str = args_str(&expected_args, &found_args); + let found_str = args_str(&found_args, &expected_args); - - let mut err = struct_span_err!(self.tcx.sess, span, E0593, + let mut err = struct_span_err!( + self.tcx.sess, + span, + E0593, "{} is expected to take {}, but it takes {}", kind, expected_str, found_str, ); - err.span_label( - span, - format!( - "expected {} that takes {}", - kind, - expected_str, - ) - ); + err.span_label(span, format!( "expected {} that takes {}", kind, expected_str)); - if let Some(span) = found_span { - if let (Some(expected_tuple), Some((pats, tys))) = (expected_tuple, closure_args) { - if expected_tuple != found || pats.len() != found { - err.span_label(span, format!("takes {}", found_str)); - } else { + if let Some(found_span) = found_span { + err.span_label(found_span, format!("takes {}", found_str)); + + // Suggest to take and ignore the arguments with expected_args_length `_`s if + // found arguments is empty (assume the user just wants to ignore args in this case). + // For example, if `expected_args_length` is 2, suggest `|_, _|`. 
+ if found_args.is_empty() && is_closure { + let underscores = "_".repeat(expected_args.len()) + .split("") + .filter(|s| !s.is_empty()) + .collect::>() + .join(", "); + err.span_suggestion_with_applicability( + found_span, + &format!( + "consider changing the closure to take and ignore the expected argument{}", + if expected_args.len() < 2 { + "" + } else { + "s" + } + ), + format!("|{}|", underscores), + Applicability::MachineApplicable, + ); + } + + if let &[ArgKind::Tuple(_, ref fields)] = &found_args[..] { + if fields.len() == expected_args.len() { + let sugg = fields.iter() + .map(|(name, _)| name.to_owned()) + .collect::>().join(", "); + err.span_suggestion_with_applicability(found_span, + "change the closure to take multiple \ + arguments instead of a single tuple", + format!("|{}|", sugg), + Applicability::MachineApplicable); + } + } + if let &[ArgKind::Tuple(_, ref fields)] = &expected_args[..] { + if fields.len() == found_args.len() && is_closure { let sugg = format!( "|({}){}|", - pats.join(", "), - + found_args.iter() + .map(|arg| match arg { + ArgKind::Arg(name, _) => name.to_owned(), + _ => "_".to_owned(), + }) + .collect::>() + .join(", "), // add type annotations if available - if tys.iter().any(|ty| ty.is_some()) { - Cow::from(format!( - ": ({})", - tys.into_iter().map(|ty| if let Some(ty) = ty { - ty - } else { - "_".to_string() - }).collect::>().join(", ") - )) + if found_args.iter().any(|arg| match arg { + ArgKind::Arg(_, ty) => ty != "_", + _ => false, + }) { + format!(": ({})", + fields.iter() + .map(|(_, ty)| ty.to_owned()) + .collect::>() + .join(", ")) } else { - Cow::from("") + "".to_owned() }, ); - - err.span_suggestion( - span, - "consider changing the closure to accept a tuple", - sugg + err.span_suggestion_with_applicability( + found_span, + "change the closure to accept a tuple instead of \ + individual arguments", + sugg, + Applicability::MachineApplicable ); } - } else { - err.span_label(span, format!("takes {}", found_str)); } } 
@@ -949,13 +1143,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { fn build_fn_sig_string<'a, 'gcx, 'tcx>(tcx: ty::TyCtxt<'a, 'gcx, 'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> String { let inputs = trait_ref.substs.type_at(1); - let sig = if let ty::TyTuple(inputs, _) = inputs.sty { + let sig = if let ty::TyTuple(inputs) = inputs.sty { tcx.mk_fn_sig( inputs.iter().map(|&x| x), tcx.mk_infer(ty::TyVar(ty::TyVid { index: 0 })), false, hir::Unsafety::Normal, - ::syntax::abi::Abi::Rust + ::rustc_target::spec::abi::Abi::Rust ) } else { tcx.mk_fn_sig( @@ -963,10 +1157,10 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { tcx.mk_infer(ty::TyVar(ty::TyVid { index: 0 })), false, hir::Unsafety::Normal, - ::syntax::abi::Abi::Rust + ::rustc_target::spec::abi::Abi::Rust ) }; - format!("{}", ty::Binder(sig)) + ty::Binder::bind(sig).to_string() } let argument_is_closure = expected_ref.skip_binder().substs.type_at(0).is_closure(); @@ -1091,7 +1285,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.tcx.lang_items().sized_trait() .map_or(false, |sized_id| sized_id == trait_ref.def_id()) { - self.need_type_info(body_id, span, self_ty); + self.need_type_info_err(body_id, span, self_ty).emit(); } else { let mut err = struct_span_err!(self.tcx.sess, span, E0283, @@ -1108,7 +1302,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // Same hacky approach as above to avoid deluging user // with error messages. 
if !ty.references_error() && !self.tcx.sess.has_errors() { - self.need_type_info(body_id, span, ty); + self.need_type_info_err(body_id, span, ty).emit(); } } @@ -1119,9 +1313,9 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let &SubtypePredicate { a_is_expected: _, a, b } = data.skip_binder(); // both must be type variables, or the other would've been instantiated assert!(a.is_ty_var() && b.is_ty_var()); - self.need_type_info(body_id, - obligation.cause.span, - a); + self.need_type_info_err(body_id, + obligation.cause.span, + a).emit(); } } @@ -1186,7 +1380,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { cleaned_pred.to_predicate() ); - selcx.evaluate_obligation(&obligation) + self.predicate_may_hold(&obligation) }) } @@ -1197,13 +1391,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { { self.note_obligation_cause_code(err, &obligation.predicate, - &obligation.cause.code); + &obligation.cause.code, + &mut vec![]); } fn note_obligation_cause_code(&self, err: &mut DiagnosticBuilder, predicate: &T, - cause_code: &ObligationCauseCode<'tcx>) + cause_code: &ObligationCauseCode<'tcx>, + obligated_types: &mut Vec<&ty::TyS<'tcx>>) where T: fmt::Display { let tcx = self.tcx; @@ -1212,7 +1408,6 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ObligationCauseCode::MatchExpressionArm { .. 
} | ObligationCauseCode::IfExpression | ObligationCauseCode::IfExpressionWithNoElse | - ObligationCauseCode::EquatePredicate | ObligationCauseCode::MainFunctionType | ObligationCauseCode::StartFunctionType | ObligationCauseCode::IntrinsicType | @@ -1241,7 +1436,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ObligationCauseCode::ItemObligation(item_def_id) => { let item_name = tcx.item_path_str(item_def_id); - err.note(&format!("required by `{}`", item_name)); + let msg = format!("required by `{}`", item_name); + if let Some(sp) = tcx.hir.span_if_local(item_def_id) { + let sp = tcx.sess.codemap().def_span(sp); + err.span_note(sp, &msg); + } else { + err.note(&msg); + } } ObligationCauseCode::ObjectCastObligation(object_ty) => { err.note(&format!("required for the cast to the object type `{}`", @@ -1253,11 +1454,24 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ObligationCauseCode::VariableType(_) => { err.note("all local variables must have a statically known size"); + if !self.tcx.features().unsized_locals { + err.help("unsized locals are gated as an unstable feature"); + } + } + ObligationCauseCode::SizedArgumentType => { + err.note("all function arguments must have a statically known size"); + if !self.tcx.features().unsized_locals { + err.help("unsized locals are gated as an unstable feature"); + } } ObligationCauseCode::SizedReturnType => { err.note("the return type of a function must have a \ statically known size"); } + ObligationCauseCode::SizedYieldType => { + err.note("the yield type of a generator must have a \ + statically known size"); + } ObligationCauseCode::AssignmentLhsSized => { err.note("the left-hand-side of an assignment must have a statically known size"); } @@ -1267,11 +1481,16 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ObligationCauseCode::StructInitializerSized => { err.note("structs must have a statically known size to be initialized"); } - ObligationCauseCode::FieldSized(ref item) => { + 
ObligationCauseCode::FieldSized { adt_kind: ref item, last } => { match *item { AdtKind::Struct => { - err.note("only the last field of a struct may have a dynamically \ - sized type"); + if last { + err.note("the last field of a packed struct may only have a \ + dynamically sized type if it does not need drop to be run"); + } else { + err.note("only the last field of a struct may have a dynamically \ + sized type"); + } } AdtKind::Union => { err.note("no field of a union may have a dynamically sized type"); @@ -1289,23 +1508,29 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ObligationCauseCode::BuiltinDerivedObligation(ref data) => { let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref); - err.note(&format!("required because it appears within the type `{}`", - parent_trait_ref.0.self_ty())); + let ty = parent_trait_ref.skip_binder().self_ty(); + err.note(&format!("required because it appears within the type `{}`", ty)); + obligated_types.push(ty); + let parent_predicate = parent_trait_ref.to_predicate(); - self.note_obligation_cause_code(err, - &parent_predicate, - &data.parent_code); + if !self.is_recursive_obligation(obligated_types, &data.parent_code) { + self.note_obligation_cause_code(err, + &parent_predicate, + &data.parent_code, + obligated_types); + } } ObligationCauseCode::ImplDerivedObligation(ref data) => { let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref); err.note( &format!("required because of the requirements on the impl of `{}` for `{}`", parent_trait_ref, - parent_trait_ref.0.self_ty())); + parent_trait_ref.skip_binder().self_ty())); let parent_predicate = parent_trait_ref.to_predicate(); self.note_obligation_cause_code(err, - &parent_predicate, - &data.parent_code); + &parent_predicate, + &data.parent_code, + obligated_types); } ObligationCauseCode::CompareImplMethodObligation { .. 
} => { err.note( @@ -1315,6 +1540,14 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ObligationCauseCode::ReturnType(_) | ObligationCauseCode::BlockTailExpression(_) => (), + ObligationCauseCode::TrivialBound => { + err.help("see issue #48214"); + if tcx.sess.opts.unstable_features.is_nightly_build() { + err.help("add #![feature(trivial_bounds)] to the \ + crate attributes to enable", + ); + } + } } } @@ -1324,4 +1557,50 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { err.help(&format!("consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate", suggested_limit)); } + + fn is_recursive_obligation(&self, + obligated_types: &mut Vec<&ty::TyS<'tcx>>, + cause_code: &ObligationCauseCode<'tcx>) -> bool { + if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = cause_code { + let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref); + for obligated_type in obligated_types { + if obligated_type == &parent_trait_ref.skip_binder().self_ty() { + return true; + } + } + } + return false; + } +} + +/// Summarizes information +pub enum ArgKind { + /// An argument of non-tuple type. Parameters are (name, ty) + Arg(String, String), + + /// An argument of tuple type. For a "found" argument, the span is + /// the locationo in the source of the pattern. For a "expected" + /// argument, it will be None. The vector is a list of (name, ty) + /// strings for the components of the tuple. + Tuple(Option, Vec<(String, String)>), +} + +impl ArgKind { + fn empty() -> ArgKind { + ArgKind::Arg("_".to_owned(), "_".to_owned()) + } + + /// Creates an `ArgKind` from the expected type of an + /// argument. This has no name (`_`) and no source spans.. 
+ pub fn from_expected_ty(t: Ty<'_>) -> ArgKind { + match t.sty { + ty::TyTuple(ref tys) => ArgKind::Tuple( + None, + tys.iter() + .map(|ty| ("_".to_owned(), ty.sty.to_string())) + .collect::>() + ), + _ => ArgKind::Arg("_".to_owned(), t.sty.to_string()), + } + } } diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs index 93e33836818c..5113f3cde328 100644 --- a/src/librustc/traits/fulfill.rs +++ b/src/librustc/traits/fulfill.rs @@ -8,17 +8,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use infer::{RegionObligation, InferCtxt, InferOk}; +use infer::{RegionObligation, InferCtxt}; +use mir::interpret::GlobalId; use ty::{self, Ty, TypeFoldable, ToPolyTraitRef, ToPredicate}; use ty::error::ExpectedFound; -use rustc_data_structures::obligation_forest::{ObligationForest, Error}; -use rustc_data_structures::obligation_forest::{ForestObligation, ObligationProcessor}; +use rustc_data_structures::obligation_forest::{Error, ForestObligation, ObligationForest}; +use rustc_data_structures::obligation_forest::{ObligationProcessor, ProcessResult}; use std::marker::PhantomData; use hir::def_id::DefId; +use mir::interpret::ConstEvalErr; +use mir::interpret::EvalErrorKind; use super::CodeAmbiguity; use super::CodeProjectionError; use super::CodeSelectionError; +use super::engine::{TraitEngine, TraitEngineExt}; use super::{FulfillmentError, FulfillmentErrorCode}; use super::{ObligationCause, PredicateObligation, Obligation}; use super::project; @@ -83,125 +87,7 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { } } - /// "Normalize" a projection type `::X` by - /// creating a fresh type variable `$0` as well as a projection - /// predicate `::X == $0`. When the - /// inference engine runs, it will attempt to find an impl of - /// `SomeTrait` or a where clause that lets us unify `$0` with - /// something concrete. If this fails, we'll unify `$0` with - /// `projection_ty` again. 
- pub fn normalize_projection_type(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - projection_ty: ty::ProjectionTy<'tcx>, - cause: ObligationCause<'tcx>) - -> Ty<'tcx> - { - debug!("normalize_projection_type(projection_ty={:?})", - projection_ty); - - assert!(!projection_ty.has_escaping_regions()); - - // FIXME(#20304) -- cache - - let mut selcx = SelectionContext::new(infcx); - let normalized = project::normalize_projection_type(&mut selcx, - param_env, - projection_ty, - cause, - 0); - - for obligation in normalized.obligations { - self.register_predicate_obligation(infcx, obligation); - } - - debug!("normalize_projection_type: result={:?}", normalized.value); - - normalized.value - } - - /// Requires that `ty` must implement the trait with `def_id` in - /// the given environment. This trait must not have any type - /// parameters (except for `Self`). - pub fn register_bound(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>, - def_id: DefId, - cause: ObligationCause<'tcx>) - { - let trait_ref = ty::TraitRef { - def_id, - substs: infcx.tcx.mk_substs_trait(ty, &[]), - }; - self.register_predicate_obligation(infcx, Obligation { - cause, - recursion_depth: 0, - param_env, - predicate: trait_ref.to_predicate() - }); - } - - pub fn register_predicate_obligation(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - obligation: PredicateObligation<'tcx>) - { - // this helps to reduce duplicate errors, as well as making - // debug output much nicer to read and so on. 
- let obligation = infcx.resolve_type_vars_if_possible(&obligation); - - debug!("register_predicate_obligation(obligation={:?})", obligation); - - assert!(!infcx.is_in_snapshot()); - - self.predicates.register_obligation(PendingPredicateObligation { - obligation, - stalled_on: vec![] - }); - } - - pub fn register_predicate_obligations(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - obligations: I) - where I: IntoIterator> - { - for obligation in obligations { - self.register_predicate_obligation(infcx, obligation); - } - } - - pub fn select_all_or_error(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>) - -> Result<(),Vec>> - { - self.select_where_possible(infcx)?; - - let errors: Vec<_> = - self.predicates.to_errors(CodeAmbiguity) - .into_iter() - .map(|e| to_fulfillment_error(e)) - .collect(); - if errors.is_empty() { - Ok(()) - } else { - Err(errors) - } - } - - pub fn select_where_possible(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>) - -> Result<(),Vec>> - { - let mut selcx = SelectionContext::new(infcx); - self.select(&mut selcx) - } - - pub fn pending_obligations(&self) -> Vec> { - self.predicates.pending_obligations() - } - - /// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it - /// only attempts to select obligations that haven't been seen before. + /// Attempts to select obligations using `selcx`. fn select(&mut self, selcx: &mut SelectionContext<'a, 'gcx, 'tcx>) -> Result<(),Vec>> { debug!("select(obligation-forest-size={})", self.predicates.len()); @@ -216,7 +102,7 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { selcx, register_region_obligations: self.register_region_obligations }); - debug!("select: outcome={:?}", outcome); + debug!("select: outcome={:#?}", outcome); // FIXME: if we kept the original cache key, we could mark projection // obligations as complete for the projection cache here. 
@@ -242,24 +128,394 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { } } +impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { + /// "Normalize" a projection type `::X` by + /// creating a fresh type variable `$0` as well as a projection + /// predicate `::X == $0`. When the + /// inference engine runs, it will attempt to find an impl of + /// `SomeTrait` or a where clause that lets us unify `$0` with + /// something concrete. If this fails, we'll unify `$0` with + /// `projection_ty` again. + fn normalize_projection_type<'a, 'gcx>(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>) + -> Ty<'tcx> + { + debug!("normalize_projection_type(projection_ty={:?})", + projection_ty); + + debug_assert!(!projection_ty.has_escaping_regions()); + + // FIXME(#20304) -- cache + + let mut selcx = SelectionContext::new(infcx); + let mut obligations = vec![]; + let normalized_ty = project::normalize_projection_type(&mut selcx, + param_env, + projection_ty, + cause, + 0, + &mut obligations); + self.register_predicate_obligations(infcx, obligations); + + debug!("normalize_projection_type: result={:?}", normalized_ty); + + normalized_ty + } + + /// Requires that `ty` must implement the trait with `def_id` in + /// the given environment. This trait must not have any type + /// parameters (except for `Self`). 
+ fn register_bound<'a, 'gcx>(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>, + def_id: DefId, + cause: ObligationCause<'tcx>) + { + let trait_ref = ty::TraitRef { + def_id, + substs: infcx.tcx.mk_substs_trait(ty, &[]), + }; + self.register_predicate_obligation(infcx, Obligation { + cause, + recursion_depth: 0, + param_env, + predicate: trait_ref.to_predicate() + }); + } + + fn register_predicate_obligation<'a, 'gcx>(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + obligation: PredicateObligation<'tcx>) + { + // this helps to reduce duplicate errors, as well as making + // debug output much nicer to read and so on. + let obligation = infcx.resolve_type_vars_if_possible(&obligation); + + debug!("register_predicate_obligation(obligation={:?})", obligation); + + assert!(!infcx.is_in_snapshot()); + + self.predicates.register_obligation(PendingPredicateObligation { + obligation, + stalled_on: vec![] + }); + } + + fn select_all_or_error<'a, 'gcx>(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>) + -> Result<(),Vec>> + { + self.select_where_possible(infcx)?; + + let errors: Vec<_> = + self.predicates.to_errors(CodeAmbiguity) + .into_iter() + .map(|e| to_fulfillment_error(e)) + .collect(); + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } + + fn select_where_possible<'a, 'gcx>(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>) + -> Result<(),Vec>> + { + let mut selcx = SelectionContext::new(infcx); + self.select(&mut selcx) + } + + fn pending_obligations(&self) -> Vec> { + self.predicates.map_pending_obligations(|o| o.obligation.clone()) + } +} + struct FulfillProcessor<'a, 'b: 'a, 'gcx: 'tcx, 'tcx: 'b> { selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, register_region_obligations: bool } +fn mk_pending(os: Vec>) -> Vec> { + os.into_iter().map(|o| PendingPredicateObligation { + obligation: o, + stalled_on: vec![] + }).collect() +} + impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, 
'tcx> { type Obligation = PendingPredicateObligation<'tcx>; type Error = FulfillmentErrorCode<'tcx>; + /// Processes a predicate obligation and returns either: + /// - `Changed(v)` if the predicate is true, presuming that `v` are also true + /// - `Unchanged` if we don't have enough info to be sure + /// - `Error(e)` if the predicate does not hold + /// + /// This is always inlined, despite its size, because it has a single + /// callsite and it is called *very* frequently. + #[inline(always)] fn process_obligation(&mut self, - obligation: &mut Self::Obligation) - -> Result>, Self::Error> + pending_obligation: &mut Self::Obligation) + -> ProcessResult { - process_predicate(self.selcx, obligation, self.register_region_obligations) - .map(|os| os.map(|os| os.into_iter().map(|o| PendingPredicateObligation { - obligation: o, - stalled_on: vec![] - }).collect())) + // if we were stalled on some unresolved variables, first check + // whether any of them have been resolved; if not, don't bother + // doing more work yet + if !pending_obligation.stalled_on.is_empty() { + if pending_obligation.stalled_on.iter().all(|&ty| { + let resolved_ty = self.selcx.infcx().shallow_resolve(&ty); + resolved_ty == ty // nothing changed here + }) { + debug!("process_predicate: pending obligation {:?} still stalled on {:?}", + self.selcx.infcx() + .resolve_type_vars_if_possible(&pending_obligation.obligation), + pending_obligation.stalled_on); + return ProcessResult::Unchanged; + } + pending_obligation.stalled_on = vec![]; + } + + let obligation = &mut pending_obligation.obligation; + + if obligation.predicate.has_infer_types() { + obligation.predicate = + self.selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate); + } + + match obligation.predicate { + ty::Predicate::Trait(ref data) => { + let trait_obligation = obligation.with(data.clone()); + + if data.is_global() && !data.has_late_bound_regions() { + // no type variables present, can use evaluation for better caching. 
+ // FIXME: consider caching errors too. + if self.selcx.infcx().predicate_must_hold(&obligation) { + debug!("selecting trait `{:?}` at depth {} evaluated to holds", + data, obligation.recursion_depth); + return ProcessResult::Changed(vec![]) + } + } + + match self.selcx.select(&trait_obligation) { + Ok(Some(vtable)) => { + debug!("selecting trait `{:?}` at depth {} yielded Ok(Some)", + data, obligation.recursion_depth); + ProcessResult::Changed(mk_pending(vtable.nested_obligations())) + } + Ok(None) => { + debug!("selecting trait `{:?}` at depth {} yielded Ok(None)", + data, obligation.recursion_depth); + + // This is a bit subtle: for the most part, the + // only reason we can fail to make progress on + // trait selection is because we don't have enough + // information about the types in the trait. One + // exception is that we sometimes haven't decided + // what kind of closure a closure is. *But*, in + // that case, it turns out, the type of the + // closure will also change, because the closure + // also includes references to its upvars as part + // of its type, and those types are resolved at + // the same time. 
+ // + // FIXME(#32286) logic seems false if no upvars + pending_obligation.stalled_on = + trait_ref_type_vars(self.selcx, data.to_poly_trait_ref()); + + debug!("process_predicate: pending obligation {:?} now stalled on {:?}", + self.selcx.infcx().resolve_type_vars_if_possible(obligation), + pending_obligation.stalled_on); + + ProcessResult::Unchanged + } + Err(selection_err) => { + info!("selecting trait `{:?}` at depth {} yielded Err", + data, obligation.recursion_depth); + + ProcessResult::Error(CodeSelectionError(selection_err)) + } + } + } + + ty::Predicate::RegionOutlives(ref binder) => { + match self.selcx.infcx().region_outlives_predicate(&obligation.cause, binder) { + Ok(()) => ProcessResult::Changed(vec![]), + Err(_) => ProcessResult::Error(CodeSelectionError(Unimplemented)), + } + } + + ty::Predicate::TypeOutlives(ref binder) => { + // Check if there are higher-ranked regions. + match binder.no_late_bound_regions() { + // If there are, inspect the underlying type further. + None => { + // Convert from `Binder>` to `Binder`. + let binder = binder.map_bound_ref(|pred| pred.0); + + // Check if the type has any bound regions. + match binder.no_late_bound_regions() { + // If so, this obligation is an error (for now). Eventually we should be + // able to support additional cases here, like `for<'a> &'a str: 'a`. + None => { + ProcessResult::Error(CodeSelectionError(Unimplemented)) + } + // Otherwise, we have something of the form + // `for<'a> T: 'a where 'a not in T`, which we can treat as + // `T: 'static`. + Some(t_a) => { + let r_static = self.selcx.tcx().types.re_static; + if self.register_region_obligations { + self.selcx.infcx().register_region_obligation( + obligation.cause.body_id, + RegionObligation { + sup_type: t_a, + sub_region: r_static, + cause: obligation.cause.clone(), + }); + } + ProcessResult::Changed(vec![]) + } + } + } + // If there aren't, register the obligation. 
+ Some(ty::OutlivesPredicate(t_a, r_b)) => { + if self.register_region_obligations { + self.selcx.infcx().register_region_obligation( + obligation.cause.body_id, + RegionObligation { + sup_type: t_a, + sub_region: r_b, + cause: obligation.cause.clone() + }); + } + ProcessResult::Changed(vec![]) + } + } + } + + ty::Predicate::Projection(ref data) => { + let project_obligation = obligation.with(data.clone()); + match project::poly_project_and_unify_type(self.selcx, &project_obligation) { + Ok(None) => { + let tcx = self.selcx.tcx(); + pending_obligation.stalled_on = + trait_ref_type_vars(self.selcx, data.to_poly_trait_ref(tcx)); + ProcessResult::Unchanged + } + Ok(Some(os)) => ProcessResult::Changed(mk_pending(os)), + Err(e) => ProcessResult::Error(CodeProjectionError(e)) + } + } + + ty::Predicate::ObjectSafe(trait_def_id) => { + if !self.selcx.tcx().is_object_safe(trait_def_id) { + ProcessResult::Error(CodeSelectionError(Unimplemented)) + } else { + ProcessResult::Changed(vec![]) + } + } + + ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { + match self.selcx.infcx().closure_kind(closure_def_id, closure_substs) { + Some(closure_kind) => { + if closure_kind.extends(kind) { + ProcessResult::Changed(vec![]) + } else { + ProcessResult::Error(CodeSelectionError(Unimplemented)) + } + } + None => { + ProcessResult::Unchanged + } + } + } + + ty::Predicate::WellFormed(ty) => { + match ty::wf::obligations(self.selcx.infcx(), + obligation.param_env, + obligation.cause.body_id, + ty, obligation.cause.span) { + None => { + pending_obligation.stalled_on = vec![ty]; + ProcessResult::Unchanged + } + Some(os) => ProcessResult::Changed(mk_pending(os)) + } + } + + ty::Predicate::Subtype(ref subtype) => { + match self.selcx.infcx().subtype_predicate(&obligation.cause, + obligation.param_env, + subtype) { + None => { + // None means that both are unresolved. 
+ pending_obligation.stalled_on = vec![subtype.skip_binder().a, + subtype.skip_binder().b]; + ProcessResult::Unchanged + } + Some(Ok(ok)) => { + ProcessResult::Changed(mk_pending(ok.obligations)) + } + Some(Err(err)) => { + let expected_found = ExpectedFound::new(subtype.skip_binder().a_is_expected, + subtype.skip_binder().a, + subtype.skip_binder().b); + ProcessResult::Error( + FulfillmentErrorCode::CodeSubtypeError(expected_found, err)) + } + } + } + + ty::Predicate::ConstEvaluatable(def_id, substs) => { + match self.selcx.tcx().lift_to_global(&obligation.param_env) { + None => { + ProcessResult::Unchanged + } + Some(param_env) => { + match self.selcx.tcx().lift_to_global(&substs) { + Some(substs) => { + let instance = ty::Instance::resolve( + self.selcx.tcx().global_tcx(), + param_env, + def_id, + substs, + ); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None, + }; + match self.selcx.tcx().at(obligation.cause.span) + .const_eval(param_env.and(cid)) { + Ok(_) => ProcessResult::Changed(vec![]), + Err(err) => ProcessResult::Error( + CodeSelectionError(ConstEvalFailure(err))) + } + } else { + ProcessResult::Error( + CodeSelectionError(ConstEvalFailure(ConstEvalErr { + span: obligation.cause.span, + error: EvalErrorKind::TooGeneric.into(), + stacktrace: vec![], + }.into())) + ) + } + }, + None => { + pending_obligation.stalled_on = substs.types().collect(); + ProcessResult::Unchanged + } + } + } + } + } + } } fn process_backedge<'c, I>(&mut self, cycle: I, @@ -288,261 +544,6 @@ fn trait_ref_type_vars<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 't .collect() } -/// Processes a predicate obligation and returns either: -/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true -/// - `Ok(None)` if we don't have enough info to be sure -/// - `Err` if the predicate does not hold -fn process_predicate<'a, 'gcx, 'tcx>( - selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, - pending_obligation: &mut 
PendingPredicateObligation<'tcx>, - register_region_obligations: bool) - -> Result>>, - FulfillmentErrorCode<'tcx>> -{ - // if we were stalled on some unresolved variables, first check - // whether any of them have been resolved; if not, don't bother - // doing more work yet - if !pending_obligation.stalled_on.is_empty() { - if pending_obligation.stalled_on.iter().all(|&ty| { - let resolved_ty = selcx.infcx().shallow_resolve(&ty); - resolved_ty == ty // nothing changed here - }) { - debug!("process_predicate: pending obligation {:?} still stalled on {:?}", - selcx.infcx().resolve_type_vars_if_possible(&pending_obligation.obligation), - pending_obligation.stalled_on); - return Ok(None); - } - pending_obligation.stalled_on = vec![]; - } - - let obligation = &mut pending_obligation.obligation; - - if obligation.predicate.has_infer_types() { - obligation.predicate = selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate); - } - - match obligation.predicate { - ty::Predicate::Trait(ref data) => { - let trait_obligation = obligation.with(data.clone()); - - if data.is_global() { - // no type variables present, can use evaluation for better caching. - // FIXME: consider caching errors too. - if - // make defaulted unit go through the slow path for better warnings, - // please remove this when the warnings are removed. 
- !trait_obligation.predicate.skip_binder().self_ty().is_defaulted_unit() && - selcx.evaluate_obligation_conservatively(&obligation) { - debug!("selecting trait `{:?}` at depth {} evaluated to holds", - data, obligation.recursion_depth); - return Ok(Some(vec![])) - } - } - - match selcx.select(&trait_obligation) { - Ok(Some(vtable)) => { - debug!("selecting trait `{:?}` at depth {} yielded Ok(Some)", - data, obligation.recursion_depth); - Ok(Some(vtable.nested_obligations())) - } - Ok(None) => { - debug!("selecting trait `{:?}` at depth {} yielded Ok(None)", - data, obligation.recursion_depth); - - // This is a bit subtle: for the most part, the - // only reason we can fail to make progress on - // trait selection is because we don't have enough - // information about the types in the trait. One - // exception is that we sometimes haven't decided - // what kind of closure a closure is. *But*, in - // that case, it turns out, the type of the - // closure will also change, because the closure - // also includes references to its upvars as part - // of its type, and those types are resolved at - // the same time. 
- // - // FIXME(#32286) logic seems false if no upvars - pending_obligation.stalled_on = - trait_ref_type_vars(selcx, data.to_poly_trait_ref()); - - debug!("process_predicate: pending obligation {:?} now stalled on {:?}", - selcx.infcx().resolve_type_vars_if_possible(obligation), - pending_obligation.stalled_on); - - Ok(None) - } - Err(selection_err) => { - info!("selecting trait `{:?}` at depth {} yielded Err", - data, obligation.recursion_depth); - - Err(CodeSelectionError(selection_err)) - } - } - } - - ty::Predicate::Equate(ref binder) => { - match selcx.infcx().equality_predicate(&obligation.cause, - obligation.param_env, - binder) { - Ok(InferOk { obligations, value: () }) => { - Ok(Some(obligations)) - }, - Err(_) => Err(CodeSelectionError(Unimplemented)), - } - } - - ty::Predicate::RegionOutlives(ref binder) => { - match selcx.infcx().region_outlives_predicate(&obligation.cause, binder) { - Ok(()) => Ok(Some(Vec::new())), - Err(_) => Err(CodeSelectionError(Unimplemented)), - } - } - - ty::Predicate::TypeOutlives(ref binder) => { - // Check if there are higher-ranked regions. - match binder.no_late_bound_regions() { - // If there are, inspect the underlying type further. - None => { - // Convert from `Binder>` to `Binder`. - let binder = binder.map_bound_ref(|pred| pred.0); - - // Check if the type has any bound regions. - match binder.no_late_bound_regions() { - // If so, this obligation is an error (for now). Eventually we should be - // able to support additional cases here, like `for<'a> &'a str: 'a`. - None => { - Err(CodeSelectionError(Unimplemented)) - } - // Otherwise, we have something of the form - // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`. 
- Some(t_a) => { - let r_static = selcx.tcx().types.re_static; - if register_region_obligations { - selcx.infcx().register_region_obligation( - obligation.cause.body_id, - RegionObligation { - sup_type: t_a, - sub_region: r_static, - cause: obligation.cause.clone(), - }); - } - Ok(Some(vec![])) - } - } - } - // If there aren't, register the obligation. - Some(ty::OutlivesPredicate(t_a, r_b)) => { - if register_region_obligations { - selcx.infcx().register_region_obligation( - obligation.cause.body_id, - RegionObligation { - sup_type: t_a, - sub_region: r_b, - cause: obligation.cause.clone() - }); - } - Ok(Some(vec![])) - } - } - } - - ty::Predicate::Projection(ref data) => { - let project_obligation = obligation.with(data.clone()); - match project::poly_project_and_unify_type(selcx, &project_obligation) { - Ok(None) => { - let tcx = selcx.tcx(); - pending_obligation.stalled_on = - trait_ref_type_vars(selcx, data.to_poly_trait_ref(tcx)); - Ok(None) - } - Ok(v) => Ok(v), - Err(e) => Err(CodeProjectionError(e)) - } - } - - ty::Predicate::ObjectSafe(trait_def_id) => { - if !selcx.tcx().is_object_safe(trait_def_id) { - Err(CodeSelectionError(Unimplemented)) - } else { - Ok(Some(Vec::new())) - } - } - - ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { - match selcx.infcx().closure_kind(closure_def_id, closure_substs) { - Some(closure_kind) => { - if closure_kind.extends(kind) { - Ok(Some(vec![])) - } else { - Err(CodeSelectionError(Unimplemented)) - } - } - None => { - Ok(None) - } - } - } - - ty::Predicate::WellFormed(ty) => { - match ty::wf::obligations(selcx.infcx(), - obligation.param_env, - obligation.cause.body_id, - ty, obligation.cause.span) { - None => { - pending_obligation.stalled_on = vec![ty]; - Ok(None) - } - s => Ok(s) - } - } - - ty::Predicate::Subtype(ref subtype) => { - match selcx.infcx().subtype_predicate(&obligation.cause, - obligation.param_env, - subtype) { - None => { - // none means that both are unresolved - 
pending_obligation.stalled_on = vec![subtype.skip_binder().a, - subtype.skip_binder().b]; - Ok(None) - } - Some(Ok(ok)) => { - Ok(Some(ok.obligations)) - } - Some(Err(err)) => { - let expected_found = ExpectedFound::new(subtype.skip_binder().a_is_expected, - subtype.skip_binder().a, - subtype.skip_binder().b); - Err(FulfillmentErrorCode::CodeSubtypeError(expected_found, err)) - } - } - } - - ty::Predicate::ConstEvaluatable(def_id, substs) => { - match selcx.tcx().lift_to_global(&obligation.param_env) { - None => { - Ok(None) - } - Some(param_env) => { - match selcx.tcx().lift_to_global(&substs) { - None => { - pending_obligation.stalled_on = substs.types().collect(); - Ok(None) - } - Some(substs) => { - match selcx.tcx().at(obligation.cause.span) - .const_eval(param_env.and((def_id, substs))) { - Ok(_) => Ok(Some(vec![])), - Err(e) => Err(CodeSelectionError(ConstEvalFailure(e))) - } - } - } - } - } - } - } -} - fn to_fulfillment_error<'tcx>( error: Error, FulfillmentErrorCode<'tcx>>) -> FulfillmentError<'tcx> diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs index fd47e09aad7f..ef14d6d05c20 100644 --- a/src/librustc/traits/mod.rs +++ b/src/librustc/traits/mod.rs @@ -8,39 +8,46 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Trait Resolution. See README.md for an overview of how this works. +//! Trait Resolution. See [rustc guide] for more info on how this works. +//! +//! 
[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html pub use self::SelectionError::*; pub use self::FulfillmentErrorCode::*; pub use self::Vtable::*; pub use self::ObligationCauseCode::*; +use chalk_engine; use hir; use hir::def_id::DefId; use infer::outlives::env::OutlivesEnvironment; -use middle::const_val::ConstEvalErr; use middle::region; +use mir::interpret::ConstEvalErr; use ty::subst::Substs; -use ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable, ToPredicate}; +use ty::{self, AdtKind, Slice, Ty, TyCtxt, GenericParamDefKind, ToPredicate}; use ty::error::{ExpectedFound, TypeError}; +use ty::fold::{TypeFolder, TypeFoldable, TypeVisitor}; use infer::{InferCtxt}; +use rustc_data_structures::sync::Lrc; +use std::fmt::Debug; use std::rc::Rc; use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; pub use self::coherence::{orphan_check, overlapping_impls, OrphanCheckErr, OverlapResult}; -pub use self::fulfill::FulfillmentContext; +pub use self::fulfill::{FulfillmentContext, PendingPredicateObligation}; pub use self::project::MismatchedProjectionTypes; -pub use self::project::{normalize, normalize_projection_type, Normalized}; -pub use self::project::{ProjectionCache, ProjectionCacheSnapshot, Reveal}; +pub use self::project::{normalize, normalize_projection_type, poly_project_and_unify_type}; +pub use self::project::{ProjectionCache, ProjectionCacheSnapshot, Reveal, Normalized}; pub use self::object_safety::ObjectSafetyViolation; pub use self::object_safety::MethodViolationCode; pub use self::on_unimplemented::{OnUnimplementedDirective, OnUnimplementedNote}; pub use self::select::{EvaluationCache, SelectionContext, SelectionCache}; -pub use self::select::IntercrateAmbiguityCause; +pub use self::select::{EvaluationResult, IntercrateAmbiguityCause, OverflowError}; pub use self::specialize::{OverlapError, specialization_graph, translate_substs}; pub use self::specialize::{SpecializesCache, find_associated_item}; +pub use 
self::engine::{TraitEngine, TraitEngineExt}; pub use self::util::elaborate_predicates; pub use self::util::supertraits; pub use self::util::Supertraits; @@ -48,8 +55,11 @@ pub use self::util::supertrait_def_ids; pub use self::util::SupertraitDefIds; pub use self::util::transitive_bounds; +#[allow(dead_code)] +pub mod auto_trait; mod coherence; -mod error_reporting; +pub mod error_reporting; +mod engine; mod fulfill; mod project; mod object_safety; @@ -57,9 +67,11 @@ mod on_unimplemented; mod select; mod specialize; mod structural_impls; -pub mod trans; +pub mod codegen; mod util; +pub mod query; + // Whether to enable bug compatibility with issue #43355 #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum IntercrateMode { @@ -67,25 +79,49 @@ pub enum IntercrateMode { Fixed } +// The mode that trait queries run in +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum TraitQueryMode { + // Standard/un-canonicalized queries get accurate + // spans etc. passed in and hence can do reasonable + // error reporting on their own. + Standard, + // Canonicalized queries get dummy spans and hence + // must generally propagate errors to + // pre-canonicalization callsites. + Canonical, +} + /// An `Obligation` represents some trait reference (e.g. `int:Eq`) for /// which the vtable must be found. The process of finding a vtable is /// called "resolving" the `Obligation`. This process consists of /// either identifying an `impl` (e.g., `impl Eq for int`) that /// provides the required vtable, or else finding a bound that is in /// scope. The eventual result is usually a `Selection` (defined below). -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Hash)] pub struct Obligation<'tcx, T> { + /// Why do we have to prove this thing? pub cause: ObligationCause<'tcx>, + + /// In which environment should we prove this thing? pub param_env: ty::ParamEnv<'tcx>, - pub recursion_depth: usize, + + /// What are we trying to prove? 
pub predicate: T, + + /// If we started proving this as a result of trying to prove + /// something else, track the total depth to ensure termination. + /// If this goes over a certain threshold, we abort compilation -- + /// in such cases, we can not say whether or not the predicate + /// holds for certain. Stupid halting problem. Such a drag. + pub recursion_depth: usize, } pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>; pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>; /// Why did we incur this obligation? Used for error reporting. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct ObligationCause<'tcx> { pub span: Span, @@ -100,7 +136,20 @@ pub struct ObligationCause<'tcx> { pub code: ObligationCauseCode<'tcx> } -#[derive(Clone, Debug, PartialEq, Eq)] +impl<'tcx> ObligationCause<'tcx> { + pub fn span<'a, 'gcx>(&self, tcx: &TyCtxt<'a, 'gcx, 'tcx>) -> Span { + match self.code { + ObligationCauseCode::CompareImplMethodObligation { .. } | + ObligationCauseCode::MainFunctionType | + ObligationCauseCode::StartFunctionType => { + tcx.sess.codemap().def_span(self.span) + } + _ => self.span, + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum ObligationCauseCode<'tcx> { /// Not well classified or should be obvious from span. MiscObligation, @@ -136,13 +185,17 @@ pub enum ObligationCauseCode<'tcx> { StructInitializerSized, /// Type of each variable must be Sized VariableType(ast::NodeId), + /// Argument type must be Sized + SizedArgumentType, /// Return type must be Sized SizedReturnType, + /// Yield type must be Sized + SizedYieldType, /// [T,..n] --> T must be Copy RepeatVec, - /// Types of fields (other than the last) in a struct must be sized. - FieldSized(AdtKind), + /// Types of fields (other than the last, except for packed structs) in a struct must be sized. + FieldSized { adt_kind: AdtKind, last: bool }, /// Constant expressions must be sized. 
ConstSized, @@ -175,9 +228,6 @@ pub enum ObligationCauseCode<'tcx> { /// Computing common supertype of an if expression with no else counter-part IfExpressionWithNoElse, - /// `where a == b` - EquatePredicate, - /// `main` has wrong type MainFunctionType, @@ -198,9 +248,12 @@ pub enum ObligationCauseCode<'tcx> { /// Block implicit return BlockTailExpression(ast::NodeId), + + /// #[feature(trivial_bounds)] is not enabled + TrivialBound, } -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct DerivedObligationCause<'tcx> { /// The trait reference of the parent obligation that led to the /// current obligation. Note that only trait obligations lead to @@ -216,6 +269,111 @@ pub type Obligations<'tcx, O> = Vec>; pub type PredicateObligations<'tcx> = Vec>; pub type TraitObligations<'tcx> = Vec>; +/// The following types: +/// * `WhereClause` +/// * `WellFormed` +/// * `FromEnv` +/// * `DomainGoal` +/// * `Goal` +/// * `Clause` +/// are used for representing the trait system in the form of +/// logic programming clauses. They are part of the interface +/// for the chalk SLG solver. 
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum WhereClause<'tcx> { + Implemented(ty::TraitPredicate<'tcx>), + ProjectionEq(ty::ProjectionPredicate<'tcx>), + RegionOutlives(ty::RegionOutlivesPredicate<'tcx>), + TypeOutlives(ty::TypeOutlivesPredicate<'tcx>), +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum WellFormed<'tcx> { + Trait(ty::TraitPredicate<'tcx>), + Ty(Ty<'tcx>), +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum FromEnv<'tcx> { + Trait(ty::TraitPredicate<'tcx>), + Ty(Ty<'tcx>), +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum DomainGoal<'tcx> { + Holds(WhereClause<'tcx>), + WellFormed(WellFormed<'tcx>), + FromEnv(FromEnv<'tcx>), + Normalize(ty::ProjectionPredicate<'tcx>), +} + +pub type PolyDomainGoal<'tcx> = ty::Binder>; + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum QuantifierKind { + Universal, + Existential, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum Goal<'tcx> { + Implies(Clauses<'tcx>, &'tcx Goal<'tcx>), + And(&'tcx Goal<'tcx>, &'tcx Goal<'tcx>), + Not(&'tcx Goal<'tcx>), + DomainGoal(DomainGoal<'tcx>), + Quantified(QuantifierKind, ty::Binder<&'tcx Goal<'tcx>>), + CannotProve, +} + +pub type Goals<'tcx> = &'tcx Slice>; + +impl<'tcx> DomainGoal<'tcx> { + pub fn into_goal(self) -> Goal<'tcx> { + Goal::DomainGoal(self) + } +} + +impl<'tcx> Goal<'tcx> { + pub fn from_poly_domain_goal<'a>( + domain_goal: PolyDomainGoal<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + ) -> Goal<'tcx> { + match domain_goal.no_late_bound_regions() { + Some(p) => p.into_goal(), + None => Goal::Quantified( + QuantifierKind::Universal, + domain_goal.map_bound(|p| tcx.mk_goal(p.into_goal())) + ), + } + } +} + +/// This matches the definition from Page 7 of "A Proof Procedure for the Logic of Hereditary +/// Harrop Formulas". 
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum Clause<'tcx> { + Implies(ProgramClause<'tcx>), + ForAll(ty::Binder>), +} + +/// Multiple clauses. +pub type Clauses<'tcx> = &'tcx Slice>; + +/// A "program clause" has the form `D :- G1, ..., Gn`. It is saying +/// that the domain goal `D` is true if `G1...Gn` are provable. This +/// is equivalent to the implication `G1..Gn => D`; we usually write +/// it with the reverse implication operator `:-` to emphasize the way +/// that programs are actually solved (via backchaining, which starts +/// with the goal to solve and proceeds from there). +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct ProgramClause<'tcx> { + /// This goal will be considered true... + pub goal: DomainGoal<'tcx>, + + /// ...if we can prove these hypotheses (there may be no hypotheses at all): + pub hypotheses: Goals<'tcx>, +} + pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>; #[derive(Clone,Debug)] @@ -225,7 +383,8 @@ pub enum SelectionError<'tcx> { ty::PolyTraitRef<'tcx>, ty::error::TypeError<'tcx>), TraitNotObjectSafe(DefId), - ConstEvalFailure(ConstEvalErr<'tcx>), + ConstEvalFailure(Lrc>), + Overflow, } pub struct FulfillmentError<'tcx> { @@ -289,7 +448,7 @@ pub type SelectionResult<'tcx, T> = Result, SelectionError<'tcx>>; /// ### The type parameter `N` /// /// See explanation on `VtableImplData`. -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub enum Vtable<'tcx, N> { /// Vtable identifying a particular impl. VtableImpl(VtableImplData<'tcx, N>), @@ -331,8 +490,8 @@ pub enum Vtable<'tcx, N> { /// /// The type parameter `N` indicates the type used for "nested /// obligations" that are required by the impl. During type check, this -/// is `Obligation`, as one might expect. During trans, however, this -/// is `()`, because trans only requires a shallow resolution of an +/// is `Obligation`, as one might expect. 
During codegen, however, this +/// is `()`, because codegen only requires a shallow resolution of an /// impl, and nested obligations are satisfied later. #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub struct VtableImplData<'tcx, N> { @@ -343,8 +502,8 @@ pub struct VtableImplData<'tcx, N> { #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub struct VtableGeneratorData<'tcx, N> { - pub closure_def_id: DefId, - pub substs: ty::ClosureSubsts<'tcx>, + pub generator_def_id: DefId, + pub substs: ty::GeneratorSubsts<'tcx>, /// Nested obligations. This can be non-empty if the generator /// signature contains associated types. pub nested: Vec @@ -359,13 +518,13 @@ pub struct VtableClosureData<'tcx, N> { pub nested: Vec } -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub struct VtableAutoImplData { pub trait_def_id: DefId, pub nested: Vec } -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] pub struct VtableBuiltinData { pub nested: Vec } @@ -427,8 +586,7 @@ pub fn type_known_to_meet_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx predicate: trait_ref.to_predicate(), }; - let result = SelectionContext::new(infcx) - .evaluate_obligation_conservatively(&obligation); + let result = infcx.predicate_must_hold(&obligation); debug!("type_known_to_meet_ty={:?} bound={} => {:?}", ty, infcx.tcx.item_path_str(def_id), result); @@ -503,17 +661,8 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let predicates: Vec<_> = util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.to_vec()) - .filter(|p| !p.is_global()) // (*) .collect(); - // (*) Any predicate like `i32: Trait` or whatever doesn't - // need to be in the *environment* to be proven, so screen those - // out. This is important for the soundness of inter-fn - // caching. 
Note though that we should probably check that these - // predicates hold at the point where the environment is - // constructed, but I am not currently doing so out of laziness. - // -nmatsakis - debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates); @@ -536,7 +685,7 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // we move over to lazy normalization *anyway*. let fulfill_cx = FulfillmentContext::new_ignoring_regions(); - let predicates = match fully_normalize_with_fulfillcx( + let predicates = match fully_normalize( &infcx, fulfill_cx, cause, @@ -551,7 +700,7 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ) { Ok(predicates) => predicates, Err(errors) => { - infcx.report_fulfillment_errors(&errors, None); + infcx.report_fulfillment_errors(&errors, None, false); // An unnormalized env is better than nothing. return elaborated_env; } @@ -596,31 +745,7 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }) } -pub fn fully_normalize<'a, 'gcx, 'tcx, T>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - cause: ObligationCause<'tcx>, - param_env: ty::ParamEnv<'tcx>, - value: &T) - -> Result>> - where T : TypeFoldable<'tcx> -{ - // FIXME (@jroesch) ISSUE 26721 - // I'm not sure if this is a bug or not, needs further investigation. - // It appears that by reusing the fulfillment_cx here we incur more - // obligations and later trip an asssertion on regionck.rs line 337. - // - // The two possibilities I see is: - // - normalization is not actually fully happening and we - // have a bug else where - // - we are adding a duplicate bound into the list causing - // its size to change. - // - // I think we should probably land this refactor and then come - // back to this is a follow-up patch. 
- let fulfillcx = FulfillmentContext::new(); - fully_normalize_with_fulfillcx(infcx, fulfillcx, cause, param_env, value) -} - -pub fn fully_normalize_with_fulfillcx<'a, 'gcx, 'tcx, T>( +pub fn fully_normalize<'a, 'gcx, 'tcx, T>( infcx: &InferCtxt<'a, 'gcx, 'tcx>, mut fulfill_cx: FulfillmentContext<'tcx>, cause: ObligationCause<'tcx>, @@ -641,13 +766,7 @@ pub fn fully_normalize_with_fulfillcx<'a, 'gcx, 'tcx, T>( } debug!("fully_normalize: select_all_or_error start"); - match fulfill_cx.select_all_or_error(infcx) { - Ok(()) => { } - Err(e) => { - debug!("fully_normalize: error={:?}", e); - return Err(e); - } - } + fulfill_cx.select_all_or_error(infcx)?; debug!("fully_normalize: select_all_or_error complete"); let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value); debug!("fully_normalize: resolved_value={:?}", resolved_value); @@ -666,7 +785,7 @@ fn normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, predicates); let result = tcx.infer_ctxt().enter(|infcx| { - let param_env = ty::ParamEnv::empty(Reveal::All); + let param_env = ty::ParamEnv::reveal_all(); let mut selcx = SelectionContext::new(&infcx); let mut fulfill_cx = FulfillmentContext::new(); let cause = ObligationCause::dummy(); @@ -709,11 +828,11 @@ fn substitute_normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx fn vtable_methods<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) - -> Rc)>>> + -> Lrc)>>> { debug!("vtable_methods({:?})", trait_ref); - Rc::new( + Lrc::new( supertraits(tcx, trait_ref).flat_map(move |trait_ref| { let trait_methods = tcx.associated_items(trait_ref.def_id()) .filter(|item| item.kind == ty::AssociatedKind::Method); @@ -732,19 +851,29 @@ fn vtable_methods<'a, 'tcx>( // the method may have some early-bound lifetimes, add // regions for those - let substs = Substs::for_item(tcx, def_id, - |_, _| tcx.types.re_erased, - |def, _| trait_ref.substs().type_for_def(def)); + let substs = 
trait_ref.map_bound(|trait_ref| { + Substs::for_item(tcx, def_id, |param, _| { + match param.kind { + GenericParamDefKind::Lifetime => tcx.types.re_erased.into(), + GenericParamDefKind::Type {..} => { + trait_ref.substs[param.index as usize] + } + } + }) + }); // the trait type may have higher-ranked lifetimes in it; // so erase them if they appear, so that we get the type // at some particular call site - let substs = tcx.erase_late_bound_regions_and_normalize(&ty::Binder(substs)); + let substs = tcx.normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &substs + ); // It's possible that the method relies on where clauses that // do not hold for this particular set of type parameters. // Note that this method could then never be called, so we - // do not want to try and trans it, in that case (see #23435). + // do not want to try and codegen it, in that case (see #23435). let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs); if !normalize_and_test_predicates(tcx, predicates.predicates) { debug!("vtable_methods: predicates do not hold"); @@ -822,19 +951,6 @@ impl<'tcx, N> Vtable<'tcx, N> { } } - fn nested_obligations_mut(&mut self) -> &mut Vec { - match self { - &mut VtableImpl(ref mut i) => &mut i.nested, - &mut VtableParam(ref mut n) => n, - &mut VtableBuiltin(ref mut i) => &mut i.nested, - &mut VtableAutoImpl(ref mut d) => &mut d.nested, - &mut VtableGenerator(ref mut c) => &mut c.nested, - &mut VtableClosure(ref mut c) => &mut c.nested, - &mut VtableObject(ref mut d) => &mut d.nested, - &mut VtableFnPointer(ref mut d) => &mut d.nested, - } - } - pub fn map(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M { match self { VtableImpl(i) => VtableImpl(VtableImplData { @@ -860,7 +976,7 @@ impl<'tcx, N> Vtable<'tcx, N> { nested: p.nested.into_iter().map(f).collect(), }), VtableGenerator(c) => VtableGenerator(VtableGeneratorData { - closure_def_id: c.closure_def_id, + generator_def_id: c.generator_def_id, substs: c.substs, 
nested: c.nested.into_iter().map(f).collect(), }), @@ -884,18 +1000,45 @@ impl<'tcx> FulfillmentError<'tcx> { impl<'tcx> TraitObligation<'tcx> { fn self_ty(&self) -> ty::Binder> { - ty::Binder(self.predicate.skip_binder().self_ty()) + self.predicate.map_bound(|p| p.self_ty()) } } -pub fn provide(providers: &mut ty::maps::Providers) { - *providers = ty::maps::Providers { +pub fn provide(providers: &mut ty::query::Providers) { + *providers = ty::query::Providers { is_object_safe: object_safety::is_object_safe_provider, specialization_graph_of: specialize::specialization_graph_provider, specializes: specialize::specializes, - trans_fulfill_obligation: trans::trans_fulfill_obligation, + codegen_fulfill_obligation: codegen::codegen_fulfill_obligation, vtable_methods, substitute_normalize_and_test_predicates, ..*providers }; } + +pub trait ExClauseFold<'tcx> +where + Self: chalk_engine::context::Context + Clone, +{ + fn fold_ex_clause_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>( + ex_clause: &chalk_engine::ExClause, + folder: &mut F, + ) -> chalk_engine::ExClause; + + fn visit_ex_clause_with<'gcx: 'tcx, V: TypeVisitor<'tcx>>( + ex_clause: &chalk_engine::ExClause, + visitor: &mut V, + ) -> bool; +} + +pub trait ExClauseLift<'tcx> +where + Self: chalk_engine::context::Context + Clone, +{ + type LiftedExClause: Debug + 'tcx; + + fn lift_ex_clause_to_tcx<'a, 'gcx>( + ex_clause: &chalk_engine::ExClause, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ) -> Option; +} diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs index 4151661b5933..aa4f63675d73 100644 --- a/src/librustc/traits/object_safety.rs +++ b/src/librustc/traits/object_safety.rs @@ -20,14 +20,15 @@ use super::elaborate_predicates; use hir::def_id::DefId; +use lint; use traits; use ty::{self, Ty, TyCtxt, TypeFoldable}; -use ty::subst::Substs; use ty::util::ExplicitSelf; use std::borrow::Cow; use syntax::ast; +use syntax_pos::Span; -#[derive(Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Copy, 
Clone, Debug, PartialEq, Eq, Hash)] pub enum ObjectSafetyViolation { /// Self : Sized declared on the trait SizedSelf, @@ -56,6 +57,9 @@ impl ObjectSafetyViolation { ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelf) => format!("method `{}` references the `Self` type \ in its arguments or return type", name).into(), + ObjectSafetyViolation::Method(name, + MethodViolationCode::WhereClauseReferencesSelf(_)) => + format!("method `{}` references the `Self` type in where clauses", name).into(), ObjectSafetyViolation::Method(name, MethodViolationCode::Generic) => format!("method `{}` has generic type parameters", name).into(), ObjectSafetyViolation::Method(name, MethodViolationCode::NonStandardSelfType) => @@ -75,6 +79,9 @@ pub enum MethodViolationCode { /// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self` ReferencesSelf, + /// e.g. `fn foo(&self) where Self: Clone` + WhereClauseReferencesSelf(Span), + /// e.g., `fn foo()` Generic, @@ -91,13 +98,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn astconv_object_safety_violations(self, trait_def_id: DefId) -> Vec { - let mut violations = vec![]; - - for def_id in traits::supertrait_def_ids(self, trait_def_id) { - if self.predicates_reference_self(def_id, true) { - violations.push(ObjectSafetyViolation::SupertraitSelf); - } - } + let violations = traits::supertrait_def_ids(self, trait_def_id) + .filter(|&def_id| self.predicates_reference_self(def_id, true)) + .map(|_| ObjectSafetyViolation::SupertraitSelf) + .collect(); debug!("astconv_object_safety_violations(trait_def_id={:?}) = {:?}", trait_def_id, @@ -122,7 +126,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { .filter(|item| item.kind == ty::AssociatedKind::Method) .filter_map(|item| { self.object_safety_violation_for_method(trait_def_id, &item) - .map(|code| ObjectSafetyViolation::Method(item.name, code)) + .map(|code| ObjectSafetyViolation::Method(item.ident.name, code)) + }).filter(|violation| { + if let 
ObjectSafetyViolation::Method(_, + MethodViolationCode::WhereClauseReferencesSelf(span)) = violation { + // Using`CRATE_NODE_ID` is wrong, but it's hard to get a more precise id. + // It's also hard to get a use site span, so we use the method definition span. + self.lint_node_note( + lint::builtin::WHERE_CLAUSES_OBJECT_SAFETY, + ast::CRATE_NODE_ID, + *span, + &format!("the trait `{}` cannot be made into an object", + self.item_path_str(trait_def_id)), + &violation.error_msg()); + false + } else { + true + } }).collect(); // Check the trait itself. @@ -135,7 +155,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { violations.extend(self.associated_items(trait_def_id) .filter(|item| item.kind == ty::AssociatedKind::Const) - .map(|item| ObjectSafetyViolation::AssociatedConst(item.name))); + .map(|item| ObjectSafetyViolation::AssociatedConst(item.ident.name))); debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}", trait_def_id, @@ -149,10 +169,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { trait_def_id: DefId, supertraits_only: bool) -> bool { - let trait_ref = ty::Binder(ty::TraitRef { - def_id: trait_def_id, - substs: Substs::identity_for_item(self, trait_def_id) - }); + let trait_ref = ty::Binder::dummy(ty::TraitRef::identity(self, trait_def_id)); let predicates = if supertraits_only { self.super_predicates_of(trait_def_id) } else { @@ -175,7 +192,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Predicate::RegionOutlives(..) | ty::Predicate::ClosureKind(..) | ty::Predicate::Subtype(..) | - ty::Predicate::Equate(..) | ty::Predicate::ConstEvaluatable(..) => { false } @@ -200,11 +216,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { .any(|predicate| { match predicate { ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => { - trait_pred.0.self_ty().is_self() + trait_pred.skip_binder().self_ty().is_self() } ty::Predicate::Projection(..) | ty::Predicate::Trait(..) | - ty::Predicate::Equate(..) 
| ty::Predicate::Subtype(..) | ty::Predicate::RegionOutlives(..) | ty::Predicate::WellFormed(..) | @@ -247,7 +262,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return false; } - self.virtual_call_violation_for_method(trait_def_id, method).is_none() + match self.virtual_call_violation_for_method(trait_def_id, method) { + None | Some(MethodViolationCode::WhereClauseReferencesSelf(_)) => true, + Some(_) => false, + } } /// Returns `Some(_)` if this method cannot be called on a trait @@ -286,10 +304,22 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } // We can't monomorphize things like `fn foo(...)`. - if !self.generics_of(method.def_id).types.is_empty() { + if self.generics_of(method.def_id).own_counts().types != 0 { return Some(MethodViolationCode::Generic); } + if self.predicates_of(method.def_id).predicates.into_iter() + // A trait object can't claim to live more than the concrete type, + // so outlives predicates will always hold. + .filter(|p| p.to_opt_type_outlives().is_none()) + .collect::>() + // Do a shallow visit so that `contains_illegal_self_type_reference` + // may apply it's custom visiting. + .visit_tys_shallow(|t| self.contains_illegal_self_type_reference(trait_def_id, t)) { + let span = self.def_span(method.def_id); + return Some(MethodViolationCode::WhereClauseReferencesSelf(span)); + } + None } @@ -354,10 +384,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // Compute supertraits of current trait lazily. if supertraits.is_none() { - let trait_ref = ty::Binder(ty::TraitRef { - def_id: trait_def_id, - substs: Substs::identity_for_item(self, trait_def_id) - }); + let trait_ref = ty::Binder::bind( + ty::TraitRef::identity(self, trait_def_id), + ); supertraits = Some(traits::supertraits(self, trait_ref).collect()); } @@ -369,7 +398,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // direct equality here because all of these types // are part of the formal parameter listing, and // hence there should be no inference variables. 
- let projection_trait_ref = ty::Binder(data.trait_ref(self)); + let projection_trait_ref = ty::Binder::bind(data.trait_ref(self)); let is_supertrait_of_current_trait = supertraits.as_ref().unwrap().contains(&projection_trait_ref); @@ -389,7 +418,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub(super) fn is_object_safe_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - trait_def_id: DefId) - -> bool { + trait_def_id: DefId) -> bool { tcx.object_safety_violations(trait_def_id).is_empty() } diff --git a/src/librustc/traits/on_unimplemented.rs b/src/librustc/traits/on_unimplemented.rs index 757b078086d9..280ce75720bc 100644 --- a/src/librustc/traits/on_unimplemented.rs +++ b/src/librustc/traits/on_unimplemented.rs @@ -11,17 +11,17 @@ use fmt_macros::{Parser, Piece, Position}; use hir::def_id::DefId; -use ty::{self, TyCtxt}; +use ty::{self, TyCtxt, GenericParamDefKind}; use util::common::ErrorReported; use util::nodemap::FxHashMap; use syntax::ast::{MetaItem, NestedMetaItem}; use syntax::attr; use syntax_pos::Span; -use syntax_pos::symbol::InternedString; +use syntax_pos::symbol::LocalInternedString; #[derive(Clone, Debug)] -pub struct OnUnimplementedFormatString(InternedString); +pub struct OnUnimplementedFormatString(LocalInternedString); #[derive(Debug)] pub struct OnUnimplementedDirective { @@ -29,16 +29,18 @@ pub struct OnUnimplementedDirective { pub subcommands: Vec, pub message: Option, pub label: Option, + pub note: Option, } pub struct OnUnimplementedNote { pub message: Option, pub label: Option, + pub note: Option, } impl OnUnimplementedNote { pub fn empty() -> Self { - OnUnimplementedNote { message: None, label: None } + OnUnimplementedNote { message: None, label: None, note: None } } } @@ -89,6 +91,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { let mut message = None; let mut label = None; + let mut note = None; let mut subcommands = vec![]; for item in item_iter { if item.check_name("message") && message.is_none() { @@ -103,8 +106,14 @@ 
impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { tcx, trait_def_id, label_.as_str(), span)?); continue; } + } else if item.check_name("note") && note.is_none() { + if let Some(note_) = item.value_str() { + note = Some(OnUnimplementedFormatString::try_parse( + tcx, trait_def_id, note_.as_str(), span)?); + continue; + } } else if item.check_name("on") && is_root && - message.is_none() && label.is_none() + message.is_none() && label.is_none() && note.is_none() { if let Some(items) = item.meta_item_list() { if let Ok(subcommand) = @@ -122,13 +131,13 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { parse_error(tcx, item.span, "this attribute must have a valid value", "expected value here", - Some(r#"eg `#[rustc_on_unimplemented = "foo"]`"#)); + Some(r#"eg `#[rustc_on_unimplemented(message="foo")]`"#)); } if errored { Err(ErrorReported) } else { - Ok(OnUnimplementedDirective { condition, message, label, subcommands }) + Ok(OnUnimplementedDirective { condition, message, label, subcommands, note }) } } @@ -154,13 +163,14 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { message: None, subcommands: vec![], label: Some(OnUnimplementedFormatString::try_parse( - tcx, trait_def_id, value.as_str(), attr.span)?) 
+ tcx, trait_def_id, value.as_str(), attr.span)?), + note: None, })) } else { return Err(parse_error(tcx, attr.span, "`#[rustc_on_unimplemented]` requires a value", "value required here", - Some(r#"eg `#[rustc_on_unimplemented = "foo"]`"#))); + Some(r#"eg `#[rustc_on_unimplemented(message="foo")]`"#))); }; debug!("of_item({:?}/{:?}) = {:?}", trait_def_id, impl_def_id, result); result @@ -169,22 +179,21 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { pub fn evaluate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_ref: ty::TraitRef<'tcx>, - options: &[(&str, Option<&str>)]) + options: &[(String, Option)]) -> OnUnimplementedNote { let mut message = None; let mut label = None; - info!("evaluate({:?}, trait_ref={:?}, options={:?})", - self, trait_ref, options); + let mut note = None; + info!("evaluate({:?}, trait_ref={:?}, options={:?})", self, trait_ref, options); for command in self.subcommands.iter().chain(Some(self)).rev() { if let Some(ref condition) = command.condition { if !attr::eval_condition(condition, &tcx.sess.parse_sess, &mut |c| { - options.contains(&(&c.name().as_str(), - match c.value_str().map(|s| s.as_str()) { - Some(ref s) => Some(s), - None => None - })) + options.contains(&( + c.name().as_str().to_string(), + c.value_str().map(|s| s.as_str().to_string()) + )) }) { debug!("evaluate: skipping {:?} due to condition", command); continue @@ -198,11 +207,19 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { if let Some(ref label_) = command.label { label = Some(label_.clone()); } + + if let Some(ref note_) = command.note { + note = Some(note_.clone()); + } } + let options: FxHashMap = options.into_iter() + .filter_map(|(k, v)| v.as_ref().map(|v| (k.to_owned(), v.to_owned()))) + .collect(); OnUnimplementedNote { - label: label.map(|l| l.format(tcx, trait_ref)), - message: message.map(|m| m.format(tcx, trait_ref)) + label: label.map(|l| l.format(tcx, trait_ref, &options)), + message: message.map(|m| m.format(tcx, trait_ref, &options)), + note: note.map(|n| 
n.format(tcx, trait_ref, &options)), } } } @@ -210,7 +227,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { pub fn try_parse(tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_def_id: DefId, - from: InternedString, + from: LocalInternedString, err_sp: Span) -> Result { @@ -227,8 +244,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { { let name = tcx.item_name(trait_def_id); let generics = tcx.generics_of(trait_def_id); - let parser = Parser::new(&self.0); - let types = &generics.types; + let parser = Parser::new(&self.0, None); let mut result = Ok(()); for token in parser { match token { @@ -238,24 +254,25 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { Position::ArgumentNamed(s) if s == "Self" => (), // `{ThisTraitsName}` is allowed Position::ArgumentNamed(s) if s == name => (), + // `{from_method}` is allowed + Position::ArgumentNamed(s) if s == "from_method" => (), + // `{from_desugaring}` is allowed + Position::ArgumentNamed(s) if s == "from_desugaring" => (), // So is `{A}` if A is a type parameter - Position::ArgumentNamed(s) => match types.iter().find(|t| { - t.name == s + Position::ArgumentNamed(s) => match generics.params.iter().find(|param| { + param.name == s }) { Some(_) => (), None => { span_err!(tcx.sess, span, E0230, - "there is no type parameter \ - {} on trait {}", - s, name); + "there is no parameter `{}` on trait `{}`", s, name); result = Err(ErrorReported); } }, // `{:1}` and `{}` are not to be used Position::ArgumentIs(_) | Position::ArgumentImplicitlyIs(_) => { span_err!(tcx.sess, span, E0231, - "only named substitution \ - parameters are allowed"); + "only named substitution parameters are allowed"); result = Err(ErrorReported); } } @@ -267,18 +284,26 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { pub fn format(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - trait_ref: ty::TraitRef<'tcx>) + trait_ref: ty::TraitRef<'tcx>, + options: &FxHashMap) -> String { let name = 
tcx.item_name(trait_ref.def_id); let trait_str = tcx.item_path_str(trait_ref.def_id); let generics = tcx.generics_of(trait_ref.def_id); - let generic_map = generics.types.iter().map(|param| { - (param.name.as_str().to_string(), - trait_ref.substs.type_for_def(param).to_string()) + let generic_map = generics.params.iter().filter_map(|param| { + let value = match param.kind { + GenericParamDefKind::Type {..} => { + trait_ref.substs[param.index as usize].to_string() + }, + GenericParamDefKind::Lifetime => return None + }; + let name = param.name.to_string(); + Some((name, value)) }).collect::>(); + let empty_string = String::new(); - let parser = Parser::new(&self.0); + let parser = Parser::new(&self.0, None); parser.map(|p| { match p { Piece::String(s) => s, @@ -289,14 +314,20 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { &trait_str } None => { - bug!("broken on_unimplemented {:?} for {:?}: \ - no argument matching {:?}", - self.0, trait_ref, s) + if let Some(val) = options.get(s) { + val + } else if s == "from_desugaring" || s == "from_method" { + // don't break messages using these two arguments incorrectly + &empty_string + } else { + bug!("broken on_unimplemented {:?} for {:?}: \ + no argument matching {:?}", + self.0, trait_ref, s) + } } }, _ => { - bug!("broken on_unimplemented {:?} - bad \ - format arg", self.0) + bug!("broken on_unimplemented {:?} - bad format arg", self.0) } } } diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index 3342d13dd6e5..1224cdd76d85 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -16,6 +16,7 @@ use super::translate_substs; use super::Obligation; use super::ObligationCause; use super::PredicateObligation; +use super::Selection; use super::SelectionContext; use super::SelectionError; use super::VtableClosureData; @@ -27,9 +28,10 @@ use super::util; use hir::def_id::DefId; use infer::{InferCtxt, InferOk}; use infer::type_variable::TypeVariableOrigin; -use 
middle::const_val::ConstVal; +use mir::interpret::ConstValue; +use mir::interpret::{GlobalId}; use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap}; -use syntax::symbol::Symbol; +use syntax::ast::Ident; use ty::subst::{Subst, Substs}; use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder}; @@ -63,7 +65,7 @@ pub enum Reveal { /// } UserFacing, - /// At trans time, all monomorphic projections will succeed. + /// At codegen time, all monomorphic projections will succeed. /// Also, `impl Trait` is normalized to the concrete type, /// which has to be already collected by type-checking. /// @@ -110,12 +112,81 @@ enum ProjectionTyCandidate<'tcx> { TraitDef(ty::PolyProjectionPredicate<'tcx>), // from a "impl" (or a "pseudo-impl" returned by select) - Select, + Select(Selection<'tcx>), } -struct ProjectionTyCandidateSet<'tcx> { - vec: Vec>, - ambiguous: bool +enum ProjectionTyCandidateSet<'tcx> { + None, + Single(ProjectionTyCandidate<'tcx>), + Ambiguous, + Error(SelectionError<'tcx>), +} + +impl<'tcx> ProjectionTyCandidateSet<'tcx> { + fn mark_ambiguous(&mut self) { + *self = ProjectionTyCandidateSet::Ambiguous; + } + + fn mark_error(&mut self, err: SelectionError<'tcx>) { + *self = ProjectionTyCandidateSet::Error(err); + } + + // Returns true if the push was successful, or false if the candidate + // was discarded -- this could be because of ambiguity, or because + // a higher-priority candidate is already there. + fn push_candidate(&mut self, candidate: ProjectionTyCandidate<'tcx>) -> bool { + use self::ProjectionTyCandidateSet::*; + use self::ProjectionTyCandidate::*; + + // This wacky variable is just used to try and + // make code readable and avoid confusing paths. + // It is assigned a "value" of `()` only on those + // paths in which we wish to convert `*self` to + // ambiguous (and return false, because the candidate + // was not used). 
On other paths, it is not assigned, + // and hence if those paths *could* reach the code that + // comes after the match, this fn would not compile. + let convert_to_ambiguous; + + match self { + None => { + *self = Single(candidate); + return true; + } + + Single(current) => { + // Duplicates can happen inside ParamEnv. In the case, we + // perform a lazy deduplication. + if current == &candidate { + return false; + } + + // Prefer where-clauses. As in select, if there are multiple + // candidates, we prefer where-clause candidates over impls. This + // may seem a bit surprising, since impls are the source of + // "truth" in some sense, but in fact some of the impls that SEEM + // applicable are not, because of nested obligations. Where + // clauses are the safer choice. See the comment on + // `select::SelectionCandidate` and #21974 for more details. + match (current, candidate) { + (ParamEnv(..), ParamEnv(..)) => convert_to_ambiguous = (), + (ParamEnv(..), _) => return false, + (_, ParamEnv(..)) => { unreachable!(); } + (_, _) => convert_to_ambiguous = (), + } + } + + Ambiguous | Error(..) => { + return false; + } + } + + // We only ever get here when we moved from a single candidate + // to ambiguous. 
+ let () = convert_to_ambiguous; + *self = Ambiguous; + false + } } /// Evaluates constraints of the form: @@ -136,7 +207,7 @@ pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>( let infcx = selcx.infcx(); infcx.commit_if_ok(|snapshot| { let (skol_predicate, skol_map) = - infcx.skolemize_late_bound_regions(&obligation.predicate, snapshot); + infcx.skolemize_late_bound_regions(&obligation.predicate); let skol_obligation = obligation.with(skol_predicate); let r = match project_and_unify_type(selcx, &skol_obligation) { @@ -144,7 +215,10 @@ pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>( let span = obligation.cause.span; match infcx.leak_check(false, span, &skol_map, snapshot) { Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, result)), - Err(e) => Err(MismatchedProjectionTypes { err: e }), + Err(e) => { + debug!("poly_project_and_unify_type: leak check encountered error {:?}", e); + Err(MismatchedProjectionTypes { err: e }) + } } } Err(e) => { @@ -170,12 +244,14 @@ fn project_and_unify_type<'cx, 'gcx, 'tcx>( debug!("project_and_unify_type(obligation={:?})", obligation); - let Normalized { value: normalized_ty, mut obligations } = + let mut obligations = vec![]; + let normalized_ty = match opt_normalize_projection_type(selcx, obligation.param_env, obligation.predicate.projection_ty, obligation.cause.clone(), - obligation.recursion_depth) { + obligation.recursion_depth, + &mut obligations) { Some(n) => n, None => return Ok(None), }; @@ -191,7 +267,10 @@ fn project_and_unify_type<'cx, 'gcx, 'tcx>( obligations.extend(inferred_obligations); Ok(Some(obligations)) }, - Err(err) => Err(MismatchedProjectionTypes { err: err }), + Err(err) => { + debug!("project_and_unify_type: equating types encountered error {:?}", err); + Err(MismatchedProjectionTypes { err: err }) + } } } @@ -288,14 +367,28 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, let ty = ty.super_fold_with(self); match ty.sty { ty::TyAnon(def_id, substs) if 
!substs.has_escaping_regions() => { // (*) - // Only normalize `impl Trait` after type-checking, usually in trans. + // Only normalize `impl Trait` after type-checking, usually in codegen. match self.param_env.reveal { Reveal::UserFacing => ty, Reveal::All => { + let recursion_limit = *self.tcx().sess.recursion_limit.get(); + if self.depth >= recursion_limit { + let obligation = Obligation::with_depth( + self.cause.clone(), + recursion_limit, + self.param_env, + ty, + ); + self.selcx.infcx().report_overflow_error(&obligation, true); + } + let generic_ty = self.tcx().type_of(def_id); let concrete_ty = generic_ty.subst(self.tcx(), substs); - self.fold_ty(concrete_ty) + self.depth += 1; + let folded_ty = self.fold_ty(concrete_ty); + self.depth -= 1; + folded_ty } } } @@ -314,16 +407,15 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, // binder). It would be better to normalize in a // binding-aware fashion. - let Normalized { value: normalized_ty, obligations } = - normalize_projection_type(self.selcx, - self.param_env, - data.clone(), - self.cause.clone(), - self.depth); - debug!("AssociatedTypeNormalizer: depth={} normalized {:?} to {:?} \ - with {} add'l obligations", - self.depth, ty, normalized_ty, obligations.len()); - self.obligations.extend(obligations); + let normalized_ty = normalize_projection_type(self.selcx, + self.param_env, + data.clone(), + self.cause.clone(), + self.depth, + &mut self.obligations); + debug!("AssociatedTypeNormalizer: depth={} normalized {:?} to {:?}, \ + now with {} obligations", + self.depth, ty, normalized_ty, self.obligations.len()); normalized_ty } @@ -334,13 +426,18 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, } fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { - if let ConstVal::Unevaluated(def_id, substs) = constant.val { - if substs.needs_infer() { - let identity_substs = Substs::identity_for_item(self.tcx(), def_id); - 
let data = self.param_env.and((def_id, identity_substs)); - match self.tcx().lift_to_global(&data) { - Some(data) => { - match self.tcx().const_eval(data) { + if let ConstValue::Unevaluated(def_id, substs) = constant.val { + let tcx = self.selcx.tcx().global_tcx(); + if let Some(param_env) = self.tcx().lift_to_global(&self.param_env) { + if substs.needs_infer() || substs.has_skol() { + let identity_substs = Substs::identity_for_item(tcx, def_id); + let instance = ty::Instance::resolve(tcx, param_env, def_id, identity_substs); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None + }; + match tcx.const_eval(param_env.and(cid)) { Ok(evaluated) => { let evaluated = evaluated.subst(self.tcx(), substs); return self.fold_const(evaluated); @@ -348,18 +445,20 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, Err(_) => {} } } - None => {} - } - } else { - let data = self.param_env.and((def_id, substs)); - match self.tcx().lift_to_global(&data) { - Some(data) => { - match self.tcx().const_eval(data) { - Ok(evaluated) => return self.fold_const(evaluated), - Err(_) => {} + } else { + if let Some(substs) = self.tcx().lift_to_global(&substs) { + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None + }; + match tcx.const_eval(param_env.and(cid)) { + Ok(evaluated) => return self.fold_const(evaluated), + Err(_) => {} + } } } - None => {} } } } @@ -392,10 +491,12 @@ pub fn normalize_projection_type<'a, 'b, 'gcx, 'tcx>( param_env: ty::ParamEnv<'tcx>, projection_ty: ty::ProjectionTy<'tcx>, cause: ObligationCause<'tcx>, - depth: usize) - -> NormalizedTy<'tcx> + depth: usize, + obligations: &mut Vec>) + -> Ty<'tcx> { - opt_normalize_projection_type(selcx, param_env, projection_ty.clone(), cause.clone(), depth) + opt_normalize_projection_type(selcx, param_env, projection_ty.clone(), cause.clone(), depth, + 
obligations) .unwrap_or_else(move || { // if we bottom out in ambiguity, create a type variable // and a deferred predicate to resolve this when more type @@ -405,16 +506,14 @@ pub fn normalize_projection_type<'a, 'b, 'gcx, 'tcx>( let def_id = projection_ty.item_def_id; let ty_var = selcx.infcx().next_ty_var( TypeVariableOrigin::NormalizeProjectionType(tcx.def_span(def_id))); - let projection = ty::Binder(ty::ProjectionPredicate { + let projection = ty::Binder::dummy(ty::ProjectionPredicate { projection_ty, ty: ty_var }); let obligation = Obligation::with_depth( cause, depth + 1, param_env, projection.to_predicate()); - Normalized { - value: ty_var, - obligations: vec![obligation] - } + obligations.push(obligation); + ty_var }) } @@ -422,13 +521,20 @@ pub fn normalize_projection_type<'a, 'b, 'gcx, 'tcx>( /// as Trait>::Item`. The result is always a type (and possibly /// additional obligations). Returns `None` in the case of ambiguity, /// which indicates that there are unbound type variables. +/// +/// This function used to return `Option>`, which contains a +/// `Ty<'tcx>` and an obligations vector. But that obligation vector was very +/// often immediately appended to another obligations vector. So now this +/// function takes an obligations vector and appends to it directly, which is +/// slightly uglier but avoids the need for an extra short-lived allocation. 
fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, projection_ty: ty::ProjectionTy<'tcx>, cause: ObligationCause<'tcx>, - depth: usize) - -> Option> + depth: usize, + obligations: &mut Vec>) + -> Option> { let infcx = selcx.infcx(); @@ -493,14 +599,16 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( found cache entry: in-progress"); // But for now, let's classify this as an overflow: - let recursion_limit = selcx.tcx().sess.recursion_limit.get(); + let recursion_limit = *selcx.tcx().sess.recursion_limit.get(); let obligation = Obligation::with_depth(cause.clone(), recursion_limit, param_env, projection_ty); selcx.infcx().report_overflow_error(&obligation, false); } - Err(ProjectionCacheEntry::NormalizedTy(mut ty)) => { + Err(ProjectionCacheEntry::NormalizedTy(ty)) => { + // This is the hottest path in this function. + // // If we find the value in the cache, then return it along // with the obligations that went along with it. Note // that, when using a fulfillment context, these @@ -517,29 +625,32 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( // Once we have inferred everything we need to know, we // can ignore the `obligations` from that point on. if !infcx.any_unresolved_type_vars(&ty.value) { - infcx.projection_cache.borrow_mut().complete(cache_key); - ty.obligations = vec![]; + infcx.projection_cache.borrow_mut().complete_normalized(cache_key, &ty); + // No need to extend `obligations`. 
+ } else { + obligations.extend(ty.obligations); } - push_paranoid_cache_value_obligation(infcx, - param_env, - projection_ty, - cause, - depth, - &mut ty); - - return Some(ty); + obligations.push(get_paranoid_cache_value_obligation(infcx, + param_env, + projection_ty, + cause, + depth)); + return Some(ty.value); } Err(ProjectionCacheEntry::Error) => { debug!("opt_normalize_projection_type: \ found error"); - return Some(normalize_to_error(selcx, param_env, projection_ty, cause, depth)); + let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth); + obligations.extend(result.obligations); + return Some(result.value) } } let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty); match project_type(selcx, &obligation) { - Ok(ProjectedTy::Progress(Progress { ty: projected_ty, mut obligations })) => { + Ok(ProjectedTy::Progress(Progress { ty: projected_ty, + obligations: mut projected_obligations })) => { // if projection succeeded, then what we get out of this // is also non-normalized (consider: it was derived from // an impl, where-clause etc) and hence we must @@ -548,10 +659,10 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( debug!("opt_normalize_projection_type: \ projected_ty={:?} \ depth={} \ - obligations={:?}", + projected_obligations={:?}", projected_ty, depth, - obligations); + projected_obligations); let result = if projected_ty.has_projections() { let mut normalizer = AssociatedTypeNormalizer::new(selcx, @@ -565,22 +676,22 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( normalized_ty, depth); - obligations.extend(normalizer.obligations); + projected_obligations.extend(normalizer.obligations); Normalized { value: normalized_ty, - obligations, + obligations: projected_obligations, } } else { Normalized { value: projected_ty, - obligations, + obligations: projected_obligations, } }; let cache_value = prune_cache_value_obligations(infcx, &result); 
infcx.projection_cache.borrow_mut().insert_ty(cache_key, cache_value); - - Some(result) + obligations.extend(result.obligations); + Some(result.value) } Ok(ProjectedTy::NoProgress(projected_ty)) => { debug!("opt_normalize_projection_type: \ @@ -591,7 +702,8 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( obligations: vec![] }; infcx.projection_cache.borrow_mut().insert_ty(cache_key, result.clone()); - Some(result) + // No need to extend `obligations`. + Some(result.value) } Err(ProjectionTyError::TooManyCandidates) => { debug!("opt_normalize_projection_type: \ @@ -609,7 +721,9 @@ fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( infcx.projection_cache.borrow_mut() .error(cache_key); - Some(normalize_to_error(selcx, param_env, projection_ty, cause, depth)) + let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth); + obligations.extend(result.obligations); + Some(result.value) } } } @@ -658,7 +772,7 @@ fn prune_cache_value_obligations<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, /// may or may not be necessary -- in principle, all the obligations /// that must be proven to show that `T: Trait` were also returned /// when the cache was first populated. But there are some vague concerns, -/// and so we take the precatuionary measure of including `T: Trait` in +/// and so we take the precautionary measure of including `T: Trait` in /// the result: /// /// Concern #1. The current setup is fragile. Perhaps someone could @@ -675,19 +789,21 @@ fn prune_cache_value_obligations<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, /// that may yet turn out to be wrong. This *may* lead to some sort /// of trouble, though we don't have a concrete example of how that /// can occur yet. But it seems risky at best. 
-fn push_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - projection_ty: ty::ProjectionTy<'tcx>, - cause: ObligationCause<'tcx>, - depth: usize, - result: &mut NormalizedTy<'tcx>) +fn get_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>( + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>, + depth: usize) + -> PredicateObligation<'tcx> { let trait_ref = projection_ty.trait_ref(infcx.tcx).to_poly_trait_ref(); - let trait_obligation = Obligation { cause, - recursion_depth: depth, - param_env, - predicate: trait_ref.to_predicate() }; - result.obligations.push(trait_obligation); + Obligation { + cause, + recursion_depth: depth, + param_env, + predicate: trait_ref.to_predicate(), + } } /// If we are projecting `::Item`, but `T: Trait` does not @@ -775,7 +891,7 @@ fn project_type<'cx, 'gcx, 'tcx>( debug!("project(obligation={:?})", obligation); - let recursion_limit = selcx.tcx().sess.recursion_limit.get(); + let recursion_limit = *selcx.tcx().sess.recursion_limit.get(); if obligation.recursion_depth >= recursion_limit { debug!("project: overflow!"); selcx.infcx().report_overflow_error(&obligation, true); @@ -789,11 +905,11 @@ fn project_type<'cx, 'gcx, 'tcx>( return Ok(ProjectedTy::Progress(Progress::error(selcx.tcx()))); } - let mut candidates = ProjectionTyCandidateSet { - vec: Vec::new(), - ambiguous: false, - }; + let mut candidates = ProjectionTyCandidateSet::None; + // Make sure that the following procedures are kept in order. ParamEnv + // needs to be first because it has highest priority, and Select checks + // the return value of push_candidate which assumes it's ran at last. 
assemble_candidates_from_param_env(selcx, obligation, &obligation_trait_ref, @@ -804,76 +920,27 @@ fn project_type<'cx, 'gcx, 'tcx>( &obligation_trait_ref, &mut candidates); - if let Err(e) = assemble_candidates_from_impls(selcx, - obligation, - &obligation_trait_ref, - &mut candidates) { - return Err(ProjectionTyError::TraitSelectionError(e)); - } + assemble_candidates_from_impls(selcx, + obligation, + &obligation_trait_ref, + &mut candidates); - debug!("{} candidates, ambiguous={}", - candidates.vec.len(), - candidates.ambiguous); + match candidates { + ProjectionTyCandidateSet::Single(candidate) => Ok(ProjectedTy::Progress( + confirm_candidate(selcx, + obligation, + &obligation_trait_ref, + candidate))), + ProjectionTyCandidateSet::None => Ok(ProjectedTy::NoProgress( + selcx.tcx().mk_projection( + obligation.predicate.item_def_id, + obligation.predicate.substs))), + // Error occurred while trying to processing impls. + ProjectionTyCandidateSet::Error(e) => Err(ProjectionTyError::TraitSelectionError(e)), + // Inherent ambiguity that prevents us from even enumerating the + // candidates. + ProjectionTyCandidateSet::Ambiguous => Err(ProjectionTyError::TooManyCandidates), - // Inherent ambiguity that prevents us from even enumerating the - // candidates. - if candidates.ambiguous { - return Err(ProjectionTyError::TooManyCandidates); - } - - // Drop duplicates. - // - // Note: `candidates.vec` seems to be on the critical path of the - // compiler. Replacing it with an hash set was also tried, which would - // render the following dedup unnecessary. It led to cleaner code but - // prolonged compiling time of `librustc` from 5m30s to 6m in one test, or - // ~9% performance lost. - if candidates.vec.len() > 1 { - let mut i = 0; - while i < candidates.vec.len() { - let has_dup = (0..i).any(|j| candidates.vec[i] == candidates.vec[j]); - if has_dup { - candidates.vec.swap_remove(i); - } else { - i += 1; - } - } - } - - // Prefer where-clauses. 
As in select, if there are multiple - // candidates, we prefer where-clause candidates over impls. This - // may seem a bit surprising, since impls are the source of - // "truth" in some sense, but in fact some of the impls that SEEM - // applicable are not, because of nested obligations. Where - // clauses are the safer choice. See the comment on - // `select::SelectionCandidate` and #21974 for more details. - if candidates.vec.len() > 1 { - debug!("retaining param-env candidates only from {:?}", candidates.vec); - candidates.vec.retain(|c| match *c { - ProjectionTyCandidate::ParamEnv(..) => true, - ProjectionTyCandidate::TraitDef(..) | - ProjectionTyCandidate::Select => false, - }); - debug!("resulting candidate set: {:?}", candidates.vec); - if candidates.vec.len() != 1 { - return Err(ProjectionTyError::TooManyCandidates); - } - } - - assert!(candidates.vec.len() <= 1); - - match candidates.vec.pop() { - Some(candidate) => { - Ok(ProjectedTy::Progress( - confirm_candidate(selcx, - obligation, - &obligation_trait_ref, - candidate))) - } - None => Ok(ProjectedTy::NoProgress( - selcx.tcx().mk_projection( - obligation.predicate.item_def_id, - obligation.predicate.substs))) } } @@ -923,7 +990,7 @@ fn assemble_candidates_from_trait_def<'cx, 'gcx, 'tcx>( ty::TyInfer(ty::TyVar(_)) => { // If the self-type is an inference variable, then it MAY wind up // being a projected type, so induce an ambiguity. 
- candidate_set.ambiguous = true; + candidate_set.mark_ambiguous(); return; } _ => { return; } @@ -957,9 +1024,8 @@ fn assemble_candidates_from_predicates<'cx, 'gcx, 'tcx, I>( debug!("assemble_candidates_from_predicates: predicate={:?}", predicate); match predicate { - ty::Predicate::Projection(ref data) => { - let same_def_id = - data.0.projection_ty.item_def_id == obligation.predicate.item_def_id; + ty::Predicate::Projection(data) => { + let same_def_id = data.projection_def_id() == obligation.predicate.item_def_id; let is_match = same_def_id && infcx.probe(|_| { let data_poly_trait_ref = @@ -980,10 +1046,10 @@ fn assemble_candidates_from_predicates<'cx, 'gcx, 'tcx, I>( data, is_match, same_def_id); if is_match { - candidate_set.vec.push(ctor(data.clone())); + candidate_set.push_candidate(ctor(data)); } } - _ => { } + _ => {} } } } @@ -993,40 +1059,39 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( obligation: &ProjectionTyObligation<'tcx>, obligation_trait_ref: &ty::TraitRef<'tcx>, candidate_set: &mut ProjectionTyCandidateSet<'tcx>) - -> Result<(), SelectionError<'tcx>> { // If we are resolving `>::Item == Type`, // start out by selecting the predicate `T as TraitRef<...>`: let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate()); - selcx.infcx().probe(|_| { + let _ = selcx.infcx().commit_if_ok(|_| { let vtable = match selcx.select(&trait_obligation) { Ok(Some(vtable)) => vtable, Ok(None) => { - candidate_set.ambiguous = true; - return Ok(()); + candidate_set.mark_ambiguous(); + return Err(()); } Err(e) => { debug!("assemble_candidates_from_impls: selection error {:?}", e); - return Err(e); + candidate_set.mark_error(e); + return Err(()); } }; - match vtable { + let eligible = match &vtable { super::VtableClosure(_) | super::VtableGenerator(_) | super::VtableFnPointer(_) | super::VtableObject(_) => { debug!("assemble_candidates_from_impls: vtable={:?}", vtable); - - 
candidate_set.vec.push(ProjectionTyCandidate::Select); + true } - super::VtableImpl(ref impl_data) => { + super::VtableImpl(impl_data) => { // We have to be careful when projecting out of an // impl because of specialization. If we are not in - // trans (i.e., projection mode is not "any"), and the + // codegen (i.e., projection mode is not "any"), and the // impl's type is declared as default, then we disable // projection (even if the trait ref is fully // monomorphic). In the case where trait ref is not @@ -1067,27 +1132,25 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( node_item.item.defaultness.has_value() } else { node_item.item.defaultness.is_default() || - selcx.tcx().impl_is_default(node_item.node.def_id()) + selcx.tcx().impl_is_default(node_item.node.def_id()) }; // Only reveal a specializable default if we're past type-checking // and the obligations is monomorphic, otherwise passes such as // transmute checking and polymorphic MIR optimizations could // get a result which isn't correct for all monomorphizations. - let new_candidate = if !is_default { - Some(ProjectionTyCandidate::Select) + if !is_default { + true } else if obligation.param_env.reveal == Reveal::All { - assert!(!poly_trait_ref.needs_infer()); + debug_assert!(!poly_trait_ref.needs_infer()); if !poly_trait_ref.needs_subst() { - Some(ProjectionTyCandidate::Select) + true } else { - None + false } } else { - None - }; - - candidate_set.vec.extend(new_candidate); + false + } } super::VtableParam(..) => { // This case tell us nothing about the value of an @@ -1115,6 +1178,7 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( // in the compiler: a trait predicate (`T : SomeTrait`) and a // projection. And the projection where clause is handled // in `assemble_candidates_from_param_env`. + false } super::VtableAutoImpl(..) | super::VtableBuiltin(..) 
=> { @@ -1124,10 +1188,18 @@ fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( "Cannot project an associated type from `{:?}`", vtable); } - } + }; - Ok(()) - }) + if eligible { + if candidate_set.push_candidate(ProjectionTyCandidate::Select(vtable)) { + Ok(()) + } else { + Err(()) + } + } else { + Err(()) + } + }); } fn confirm_candidate<'cx, 'gcx, 'tcx>( @@ -1147,8 +1219,8 @@ fn confirm_candidate<'cx, 'gcx, 'tcx>( confirm_param_env_candidate(selcx, obligation, poly_projection) } - ProjectionTyCandidate::Select => { - confirm_select_candidate(selcx, obligation, obligation_trait_ref) + ProjectionTyCandidate::Select(vtable) => { + confirm_select_candidate(selcx, obligation, obligation_trait_ref, vtable) } } } @@ -1156,21 +1228,10 @@ fn confirm_candidate<'cx, 'gcx, 'tcx>( fn confirm_select_candidate<'cx, 'gcx, 'tcx>( selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, obligation: &ProjectionTyObligation<'tcx>, - obligation_trait_ref: &ty::TraitRef<'tcx>) + obligation_trait_ref: &ty::TraitRef<'tcx>, + vtable: Selection<'tcx>) -> Progress<'tcx> { - let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); - let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate()); - let vtable = match selcx.select(&trait_obligation) { - Ok(Some(vtable)) => vtable, - _ => { - span_bug!( - obligation.cause.span, - "Failed to select `{:?}`", - trait_obligation); - } - }; - match vtable { super::VtableImpl(data) => confirm_impl_candidate(selcx, obligation, data), @@ -1222,7 +1283,7 @@ fn confirm_object_candidate<'cx, 'gcx, 'tcx>( // item with the correct name let env_predicates = env_predicates.filter_map(|p| match p { ty::Predicate::Projection(data) => - if data.0.projection_ty.item_def_id == obligation.predicate.item_def_id { + if data.projection_def_id() == obligation.predicate.item_def_id { Some(data) } else { None @@ -1264,7 +1325,7 @@ fn confirm_generator_candidate<'cx, 'gcx, 'tcx>( vtable: VtableGeneratorData<'tcx, PredicateObligation<'tcx>>) -> 
Progress<'tcx> { - let gen_sig = vtable.substs.generator_poly_sig(vtable.closure_def_id, selcx.tcx()); + let gen_sig = vtable.substs.poly_sig(vtable.generator_def_id, selcx.tcx()); let Normalized { value: gen_sig, obligations @@ -1283,28 +1344,28 @@ fn confirm_generator_candidate<'cx, 'gcx, 'tcx>( let gen_def_id = tcx.lang_items().gen_trait().unwrap(); - // Note: we unwrap the binder here but re-create it below (1) - let ty::Binder((trait_ref, yield_ty, return_ty)) = + let predicate = tcx.generator_trait_ref_and_outputs(gen_def_id, obligation.predicate.self_ty(), - gen_sig); + gen_sig) + .map_bound(|(trait_ref, yield_ty, return_ty)| { + let name = tcx.associated_item(obligation.predicate.item_def_id).ident.name; + let ty = if name == "Return" { + return_ty + } else if name == "Yield" { + yield_ty + } else { + bug!() + }; - let name = tcx.associated_item(obligation.predicate.item_def_id).name; - let ty = if name == Symbol::intern("Return") { - return_ty - } else if name == Symbol::intern("Yield") { - yield_ty - } else { - bug!() - }; - - let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here - projection_ty: ty::ProjectionTy { - substs: trait_ref.substs, - item_def_id: obligation.predicate.item_def_id, - }, - ty: ty - }); + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + substs: trait_ref.substs, + item_def_id: obligation.predicate.item_def_id, + }, + ty: ty + } + }); confirm_param_env_candidate(selcx, obligation, predicate) .with_addl_obligations(vtable.nested) @@ -1381,21 +1442,21 @@ fn confirm_callable_candidate<'cx, 'gcx, 'tcx>( // the `Output` associated type is declared on `FnOnce` let fn_once_def_id = tcx.lang_items().fn_once_trait().unwrap(); - // Note: we unwrap the binder here but re-create it below (1) - let ty::Binder((trait_ref, ret_type)) = + let predicate = tcx.closure_trait_ref_and_return_type(fn_once_def_id, obligation.predicate.self_ty(), fn_sig, - flag); - - let predicate = 
ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here - projection_ty: ty::ProjectionTy::from_ref_and_name( - tcx, - trait_ref, - Symbol::intern(FN_OUTPUT_NAME), - ), - ty: ret_type - }); + flag) + .map_bound(|(trait_ref, ret_type)| { + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy::from_ref_and_name( + tcx, + trait_ref, + Ident::from_str(FN_OUTPUT_NAME), + ), + ty: ret_type + } + }); confirm_param_env_candidate(selcx, obligation, predicate) } @@ -1441,19 +1502,26 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( let param_env = obligation.param_env; let assoc_ty = assoc_ty_def(selcx, impl_def_id, obligation.predicate.item_def_id); - let ty = if !assoc_ty.item.defaultness.has_value() { + if !assoc_ty.item.defaultness.has_value() { // This means that the impl is missing a definition for the // associated type. This error will be reported by the type // checker method `check_impl_items_against_trait`, so here we // just return TyError. debug!("confirm_impl_candidate: no associated type {:?} for {:?}", - assoc_ty.item.name, + assoc_ty.item.ident, obligation.predicate); - tcx.types.err + return Progress { + ty: tcx.types.err, + obligations: nested, + }; + } + let substs = translate_substs(selcx.infcx(), param_env, impl_def_id, substs, assoc_ty.node); + let ty = if let ty::AssociatedKind::Existential = assoc_ty.item.kind { + let item_substs = Substs::identity_for_item(tcx, assoc_ty.item.def_id); + tcx.mk_anon(assoc_ty.item.def_id, item_substs) } else { tcx.type_of(assoc_ty.item.def_id) }; - let substs = translate_substs(selcx.infcx(), param_env, impl_def_id, substs, assoc_ty.node); Progress { ty: ty.subst(tcx, substs), obligations: nested, @@ -1472,7 +1540,7 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( -> specialization_graph::NodeItem { let tcx = selcx.tcx(); - let assoc_ty_name = tcx.associated_item(assoc_ty_def_id).name; + let assoc_ty_name = tcx.associated_item(assoc_ty_def_id).ident; let trait_def_id = tcx.impl_trait_ref(impl_def_id).unwrap().def_id; 
let trait_def = tcx.trait_def(trait_def_id); @@ -1485,7 +1553,7 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( let impl_node = specialization_graph::Node::Impl(impl_def_id); for item in impl_node.items(tcx) { if item.kind == ty::AssociatedKind::Type && - tcx.hygienic_eq(item.name, assoc_ty_name, trait_def_id) { + tcx.hygienic_eq(item.ident, assoc_ty_name, trait_def_id) { return specialization_graph::NodeItem { node: specialization_graph::Node::Impl(impl_def_id), item, @@ -1591,20 +1659,24 @@ impl<'tcx> ProjectionCache<'tcx> { } } + pub fn clear(&mut self) { + self.map.clear(); + } + pub fn snapshot(&mut self) -> ProjectionCacheSnapshot { ProjectionCacheSnapshot { snapshot: self.map.snapshot() } } pub fn rollback_to(&mut self, snapshot: ProjectionCacheSnapshot) { - self.map.rollback_to(snapshot.snapshot); + self.map.rollback_to(&snapshot.snapshot); } pub fn rollback_skolemized(&mut self, snapshot: &ProjectionCacheSnapshot) { self.map.partial_rollback(&snapshot.snapshot, &|k| k.ty.has_re_skol()); } - pub fn commit(&mut self, snapshot: ProjectionCacheSnapshot) { - self.map.commit(snapshot.snapshot); + pub fn commit(&mut self, snapshot: &ProjectionCacheSnapshot) { + self.map.commit(&snapshot.snapshot); } /// Try to start normalize `key`; returns an error if @@ -1654,6 +1726,23 @@ impl<'tcx> ProjectionCache<'tcx> { })); } + /// A specialized version of `complete` for when the key's value is known + /// to be a NormalizedTy. + pub fn complete_normalized(&mut self, key: ProjectionCacheKey<'tcx>, ty: &NormalizedTy<'tcx>) { + // We want to insert `ty` with no obligations. If the existing value + // already has no obligations (as is common) we can use `insert_noop` + // to do a minimal amount of work -- the HashMap insertion is skipped, + // and minimal changes are made to the undo log. 
+ if ty.obligations.is_empty() { + self.map.insert_noop(); + } else { + self.map.insert(key, ProjectionCacheEntry::NormalizedTy(Normalized { + value: ty.value, + obligations: vec![] + })); + } + } + /// Indicates that trying to normalize `key` resulted in /// ambiguity. No point in trying it again then until we gain more /// type information (in which case, the "fully resolved" key will diff --git a/src/librustc/traits/query/dropck_outlives.rs b/src/librustc/traits/query/dropck_outlives.rs new file mode 100644 index 000000000000..e41ed0824b4e --- /dev/null +++ b/src/librustc/traits/query/dropck_outlives.rs @@ -0,0 +1,265 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use infer::at::At; +use infer::InferOk; +use rustc_data_structures::small_vec::SmallVec; +use std::iter::FromIterator; +use syntax::codemap::Span; +use ty::subst::Kind; +use ty::{self, Ty, TyCtxt}; + +impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { + /// Given a type `ty` of some value being dropped, computes a set + /// of "kinds" (types, regions) that must be outlive the execution + /// of the destructor. These basically correspond to data that the + /// destructor might access. This is used during regionck to + /// impose "outlives" constraints on any lifetimes referenced + /// within. + /// + /// The rules here are given by the "dropck" RFCs, notably [#1238] + /// and [#1327]. This is a fixed-point computation, where we + /// explore all the data that will be dropped (transitively) when + /// a value of type `ty` is dropped. 
For each type T that will be + /// dropped and which has a destructor, we must assume that all + /// the types/regions of T are live during the destructor, unless + /// they are marked with a special attribute (`#[may_dangle]`). + /// + /// [#1238]: https://github.com/rust-lang/rfcs/blob/master/text/1238-nonparametric-dropck.md + /// [#1327]: https://github.com/rust-lang/rfcs/blob/master/text/1327-dropck-param-eyepatch.md + pub fn dropck_outlives(&self, ty: Ty<'tcx>) -> InferOk<'tcx, Vec>> { + debug!( + "dropck_outlives(ty={:?}, param_env={:?})", + ty, self.param_env, + ); + + // Quick check: there are a number of cases that we know do not require + // any destructor. + let tcx = self.infcx.tcx; + if trivial_dropck_outlives(tcx, ty) { + return InferOk { + value: vec![], + obligations: vec![], + }; + } + + let gcx = tcx.global_tcx(); + let mut orig_values = SmallVec::new(); + let c_ty = self.infcx.canonicalize_query(&self.param_env.and(ty), &mut orig_values); + let span = self.cause.span; + debug!("c_ty = {:?}", c_ty); + match &gcx.dropck_outlives(c_ty) { + Ok(result) if result.is_proven() => { + match self.infcx.instantiate_query_result_and_region_obligations( + self.cause, + self.param_env, + &orig_values, + result, + ) { + Ok(InferOk { value, obligations }) => { + let ty = self.infcx.resolve_type_vars_if_possible(&ty); + let kinds = value.into_kinds_reporting_overflows(tcx, span, ty); + return InferOk { + value: kinds, + obligations, + }; + } + + Err(_) => { /* fallthrough to error-handling code below */ } + } + } + + _ => { /* fallthrough to error-handling code below */ } + } + + // Errors and ambiuity in dropck occur in two cases: + // - unresolved inference variables at the end of typeck + // - non well-formed types where projections cannot be resolved + // Either of these should hvae created an error before. 
+ tcx.sess + .delay_span_bug(span, "dtorck encountered internal error"); + return InferOk { + value: vec![], + obligations: vec![], + }; + } +} + +#[derive(Clone, Debug, Default)] +pub struct DropckOutlivesResult<'tcx> { + pub kinds: Vec>, + pub overflows: Vec>, +} + +impl<'tcx> DropckOutlivesResult<'tcx> { + pub fn report_overflows( + &self, + tcx: TyCtxt<'_, '_, 'tcx>, + span: Span, + ty: Ty<'tcx>, + ) { + for overflow_ty in self.overflows.iter().take(1) { + let mut err = struct_span_err!( + tcx.sess, + span, + E0320, + "overflow while adding drop-check rules for {}", + ty, + ); + err.note(&format!("overflowed on {}", overflow_ty)); + err.emit(); + } + } + + pub fn into_kinds_reporting_overflows( + self, + tcx: TyCtxt<'_, '_, 'tcx>, + span: Span, + ty: Ty<'tcx>, + ) -> Vec> { + self.report_overflows(tcx, span, ty); + let DropckOutlivesResult { kinds, overflows: _ } = self; + kinds + } +} + +/// A set of constraints that need to be satisfied in order for +/// a type to be valid for destruction. +#[derive(Clone, Debug)] +pub struct DtorckConstraint<'tcx> { + /// Types that are required to be alive in order for this + /// type to be valid for destruction. + pub outlives: Vec>, + + /// Types that could not be resolved: projections and params. + pub dtorck_types: Vec>, + + /// If, during the computation of the dtorck constraint, we + /// overflow, that gets recorded here. The caller is expected to + /// report an error. 
+ pub overflows: Vec>, +} + +impl<'tcx> DtorckConstraint<'tcx> { + pub fn empty() -> DtorckConstraint<'tcx> { + DtorckConstraint { + outlives: vec![], + dtorck_types: vec![], + overflows: vec![], + } + } +} + +impl<'tcx> FromIterator> for DtorckConstraint<'tcx> { + fn from_iter>>(iter: I) -> Self { + let mut result = Self::empty(); + + for DtorckConstraint { + outlives, + dtorck_types, + overflows, + } in iter + { + result.outlives.extend(outlives); + result.dtorck_types.extend(dtorck_types); + result.overflows.extend(overflows); + } + + result + } +} +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for DropckOutlivesResult<'tcx> { + kinds, overflows + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for DropckOutlivesResult<'a> { + type Lifted = DropckOutlivesResult<'tcx>; + kinds, overflows + } +} + +impl_stable_hash_for!(struct DropckOutlivesResult<'tcx> { + kinds, overflows +}); + +impl_stable_hash_for!(struct DtorckConstraint<'tcx> { + outlives, + dtorck_types, + overflows +}); + +/// This returns true if the type `ty` is "trivial" for +/// dropck-outlives -- that is, if it doesn't require any types to +/// outlive. This is similar but not *quite* the same as the +/// `needs_drop` test in the compiler already -- that is, for every +/// type T for which this function return true, needs-drop would +/// return false. But the reverse does not hold: in particular, +/// `needs_drop` returns false for `PhantomData`, but it is not +/// trivial for dropck-outlives. +/// +/// Note also that `needs_drop` requires a "global" type (i.e., one +/// with erased regions), but this funtcion does not. +pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool { + match ty.sty { + // None of these types have a destructor and hence they do not + // require anything in particular to outlive the dtor's + // execution. 
+ ty::TyInfer(ty::FreshIntTy(_)) + | ty::TyInfer(ty::FreshFloatTy(_)) + | ty::TyBool + | ty::TyInt(_) + | ty::TyUint(_) + | ty::TyFloat(_) + | ty::TyNever + | ty::TyFnDef(..) + | ty::TyFnPtr(_) + | ty::TyChar + | ty::TyGeneratorWitness(..) + | ty::TyRawPtr(_) + | ty::TyRef(..) + | ty::TyStr + | ty::TyForeign(..) + | ty::TyError => true, + + // [T; N] and [T] have same properties as T. + ty::TyArray(ty, _) | ty::TySlice(ty) => trivial_dropck_outlives(tcx, ty), + + // (T1..Tn) and closures have same properties as T1..Tn -- + // check if *any* of those are trivial. + ty::TyTuple(ref tys) => tys.iter().cloned().all(|t| trivial_dropck_outlives(tcx, t)), + ty::TyClosure(def_id, ref substs) => substs + .upvar_tys(def_id, tcx) + .all(|t| trivial_dropck_outlives(tcx, t)), + + ty::TyAdt(def, _) => { + if Some(def.did) == tcx.lang_items().manually_drop() { + // `ManuallyDrop` never has a dtor. + true + } else { + // Other types might. Moreover, PhantomData doesn't + // have a dtor, but it is considered to own its + // content, so it is non-trivial. Unions can have `impl Drop`, + // and hence are non-trivial as well. + false + } + } + + // The following *might* require a destructor: it would deeper inspection to tell. + ty::TyDynamic(..) + | ty::TyProjection(..) + | ty::TyParam(_) + | ty::TyAnon(..) + | ty::TyInfer(_) + | ty::TyGenerator(..) => false, + } +} diff --git a/src/librustc/traits/query/evaluate_obligation.rs b/src/librustc/traits/query/evaluate_obligation.rs new file mode 100644 index 000000000000..93fcadceb165 --- /dev/null +++ b/src/librustc/traits/query/evaluate_obligation.rs @@ -0,0 +1,58 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::InferCtxt; +use rustc_data_structures::small_vec::SmallVec; +use traits::{EvaluationResult, PredicateObligation, SelectionContext, + TraitQueryMode, OverflowError}; + +impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { + /// Evaluates whether the predicate can be satisfied (by any means) + /// in the given `ParamEnv`. + pub fn predicate_may_hold( + &self, + obligation: &PredicateObligation<'tcx>, + ) -> bool { + self.evaluate_obligation(obligation).may_apply() + } + + /// Evaluates whether the predicate can be satisfied in the given + /// `ParamEnv`, and returns `false` if not certain. However, this is + /// not entirely accurate if inference variables are involved. + pub fn predicate_must_hold( + &self, + obligation: &PredicateObligation<'tcx>, + ) -> bool { + self.evaluate_obligation(obligation) == EvaluationResult::EvaluatedToOk + } + + // Helper function that canonicalizes and runs the query, as well as handles + // overflow. + fn evaluate_obligation( + &self, + obligation: &PredicateObligation<'tcx>, + ) -> EvaluationResult { + let mut _orig_values = SmallVec::new(); + let c_pred = self.canonicalize_query(&obligation.param_env.and(obligation.predicate), + &mut _orig_values); + // Run canonical query. If overflow occurs, rerun from scratch but this time + // in standard trait query mode so that overflow is handled appropriately + // within `SelectionContext`. + match self.tcx.global_tcx().evaluate_obligation(c_pred) { + Ok(result) => result, + Err(OverflowError) => { + let mut selcx = + SelectionContext::with_query_mode(&self, TraitQueryMode::Standard); + selcx.evaluate_obligation_recursively(obligation) + .expect("Overflow should be caught earlier in standard query mode") + } + } + } +} diff --git a/src/librustc/traits/query/mod.rs b/src/librustc/traits/query/mod.rs new file mode 100644 index 000000000000..35f17aebc044 --- /dev/null +++ b/src/librustc/traits/query/mod.rs @@ -0,0 +1,60 @@ +// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Experimental types for the trait query interface. The methods +//! defined in this module are all based on **canonicalization**, +//! which makes a canonical query by replacing unbound inference +//! variables and regions, so that results can be reused more broadly. +//! The providers for the queries defined here can be found in +//! `librustc_traits`. + +use infer::canonical::Canonical; +use ty::error::TypeError; +use ty::{self, Ty}; + +pub mod dropck_outlives; +pub mod evaluate_obligation; +pub mod normalize; +pub mod normalize_erasing_regions; +pub mod outlives_bounds; +pub mod type_op; + +pub type CanonicalProjectionGoal<'tcx> = + Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::ProjectionTy<'tcx>>>; + +pub type CanonicalTyGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, Ty<'tcx>>>; + +pub type CanonicalPredicateGoal<'tcx> = + Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::Predicate<'tcx>>>; + +pub type CanonicalTypeOpEqGoal<'tcx> = + Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::eq::Eq<'tcx>>>; + +pub type CanonicalTypeOpSubtypeGoal<'tcx> = + Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::subtype::Subtype<'tcx>>>; + +pub type CanonicalTypeOpProvePredicateGoal<'tcx> = + Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::prove_predicate::ProvePredicate<'tcx>>>; + +pub type CanonicalTypeOpNormalizeGoal<'tcx, T> = + Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::normalize::Normalize>>; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub struct NoSolution; + +pub type Fallible = Result; + +impl<'tcx> From> for NoSolution { + fn from(_: TypeError<'tcx>) -> NoSolution { + NoSolution + } +} + +impl_stable_hash_for!(struct NoSolution { }); diff --git 
a/src/librustc/traits/query/normalize.rs b/src/librustc/traits/query/normalize.rs new file mode 100644 index 000000000000..2203aefa3146 --- /dev/null +++ b/src/librustc/traits/query/normalize.rs @@ -0,0 +1,254 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Code for the 'normalization' query. This consists of a wrapper +//! which folds deeply, invoking the underlying +//! `normalize_projection_ty` query when it encounters projections. + +use infer::{InferCtxt, InferOk}; +use infer::at::At; +use mir::interpret::{GlobalId, ConstValue}; +use rustc_data_structures::small_vec::SmallVec; +use traits::{Obligation, ObligationCause, PredicateObligation, Reveal}; +use traits::project::Normalized; +use ty::{self, Ty, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder}; +use ty::subst::{Subst, Substs}; + +use super::NoSolution; + +impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { + /// Normalize `value` in the context of the inference context, + /// yielding a resulting type, or an error if `value` cannot be + /// normalized. If you don't care about regions, you should prefer + /// `normalize_erasing_regions`, which is more efficient. + /// + /// If the normalization succeeds and is unambiguous, returns back + /// the normalized value along with various outlives relations (in + /// the form of obligations that must be discharged). + /// + /// NB. This will *eventually* be the main means of + /// normalizing, but for now should be used only when we actually + /// know that normalization will succeed, since error reporting + /// and other details are still "under development". 
+ pub fn normalize(&self, value: &T) -> Result, NoSolution> + where + T: TypeFoldable<'tcx>, + { + debug!( + "normalize::<{}>(value={:?}, param_env={:?})", + unsafe { ::std::intrinsics::type_name::() }, + value, + self.param_env, + ); + let mut normalizer = QueryNormalizer { + infcx: self.infcx, + cause: self.cause, + param_env: self.param_env, + obligations: vec![], + error: false, + anon_depth: 0, + }; + if !value.has_projections() { + return Ok(Normalized { + value: value.clone(), + obligations: vec![], + }); + } + + let value1 = value.fold_with(&mut normalizer); + if normalizer.error { + Err(NoSolution) + } else { + Ok(Normalized { + value: value1, + obligations: normalizer.obligations, + }) + } + } +} + +/// Result from the `normalize_projection_ty` query. +#[derive(Clone, Debug)] +pub struct NormalizationResult<'tcx> { + /// Result of normalization. + pub normalized_ty: Ty<'tcx>, +} + +struct QueryNormalizer<'cx, 'gcx: 'tcx, 'tcx: 'cx> { + infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, + cause: &'cx ObligationCause<'tcx>, + param_env: ty::ParamEnv<'tcx>, + obligations: Vec>, + error: bool, + anon_depth: usize, +} + +impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx> { + fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + let ty = ty.super_fold_with(self); + match ty.sty { + ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => { + // (*) + // Only normalize `impl Trait` after type-checking, usually in codegen. 
+ match self.param_env.reveal { + Reveal::UserFacing => ty, + + Reveal::All => { + let recursion_limit = *self.tcx().sess.recursion_limit.get(); + if self.anon_depth >= recursion_limit { + let obligation = Obligation::with_depth( + self.cause.clone(), + recursion_limit, + self.param_env, + ty, + ); + self.infcx.report_overflow_error(&obligation, true); + } + + let generic_ty = self.tcx().type_of(def_id); + let concrete_ty = generic_ty.subst(self.tcx(), substs); + self.anon_depth += 1; + if concrete_ty == ty { + bug!("infinite recursion generic_ty: {:#?}, substs: {:#?}, \ + concrete_ty: {:#?}, ty: {:#?}", generic_ty, substs, concrete_ty, + ty); + } + let folded_ty = self.fold_ty(concrete_ty); + self.anon_depth -= 1; + folded_ty + } + } + } + + ty::TyProjection(ref data) if !data.has_escaping_regions() => { + // (*) + // (*) This is kind of hacky -- we need to be able to + // handle normalization within binders because + // otherwise we wind up a need to normalize when doing + // trait matching (since you can have a trait + // obligation like `for<'a> T::B : Fn(&'a int)`), but + // we can't normalize with bound regions in scope. So + // far now we just ignore binders but only normalize + // if all bound regions are gone (and then we still + // have to renormalize whenever we instantiate a + // binder). It would be better to normalize in a + // binding-aware fashion. + + let gcx = self.infcx.tcx.global_tcx(); + + let mut orig_values = SmallVec::new(); + let c_data = + self.infcx.canonicalize_query(&self.param_env.and(*data), &mut orig_values); + debug!("QueryNormalizer: c_data = {:#?}", c_data); + debug!("QueryNormalizer: orig_values = {:#?}", orig_values); + match gcx.normalize_projection_ty(c_data) { + Ok(result) => { + // We don't expect ambiguity. 
+ if result.is_ambiguous() { + self.error = true; + return ty; + } + + match self.infcx.instantiate_query_result_and_region_obligations( + self.cause, + self.param_env, + &orig_values, + &result, + ) { + Ok(InferOk { + value: result, + obligations, + }) => { + debug!("QueryNormalizer: result = {:#?}", result); + debug!("QueryNormalizer: obligations = {:#?}", obligations); + self.obligations.extend(obligations); + return result.normalized_ty; + } + + Err(_) => { + self.error = true; + return ty; + } + } + } + + Err(NoSolution) => { + self.error = true; + ty + } + } + } + + _ => ty, + } + } + + fn fold_const(&mut self, constant: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + if let ConstValue::Unevaluated(def_id, substs) = constant.val { + let tcx = self.infcx.tcx.global_tcx(); + if let Some(param_env) = self.tcx().lift_to_global(&self.param_env) { + if substs.needs_infer() || substs.has_skol() { + let identity_substs = Substs::identity_for_item(tcx, def_id); + let instance = ty::Instance::resolve(tcx, param_env, def_id, identity_substs); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None, + }; + match tcx.const_eval(param_env.and(cid)) { + Ok(evaluated) => { + let evaluated = evaluated.subst(self.tcx(), substs); + return self.fold_const(evaluated); + } + Err(_) => {} + } + } + } else { + if let Some(substs) = self.tcx().lift_to_global(&substs) { + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None, + }; + match tcx.const_eval(param_env.and(cid)) { + Ok(evaluated) => return self.fold_const(evaluated), + Err(_) => {} + } + } + } + } + } + } + constant + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for NormalizationResult<'tcx> { + normalized_ty + } +} + +BraceStructLiftImpl! 
{ + impl<'a, 'tcx> Lift<'tcx> for NormalizationResult<'a> { + type Lifted = NormalizationResult<'tcx>; + normalized_ty + } +} + +impl_stable_hash_for!(struct NormalizationResult<'tcx> { + normalized_ty +}); diff --git a/src/librustc/traits/query/normalize_erasing_regions.rs b/src/librustc/traits/query/normalize_erasing_regions.rs new file mode 100644 index 000000000000..1cb96a3e33f4 --- /dev/null +++ b/src/librustc/traits/query/normalize_erasing_regions.rs @@ -0,0 +1,88 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Methods for normalizing when you don't care about regions (and +//! aren't doing type inference). If either of those things don't +//! apply to you, use `infcx.normalize(...)`. +//! +//! The methods in this file use a `TypeFolder` to recursively process +//! contents, invoking the underlying +//! `normalize_ty_after_erasing_regions` query for each type found +//! within. (This underlying query is what is cached.) + +use ty::{self, Ty, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder}; + +impl<'cx, 'tcx> TyCtxt<'cx, 'tcx, 'tcx> { + /// Erase the regions in `value` and then fully normalize all the + /// types found within. The result will also have regions erased. + /// + /// This is appropriate to use only after type-check: it assumes + /// that normalization will succeed, for example. 
+ pub fn normalize_erasing_regions(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T + where + T: TypeFoldable<'tcx>, + { + debug!( + "normalize_erasing_regions::<{}>(value={:?}, param_env={:?})", + unsafe { ::std::intrinsics::type_name::() }, + value, + param_env, + ); + + // Erase first before we do the real query -- this keeps the + // cache from being too polluted. + let value = self.erase_regions(&value); + if !value.has_projections() { + value + } else { + value.fold_with(&mut NormalizeAfterErasingRegionsFolder { + tcx: self, + param_env: param_env, + }) + } + } + + /// If you have a `Binder`, you can do this to strip out the + /// late-bound regions and then normalize the result, yielding up + /// a `T` (with regions erased). This is appropriate when the + /// binder is being instantiated at the call site. + /// + /// NB. Currently, higher-ranked type bounds inhibit + /// normalization. Therefore, each time we erase them in + /// codegen, we need to normalize the contents. + pub fn normalize_erasing_late_bound_regions( + self, + param_env: ty::ParamEnv<'tcx>, + value: &ty::Binder, + ) -> T + where + T: TypeFoldable<'tcx>, + { + assert!(!value.needs_subst()); + let value = self.erase_late_bound_regions(value); + self.normalize_erasing_regions(param_env, value) + } +} + +struct NormalizeAfterErasingRegionsFolder<'cx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, +} + +impl<'cx, 'tcx> TypeFolder<'tcx, 'tcx> for NormalizeAfterErasingRegionsFolder<'cx, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + self.tcx.normalize_ty_after_erasing_regions(self.param_env.and(ty)) + } +} diff --git a/src/librustc/traits/query/outlives_bounds.rs b/src/librustc/traits/query/outlives_bounds.rs new file mode 100644 index 000000000000..0127ae423da5 --- /dev/null +++ b/src/librustc/traits/query/outlives_bounds.rs @@ -0,0 +1,171 @@ +// Copyright 2012-2014 The Rust 
Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use infer::InferCtxt; +use syntax::ast; +use syntax::codemap::Span; +use rustc_data_structures::small_vec::SmallVec; +use traits::{FulfillmentContext, ObligationCause, TraitEngine, TraitEngineExt}; +use traits::query::NoSolution; +use ty::{self, Ty, TyCtxt}; + +use ich::StableHashingContext; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, + StableHasherResult}; +use std::mem; + +/// Outlives bounds are relationships between generic parameters, +/// whether they both be regions (`'a: 'b`) or whether types are +/// involved (`T: 'a`). These relationships can be extracted from the +/// full set of predicates we understand or also from types (in which +/// case they are called implied bounds). They are fed to the +/// `OutlivesEnv` which in turn is supplied to the region checker and +/// other parts of the inference system. +#[derive(Clone, Debug)] +pub enum OutlivesBound<'tcx> { + RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>), + RegionSubParam(ty::Region<'tcx>, ty::ParamTy), + RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>), +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for self::OutlivesBound<'a> { + type Lifted = self::OutlivesBound<'tcx>; + (self::OutlivesBound::RegionSubRegion)(a, b), + (self::OutlivesBound::RegionSubParam)(a, b), + (self::OutlivesBound::RegionSubProjection)(a, b), + } +} + +EnumTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for self::OutlivesBound<'tcx> { + (self::OutlivesBound::RegionSubRegion)(a, b), + (self::OutlivesBound::RegionSubParam)(a, b), + (self::OutlivesBound::RegionSubProjection)(a, b), + } +} + +impl<'a, 'tcx> HashStable> for OutlivesBound<'tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + OutlivesBound::RegionSubRegion(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } + OutlivesBound::RegionSubParam(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } + OutlivesBound::RegionSubProjection(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } + } + } +} + +impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { + /// Implied bounds are region relationships that we deduce + /// automatically. The idea is that (e.g.) a caller must check that a + /// function's argument types are well-formed immediately before + /// calling that fn, and hence the *callee* can assume that its + /// argument types are well-formed. This may imply certain relationships + /// between generic parameters. For example: + /// + /// fn foo<'a,T>(x: &'a T) + /// + /// can only be called with a `'a` and `T` such that `&'a T` is WF. + /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`. + /// + /// # Parameters + /// + /// - `param_env`, the where-clauses in scope + /// - `body_id`, the body-id to use when normalizing assoc types. + /// Note that this may cause outlives obligations to be injected + /// into the inference context with this body-id. + /// - `ty`, the type that we are supposed to assume is WF. + /// - `span`, a span to use when normalizing, hopefully not important, + /// might be useful if a `bug!` occurs. 
+ pub fn implied_outlives_bounds( + &self, + param_env: ty::ParamEnv<'tcx>, + body_id: ast::NodeId, + ty: Ty<'tcx>, + span: Span, + ) -> Vec> { + debug!("implied_outlives_bounds(ty = {:?})", ty); + + let mut orig_values = SmallVec::new(); + let key = self.canonicalize_query(¶m_env.and(ty), &mut orig_values); + let result = match self.tcx.global_tcx().implied_outlives_bounds(key) { + Ok(r) => r, + Err(NoSolution) => { + self.tcx.sess.delay_span_bug( + span, + "implied_outlives_bounds failed to solve all obligations" + ); + return vec![]; + } + }; + assert!(result.value.is_proven()); + + let result = self.instantiate_query_result_and_region_obligations( + &ObligationCause::misc(span, body_id), param_env, &orig_values, &result); + debug!("implied_outlives_bounds for {:?}: {:#?}", ty, result); + let result = match result { + Ok(v) => v, + Err(_) => { + self.tcx.sess.delay_span_bug( + span, + "implied_outlives_bounds failed to instantiate" + ); + return vec![]; + } + }; + + // Instantiation may have produced new inference variables and constraints on those + // variables. Process these constraints. + let mut fulfill_cx = FulfillmentContext::new(); + fulfill_cx.register_predicate_obligations(self, result.obligations); + if fulfill_cx.select_all_or_error(self).is_err() { + self.tcx.sess.delay_span_bug( + span, + "implied_outlives_bounds failed to solve obligations from instantiation" + ); + } + + result.value + } +} + +pub fn explicit_outlives_bounds<'tcx>( + param_env: ty::ParamEnv<'tcx>, +) -> impl Iterator> + 'tcx { + debug!("explicit_outlives_bounds()"); + param_env + .caller_bounds + .into_iter() + .filter_map(move |predicate| match predicate { + ty::Predicate::Projection(..) | + ty::Predicate::Trait(..) | + ty::Predicate::Subtype(..) | + ty::Predicate::WellFormed(..) | + ty::Predicate::ObjectSafe(..) | + ty::Predicate::ClosureKind(..) | + ty::Predicate::TypeOutlives(..) | + ty::Predicate::ConstEvaluatable(..) 
=> None, + ty::Predicate::RegionOutlives(ref data) => data.no_late_bound_regions().map( + |ty::OutlivesPredicate(r_a, r_b)| OutlivesBound::RegionSubRegion(r_b, r_a), + ), + }) +} diff --git a/src/librustc/traits/query/type_op/custom.rs b/src/librustc/traits/query/type_op/custom.rs new file mode 100644 index 000000000000..cc752d21ab2b --- /dev/null +++ b/src/librustc/traits/query/type_op/custom.rs @@ -0,0 +1,113 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use infer::{InferCtxt, InferOk}; +use std::fmt; +use traits::query::Fallible; + +use infer::canonical::query_result; +use infer::canonical::QueryRegionConstraint; +use std::rc::Rc; +use syntax::codemap::DUMMY_SP; +use traits::{ObligationCause, TraitEngine, TraitEngineExt}; + +pub struct CustomTypeOp { + closure: F, + description: G, +} + +impl CustomTypeOp { + pub fn new<'gcx, 'tcx, R>(closure: F, description: G) -> Self + where + F: FnOnce(&InferCtxt<'_, 'gcx, 'tcx>) -> Fallible>, + G: Fn() -> String, + { + CustomTypeOp { + closure, + description, + } + } +} + +impl<'gcx, 'tcx, F, R, G> super::TypeOp<'gcx, 'tcx> for CustomTypeOp +where + F: for<'a, 'cx> FnOnce(&'a InferCtxt<'cx, 'gcx, 'tcx>) -> Fallible>, + G: Fn() -> String, +{ + type Output = R; + + /// Processes the operation and all resulting obligations, + /// returning the final result along with any region constraints + /// (they will be given over to the NLL region solver). 
+ fn fully_perform( + self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Fallible<(Self::Output, Option>>>)> { + if cfg!(debug_assertions) { + info!("fully_perform({:?})", self); + } + + scrape_region_constraints(infcx, || Ok((self.closure)(infcx)?)) + } +} + +impl fmt::Debug for CustomTypeOp +where + G: Fn() -> String, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", (self.description)()) + } +} + +/// Executes `op` and then scrapes out all the "old style" region +/// constraints that result, creating query-region-constraints. +fn scrape_region_constraints<'gcx, 'tcx, R>( + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + op: impl FnOnce() -> Fallible>, +) -> Fallible<(R, Option>>>)> { + let mut fulfill_cx = TraitEngine::new(infcx.tcx); + let dummy_body_id = ObligationCause::dummy().body_id; + + // During NLL, we expect that nobody will register region + // obligations **except** as part of a custom type op (and, at the + // end of each custom type op, we scrape out the region + // obligations that resulted). So this vector should be empty on + // entry. 
+ let pre_obligations = infcx.take_registered_region_obligations(); + assert!( + pre_obligations.is_empty(), + "scrape_region_constraints: incoming region obligations = {:#?}", + pre_obligations, + ); + + let InferOk { value, obligations } = infcx.commit_if_ok(|_| op())?; + debug_assert!(obligations.iter().all(|o| o.cause.body_id == dummy_body_id)); + fulfill_cx.register_predicate_obligations(infcx, obligations); + if let Err(e) = fulfill_cx.select_all_or_error(infcx) { + infcx.tcx.sess.diagnostic().delay_span_bug( + DUMMY_SP, + &format!("errors selecting obligation during MIR typeck: {:?}", e), + ); + } + + let region_obligations = infcx.take_registered_region_obligations(); + + let region_constraint_data = infcx.take_and_reset_region_constraints(); + + let outlives = + query_result::make_query_outlives(infcx.tcx, region_obligations, ®ion_constraint_data); + + if outlives.is_empty() { + Ok((value, None)) + } else { + Ok((value, Some(Rc::new(outlives)))) + } +} diff --git a/src/librustc/traits/query/type_op/eq.rs b/src/librustc/traits/query/type_op/eq.rs new file mode 100644 index 000000000000..52a087cbc806 --- /dev/null +++ b/src/librustc/traits/query/type_op/eq.rs @@ -0,0 +1,72 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryResult}; +use traits::query::Fallible; +use ty::{ParamEnvAnd, Ty, TyCtxt}; + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct Eq<'tcx> { + pub a: Ty<'tcx>, + pub b: Ty<'tcx>, +} + +impl<'tcx> Eq<'tcx> { + pub fn new(a: Ty<'tcx>, b: Ty<'tcx>) -> Self { + Self { a, b } + } +} + +impl<'gcx: 'tcx, 'tcx> super::QueryTypeOp<'gcx, 'tcx> for Eq<'tcx> { + type QueryResult = (); + + fn try_fast_path( + _tcx: TyCtxt<'_, 'gcx, 'tcx>, + key: &ParamEnvAnd<'tcx, Eq<'tcx>>, + ) -> Option { + if key.value.a == key.value.b { + Some(()) + } else { + None + } + } + + fn perform_query( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, + ) -> Fallible> { + tcx.type_op_eq(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, ()>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, ()>> { + v + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for Eq<'tcx> { + a, + b, + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for Eq<'a> { + type Lifted = Eq<'tcx>; + a, + b, + } +} + +impl_stable_hash_for! { + struct Eq<'tcx> { a, b } +} diff --git a/src/librustc/traits/query/type_op/mod.rs b/src/librustc/traits/query/type_op/mod.rs new file mode 100644 index 000000000000..be5e2838963e --- /dev/null +++ b/src/librustc/traits/query/type_op/mod.rs @@ -0,0 +1,166 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryRegionConstraint, + QueryResult}; +use infer::{InferCtxt, InferOk}; +use rustc_data_structures::small_vec::SmallVec; +use std::fmt; +use std::rc::Rc; +use traits::query::Fallible; +use traits::ObligationCause; +use ty::fold::TypeFoldable; +use ty::{Lift, ParamEnvAnd, TyCtxt}; + +pub mod custom; +pub mod eq; +pub mod normalize; +pub mod outlives; +pub mod prove_predicate; +use self::prove_predicate::ProvePredicate; +pub mod subtype; + +/// "Type ops" are used in NLL to perform some particular action and +/// extract out the resulting region constraints (or an error if it +/// cannot be completed). +pub trait TypeOp<'gcx, 'tcx>: Sized + fmt::Debug { + type Output; + + /// Processes the operation and all resulting obligations, + /// returning the final result along with any region constraints + /// (they will be given over to the NLL region solver). + fn fully_perform( + self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Fallible<(Self::Output, Option>>>)>; +} + +/// "Query type ops" are type ops that are implemented using a +/// [canonical query][c]. The `Self` type here contains the kernel of +/// information needed to do the operation -- `TypeOp` is actually +/// implemented for `ParamEnvAnd`, since we always need to bring +/// along a parameter environment as well. For query type-ops, we will +/// first canonicalize the key and then invoke the query on the tcx, +/// which produces the resulting query region constraints. +/// +/// [c]: https://rust-lang-nursery.github.io/rustc-guide/traits/canonicalization.html +pub trait QueryTypeOp<'gcx: 'tcx, 'tcx>: + fmt::Debug + Sized + TypeFoldable<'tcx> + Lift<'gcx> +{ + type QueryResult: TypeFoldable<'tcx> + Lift<'gcx>; + + /// Give query the option for a simple fast path that never + /// actually hits the tcx cache lookup etc. Return `Some(r)` with + /// a final result or `None` to do the full path. 
+ fn try_fast_path( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + key: &ParamEnvAnd<'tcx, Self>, + ) -> Option; + + /// Performs the actual query with the canonicalized key -- the + /// real work happens here. This method is not given an `infcx` + /// because it shouldn't need one -- and if it had access to one, + /// it might do things like invoke `sub_regions`, which would be + /// bad, because it would create subregion relationships that are + /// not captured in the return value. + fn perform_query( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, + ) -> Fallible>; + + /// Casts a lifted query result (which is in the gcx lifetime) + /// into the tcx lifetime. This is always just an identity cast, + /// but the generic code doesn't realize it -- put another way, in + /// the generic code, we have a `Lifted<'gcx, Self::QueryResult>` + /// and we want to convert that to a `Self::QueryResult`. This is + /// not a priori valid, so we can't do it -- but in practice, it + /// is always a no-op (e.g., the lifted form of a type, + /// `Ty<'gcx>`, is a subtype of `Ty<'tcx>`). So we have to push + /// the operation into the impls that know more specifically what + /// `QueryResult` is. This operation would (maybe) be nicer with + /// something like HKTs or GATs, since then we could make + /// `QueryResult` parametric and `'gcx` and `'tcx` etc. 
+ fn shrink_to_tcx_lifetime( + lifted_query_result: &'a CanonicalizedQueryResult<'gcx, Self::QueryResult>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self::QueryResult>>; + + fn fully_perform_into( + query_key: ParamEnvAnd<'tcx, Self>, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + output_query_region_constraints: &mut Vec>, + ) -> Fallible { + if let Some(result) = QueryTypeOp::try_fast_path(infcx.tcx, &query_key) { + return Ok(result); + } + + // FIXME(#33684) -- We need to use + // `canonicalize_hr_query_hack` here because of things + // like the subtype query, which go awry around + // `'static` otherwise. + let mut canonical_var_values = SmallVec::new(); + let canonical_self = + infcx.canonicalize_hr_query_hack(&query_key, &mut canonical_var_values); + let canonical_result = Self::perform_query(infcx.tcx, canonical_self)?; + let canonical_result = Self::shrink_to_tcx_lifetime(&canonical_result); + + let param_env = query_key.param_env; + + let InferOk { value, obligations } = infcx + .instantiate_nll_query_result_and_region_obligations( + &ObligationCause::dummy(), + param_env, + &canonical_var_values, + canonical_result, + output_query_region_constraints, + )?; + + // Typically, instantiating NLL query results does not + // create obligations. However, in some cases there + // are unresolved type variables, and unify them *can* + // create obligations. In that case, we have to go + // fulfill them. We do this via a (recursive) query. 
+ for obligation in obligations { + let () = ProvePredicate::fully_perform_into( + obligation + .param_env + .and(ProvePredicate::new(obligation.predicate)), + infcx, + output_query_region_constraints, + )?; + } + + Ok(value) + } +} + +impl<'gcx: 'tcx, 'tcx, Q> TypeOp<'gcx, 'tcx> for ParamEnvAnd<'tcx, Q> +where + Q: QueryTypeOp<'gcx, 'tcx>, +{ + type Output = Q::QueryResult; + + fn fully_perform( + self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Fallible<(Self::Output, Option>>>)> { + let mut qrc = vec![]; + let r = Q::fully_perform_into(self, infcx, &mut qrc)?; + + // Promote the final query-region-constraints into a + // (optional) ref-counted vector: + let opt_qrc = if qrc.is_empty() { + None + } else { + Some(Rc::new(qrc)) + }; + + Ok((r, opt_qrc)) + } +} diff --git a/src/librustc/traits/query/type_op/normalize.rs b/src/librustc/traits/query/type_op/normalize.rs new file mode 100644 index 000000000000..0c393fa4ca80 --- /dev/null +++ b/src/librustc/traits/query/type_op/normalize.rs @@ -0,0 +1,161 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryResult}; +use std::fmt; +use traits::query::Fallible; +use ty::fold::TypeFoldable; +use ty::{self, Lift, ParamEnvAnd, Ty, TyCtxt}; + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct Normalize { + pub value: T, +} + +impl<'tcx, T> Normalize +where + T: fmt::Debug + TypeFoldable<'tcx>, +{ + pub fn new(value: T) -> Self { + Self { value } + } +} + +impl<'gcx: 'tcx, 'tcx, T> super::QueryTypeOp<'gcx, 'tcx> for Normalize +where + T: Normalizable<'gcx, 'tcx>, +{ + type QueryResult = T; + + fn try_fast_path(_tcx: TyCtxt<'_, 'gcx, 'tcx>, key: &ParamEnvAnd<'tcx, Self>) -> Option { + if !key.value.value.has_projections() { + Some(key.value.value) + } else { + None + } + } + + fn perform_query( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, + ) -> Fallible> { + T::type_op_method(tcx, canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, T>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, T>> { + T::shrink_to_tcx_lifetime(v) + } +} + +pub trait Normalizable<'gcx, 'tcx>: fmt::Debug + TypeFoldable<'tcx> + Lift<'gcx> + Copy { + fn type_op_method( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Normalize>>, + ) -> Fallible>; + + /// Convert from the `'gcx` (lifted) form of `Self` into the `tcx` + /// form of `Self`. 
+ fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, Self>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self>>; +} + +impl Normalizable<'gcx, 'tcx> for Ty<'tcx> +where + 'gcx: 'tcx, +{ + fn type_op_method( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Normalize>>, + ) -> Fallible> { + tcx.type_op_normalize_ty(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, Self>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self>> { + v + } +} + +impl Normalizable<'gcx, 'tcx> for ty::Predicate<'tcx> +where + 'gcx: 'tcx, +{ + fn type_op_method( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Normalize>>, + ) -> Fallible> { + tcx.type_op_normalize_predicate(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, Self>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self>> { + v + } +} + +impl Normalizable<'gcx, 'tcx> for ty::PolyFnSig<'tcx> +where + 'gcx: 'tcx, +{ + fn type_op_method( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Normalize>>, + ) -> Fallible> { + tcx.type_op_normalize_poly_fn_sig(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, Self>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self>> { + v + } +} + +impl Normalizable<'gcx, 'tcx> for ty::FnSig<'tcx> +where + 'gcx: 'tcx, +{ + fn type_op_method( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Normalize>>, + ) -> Fallible> { + tcx.type_op_normalize_fn_sig(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, Self>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self>> { + v + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, T> TypeFoldable<'tcx> for Normalize { + value, + } where T: TypeFoldable<'tcx>, +} + +BraceStructLiftImpl! 
{ + impl<'tcx, T> Lift<'tcx> for Normalize { + type Lifted = Normalize; + value, + } where T: Lift<'tcx>, +} + +impl_stable_hash_for! { + impl<'tcx, T> for struct Normalize { + value + } +} diff --git a/src/librustc/traits/query/type_op/outlives.rs b/src/librustc/traits/query/type_op/outlives.rs new file mode 100644 index 000000000000..e41ae7a72f9c --- /dev/null +++ b/src/librustc/traits/query/type_op/outlives.rs @@ -0,0 +1,100 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryResult}; +use traits::query::dropck_outlives::trivial_dropck_outlives; +use traits::query::dropck_outlives::DropckOutlivesResult; +use traits::query::Fallible; +use ty::{ParamEnvAnd, Ty, TyCtxt}; + +#[derive(Copy, Clone, Debug)] +pub struct DropckOutlives<'tcx> { + dropped_ty: Ty<'tcx>, +} + +impl<'tcx> DropckOutlives<'tcx> { + pub fn new(dropped_ty: Ty<'tcx>) -> Self { + DropckOutlives { dropped_ty } + } +} + +impl super::QueryTypeOp<'gcx, 'tcx> for DropckOutlives<'tcx> +where + 'gcx: 'tcx, +{ + type QueryResult = DropckOutlivesResult<'tcx>; + + fn try_fast_path( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + key: &ParamEnvAnd<'tcx, Self>, + ) -> Option { + if trivial_dropck_outlives(tcx, key.value.dropped_ty) { + Some(DropckOutlivesResult::default()) + } else { + None + } + } + + fn perform_query( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, + ) -> Fallible> { + // Subtle: note that we are not invoking + // `infcx.at(...).dropck_outlives(...)` here, but rather the + // underlying `dropck_outlives` query. 
This same underlying + // query is also used by the + // `infcx.at(...).dropck_outlives(...)` fn. Avoiding the + // wrapper means we don't need an infcx in this code, which is + // good because the interface doesn't give us one (so that we + // know we are not registering any subregion relations or + // other things). + + // FIXME convert to the type expected by the `dropck_outlives` + // query. This should eventually be fixed by changing the + // *underlying query*. + let Canonical { + variables, + value: + ParamEnvAnd { + param_env, + value: DropckOutlives { dropped_ty }, + }, + } = canonicalized; + let canonicalized = Canonical { + variables, + value: param_env.and(dropped_ty), + }; + + tcx.dropck_outlives(canonicalized) + } + + fn shrink_to_tcx_lifetime( + lifted_query_result: &'a CanonicalizedQueryResult<'gcx, Self::QueryResult>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, Self::QueryResult>> { + lifted_query_result + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for DropckOutlives<'tcx> { + dropped_ty + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for DropckOutlives<'a> { + type Lifted = DropckOutlives<'tcx>; + dropped_ty + } +} + +impl_stable_hash_for! { + struct DropckOutlives<'tcx> { dropped_ty } +} diff --git a/src/librustc/traits/query/type_op/prove_predicate.rs b/src/librustc/traits/query/type_op/prove_predicate.rs new file mode 100644 index 000000000000..33dc3210f088 --- /dev/null +++ b/src/librustc/traits/query/type_op/prove_predicate.rs @@ -0,0 +1,65 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryResult}; +use traits::query::Fallible; +use ty::{ParamEnvAnd, Predicate, TyCtxt}; + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct ProvePredicate<'tcx> { + pub predicate: Predicate<'tcx>, +} + +impl<'tcx> ProvePredicate<'tcx> { + pub fn new(predicate: Predicate<'tcx>) -> Self { + ProvePredicate { predicate } + } +} + +impl<'gcx: 'tcx, 'tcx> super::QueryTypeOp<'gcx, 'tcx> for ProvePredicate<'tcx> { + type QueryResult = (); + + fn try_fast_path( + _tcx: TyCtxt<'_, 'gcx, 'tcx>, + _key: &ParamEnvAnd<'tcx, Self>, + ) -> Option { + None + } + + fn perform_query( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, + ) -> Fallible> { + tcx.type_op_prove_predicate(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, ()>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, ()>> { + v + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ProvePredicate<'tcx> { + predicate, + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for ProvePredicate<'a> { + type Lifted = ProvePredicate<'tcx>; + predicate, + } +} + +impl_stable_hash_for! { + struct ProvePredicate<'tcx> { predicate } +} diff --git a/src/librustc/traits/query/type_op/subtype.rs b/src/librustc/traits/query/type_op/subtype.rs new file mode 100644 index 000000000000..dc41bb1d6ab6 --- /dev/null +++ b/src/librustc/traits/query/type_op/subtype.rs @@ -0,0 +1,72 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResult, QueryResult}; +use traits::query::Fallible; +use ty::{ParamEnvAnd, Ty, TyCtxt}; + +#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] +pub struct Subtype<'tcx> { + pub sub: Ty<'tcx>, + pub sup: Ty<'tcx>, +} + +impl<'tcx> Subtype<'tcx> { + pub fn new(sub: Ty<'tcx>, sup: Ty<'tcx>) -> Self { + Self { + sub, + sup, + } + } +} + +impl<'gcx: 'tcx, 'tcx> super::QueryTypeOp<'gcx, 'tcx> for Subtype<'tcx> { + type QueryResult = (); + + fn try_fast_path(_tcx: TyCtxt<'_, 'gcx, 'tcx>, key: &ParamEnvAnd<'tcx, Self>) -> Option<()> { + if key.value.sub == key.value.sup { + Some(()) + } else { + None + } + } + + fn perform_query( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Self>>, + ) -> Fallible> { + tcx.type_op_subtype(canonicalized) + } + + fn shrink_to_tcx_lifetime( + v: &'a CanonicalizedQueryResult<'gcx, ()>, + ) -> &'a Canonical<'tcx, QueryResult<'tcx, ()>> { + v + } +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for Subtype<'tcx> { + sub, + sup, + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for Subtype<'a> { + type Lifted = Subtype<'tcx>; + sub, + sup, + } +} + +impl_stable_hash_for! { + struct Subtype<'tcx> { sub, sup } +} diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs index 51d2bc8701a4..ea96c3050268 100644 --- a/src/librustc/traits/select.rs +++ b/src/librustc/traits/select.rs @@ -8,19 +8,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! See `README.md` for high-level documentation +//! See [rustc guide] for more info on how this works. +//! +//! 
[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html#selection use self::SelectionCandidate::*; use self::EvaluationResult::*; use super::coherence::{self, Conflict}; use super::DerivedObligationCause; -use super::IntercrateMode; +use super::{IntercrateMode, TraitQueryMode}; use super::project; use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey}; use super::{PredicateObligation, TraitObligation, ObligationCause}; use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation}; -use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch}; +use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch, Overflow}; use super::{ObjectCastObligation, Obligation}; use super::TraitNotObjectSafe; use super::Selection; @@ -35,34 +37,24 @@ use dep_graph::{DepNodeIndex, DepKind}; use hir::def_id::DefId; use infer; use infer::{InferCtxt, InferOk, TypeFreshener}; -use ty::subst::{Kind, Subst, Substs}; +use ty::subst::{Subst, Substs}; use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; use ty::fast_reject; use ty::relate::TypeRelation; use middle::lang_items; +use mir::interpret::{GlobalId}; -use rustc_data_structures::bitvec::BitVector; -use rustc_data_structures::snapshot_vec::{SnapshotVecDelegate, SnapshotVec}; +use rustc_data_structures::sync::Lock; +use rustc_data_structures::bitvec::BitArray; use std::iter; -use std::cell::RefCell; use std::cmp; use std::fmt; -use std::marker::PhantomData; use std::mem; use std::rc::Rc; -use syntax::abi::Abi; +use rustc_target::spec::abi::Abi; use hir; -use lint; -use util::nodemap::FxHashMap; +use util::nodemap::{FxHashMap, FxHashSet}; -struct InferredObligationsSnapshotVecDelegate<'tcx> { - phantom: PhantomData<&'tcx i32>, -} -impl<'tcx> SnapshotVecDelegate for InferredObligationsSnapshotVecDelegate<'tcx> { - type Value = PredicateObligation<'tcx>; - type Undo = (); - fn reverse(_: &mut Vec, _: Self::Undo) {} -} pub struct 
SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, @@ -90,12 +82,20 @@ pub struct SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { /// would satisfy it. This avoids crippling inference, basically. intercrate: Option, - inferred_obligations: SnapshotVec>, + intercrate_ambiguity_causes: Option>, - intercrate_ambiguity_causes: Vec, + /// Controls whether or not to filter out negative impls when selecting. + /// This is used in librustdoc to distinguish between the lack of an impl + /// and a negative impl + allow_negative_impls: bool, + + /// The mode that trait queries run in, which informs our error handling + /// policy. In essence, canonicalized queries need their errors propagated + /// rather than immediately reported because we do not have accurate spans. + query_mode: TraitQueryMode, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub enum IntercrateAmbiguityCause { DownstreamCrate { trait_desc: String, @@ -148,7 +148,7 @@ struct TraitObligationStack<'prev, 'tcx: 'prev> { #[derive(Clone)] pub struct SelectionCache<'tcx> { - hashmap: RefCell, + hashmap: Lock, WithDepNode>>>>, } @@ -226,6 +226,7 @@ pub struct SelectionCache<'tcx> { /// parameter environment. #[derive(PartialEq,Eq,Debug,Clone)] enum SelectionCandidate<'tcx> { + /// If has_nested is false, there are no *further* obligations BuiltinCandidate { has_nested: bool }, ParamCandidate(ty::PolyTraitRef<'tcx>), ImplCandidate(DefId), @@ -305,9 +306,6 @@ enum BuiltinImplConditions<'tcx> { /// There is no built-in impl. There may be some other /// candidate (a where-clause or user-defined impl). None, - /// There is *no* impl for this, builtin or not. Ignore - /// all where-clauses. - Never, /// It is unknown whether there is an impl. 
Ambiguous } @@ -324,7 +322,7 @@ enum BuiltinImplConditions<'tcx> { /// all the "potential success" candidates can potentially succeed, /// so they are no-ops when unioned with a definite error, and within /// the categories it's easy to see that the unions are correct. -enum EvaluationResult { +pub enum EvaluationResult { /// Evaluation successful EvaluatedToOk, /// Evaluation is known to be ambiguous - it *might* hold for some @@ -367,7 +365,9 @@ enum EvaluationResult { /// When checking `foo`, we have to prove `T: Trait`. This basically /// translates into this: /// + /// ```plain,ignore /// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait + /// ``` /// /// When we try to prove it, we first go the first option, which /// recurses. This shows us that the impl is "useless" - it won't @@ -388,7 +388,7 @@ enum EvaluationResult { } impl EvaluationResult { - fn may_apply(self) -> bool { + pub fn may_apply(self) -> bool { match self { EvaluatedToOk | EvaluatedToAmbig | @@ -411,9 +411,29 @@ impl EvaluationResult { } } +impl_stable_hash_for!(enum self::EvaluationResult { + EvaluatedToOk, + EvaluatedToAmbig, + EvaluatedToUnknown, + EvaluatedToRecur, + EvaluatedToErr +}); + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// Indicates that trait evaluation caused overflow. 
+pub struct OverflowError; + +impl_stable_hash_for!(struct OverflowError { }); + +impl<'tcx> From for SelectionError<'tcx> { + fn from(OverflowError: OverflowError) -> SelectionError<'tcx> { + SelectionError::Overflow + } +} + #[derive(Clone)] pub struct EvaluationCache<'tcx> { - hashmap: RefCell, WithDepNode>> + hashmap: Lock, WithDepNode>> } impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { @@ -422,8 +442,9 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { infcx, freshener: infcx.freshener(), intercrate: None, - inferred_obligations: SnapshotVec::new(), - intercrate_ambiguity_causes: Vec::new(), + intercrate_ambiguity_causes: None, + allow_negative_impls: false, + query_mode: TraitQueryMode::Standard, } } @@ -434,11 +455,58 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { infcx, freshener: infcx.freshener(), intercrate: Some(mode), - inferred_obligations: SnapshotVec::new(), - intercrate_ambiguity_causes: Vec::new(), + intercrate_ambiguity_causes: None, + allow_negative_impls: false, + query_mode: TraitQueryMode::Standard, } } + pub fn with_negative(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, + allow_negative_impls: bool) -> SelectionContext<'cx, 'gcx, 'tcx> { + debug!("with_negative({:?})", allow_negative_impls); + SelectionContext { + infcx, + freshener: infcx.freshener(), + intercrate: None, + intercrate_ambiguity_causes: None, + allow_negative_impls, + query_mode: TraitQueryMode::Standard, + } + } + + pub fn with_query_mode(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, + query_mode: TraitQueryMode) -> SelectionContext<'cx, 'gcx, 'tcx> { + debug!("with_query_mode({:?})", query_mode); + SelectionContext { + infcx, + freshener: infcx.freshener(), + intercrate: None, + intercrate_ambiguity_causes: None, + allow_negative_impls: false, + query_mode, + } + } + + /// Enables tracking of intercrate ambiguity causes. These are + /// used in coherence to give improved diagnostics. 
We don't do + /// this until we detect a coherence error because it can lead to + /// false overflow results (#47139) and because it costs + /// computation time. + pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) { + assert!(self.intercrate.is_some()); + assert!(self.intercrate_ambiguity_causes.is_none()); + self.intercrate_ambiguity_causes = Some(vec![]); + debug!("selcx: enable_tracking_intercrate_ambiguity_causes"); + } + + /// Gets the intercrate ambiguity causes collected since tracking + /// was enabled and disables tracking at the same time. If + /// tracking is not enabled, just returns an empty vector. + pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec { + assert!(self.intercrate.is_some()); + self.intercrate_ambiguity_causes.take().unwrap_or(vec![]) + } + pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { self.infcx } @@ -451,29 +519,20 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx } - pub fn intercrate_ambiguity_causes(&self) -> &[IntercrateAmbiguityCause] { - &self.intercrate_ambiguity_causes - } - /// Wraps the inference context's in_snapshot s.t. snapshot handling is only from the selection /// context's self. fn in_snapshot(&mut self, f: F) -> R - where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> R + where F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R { - // The irrefutable nature of the operation means we don't need to snapshot the - // inferred_obligations vector. self.infcx.in_snapshot(|snapshot| f(self, snapshot)) } /// Wraps a probe s.t. obligations collected during it are ignored and old obligations are /// retained. 
fn probe(&mut self, f: F) -> R - where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> R + where F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R { - let inferred_obligations_snapshot = self.inferred_obligations.start_snapshot(); - let result = self.infcx.probe(|snapshot| f(self, snapshot)); - self.inferred_obligations.rollback_to(inferred_obligations_snapshot); - result + self.infcx.probe(|snapshot| f(self, snapshot)) } /// Wraps a commit_if_ok s.t. obligations collected during it are not returned in selection if @@ -481,17 +540,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn commit_if_ok(&mut self, f: F) -> Result where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> Result { - let inferred_obligations_snapshot = self.inferred_obligations.start_snapshot(); - match self.infcx.commit_if_ok(|snapshot| f(self, snapshot)) { - Ok(ok) => { - self.inferred_obligations.commit(inferred_obligations_snapshot); - Ok(ok) - }, - Err(err) => { - self.inferred_obligations.rollback_to(inferred_obligations_snapshot); - Err(err) - } - } + self.infcx.commit_if_ok(|snapshot| f(self, snapshot)) } @@ -515,62 +564,30 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { pub fn select(&mut self, obligation: &TraitObligation<'tcx>) -> SelectionResult<'tcx, Selection<'tcx>> { debug!("select({:?})", obligation); - assert!(!obligation.predicate.has_escaping_regions()); - - let tcx = self.tcx(); + debug_assert!(!obligation.predicate.has_escaping_regions()); let stack = self.push_stack(TraitObligationStackList::empty(), obligation); - let ret = match self.candidate_from_obligation(&stack)? 
{ - None => None, - Some(candidate) => { - let mut candidate = self.confirm_candidate(obligation, candidate)?; - let inferred_obligations = (*self.inferred_obligations).into_iter().cloned(); - candidate.nested_obligations_mut().extend(inferred_obligations); - Some(candidate) + + let candidate = match self.candidate_from_obligation(&stack) { + Err(SelectionError::Overflow) => { + // In standard mode, overflow must have been caught and reported + // earlier. + assert!(self.query_mode == TraitQueryMode::Canonical); + return Err(SelectionError::Overflow); }, + Err(e) => { return Err(e); }, + Ok(None) => { return Ok(None); }, + Ok(Some(candidate)) => candidate }; - // Test whether this is a `()` which was produced by defaulting a - // diverging type variable with `!` disabled. If so, we may need - // to raise a warning. - if obligation.predicate.skip_binder().self_ty().is_defaulted_unit() { - let mut raise_warning = true; - // Don't raise a warning if the trait is implemented for ! and only - // permits a trivial implementation for !. This stops us warning - // about (for example) `(): Clone` becoming `!: Clone` because such - // a switch can't cause code to stop compiling or execute - // differently. - let mut never_obligation = obligation.clone(); - let def_id = never_obligation.predicate.skip_binder().trait_ref.def_id; - never_obligation.predicate = never_obligation.predicate.map_bound(|mut trait_pred| { - // Swap out () with ! so we can check if the trait is impld for ! - { - let trait_ref = &mut trait_pred.trait_ref; - let unit_substs = trait_ref.substs; - let mut never_substs = Vec::with_capacity(unit_substs.len()); - never_substs.push(From::from(tcx.types.never)); - never_substs.extend(&unit_substs[1..]); - trait_ref.substs = tcx.intern_substs(&never_substs); - } - trait_pred - }); - if let Ok(Some(..)) = self.select(&never_obligation) { - if !tcx.trait_relevant_for_never(def_id) { - // The trait is also implemented for ! 
and the resulting - // implementation cannot actually be invoked in any way. - raise_warning = false; - } - } - - if raise_warning { - tcx.lint_node(lint::builtin::RESOLVE_TRAIT_ON_DEFAULTED_UNIT, - obligation.cause.body_id, - obligation.cause.span, - &format!("code relies on type inference rules which are likely \ - to change")); - } + match self.confirm_candidate(obligation, candidate) { + Err(SelectionError::Overflow) => { + assert!(self.query_mode == TraitQueryMode::Canonical); + return Err(SelectionError::Overflow); + }, + Err(e) => Err(e), + Ok(candidate) => Ok(Some(candidate)) } - Ok(ret) } /////////////////////////////////////////////////////////////////////////// @@ -584,32 +601,30 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // we can be sure it does not. /// Evaluates whether the obligation `obligation` can be satisfied (by any means). - pub fn evaluate_obligation(&mut self, - obligation: &PredicateObligation<'tcx>) - -> bool + pub fn predicate_may_hold_fatal(&mut self, + obligation: &PredicateObligation<'tcx>) + -> bool { - debug!("evaluate_obligation({:?})", + debug!("predicate_may_hold_fatal({:?})", obligation); - self.probe(|this, _| { - this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) - .may_apply() - }) + // This fatal query is a stopgap that should only be used in standard mode, + // where we do not expect overflow to be propagated. + assert!(self.query_mode == TraitQueryMode::Standard); + + self.evaluate_obligation_recursively(obligation) + .expect("Overflow should be caught earlier in standard query mode") + .may_apply() } - /// Evaluates whether the obligation `obligation` can be satisfied, - /// and returns `false` if not certain. However, this is not entirely - /// accurate if inference variables are involved. 
- pub fn evaluate_obligation_conservatively(&mut self, - obligation: &PredicateObligation<'tcx>) - -> bool + /// Evaluates whether the obligation `obligation` can be satisfied and returns + /// an `EvaluationResult`. + pub fn evaluate_obligation_recursively(&mut self, + obligation: &PredicateObligation<'tcx>) + -> Result { - debug!("evaluate_obligation_conservatively({:?})", - obligation); - self.probe(|this, _| { this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) - == EvaluatedToOk }) } @@ -619,60 +634,48 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn evaluate_predicates_recursively<'a,'o,I>(&mut self, stack: TraitObligationStackList<'o, 'tcx>, predicates: I) - -> EvaluationResult - where I : Iterator>, 'tcx:'a + -> Result + where I : IntoIterator>, 'tcx:'a { let mut result = EvaluatedToOk; for obligation in predicates { - let eval = self.evaluate_predicate_recursively(stack, obligation); + let eval = self.evaluate_predicate_recursively(stack, obligation)?; debug!("evaluate_predicate_recursively({:?}) = {:?}", obligation, eval); if let EvaluatedToErr = eval { // fast-path - EvaluatedToErr is the top of the lattice, // so we don't need to look on the other predicates. - return EvaluatedToErr; + return Ok(EvaluatedToErr); } else { result = cmp::max(result, eval); } } - result + Ok(result) } fn evaluate_predicate_recursively<'o>(&mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, obligation: &PredicateObligation<'tcx>) - -> EvaluationResult + -> Result { debug!("evaluate_predicate_recursively({:?})", obligation); match obligation.predicate { ty::Predicate::Trait(ref t) => { - assert!(!t.has_escaping_regions()); + debug_assert!(!t.has_escaping_regions()); let obligation = obligation.with(t.clone()); self.evaluate_trait_predicate_recursively(previous_stack, obligation) } - ty::Predicate::Equate(ref p) => { - // does this code ever run? 
- match self.infcx.equality_predicate(&obligation.cause, obligation.param_env, p) { - Ok(InferOk { obligations, .. }) => { - self.inferred_obligations.extend(obligations); - EvaluatedToOk - }, - Err(_) => EvaluatedToErr - } - } - ty::Predicate::Subtype(ref p) => { // does this code ever run? match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) { Some(Ok(InferOk { obligations, .. })) => { - self.inferred_obligations.extend(obligations); - EvaluatedToOk + self.evaluate_predicates_recursively(previous_stack, &obligations) }, - Some(Err(_)) => EvaluatedToErr, - None => EvaluatedToAmbig, + Some(Err(_)) => Ok(EvaluatedToErr), + None => Ok(EvaluatedToAmbig), } } @@ -684,21 +687,21 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Some(obligations) => self.evaluate_predicates_recursively(previous_stack, obligations.iter()), None => - EvaluatedToAmbig, + Ok(EvaluatedToAmbig), } } ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => { // we do not consider region relationships when // evaluating trait matches - EvaluatedToOk + Ok(EvaluatedToOk) } ty::Predicate::ObjectSafe(trait_def_id) => { if self.tcx().is_object_safe(trait_def_id) { - EvaluatedToOk + Ok(EvaluatedToOk) } else { - EvaluatedToErr + Ok(EvaluatedToErr) } } @@ -716,10 +719,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { result } Ok(None) => { - EvaluatedToAmbig + Ok(EvaluatedToAmbig) } Err(_) => { - EvaluatedToErr + Ok(EvaluatedToErr) } } } @@ -728,28 +731,43 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { match self.infcx.closure_kind(closure_def_id, closure_substs) { Some(closure_kind) => { if closure_kind.extends(kind) { - EvaluatedToOk + Ok(EvaluatedToOk) } else { - EvaluatedToErr + Ok(EvaluatedToErr) } } None => { - EvaluatedToAmbig + Ok(EvaluatedToAmbig) } } } ty::Predicate::ConstEvaluatable(def_id, substs) => { - match self.tcx().lift_to_global(&(obligation.param_env, substs)) { + let tcx = self.tcx(); + match 
tcx.lift_to_global(&(obligation.param_env, substs)) { Some((param_env, substs)) => { - match self.tcx().const_eval(param_env.and((def_id, substs))) { - Ok(_) => EvaluatedToOk, - Err(_) => EvaluatedToErr + let instance = ty::Instance::resolve( + tcx.global_tcx(), + param_env, + def_id, + substs, + ); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None + }; + match self.tcx().const_eval(param_env.and(cid)) { + Ok(_) => Ok(EvaluatedToOk), + Err(_) => Ok(EvaluatedToErr) + } + } else { + Ok(EvaluatedToErr) } } None => { // Inference variables still left in param_env or substs. - EvaluatedToAmbig + Ok(EvaluatedToAmbig) } } } @@ -759,17 +777,17 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn evaluate_trait_predicate_recursively<'o>(&mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, mut obligation: TraitObligation<'tcx>) - -> EvaluationResult + -> Result { - debug!("evaluate_trait_predicate_recursively({:?})", - obligation); + debug!("evaluate_trait_predicate_recursively({:?})", obligation); - if !self.intercrate.is_some() && obligation.is_global() { - // If a param env is consistent, global obligations do not depend on its particular - // value in order to work, so we can clear out the param env and get better - // caching. (If the current param env is inconsistent, we don't care what happens). + if self.intercrate.is_none() && obligation.is_global() + && obligation.param_env.caller_bounds.iter().all(|bound| bound.needs_subst()) { + // If a param env has no global bounds, global obligations do not + // depend on its particular value in order to work, so we can clear + // out the param env and get better caching. 
debug!("evaluate_trait_predicate_recursively({:?}) - in global", obligation); - obligation.param_env = ty::ParamEnv::empty(obligation.param_env.reveal); + obligation.param_env = obligation.param_env.without_caller_bounds(); } let stack = self.push_stack(previous_stack, &obligation); @@ -778,22 +796,23 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result); - return result; + return Ok(result); } let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack)); + let result = result?; debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result); self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result); - result + Ok(result) } fn evaluate_stack<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) - -> EvaluationResult + -> Result { // In intercrate mode, whenever any of the types are unbound, // there can always be an impl. Even if there are no impls in @@ -819,7 +838,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // This suffices to allow chains like `FnMut` implemented in // terms of `Fn` etc, but we could probably make this more // precise still. - let unbound_input_types = stack.fresh_trait_ref.input_types().any(|ty| ty.is_fresh()); + let unbound_input_types = + stack.fresh_trait_ref.skip_binder().input_types().any(|ty| ty.is_fresh()); // this check was an imperfect workaround for a bug n the old // intercrate mode, it should be removed when that goes away. if unbound_input_types && @@ -828,22 +848,26 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", stack.fresh_trait_ref); // Heuristics: show the diagnostics when there are no candidates in crate. 
- if let Ok(candidate_set) = self.assemble_candidates(stack) { - if !candidate_set.ambiguous && candidate_set.vec.is_empty() { - let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; - let self_ty = trait_ref.self_ty(); - let cause = IntercrateAmbiguityCause::DownstreamCrate { - trait_desc: trait_ref.to_string(), - self_desc: if self_ty.has_concrete_skeleton() { - Some(self_ty.to_string()) - } else { - None - }, - }; - self.intercrate_ambiguity_causes.push(cause); + if self.intercrate_ambiguity_causes.is_some() { + debug!("evaluate_stack: intercrate_ambiguity_causes is some"); + if let Ok(candidate_set) = self.assemble_candidates(stack) { + if !candidate_set.ambiguous && candidate_set.vec.is_empty() { + let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; + let self_ty = trait_ref.self_ty(); + let cause = IntercrateAmbiguityCause::DownstreamCrate { + trait_desc: trait_ref.to_string(), + self_desc: if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + }, + }; + debug!("evaluate_stack: pushing cause = {:?}", cause); + self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause); + } } } - return EvaluatedToAmbig; + return Ok(EvaluatedToAmbig); } if unbound_input_types && stack.iter().skip(1).any( @@ -853,7 +877,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { { debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up", stack.fresh_trait_ref); - return EvaluatedToUnknown; + return Ok(EvaluatedToUnknown); } // If there is any previous entry on the stack that precisely @@ -888,18 +912,19 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { if self.coinductive_match(cycle) { debug!("evaluate_stack({:?}) --> recursive, coinductive", stack.fresh_trait_ref); - return EvaluatedToOk; + return Ok(EvaluatedToOk); } else { debug!("evaluate_stack({:?}) --> recursive, inductive", stack.fresh_trait_ref); - return EvaluatedToRecur; + return Ok(EvaluatedToRecur); } } match 
self.candidate_from_obligation(stack) { Ok(Some(c)) => self.evaluate_candidate(stack, &c), - Ok(None) => EvaluatedToAmbig, - Err(..) => EvaluatedToErr + Ok(None) => Ok(EvaluatedToAmbig), + Err(Overflow) => Err(OverflowError), + Err(..) => Ok(EvaluatedToErr) } } @@ -937,7 +962,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn evaluate_candidate<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>, candidate: &SelectionCandidate<'tcx>) - -> EvaluationResult + -> Result { debug!("evaluate_candidate: depth={} candidate={:?}", stack.obligation.recursion_depth, candidate); @@ -949,12 +974,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { stack.list(), selection.nested_obligations().iter()) } - Err(..) => EvaluatedToErr + Err(..) => Ok(EvaluatedToErr) } - }); + })?; debug!("evaluate_candidate: depth={} result={:?}", stack.obligation.recursion_depth, result); - result + Ok(result) } fn check_evaluation_cache(&self, @@ -988,13 +1013,28 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } if self.can_use_global_caches(param_env) { - let mut cache = self.tcx().evaluation_cache.hashmap.borrow_mut(); if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) { - cache.insert(trait_ref, WithDepNode::new(dep_node, result)); + debug!( + "insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global", + trait_ref, + result, + ); + // This may overwrite the cache with the same value + // FIXME: Due to #50507 this overwrites the different values + // This should be changed to use HashMapExt::insert_same + // when that is fixed + self.tcx().evaluation_cache + .hashmap.borrow_mut() + .insert(trait_ref, WithDepNode::new(dep_node, result)); return; } } + debug!( + "insert_evaluation_cache(trait_ref={:?}, candidate={:?})", + trait_ref, + result, + ); self.infcx.evaluation_cache.hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, result)); @@ -1005,8 +1045,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // // The 
selection process begins by examining all in-scope impls, // caller obligations, and so forth and assembling a list of - // candidates. See `README.md` and the `Candidate` type for more - // details. + // candidates. See [rustc guide] for more details. + // + // [rustc guide]: + // https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html#candidate-assembly fn candidate_from_obligation<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) @@ -1014,9 +1056,16 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { { // Watch out for overflow. This intentionally bypasses (and does // not update) the cache. - let recursion_limit = self.infcx.tcx.sess.recursion_limit.get(); + let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get(); if stack.obligation.recursion_depth >= recursion_limit { - self.infcx().report_overflow_error(&stack.obligation, true); + match self.query_mode { + TraitQueryMode::Standard => { + self.infcx().report_overflow_error(&stack.obligation, true); + }, + TraitQueryMode::Canonical => { + return Err(Overflow); + }, + } } // Check the cache. 
Note that we skolemize the trait-ref @@ -1028,7 +1077,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})", cache_fresh_trait_pred, stack); - assert!(!stack.obligation.predicate.has_escaping_regions()); + debug_assert!(!stack.obligation.predicate.has_escaping_regions()); if let Some(c) = self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred) { @@ -1066,7 +1115,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { if let ImplCandidate(def_id) = candidate { - if self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative { + if !self.allow_negative_impls && + self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative { return Err(Unimplemented) } } @@ -1092,25 +1142,39 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { None => {} Some(conflict) => { debug!("coherence stage: not knowable"); - // Heuristics: show the diagnostics when there are no candidates in crate. 
- let candidate_set = self.assemble_candidates(stack)?; - if !candidate_set.ambiguous && candidate_set.vec.iter().all(|c| { - !self.evaluate_candidate(stack, &c).may_apply() - }) { - let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; - let self_ty = trait_ref.self_ty(); - let trait_desc = trait_ref.to_string(); - let self_desc = if self_ty.has_concrete_skeleton() { - Some(self_ty.to_string()) - } else { - None - }; - let cause = if let Conflict::Upstream = conflict { - IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } - } else { - IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } - }; - self.intercrate_ambiguity_causes.push(cause); + if self.intercrate_ambiguity_causes.is_some() { + debug!("evaluate_stack: intercrate_ambiguity_causes is some"); + // Heuristics: show the diagnostics when there are no candidates in crate. + if let Ok(candidate_set) = self.assemble_candidates(stack) { + let no_candidates_apply = + candidate_set + .vec + .iter() + .map(|c| self.evaluate_candidate(stack, &c)) + .collect::, OverflowError>>()? + .iter() + .all(|r| !r.may_apply()); + if !candidate_set.ambiguous && no_candidates_apply { + let trait_ref = stack.obligation.predicate.skip_binder().trait_ref; + let self_ty = trait_ref.self_ty(); + let trait_desc = trait_ref.to_string(); + let self_desc = if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + }; + let cause = if let Conflict::Upstream = conflict { + IntercrateAmbiguityCause::UpstreamCrateUpdate { + trait_desc, + self_desc, + } + } else { + IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } + }; + debug!("evaluate_stack: pushing cause = {:?}", cause); + self.intercrate_ambiguity_causes.as_mut().unwrap().push(cause); + } + } } return Ok(None); } @@ -1158,18 +1222,26 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // Winnow, but record the exact outcome of evaluation, which - // is needed for specialization. 
- let mut candidates: Vec<_> = candidates.into_iter().filter_map(|c| { - let eval = self.evaluate_candidate(stack, &c); - if eval.may_apply() { - Some(EvaluatedCandidate { + // is needed for specialization. Propagate overflow if it occurs. + let candidates: Result>, _> = candidates + .into_iter() + .map(|c| match self.evaluate_candidate(stack, &c) { + Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval, - }) - } else { - None - } - }).collect(); + })), + Ok(_) => Ok(None), + Err(OverflowError) => Err(Overflow), + }) + .collect(); + + let mut candidates: Vec = + candidates?.into_iter().filter_map(|c| c).collect(); + + debug!("winnowed to {} candidates for {:?}: {:?}", + candidates.len(), + stack, + candidates); // If there are STILL multiple candidate, we can further // reduce the list by dropping duplicates -- including @@ -1282,7 +1354,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { -> Option>> { let tcx = self.tcx(); - let trait_ref = &cache_fresh_trait_pred.0.trait_ref; + let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref; if self.can_use_global_caches(param_env) { let cache = tcx.selection_cache.hashmap.borrow(); if let Some(cached) = cache.get(&trait_ref) { @@ -1302,17 +1374,29 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>) { let tcx = self.tcx(); - let trait_ref = cache_fresh_trait_pred.0.trait_ref; + let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref; if self.can_use_global_caches(param_env) { - let mut cache = tcx.selection_cache.hashmap.borrow_mut(); if let Some(trait_ref) = tcx.lift_to_global(&trait_ref) { if let Some(candidate) = tcx.lift_to_global(&candidate) { - cache.insert(trait_ref, WithDepNode::new(dep_node, candidate)); + debug!( + "insert_candidate_cache(trait_ref={:?}, candidate={:?}) global", + trait_ref, + candidate, + ); + // This may overwrite the cache with the same value + 
tcx.selection_cache + .hashmap.borrow_mut() + .insert(trait_ref, WithDepNode::new(dep_node, candidate)); return; } } } + debug!( + "insert_candidate_cache(trait_ref={:?}, candidate={:?}) local", + trait_ref, + candidate, + ); self.infcx.selection_cache.hashmap .borrow_mut() .insert(trait_ref, WithDepNode::new(dep_node, candidate)); @@ -1355,7 +1439,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let lang_items = self.tcx().lang_items(); if lang_items.copy_trait() == Some(def_id) { debug!("obligation self ty is {:?}", - obligation.predicate.0.self_ty()); + obligation.predicate.skip_binder().self_ty()); // User-defined copy impls are permitted, but only for // structs and enums. @@ -1370,22 +1454,22 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let sized_conditions = self.sized_conditions(obligation); self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?; - } else if lang_items.unsize_trait() == Some(def_id) { - self.assemble_candidates_for_unsizing(obligation, &mut candidates); - } else { - if lang_items.clone_trait() == Some(def_id) { - // Same builtin conditions as `Copy`, i.e. every type which has builtin support - // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone` - // types have builtin support for `Clone`. - let clone_conditions = self.copy_clone_conditions(obligation); - self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?; - } + } else if lang_items.unsize_trait() == Some(def_id) { + self.assemble_candidates_for_unsizing(obligation, &mut candidates); + } else { + if lang_items.clone_trait() == Some(def_id) { + // Same builtin conditions as `Copy`, i.e. every type which has builtin support + // for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone` + // types have builtin support for `Clone`. 
+ let clone_conditions = self.copy_clone_conditions(obligation); + self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?; + } - self.assemble_generator_candidates(obligation, &mut candidates)?; - self.assemble_closure_candidates(obligation, &mut candidates)?; - self.assemble_fn_pointer_candidates(obligation, &mut candidates)?; - self.assemble_candidates_from_impls(obligation, &mut candidates)?; - self.assemble_candidates_from_object_ty(obligation, &mut candidates); + self.assemble_generator_candidates(obligation, &mut candidates)?; + self.assemble_closure_candidates(obligation, &mut candidates)?; + self.assemble_fn_pointer_candidates(obligation, &mut candidates)?; + self.assemble_candidates_from_impls(obligation, &mut candidates)?; + self.assemble_candidates_from_object_ty(obligation, &mut candidates); } self.assemble_candidates_from_projected_tys(obligation, &mut candidates); @@ -1407,7 +1491,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // before we go into the whole skolemization thing, just // quickly check if the self-type is a projection at all. - match obligation.predicate.0.trait_ref.self_ty().sty { + match obligation.predicate.skip_binder().trait_ref.self_ty().sty { ty::TyProjection(_) | ty::TyAnon(..) 
=> {} ty::TyInfer(ty::TyVar(_)) => { span_bug!(obligation.cause.span, @@ -1429,13 +1513,13 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn match_projection_obligation_against_definition_bounds( &mut self, obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot) + snapshot: &infer::CombinedSnapshot<'cx, 'tcx>) -> bool { let poly_trait_predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); let (skol_trait_predicate, skol_map) = - self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot); + self.infcx().skolemize_late_bound_regions(&poly_trait_predicate); debug!("match_projection_obligation_against_definition_bounds: \ skol_trait_predicate={:?} skol_map={:?}", skol_trait_predicate, @@ -1500,16 +1584,13 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { trait_bound: ty::PolyTraitRef<'tcx>, skol_trait_ref: ty::TraitRef<'tcx>, skol_map: &infer::SkolemizationMap<'tcx>, - snapshot: &infer::CombinedSnapshot) + snapshot: &infer::CombinedSnapshot<'cx, 'tcx>) -> bool { - assert!(!skol_trait_ref.has_escaping_regions()); - match self.infcx.at(&obligation.cause, obligation.param_env) - .sup(ty::Binder(skol_trait_ref), trait_bound) { - Ok(InferOk { obligations, .. 
}) => { - self.inferred_obligations.extend(obligations); - } - Err(_) => { return false; } + debug_assert!(!skol_trait_ref.has_escaping_regions()); + if self.infcx.at(&obligation.cause, obligation.param_env) + .sup(ty::Binder::dummy(skol_trait_ref), trait_bound).is_err() { + return false; } self.infcx.leak_check(false, obligation.cause.span, skol_map, snapshot).is_ok() @@ -1537,12 +1618,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let matching_bounds = all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id()); - let matching_bounds = - matching_bounds.filter( - |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply()); - - let param_candidates = - matching_bounds.map(|bound| ParamCandidate(bound)); + // keep only those bounds which may apply, and propagate overflow if it occurs + let mut param_candidates = vec![]; + for bound in matching_bounds { + let wc = self.evaluate_where_clause(stack, bound.clone())?; + if wc.may_apply() { + param_candidates.push(ParamCandidate(bound)); + } + } candidates.vec.extend(param_candidates); @@ -1552,14 +1635,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn evaluate_where_clause<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>, where_clause_trait_ref: ty::PolyTraitRef<'tcx>) - -> EvaluationResult + -> Result { self.probe(move |this, _| { match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { Ok(obligations) => { this.evaluate_predicates_recursively(stack.list(), obligations.iter()) } - Err(()) => EvaluatedToErr + Err(()) => Ok(EvaluatedToErr) } }) } @@ -1606,7 +1689,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { candidates: &mut SelectionCandidateSet<'tcx>) -> Result<(),SelectionError<'tcx>> { - let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) { + let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.def_id()) { Some(k) => k, None => { return Ok(()); } }; @@ 
-1662,12 +1745,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // provide an impl, but only for suitable `fn` pointers ty::TyFnDef(..) | ty::TyFnPtr(_) => { - if let ty::Binder(ty::FnSig { + if let ty::FnSig { unsafety: hir::Unsafety::Normal, abi: Abi::Rust, variadic: false, .. - }) = self_ty.fn_sig(self.tcx()) { + } = self_ty.fn_sig(self.tcx()).skip_binder() { candidates.vec.push(FnPointerCandidate); } } @@ -1688,7 +1771,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.tcx().for_each_relevant_impl( obligation.predicate.def_id(), - obligation.predicate.0.trait_ref.self_ty(), + obligation.predicate.skip_binder().trait_ref.self_ty(), |impl_def_id| { self.probe(|this, snapshot| { /* [1] */ match this.match_impl(impl_def_id, obligation, snapshot) { @@ -1913,7 +1996,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } // (.., T) -> (.., U). - (&ty::TyTuple(tys_a, _), &ty::TyTuple(tys_b, _)) => { + (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => { tys_a.len() == tys_b.len() } @@ -1934,9 +2017,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // attempt to evaluate recursive bounds to see if they are // satisfied. - /// Returns true if `candidate_i` should be dropped in favor of - /// `candidate_j`. Generally speaking we will drop duplicate - /// candidates and prefer where-clause candidates. /// Returns true if `victim` should be dropped in favor of /// `other`. Generally speaking we will drop duplicate /// candidates and prefer where-clause candidates. @@ -1948,18 +2028,63 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { other: &EvaluatedCandidate<'tcx>) -> bool { + // Check if a bound would previously have been removed when normalizing + // the param_env so that it can be given the lowest priority. See + // #50825 for the motivation for this. 
+ let is_global = |cand: &ty::PolyTraitRef<'_>| { + cand.is_global() && !cand.has_late_bound_regions() + }; + if victim.candidate == other.candidate { return true; } match other.candidate { - ObjectCandidate | - ParamCandidate(_) | ProjectionCandidate => match victim.candidate { + // Prefer BuiltinCandidate { has_nested: false } to anything else. + // This is a fix for #53123 and prevents winnowing from accidentally extending the + // lifetime of a variable. + BuiltinCandidate { has_nested: false } => true, + ParamCandidate(ref cand) => match victim.candidate { AutoImplCandidate(..) => { bug!( "default implementations shouldn't be recorded \ when there are other valid candidates"); } + // Prefer BuiltinCandidate { has_nested: false } to anything else. + // This is a fix for #53123 and prevents winnowing from accidentally extending the + // lifetime of a variable. + BuiltinCandidate { has_nested: false } => false, + ImplCandidate(..) | + ClosureCandidate | + GeneratorCandidate | + FnPointerCandidate | + BuiltinObjectCandidate | + BuiltinUnsizeCandidate | + BuiltinCandidate { .. } => { + // Global bounds from the where clause should be ignored + // here (see issue #50825). Otherwise, we have a where + // clause so don't go around looking for impls. + !is_global(cand) + } + ObjectCandidate | + ProjectionCandidate => { + // Arbitrarily give param candidates priority + // over projection and object candidates. + !is_global(cand) + }, + ParamCandidate(..) => false, + }, + ObjectCandidate | + ProjectionCandidate => match victim.candidate { + AutoImplCandidate(..) => { + bug!( + "default implementations shouldn't be recorded \ + when there are other valid candidates"); + } + // Prefer BuiltinCandidate { has_nested: false } to anything else. + // This is a fix for #53123 and prevents winnowing from accidentally extending the + // lifetime of a variable. + BuiltinCandidate { has_nested: false } => false, ImplCandidate(..) 
| ClosureCandidate | GeneratorCandidate | @@ -1967,8 +2092,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { BuiltinObjectCandidate | BuiltinUnsizeCandidate | BuiltinCandidate { .. } => { - // We have a where-clause so don't go around looking - // for impls. true } ObjectCandidate | @@ -1977,22 +2100,44 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // over projection and object candidates. true }, - ParamCandidate(..) => false, + ParamCandidate(ref cand) => is_global(cand), }, ImplCandidate(other_def) => { // See if we can toss out `victim` based on specialization. // This requires us to know *for sure* that the `other` impl applies // i.e. EvaluatedToOk: if other.evaluation == EvaluatedToOk { - if let ImplCandidate(victim_def) = victim.candidate { - let tcx = self.tcx().global_tcx(); - return tcx.specializes((other_def, victim_def)) || - tcx.impls_are_allowed_to_overlap(other_def, victim_def); + match victim.candidate { + ImplCandidate(victim_def) => { + let tcx = self.tcx().global_tcx(); + return tcx.specializes((other_def, victim_def)) || + tcx.impls_are_allowed_to_overlap(other_def, victim_def); + } + ParamCandidate(ref cand) => { + // Prefer the impl to a global where clause candidate. + return is_global(cand); + } + _ => () } } false }, + ClosureCandidate | + GeneratorCandidate | + FnPointerCandidate | + BuiltinObjectCandidate | + BuiltinUnsizeCandidate | + BuiltinCandidate { has_nested: true } => { + match victim.candidate { + ParamCandidate(ref cand) => { + // Prefer these to a global where-clause bound + // (see issue #50825) + is_global(cand) && other.evaluation == EvaluatedToOk + } + _ => false, + } + } _ => false } } @@ -2001,13 +2146,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // BUILTIN BOUNDS // // These cover the traits that are built-in to the language - // itself. This includes `Copy` and `Sized` for sure. 
For the - // moment, it also includes `Send` / `Sync` and a few others, but - // those will hopefully change to library-defined traits in the - // future. + // itself: `Copy`, `Clone` and `Sized`. - // HACK: if this returns an error, selection exits without considering - // other impls. fn assemble_builtin_bound_candidates<'o>(&mut self, conditions: BuiltinImplConditions<'tcx>, candidates: &mut SelectionCandidateSet<'tcx>) @@ -2026,14 +2166,13 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { debug!("assemble_builtin_bound_candidates: ambiguous builtin"); Ok(candidates.ambiguous = true) } - BuiltinImplConditions::Never => { Err(Unimplemented) } } } fn sized_conditions(&mut self, obligation: &TraitObligation<'tcx>) -> BuiltinImplConditions<'tcx> { - use self::BuiltinImplConditions::{Ambiguous, None, Never, Where}; + use self::BuiltinImplConditions::{Ambiguous, None, Where}; // NOTE: binder moved to (*) let self_ty = self.infcx.shallow_resolve( @@ -2044,22 +2183,22 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) | ty::TyChar | ty::TyRef(..) | ty::TyGenerator(..) | - ty::TyArray(..) | ty::TyClosure(..) | ty::TyNever | - ty::TyError => { + ty::TyGeneratorWitness(..) | ty::TyArray(..) | ty::TyClosure(..) | + ty::TyNever | ty::TyError => { // safe for everything - Where(ty::Binder(Vec::new())) + Where(ty::Binder::dummy(Vec::new())) } - ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) => Never, + ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) | ty::TyForeign(..) 
=> None, - ty::TyTuple(tys, _) => { - Where(ty::Binder(tys.last().into_iter().cloned().collect())) + ty::TyTuple(tys) => { + Where(ty::Binder::bind(tys.last().into_iter().cloned().collect())) } ty::TyAdt(def, substs) => { let sized_crit = def.sized_constraint(self.tcx()); // (*) binder moved here - Where(ty::Binder( + Where(ty::Binder::bind( sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect() )) } @@ -2067,9 +2206,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None, ty::TyInfer(ty::TyVar(_)) => Ambiguous, - ty::TyInfer(ty::FreshTy(_)) - | ty::TyInfer(ty::FreshIntTy(_)) - | ty::TyInfer(ty::FreshFloatTy(_)) => { + ty::TyInfer(ty::CanonicalTy(_)) | + ty::TyInfer(ty::FreshTy(_)) | + ty::TyInfer(ty::FreshIntTy(_)) | + ty::TyInfer(ty::FreshFloatTy(_)) => { bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty); } @@ -2083,46 +2223,45 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let self_ty = self.infcx.shallow_resolve( obligation.predicate.skip_binder().self_ty()); - use self::BuiltinImplConditions::{Ambiguous, None, Never, Where}; + use self::BuiltinImplConditions::{Ambiguous, None, Where}; match self_ty.sty { ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) | + ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyError => { + Where(ty::Binder::dummy(Vec::new())) + } + ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | - ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | - ty::TyRawPtr(..) | ty::TyError | ty::TyNever | - ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { - Where(ty::Binder(Vec::new())) + ty::TyChar | ty::TyRawPtr(..) | ty::TyNever | + ty::TyRef(_, _, hir::MutImmutable) => { + // Implementations provided in libcore + None } ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) | - ty::TyGenerator(..) | ty::TyForeign(..) 
| - ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { - Never + ty::TyGenerator(..) | ty::TyGeneratorWitness(..) | ty::TyForeign(..) | + ty::TyRef(_, _, hir::MutMutable) => { + None } ty::TyArray(element_ty, _) => { // (*) binder moved here - Where(ty::Binder(vec![element_ty])) + Where(ty::Binder::bind(vec![element_ty])) } - ty::TyTuple(tys, _) => { + ty::TyTuple(tys) => { // (*) binder moved here - Where(ty::Binder(tys.to_vec())) + Where(ty::Binder::bind(tys.to_vec())) } ty::TyClosure(def_id, substs) => { let trait_id = obligation.predicate.def_id(); - let copy_closures = - Some(trait_id) == self.tcx().lang_items().copy_trait() && - self.tcx().has_copy_closures(def_id.krate); - let clone_closures = - Some(trait_id) == self.tcx().lang_items().clone_trait() && - self.tcx().has_clone_closures(def_id.krate); - - if copy_closures || clone_closures { - Where(ty::Binder(substs.upvar_tys(def_id, self.tcx()).collect())) + let is_copy_trait = Some(trait_id) == self.tcx().lang_items().copy_trait(); + let is_clone_trait = Some(trait_id) == self.tcx().lang_items().clone_trait(); + if is_copy_trait || is_clone_trait { + Where(ty::Binder::bind(substs.upvar_tys(def_id, self.tcx()).collect())) } else { - Never + None } } @@ -2138,9 +2277,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ambiguous } - ty::TyInfer(ty::FreshTy(_)) - | ty::TyInfer(ty::FreshIntTy(_)) - | ty::TyInfer(ty::FreshFloatTy(_)) => { + ty::TyInfer(ty::CanonicalTy(_)) | + ty::TyInfer(ty::FreshTy(_)) | + ty::TyInfer(ty::FreshIntTy(_)) | + ty::TyInfer(ty::FreshFloatTy(_)) => { bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty); } @@ -2179,6 +2319,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::TyParam(..) | ty::TyForeign(..) | ty::TyProjection(..) 
| + ty::TyInfer(ty::CanonicalTy(_)) | ty::TyInfer(ty::TyVar(_)) | ty::TyInfer(ty::FreshTy(_)) | ty::TyInfer(ty::FreshIntTy(_)) | @@ -2188,7 +2329,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) | - ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => { + ty::TyRef(_, element_ty, _) => { vec![element_ty] }, @@ -2196,7 +2337,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { vec![element_ty] } - ty::TyTuple(ref tys, _) => { + ty::TyTuple(ref tys) => { // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet tys.to_vec() } @@ -2205,9 +2346,16 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { substs.upvar_tys(def_id, self.tcx()).collect() } - ty::TyGenerator(def_id, ref substs, interior) => { - let witness = iter::once(interior.witness); - substs.upvar_tys(def_id, self.tcx()).chain(witness).collect() + ty::TyGenerator(def_id, ref substs, _) => { + let witness = substs.witness(def_id, self.tcx()); + substs.upvar_tys(def_id, self.tcx()).chain(iter::once(witness)).collect() + } + + ty::TyGeneratorWitness(types) => { + // This is sound because no regions in the witness can refer to + // the binder outside the witness. So we'll effectivly reuse + // the implicit binder around the witness. + types.skip_binder().to_vec() } // for `PhantomData`, we pass `T` @@ -2253,11 +2401,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // 3. 
Re-bind the regions back to `for<'a> &'a int : Copy` types.skip_binder().into_iter().flat_map(|ty| { // binder moved -\ - let ty: ty::Binder> = ty::Binder(ty); // <----------/ + let ty: ty::Binder> = ty::Binder::bind(ty); // <----/ self.in_snapshot(|this, snapshot| { let (skol_ty, skol_map) = - this.infcx().skolemize_late_bound_regions(&ty, snapshot); + this.infcx().skolemize_late_bound_regions(&ty); let Normalized { value: normalized_ty, mut obligations } = project::normalize_with_depth(this, param_env, @@ -2282,7 +2430,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // // Confirmation unifies the output type parameters of the trait // with the values found in the obligation, possibly yielding a - // type error. See `README.md` for more details. + // type error. See [rustc guide] for more details. + // + // [rustc guide]: + // https://rust-lang-nursery.github.io/rustc-guide/traits/resolution.html#confirmation fn confirm_candidate(&mut self, obligation: &TraitObligation<'tcx>, @@ -2440,18 +2591,19 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds. 
fn confirm_auto_impl_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - trait_def_id: DefId) - -> VtableAutoImplData> + obligation: &TraitObligation<'tcx>, + trait_def_id: DefId) + -> VtableAutoImplData> { debug!("confirm_auto_impl_candidate({:?}, {:?})", obligation, trait_def_id); - // binder is moved below - let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); - let types = self.constituent_types_for_ty(self_ty); - self.vtable_auto_impl(obligation, trait_def_id, ty::Binder(types)) + let types = obligation.predicate.map_bound(|inner| { + let self_ty = self.infcx.shallow_resolve(inner.self_ty()); + self.constituent_types_for_ty(self_ty) + }); + self.vtable_auto_impl(obligation, trait_def_id, types) } /// See `confirm_auto_impl_candidate` @@ -2474,7 +2626,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let trait_obligations = self.in_snapshot(|this, snapshot| { let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); let (trait_ref, skol_map) = - this.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot); + this.infcx().skolemize_late_bound_regions(&poly_trait_ref); let cause = obligation.derived_cause(ImplDerivedObligation); this.impl_or_trait_obligations(cause, obligation.recursion_depth + 1, @@ -2529,7 +2681,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { recursion_depth: usize, param_env: ty::ParamEnv<'tcx>, skol_map: infer::SkolemizationMap<'tcx>, - snapshot: &infer::CombinedSnapshot) + snapshot: &infer::CombinedSnapshot<'cx, 'tcx>) -> VtableImplData<'tcx, PredicateObligation<'tcx>> { debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})", @@ -2586,6 +2738,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { }; let mut upcast_trait_ref = None; + let mut nested = vec![]; let vtable_base; { @@ -2604,7 +2757,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.commit_if_ok( |this, _| 
this.match_poly_trait_ref(obligation, t)) { - Ok(_) => { upcast_trait_ref = Some(t); false } + Ok(obligations) => { + upcast_trait_ref = Some(t); + nested.extend(obligations); + false + } Err(_) => { true } } }); @@ -2622,7 +2779,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { VtableObjectData { upcast_trait_ref: upcast_trait_ref.unwrap(), vtable_base, - nested: vec![] + nested, } } @@ -2665,39 +2822,41 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // touch bound regions, they just capture the in-scope // type/region parameters let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder()); - let (closure_def_id, substs) = match self_ty.sty { + let (generator_def_id, substs) = match self_ty.sty { ty::TyGenerator(id, substs, _) => (id, substs), _ => bug!("closure candidate for non-closure {:?}", obligation) }; debug!("confirm_generator_candidate({:?},{:?},{:?})", obligation, - closure_def_id, + generator_def_id, substs); let trait_ref = - self.generator_trait_ref_unnormalized(obligation, closure_def_id, substs); + self.generator_trait_ref_unnormalized(obligation, generator_def_id, substs); let Normalized { value: trait_ref, - obligations + mut obligations } = normalize_with_depth(self, obligation.param_env, obligation.cause.clone(), obligation.recursion_depth+1, &trait_ref); - debug!("confirm_generator_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})", - closure_def_id, + debug!("confirm_generator_candidate(generator_def_id={:?}, \ + trait_ref={:?}, obligations={:?})", + generator_def_id, trait_ref, obligations); - self.confirm_poly_trait_refs(obligation.cause.clone(), - obligation.param_env, - obligation.predicate.to_poly_trait_ref(), - trait_ref)?; + obligations.extend( + self.confirm_poly_trait_refs(obligation.cause.clone(), + obligation.param_env, + obligation.predicate.to_poly_trait_ref(), + trait_ref)?); Ok(VtableGeneratorData { - closure_def_id: closure_def_id, + generator_def_id: generator_def_id, 
substs: substs.clone(), nested: obligations }) @@ -2710,7 +2869,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { { debug!("confirm_closure_candidate({:?})", obligation); - let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.0.def_id()) { + let kind = match self.tcx().lang_items().fn_trait_kind(obligation.predicate.def_id()) { Some(k) => k, None => bug!("closure candidate for non-fn trait {:?}", obligation) }; @@ -2740,10 +2899,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { trait_ref, obligations); - self.confirm_poly_trait_refs(obligation.cause.clone(), - obligation.param_env, - obligation.predicate.to_poly_trait_ref(), - trait_ref)?; + obligations.extend( + self.confirm_poly_trait_refs(obligation.cause.clone(), + obligation.param_env, + obligation.predicate.to_poly_trait_ref(), + trait_ref)?); obligations.push(Obligation::new( obligation.cause.clone(), @@ -2787,13 +2947,13 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation_param_env: ty::ParamEnv<'tcx>, obligation_trait_ref: ty::PolyTraitRef<'tcx>, expected_trait_ref: ty::PolyTraitRef<'tcx>) - -> Result<(), SelectionError<'tcx>> + -> Result>, SelectionError<'tcx>> { let obligation_trait_ref = obligation_trait_ref.clone(); self.infcx .at(&obligation_cause, obligation_param_env) .sup(obligation_trait_ref, expected_trait_ref) - .map(|InferOk { obligations, .. }| self.inferred_obligations.extend(obligations)) + .map(|InferOk { obligations, .. }| obligations) .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e)) } @@ -2818,19 +2978,20 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // Trait+Kx+'a -> Trait+Ky+'b (upcasts). (&ty::TyDynamic(ref data_a, r_a), &ty::TyDynamic(ref data_b, r_b)) => { // See assemble_candidates_for_unsizing for more info. - // Binders reintroduced below in call to mk_existential_predicates. 
- let principal = data_a.skip_binder().principal(); - let iter = principal.into_iter().map(ty::ExistentialPredicate::Trait) - .chain(data_a.skip_binder().projection_bounds() - .map(|x| ty::ExistentialPredicate::Projection(x))) - .chain(data_b.auto_traits().map(ty::ExistentialPredicate::AutoTrait)); - let new_trait = tcx.mk_dynamic( - ty::Binder(tcx.mk_existential_predicates(iter)), r_b); + let existential_predicates = data_a.map_bound(|data_a| { + let principal = data_a.principal(); + let iter = principal.into_iter().map(ty::ExistentialPredicate::Trait) + .chain(data_a.projection_bounds() + .map(|x| ty::ExistentialPredicate::Projection(x))) + .chain(data_b.auto_traits().map(ty::ExistentialPredicate::AutoTrait)); + tcx.mk_existential_predicates(iter) + }); + let new_trait = tcx.mk_dynamic(existential_predicates, r_b); let InferOk { obligations, .. } = self.infcx.at(&obligation.cause, obligation.param_env) .eq(target, new_trait) .map_err(|_| Unimplemented)?; - self.inferred_obligations.extend(obligations); + nested.extend(obligations); // Register one obligation for 'a: 'b. let cause = ObligationCause::new(obligation.cause.span, @@ -2840,7 +3001,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { nested.push(Obligation::with_depth(cause, obligation.recursion_depth + 1, obligation.param_env, - ty::Binder(outlives).to_predicate())); + ty::Binder::bind(outlives).to_predicate())); } // T -> Trait. @@ -2883,7 +3044,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // If the type is `Foo+'a`, ensures that the type // being cast to `Foo+'a` outlives `'a`: let outlives = ty::OutlivesPredicate(source, r); - push(ty::Binder(outlives).to_predicate()); + push(ty::Binder::dummy(outlives).to_predicate()); } // [T; n] -> [T]. 
@@ -2892,7 +3053,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx.at(&obligation.cause, obligation.param_env) .eq(b, a) .map_err(|_| Unimplemented)?; - self.inferred_obligations.extend(obligations); + nested.extend(obligations); } // Struct -> Struct. @@ -2908,7 +3069,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } else { return Err(Unimplemented); }; - let mut ty_params = BitVector::new(substs_a.types().count()); + let mut ty_params = BitArray::new(substs_a.types().count()); let mut found = false; for ty in field.walk() { if let ty::TyParam(p) = ty.sty { @@ -2926,7 +3087,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // with a potentially unsized trailing field. let params = substs_a.iter().enumerate().map(|(i, &k)| { if ty_params.contains(i) { - Kind::from(tcx.types.err) + tcx.types.err.into() } else { k } @@ -2946,7 +3107,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // unsized parameters is equal to the target. let params = substs_a.iter().enumerate().map(|(i, &k)| { if ty_params.contains(i) { - Kind::from(substs_b.type_at(i)) + substs_b.type_at(i).into() } else { k } @@ -2956,7 +3117,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx.at(&obligation.cause, obligation.param_env) .eq(target, new_struct) .map_err(|_| Unimplemented)?; - self.inferred_obligations.extend(obligations); + nested.extend(obligations); // Construct the nested Field: Unsize> predicate. nested.push(tcx.predicate_for_trait_def( @@ -2965,29 +3126,29 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.predicate.def_id(), obligation.recursion_depth + 1, inner_source, - &[inner_target])); + &[inner_target.into()])); } // (.., T) -> (.., U). - (&ty::TyTuple(tys_a, _), &ty::TyTuple(tys_b, _)) => { + (&ty::TyTuple(tys_a), &ty::TyTuple(tys_b)) => { assert_eq!(tys_a.len(), tys_b.len()); // The last field of the tuple has to exist. 
- let (a_last, a_mid) = if let Some(x) = tys_a.split_last() { + let (&a_last, a_mid) = if let Some(x) = tys_a.split_last() { x } else { return Err(Unimplemented); }; - let b_last = tys_b.last().unwrap(); + let &b_last = tys_b.last().unwrap(); // Check that the source tuple with the target's // last element is equal to the target. - let new_tuple = tcx.mk_tup(a_mid.iter().chain(Some(b_last)), false); + let new_tuple = tcx.mk_tup(a_mid.iter().cloned().chain(iter::once(b_last))); let InferOk { obligations, .. } = self.infcx.at(&obligation.cause, obligation.param_env) .eq(target, new_tuple) .map_err(|_| Unimplemented)?; - self.inferred_obligations.extend(obligations); + nested.extend(obligations); // Construct the nested T: Unsize predicate. nested.push(tcx.predicate_for_trait_def( @@ -2996,7 +3157,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.predicate.def_id(), obligation.recursion_depth + 1, a_last, - &[b_last])); + &[b_last.into()])); } _ => bug!() @@ -3018,7 +3179,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn rematch_impl(&mut self, impl_def_id: DefId, obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot) + snapshot: &infer::CombinedSnapshot<'cx, 'tcx>) -> (Normalized<'tcx, &'tcx Substs<'tcx>>, infer::SkolemizationMap<'tcx>) { @@ -3035,7 +3196,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn match_impl(&mut self, impl_def_id: DefId, obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot) + snapshot: &infer::CombinedSnapshot<'cx, 'tcx>) -> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>, infer::SkolemizationMap<'tcx>), ()> { @@ -3049,8 +3210,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions( - &obligation.predicate, - snapshot); + &obligation.predicate); let skol_obligation_trait_ref = skol_obligation.trait_ref; let impl_substs = 
self.infcx.fresh_substs_for_item(obligation.cause.span, @@ -3059,7 +3219,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs); - let impl_trait_ref = + let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } = project::normalize_with_depth(self, obligation.param_env, obligation.cause.clone(), @@ -3075,12 +3235,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let InferOk { obligations, .. } = self.infcx.at(&obligation.cause, obligation.param_env) - .eq(skol_obligation_trait_ref, impl_trait_ref.value) + .eq(skol_obligation_trait_ref, impl_trait_ref) .map_err(|e| { debug!("match_impl: failed eq_trait_refs due to `{}`", e); () })?; - self.inferred_obligations.extend(obligations); + nested_obligations.extend(obligations); if let Err(e) = self.infcx.leak_check(false, obligation.cause.span, @@ -3093,7 +3253,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { debug!("match_impl: success impl_substs={:?}", impl_substs); Ok((Normalized { value: impl_substs, - obligations: impl_trait_ref.obligations + obligations: nested_obligations }, skol_map)) } @@ -3130,8 +3290,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { where_clause_trait_ref: ty::PolyTraitRef<'tcx>) -> Result>,()> { - self.match_poly_trait_ref(obligation, where_clause_trait_ref)?; - Ok(Vec::new()) + self.match_poly_trait_ref(obligation, where_clause_trait_ref) } /// Returns `Ok` if `poly_trait_ref` being true implies that the @@ -3139,7 +3298,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn match_poly_trait_ref(&mut self, obligation: &TraitObligation<'tcx>, poly_trait_ref: ty::PolyTraitRef<'tcx>) - -> Result<(),()> + -> Result>,()> { debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}", obligation, @@ -3147,7 +3306,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx.at(&obligation.cause, obligation.param_env) 
.sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref) - .map(|InferOk { obligations, .. }| self.inferred_obligations.extend(obligations)) + .map(|InferOk { obligations, .. }| obligations) .map_err(|_| ()) } @@ -3185,38 +3344,40 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { -> ty::PolyTraitRef<'tcx> { let closure_type = self.infcx.closure_sig(closure_def_id, substs); - let ty::Binder((trait_ref, _)) = - self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(), - obligation.predicate.0.self_ty(), // (1) - closure_type, - util::TupleArgumentsFlag::No); + // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an unboxed closure type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. - ty::Binder(trait_ref) + self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(), + obligation.predicate + .skip_binder().self_ty(), // (1) + closure_type, + util::TupleArgumentsFlag::No) + .map_bound(|(trait_ref, _)| trait_ref) } fn generator_trait_ref_unnormalized(&mut self, obligation: &TraitObligation<'tcx>, closure_def_id: DefId, - substs: ty::ClosureSubsts<'tcx>) + substs: ty::GeneratorSubsts<'tcx>) -> ty::PolyTraitRef<'tcx> { - let gen_sig = substs.generator_poly_sig(closure_def_id, self.tcx()); - let ty::Binder((trait_ref, ..)) = - self.tcx().generator_trait_ref_and_outputs(obligation.predicate.def_id(), - obligation.predicate.0.self_ty(), // (1) - gen_sig); + let gen_sig = substs.poly_sig(closure_def_id, self.tcx()); + // (1) Feels icky to skip the binder here, but OTOH we know // that the self-type is an generator type and hence is // in fact unparameterized (or at least does not reference any // regions bound in the obligation). Still probably some // refactoring could make this nicer. 
- ty::Binder(trait_ref) + self.tcx().generator_trait_ref_and_outputs(obligation.predicate.def_id(), + obligation.predicate + .skip_binder().self_ty(), // (1) + gen_sig) + .map_bound(|(trait_ref, ..)| trait_ref) } /// Returns the obligations that are implied by instantiating an @@ -3230,7 +3391,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { def_id: DefId, // of impl or trait substs: &Substs<'tcx>, // for impl or trait skol_map: infer::SkolemizationMap<'tcx>, - snapshot: &infer::CombinedSnapshot) + snapshot: &infer::CombinedSnapshot<'cx, 'tcx>) -> Vec> { debug!("impl_or_trait_obligations(def_id={:?})", def_id); @@ -3252,7 +3413,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // that order. let predicates = tcx.predicates_of(def_id); assert_eq!(predicates.parent, None); - let predicates = predicates.predicates.iter().flat_map(|predicate| { + let mut predicates: Vec<_> = predicates.predicates.iter().flat_map(|predicate| { let predicate = normalize_with_depth(self, param_env, cause.clone(), recursion_depth, &predicate.subst(tcx, substs)); predicate.obligations.into_iter().chain( @@ -3263,6 +3424,28 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { predicate: predicate.value })) }).collect(); + + // We are performing deduplication here to avoid exponential blowups + // (#38528) from happening, but the real cause of the duplication is + // unknown. What we know is that the deduplication avoids exponential + // amount of predicates being propagated when processing deeply nested + // types. + // + // This code is hot enough that it's worth avoiding the allocation + // required for the FxHashSet when possible. Special-casing lengths 0, + // 1 and 2 covers roughly 75--80% of the cases. + if predicates.len() <= 1 { + // No possibility of duplicates. + } else if predicates.len() == 2 { + // Only two elements. Drop the second if they are equal. 
+ if predicates[0] == predicates[1] { + predicates.truncate(1); + } + } else { + // Three or more elements. Use a general deduplication process. + let mut seen = FxHashSet(); + predicates.retain(|i| seen.insert(i.clone())); + } self.infcx().plug_leaks(skol_map, snapshot, predicates) } } @@ -3304,17 +3487,25 @@ impl<'tcx> TraitObligation<'tcx> { impl<'tcx> SelectionCache<'tcx> { pub fn new() -> SelectionCache<'tcx> { SelectionCache { - hashmap: RefCell::new(FxHashMap()) + hashmap: Lock::new(FxHashMap()) } } + + pub fn clear(&self) { + *self.hashmap.borrow_mut() = FxHashMap() + } } impl<'tcx> EvaluationCache<'tcx> { pub fn new() -> EvaluationCache<'tcx> { EvaluationCache { - hashmap: RefCell::new(FxHashMap()) + hashmap: Lock::new(FxHashMap()) } } + + pub fn clear(&self) { + *self.hashmap.borrow_mut() = FxHashMap() + } } impl<'o,'tcx> TraitObligationStack<'o,'tcx> { @@ -3362,7 +3553,7 @@ impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> { } } -#[derive(Clone)] +#[derive(Clone, Eq, PartialEq)] pub struct WithDepNode { dep_node: DepNodeIndex, cached_value: T diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs index afe29cc0e7ba..06f9b446146e 100644 --- a/src/librustc/traits/specialize/mod.rs +++ b/src/librustc/traits/specialize/mod.rs @@ -8,14 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Logic and data structures related to impl specialization, explained in -// greater detail below. -// -// At the moment, this implementation support only the simple "chain" rule: -// If any two impls overlap, one must be a strict subset of the other. -// -// See traits/README.md for a bit more detail on how specialization -// fits together with the rest of the trait machinery. +//! Logic and data structures related to impl specialization, explained in +//! greater detail below. +//! +//! At the moment, this implementation support only the simple "chain" rule: +//! 
If any two impls overlap, one must be a strict subset of the other. +//! +//! See the [rustc guide] for a bit more detail on how specialization +//! fits together with the rest of the trait machinery. +//! +//! [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/specialization.html use super::{SelectionContext, FulfillmentContext}; use super::util::impl_trait_ref_and_oblig; @@ -24,11 +26,11 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use hir::def_id::DefId; use infer::{InferCtxt, InferOk}; use ty::subst::{Subst, Substs}; -use traits::{self, Reveal, ObligationCause}; +use traits::{self, ObligationCause, TraitEngine}; use traits::select::IntercrateAmbiguityCause; use ty::{self, TyCtxt, TypeFoldable}; use syntax_pos::DUMMY_SP; -use std::rc::Rc; +use rustc_data_structures::sync::Lrc; use lint; @@ -127,10 +129,10 @@ pub fn find_associated_item<'a, 'tcx>( let trait_def = tcx.trait_def(trait_def_id); let ancestors = trait_def.ancestors(tcx, impl_data.impl_def_id); - match ancestors.defs(tcx, item.name, item.kind, trait_def_id).next() { + match ancestors.defs(tcx, item.ident, item.kind, trait_def_id).next() { Some(node_item) => { let substs = tcx.infer_ctxt().enter(|infcx| { - let param_env = ty::ParamEnv::empty(Reveal::All); + let param_env = ty::ParamEnv::reveal_all(); let substs = substs.rebase_onto(tcx, trait_def_id, impl_data.substs); let substs = translate_substs(&infcx, param_env, impl_data.impl_def_id, substs, node_item.node); @@ -162,7 +164,7 @@ pub(super) fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // The feature gate should prevent introducing new specializations, but not // taking advantage of upstream ones. - if !tcx.sess.features.borrow().specialization && + if !tcx.features().specialization && (impl1_def_id.is_local() || impl2_def_id.is_local()) { return false; } @@ -194,6 +196,7 @@ pub(super) fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // that this always succeeds. 
let impl1_trait_ref = match traits::fully_normalize(&infcx, + FulfillmentContext::new(), ObligationCause::dummy(), penv, &impl1_trait_ref) { @@ -306,7 +309,7 @@ impl SpecializesCache { // Query provider for `specialization_graph_of`. pub(super) fn specialization_graph_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_id: DefId) - -> Rc { + -> Lrc { let mut sg = specialization_graph::Graph::new(); let mut trait_impls = Vec::new(); @@ -361,7 +364,7 @@ pub(super) fn specialization_graph_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx match tcx.span_of_impl(overlap.with_impl) { Ok(span) => { err.span_label(tcx.sess.codemap().def_span(span), - format!("first implementation here")); + "first implementation here".to_string()); err.span_label(impl_span, format!("conflicting implementation{}", overlap.self_desc @@ -390,7 +393,7 @@ pub(super) fn specialization_graph_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx } } - Rc::new(sg) + Lrc::new(sg) } /// Recovers the "impl X for Y" signature from `impl_def_id` and returns it as a @@ -435,9 +438,9 @@ fn to_pretty_impl_header(tcx: TyCtxt, impl_def_id: DefId) -> Option { } pretty_predicates.push(p.to_string()); } - for ty in types_without_default_bounds { - pretty_predicates.push(format!("{}: ?Sized", ty)); - } + pretty_predicates.extend( + types_without_default_bounds.iter().map(|ty| format!("{}: ?Sized", ty)) + ); if !pretty_predicates.is_empty() { write!(w, "\n where {}", pretty_predicates.join(", ")).unwrap(); } diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs index 834389e5d009..a7652574c1a2 100644 --- a/src/librustc/traits/specialize/specialization_graph.rs +++ b/src/librustc/traits/specialize/specialization_graph.rs @@ -17,8 +17,9 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher, use traits; use ty::{self, TyCtxt, TypeFoldable}; use ty::fast_reject::{self, SimplifiedType}; -use std::rc::Rc; -use syntax::ast::Name; +use 
rustc_data_structures::sync::Lrc; +use syntax::ast::Ident; +use util::captures::Captures; use util::nodemap::{DefIdMap, FxHashMap}; /// A per-trait graph of impls in specialization order. At the moment, this @@ -36,6 +37,7 @@ use util::nodemap::{DefIdMap, FxHashMap}; /// parents of a given specializing impl, which is needed for extracting /// default items amongst other things. In the simple "chain" rule, every impl /// has at most one parent. +#[derive(RustcEncodable, RustcDecodable)] pub struct Graph { // all impls have a parent; the "root" impls have as their parent the def_id // of the trait @@ -47,6 +49,7 @@ pub struct Graph { /// Children of a given impl, grouped into blanket/non-blanket varieties as is /// done in `TraitDef`. +#[derive(Default, RustcEncodable, RustcDecodable)] struct Children { // Impls of a trait (or specializations of a given impl). To allow for // quicker lookup, the impls are indexed by a simplified version of their @@ -70,33 +73,48 @@ enum Inserted { /// The impl was inserted as a new child in this group of children. BecameNewSibling(Option), - /// The impl replaced an existing impl that specializes it. - Replaced(DefId), + /// The impl should replace an existing impl X, because the impl specializes X. + ReplaceChild(DefId), /// The impl is a specialization of an existing child. 
ShouldRecurseOn(DefId), } impl<'a, 'gcx, 'tcx> Children { - fn new() -> Children { - Children { - nonblanket_impls: FxHashMap(), - blanket_impls: vec![], - } - } - /// Insert an impl into this set of children without comparing to any existing impls fn insert_blindly(&mut self, tcx: TyCtxt<'a, 'gcx, 'tcx>, impl_def_id: DefId) { let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) { - self.nonblanket_impls.entry(sty).or_insert(vec![]).push(impl_def_id) + debug!("insert_blindly: impl_def_id={:?} sty={:?}", impl_def_id, sty); + self.nonblanket_impls.entry(sty).or_default().push(impl_def_id) } else { + debug!("insert_blindly: impl_def_id={:?} sty=None", impl_def_id); self.blanket_impls.push(impl_def_id) } } + /// Remove an impl from this set of children. Used when replacing + /// an impl with a parent. The impl must be present in the list of + /// children already. + fn remove_existing(&mut self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + impl_def_id: DefId) { + let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); + let vec: &mut Vec; + if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) { + debug!("remove_existing: impl_def_id={:?} sty={:?}", impl_def_id, sty); + vec = self.nonblanket_impls.get_mut(&sty).unwrap(); + } else { + debug!("remove_existing: impl_def_id={:?} sty=None", impl_def_id); + vec = &mut self.blanket_impls; + } + + let index = vec.iter().position(|d| *d == impl_def_id).unwrap(); + vec.remove(index); + } + /// Attempt to insert an impl into this set of children, while comparing for /// specialization relationships. 
fn insert(&mut self, @@ -107,11 +125,22 @@ impl<'a, 'gcx, 'tcx> Children { { let mut last_lint = None; - for slot in match simplified_self { - Some(sty) => self.filtered_mut(sty), - None => self.iter_mut(), + debug!( + "insert(impl_def_id={:?}, simplified_self={:?})", + impl_def_id, + simplified_self, + ); + + for possible_sibling in match simplified_self { + Some(sty) => self.filtered(sty), + None => self.iter(), } { - let possible_sibling = *slot; + debug!( + "insert: impl_def_id={:?}, simplified_self={:?}, possible_sibling={:?}", + impl_def_id, + simplified_self, + possible_sibling, + ); let overlap_error = |overlap: traits::coherence::OverlapResult| { // overlap, but no specialization; error out @@ -133,12 +162,12 @@ impl<'a, 'gcx, 'tcx> Children { }; let tcx = tcx.global_tcx(); - let (le, ge) = tcx.infer_ctxt().enter(|infcx| { - let overlap = traits::overlapping_impls(&infcx, - possible_sibling, - impl_def_id, - traits::IntercrateMode::Issue43355); - if let Some(overlap) = overlap { + let (le, ge) = traits::overlapping_impls( + tcx, + possible_sibling, + impl_def_id, + traits::IntercrateMode::Issue43355, + |overlap| { if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) { return Ok((false, false)); } @@ -151,10 +180,9 @@ impl<'a, 'gcx, 'tcx> Children { } else { Ok((le, ge)) } - } else { - Ok((false, false)) - } - })?; + }, + || Ok((false, false)), + )?; if le && !ge { debug!("descending as child of TraitRef {:?}", @@ -166,21 +194,17 @@ impl<'a, 'gcx, 'tcx> Children { debug!("placing as parent of TraitRef {:?}", tcx.impl_trait_ref(possible_sibling).unwrap()); - // possible_sibling specializes the impl - *slot = impl_def_id; - return Ok(Inserted::Replaced(possible_sibling)); + return Ok(Inserted::ReplaceChild(possible_sibling)); } else { if !tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) { - tcx.infer_ctxt().enter(|infcx| { - if let Some(overlap) = traits::overlapping_impls( - &infcx, - possible_sibling, - impl_def_id, - 
traits::IntercrateMode::Fixed) - { - last_lint = Some(overlap_error(overlap)); - } - }); + traits::overlapping_impls( + tcx, + possible_sibling, + impl_def_id, + traits::IntercrateMode::Fixed, + |overlap| last_lint = Some(overlap_error(overlap)), + || (), + ); } // no overlap (error bailed already via ?) @@ -193,15 +217,14 @@ impl<'a, 'gcx, 'tcx> Children { Ok(Inserted::BecameNewSibling(last_lint)) } - fn iter_mut(&'a mut self) -> Box + 'a> { - let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter_mut()); - Box::new(self.blanket_impls.iter_mut().chain(nonblanket)) + fn iter(&mut self) -> Box + '_> { + let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter()); + Box::new(self.blanket_impls.iter().chain(nonblanket).cloned()) } - fn filtered_mut(&'a mut self, sty: SimplifiedType) - -> Box + 'a> { - let nonblanket = self.nonblanket_impls.entry(sty).or_insert(vec![]).iter_mut(); - Box::new(self.blanket_impls.iter_mut().chain(nonblanket)) + fn filtered(&mut self, sty: SimplifiedType) -> Box + '_> { + let nonblanket = self.nonblanket_impls.entry(sty).or_default().iter(); + Box::new(self.blanket_impls.iter().chain(nonblanket).cloned()) } } @@ -238,7 +261,7 @@ impl<'a, 'gcx, 'tcx> Graph { trait_ref, impl_def_id, trait_def_id); self.parent.insert(impl_def_id, trait_def_id); - self.children.entry(trait_def_id).or_insert(Children::new()) + self.children.entry(trait_def_id).or_default() .insert_blindly(tcx, impl_def_id); return Ok(None); } @@ -251,7 +274,7 @@ impl<'a, 'gcx, 'tcx> Graph { loop { use self::Inserted::*; - let insert_result = self.children.entry(parent).or_insert(Children::new()) + let insert_result = self.children.entry(parent).or_default() .insert(tcx, impl_def_id, simplified)?; match insert_result { @@ -259,11 +282,37 @@ impl<'a, 'gcx, 'tcx> Graph { last_lint = opt_lint; break; } - Replaced(new_child) => { - self.parent.insert(new_child, impl_def_id); - let mut new_children = Children::new(); - 
new_children.insert_blindly(tcx, new_child); - self.children.insert(impl_def_id, new_children); + ReplaceChild(grand_child_to_be) => { + // We currently have + // + // P + // | + // G + // + // and we are inserting the impl N. We want to make it: + // + // P + // | + // N + // | + // G + + // Adjust P's list of children: remove G and then add N. + { + let siblings = self.children + .get_mut(&parent) + .unwrap(); + siblings.remove_existing(tcx, grand_child_to_be); + siblings.insert_blindly(tcx, impl_def_id); + } + + // Set G's parent to N and N's parent to P + self.parent.insert(grand_child_to_be, impl_def_id); + self.parent.insert(impl_def_id, parent); + + // Add G as N's child. + self.children.entry(impl_def_id).or_default() + .insert_blindly(tcx, grand_child_to_be); break; } ShouldRecurseOn(new_parent) => { @@ -286,7 +335,7 @@ impl<'a, 'gcx, 'tcx> Graph { was already present."); } - self.children.entry(parent).or_insert(Children::new()).insert_blindly(tcx, child); + self.children.entry(parent).or_default().insert_blindly(tcx, child); } /// The parent of a given impl, which is the def id of the trait when the @@ -314,9 +363,10 @@ impl<'a, 'gcx, 'tcx> Node { } /// Iterate over the items defined directly by the given (impl or trait) node. - #[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. - pub fn items(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) - -> impl Iterator + 'a { + pub fn items( + &self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ) -> impl Iterator + 'a { tcx.associated_items(self.def_id()) } @@ -330,7 +380,7 @@ impl<'a, 'gcx, 'tcx> Node { pub struct Ancestors { trait_def_id: DefId, - specialization_graph: Rc, + specialization_graph: Lrc, current_source: Option, } @@ -368,13 +418,27 @@ impl<'a, 'gcx, 'tcx> Ancestors { /// Search the items from the given ancestors, returning each definition /// with the given name and the given kind. #[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. 
- pub fn defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_item_name: Name, - trait_item_kind: ty::AssociatedKind, trait_def_id: DefId) - -> impl Iterator> + 'a { + pub fn defs( + self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_item_name: Ident, + trait_item_kind: ty::AssociatedKind, + trait_def_id: DefId, + ) -> impl Iterator> + Captures<'gcx> + Captures<'tcx> + 'a { self.flat_map(move |node| { - node.items(tcx).filter(move |impl_item| { - impl_item.kind == trait_item_kind && - tcx.hygienic_eq(impl_item.name, trait_item_name, trait_def_id) + use ty::AssociatedKind::*; + node.items(tcx).filter(move |impl_item| match (trait_item_kind, impl_item.kind) { + | (Const, Const) + | (Method, Method) + | (Type, Type) + | (Type, Existential) + => tcx.hygienic_eq(impl_item.ident, trait_item_name, trait_def_id), + + | (Const, _) + | (Method, _) + | (Type, _) + | (Existential, _) + => false, }).map(move |item| NodeItem { node: node, item: item }) }) } @@ -394,9 +458,9 @@ pub fn ancestors(tcx: TyCtxt, } } -impl<'gcx> HashStable> for Children { +impl<'a> HashStable> for Children { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let Children { ref nonblanket_impls, diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs index e1e2798ecb51..9292b42eb525 100644 --- a/src/librustc/traits/structural_impls.rs +++ b/src/librustc/traits/structural_impls.rs @@ -8,10 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use chalk_engine; +use rustc_data_structures::accumulate_vec::AccumulateVec; use traits; use traits::project::Normalized; -use ty::{self, Lift, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use ty::{self, Lift, TyCtxt}; use std::fmt; use std::rc::Rc; @@ -20,23 +22,24 @@ use std::rc::Rc; impl<'tcx, T: fmt::Debug> fmt::Debug for Normalized<'tcx, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Normalized({:?},{:?})", - self.value, - self.obligations) + write!(f, "Normalized({:?},{:?})", self.value, self.obligations) } } impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if ty::tls::with(|tcx| tcx.sess.verbose()) { - write!(f, "Obligation(predicate={:?},cause={:?},depth={})", - self.predicate, - self.cause, - self.recursion_depth) + write!( + f, + "Obligation(predicate={:?},cause={:?},depth={})", + self.predicate, self.cause, self.recursion_depth + ) } else { - write!(f, "Obligation(predicate={:?},depth={})", - self.predicate, - self.recursion_depth) + write!( + f, + "Obligation(predicate={:?},depth={})", + self.predicate, self.recursion_depth + ) } } } @@ -44,57 +47,52 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> { impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - super::VtableImpl(ref v) => - write!(f, "{:?}", v), + super::VtableImpl(ref v) => write!(f, "{:?}", v), - super::VtableAutoImpl(ref t) => - write!(f, "{:?}", t), + super::VtableAutoImpl(ref t) => write!(f, "{:?}", t), - super::VtableClosure(ref d) => - write!(f, "{:?}", d), + super::VtableClosure(ref d) => write!(f, "{:?}", d), - super::VtableGenerator(ref d) => - write!(f, "{:?}", d), + super::VtableGenerator(ref d) => write!(f, "{:?}", d), - super::VtableFnPointer(ref d) => - write!(f, "VtableFnPointer({:?})", d), + super::VtableFnPointer(ref d) => write!(f, 
"VtableFnPointer({:?})", d), - super::VtableObject(ref d) => - write!(f, "{:?}", d), + super::VtableObject(ref d) => write!(f, "{:?}", d), - super::VtableParam(ref n) => - write!(f, "VtableParam({:?})", n), + super::VtableParam(ref n) => write!(f, "VtableParam({:?})", n), - super::VtableBuiltin(ref d) => - write!(f, "{:?}", d) + super::VtableBuiltin(ref d) => write!(f, "{:?}", d), } } } impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})", - self.impl_def_id, - self.substs, - self.nested) + write!( + f, + "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})", + self.impl_def_id, self.substs, self.nested + ) } } impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableGeneratorData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableGenerator(closure_def_id={:?}, substs={:?}, nested={:?})", - self.closure_def_id, - self.substs, - self.nested) + write!( + f, + "VtableGenerator(generator_def_id={:?}, substs={:?}, nested={:?})", + self.generator_def_id, self.substs, self.nested + ) } } impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})", - self.closure_def_id, - self.substs, - self.nested) + write!( + f, + "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})", + self.closure_def_id, self.substs, self.nested + ) } } @@ -106,34 +104,37 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData { impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableAutoImplData { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableAutoImplData(trait_def_id={:?}, nested={:?})", - self.trait_def_id, - self.nested) + write!( + f, + "VtableAutoImplData(trait_def_id={:?}, nested={:?})", + self.trait_def_id, self.nested + 
) } } impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableObjectData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableObject(upcast={:?}, vtable_base={}, nested={:?})", - self.upcast_trait_ref, - self.vtable_base, - self.nested) + write!( + f, + "VtableObject(upcast={:?}, vtable_base={}, nested={:?})", + self.upcast_trait_ref, self.vtable_base, self.nested + ) } } impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableFnPointerData<'tcx, N> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableFnPointer(fn_ty={:?}, nested={:?})", - self.fn_ty, - self.nested) + write!( + f, + "VtableFnPointer(fn_ty={:?}, nested={:?})", + self.fn_ty, self.nested + ) } } impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "FulfillmentError({:?},{:?})", - self.obligation, - self.code) + write!(f, "FulfillmentError({:?},{:?})", self.obligation, self.code) } } @@ -142,9 +143,10 @@ impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> { match *self { super::CodeSelectionError(ref e) => write!(f, "{:?}", e), super::CodeProjectionError(ref e) => write!(f, "{:?}", e), - super::CodeSubtypeError(ref a, ref b) => - write!(f, "CodeSubtypeError({:?}, {:?})", a, b), - super::CodeAmbiguity => write!(f, "Ambiguity") + super::CodeSubtypeError(ref a, ref b) => { + write!(f, "CodeSubtypeError({:?}, {:?})", a, b) + } + super::CodeAmbiguity => write!(f, "Ambiguity"), } } } @@ -165,17 +167,15 @@ impl<'a, 'tcx> Lift<'tcx> for traits::SelectionError<'a> { super::Unimplemented => Some(super::Unimplemented), super::OutputTypeParameterMismatch(a, b, ref err) => { tcx.lift(&(a, b)).and_then(|(a, b)| { - tcx.lift(err).map(|err| { - super::OutputTypeParameterMismatch(a, b, err) - }) + tcx.lift(err) + .map(|err| super::OutputTypeParameterMismatch(a, b, err)) }) } - super::TraitNotObjectSafe(def_id) => { - Some(super::TraitNotObjectSafe(def_id)) - } - 
super::ConstEvalFailure(ref err) => { - tcx.lift(err).map(super::ConstEvalFailure) - } + super::TraitNotObjectSafe(def_id) => Some(super::TraitNotObjectSafe(def_id)), + super::ConstEvalFailure(ref err) => tcx.lift(&**err).map(|err| super::ConstEvalFailure( + err.into(), + )), + super::Overflow => bug!(), // FIXME: ape ConstEvalFailure? } } } @@ -193,24 +193,21 @@ impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCauseCode<'a> { super::ReferenceOutlivesReferent(ty) => { tcx.lift(&ty).map(super::ReferenceOutlivesReferent) } - super::ObjectTypeBound(ty, r) => { - tcx.lift(&ty).and_then(|ty| { - tcx.lift(&r).and_then(|r| { - Some(super::ObjectTypeBound(ty, r)) - }) - }) - } - super::ObjectCastObligation(ty) => { - tcx.lift(&ty).map(super::ObjectCastObligation) - } + super::ObjectTypeBound(ty, r) => tcx.lift(&ty).and_then(|ty| { + tcx.lift(&r) + .and_then(|r| Some(super::ObjectTypeBound(ty, r))) + }), + super::ObjectCastObligation(ty) => tcx.lift(&ty).map(super::ObjectCastObligation), super::AssignmentLhsSized => Some(super::AssignmentLhsSized), super::TupleInitializerSized => Some(super::TupleInitializerSized), super::StructInitializerSized => Some(super::StructInitializerSized), super::VariableType(id) => Some(super::VariableType(id)), super::ReturnType(id) => Some(super::ReturnType(id)), + super::SizedArgumentType => Some(super::SizedArgumentType), super::SizedReturnType => Some(super::SizedReturnType), + super::SizedYieldType => Some(super::SizedYieldType), super::RepeatVec => Some(super::RepeatVec), - super::FieldSized(item) => Some(super::FieldSized(item)), + super::FieldSized { adt_kind, last } => Some(super::FieldSized { adt_kind, last }), super::ConstSized => Some(super::ConstSized), super::SharedStatic => Some(super::SharedStatic), super::BuiltinDerivedObligation(ref cause) => { @@ -219,28 +216,28 @@ impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCauseCode<'a> { super::ImplDerivedObligation(ref cause) => { tcx.lift(cause).map(super::ImplDerivedObligation) } - 
super::CompareImplMethodObligation { item_name, - impl_item_def_id, - trait_item_def_id } => { - Some(super::CompareImplMethodObligation { - item_name, - impl_item_def_id, - trait_item_def_id, - }) - } + super::CompareImplMethodObligation { + item_name, + impl_item_def_id, + trait_item_def_id, + } => Some(super::CompareImplMethodObligation { + item_name, + impl_item_def_id, + trait_item_def_id, + }), super::ExprAssignable => Some(super::ExprAssignable), - super::MatchExpressionArm { arm_span, source } => { - Some(super::MatchExpressionArm { arm_span, - source: source }) - } + super::MatchExpressionArm { arm_span, source } => Some(super::MatchExpressionArm { + arm_span, + source: source, + }), super::IfExpression => Some(super::IfExpression), super::IfExpressionWithNoElse => Some(super::IfExpressionWithNoElse), - super::EquatePredicate => Some(super::EquatePredicate), super::MainFunctionType => Some(super::MainFunctionType), super::StartFunctionType => Some(super::StartFunctionType), super::IntrinsicType => Some(super::IntrinsicType), super::MethodReceiver => Some(super::MethodReceiver), super::BlockTailExpression(id) => Some(super::BlockTailExpression(id)), + super::TrivialBound => Some(super::TrivialBound), } } } @@ -249,12 +246,11 @@ impl<'a, 'tcx> Lift<'tcx> for traits::DerivedObligationCause<'a> { type Lifted = traits::DerivedObligationCause<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { tcx.lift(&self.parent_trait_ref).and_then(|trait_ref| { - tcx.lift(&*self.parent_code).map(|code| { - traits::DerivedObligationCause { + tcx.lift(&*self.parent_code) + .map(|code| traits::DerivedObligationCause { parent_trait_ref: trait_ref, - parent_code: Rc::new(code) - } - }) + parent_code: Rc::new(code), + }) }) } } @@ -262,17 +258,15 @@ impl<'a, 'tcx> Lift<'tcx> for traits::DerivedObligationCause<'a> { impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCause<'a> { type Lifted = traits::ObligationCause<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, 
tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.code).map(|code| { - traits::ObligationCause { - span: self.span, - body_id: self.body_id, - code, - } + tcx.lift(&self.code).map(|code| traits::ObligationCause { + span: self.span, + body_id: self.body_id, + code, }) } } -// For trans only. +// For codegen only. impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { type Lifted = traits::Vtable<'tcx, ()>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { @@ -280,49 +274,40 @@ impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { traits::VtableImpl(traits::VtableImplData { impl_def_id, substs, - nested - }) => { - tcx.lift(&substs).map(|substs| { - traits::VtableImpl(traits::VtableImplData { - impl_def_id, - substs, - nested, - }) + nested, + }) => tcx.lift(&substs).map(|substs| { + traits::VtableImpl(traits::VtableImplData { + impl_def_id, + substs, + nested, }) - } + }), traits::VtableAutoImpl(t) => Some(traits::VtableAutoImpl(t)), traits::VtableGenerator(traits::VtableGeneratorData { - closure_def_id, + generator_def_id, substs, - nested - }) => { - tcx.lift(&substs).map(|substs| { - traits::VtableGenerator(traits::VtableGeneratorData { - closure_def_id: closure_def_id, - substs: substs, - nested: nested - }) + nested, + }) => tcx.lift(&substs).map(|substs| { + traits::VtableGenerator(traits::VtableGeneratorData { + generator_def_id: generator_def_id, + substs: substs, + nested: nested, }) - } + }), traits::VtableClosure(traits::VtableClosureData { closure_def_id, substs, - nested - }) => { - tcx.lift(&substs).map(|substs| { - traits::VtableClosure(traits::VtableClosureData { - closure_def_id, - substs, - nested, - }) + nested, + }) => tcx.lift(&substs).map(|substs| { + traits::VtableClosure(traits::VtableClosureData { + closure_def_id, + substs, + nested, }) - } + }), traits::VtableFnPointer(traits::VtableFnPointerData { fn_ty, nested }) => { tcx.lift(&fn_ty).map(|fn_ty| { - traits::VtableFnPointer(traits::VtableFnPointerData { - 
fn_ty, - nested, - }) + traits::VtableFnPointer(traits::VtableFnPointerData { fn_ty, nested }) }) } traits::VtableParam(n) => Some(traits::VtableParam(n)), @@ -330,16 +315,14 @@ impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { traits::VtableObject(traits::VtableObjectData { upcast_trait_ref, vtable_base, - nested - }) => { - tcx.lift(&upcast_trait_ref).map(|trait_ref| { - traits::VtableObject(traits::VtableObjectData { - upcast_trait_ref: trait_ref, - vtable_base, - nested, - }) + nested, + }) => tcx.lift(&upcast_trait_ref).map(|trait_ref| { + traits::VtableObject(traits::VtableObjectData { + upcast_trait_ref: trait_ref, + vtable_base, + nested, }) - } + }), } } } @@ -347,8 +330,7 @@ impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. -impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O> -{ +impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { traits::Obligation { cause: self.cause.clone(), @@ -363,258 +345,384 @@ impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx } } -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableImplData { - impl_def_id: self.impl_def_id, - substs: self.substs.fold_with(folder), - nested: self.nested.fold_with(folder), - } - } +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> { + impl_def_id, substs, nested + } where N: TypeFoldable<'tcx> +} - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) || self.nested.visit_with(visitor) +BraceStructTypeFoldableImpl! 
{ + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableGeneratorData<'tcx, N> { + generator_def_id, substs, nested + } where N: TypeFoldable<'tcx> +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> { + closure_def_id, substs, nested + } where N: TypeFoldable<'tcx> +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableAutoImplData { + trait_def_id, nested + } where N: TypeFoldable<'tcx> +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableBuiltinData { + nested + } where N: TypeFoldable<'tcx> +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx, N> { + upcast_trait_ref, vtable_base, nested + } where N: TypeFoldable<'tcx> +} + +BraceStructTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::VtableFnPointerData<'tcx, N> { + fn_ty, + nested + } where N: TypeFoldable<'tcx> +} + +EnumTypeFoldableImpl! { + impl<'tcx, N> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> { + (traits::VtableImpl)(a), + (traits::VtableAutoImpl)(a), + (traits::VtableGenerator)(a), + (traits::VtableClosure)(a), + (traits::VtableFnPointer)(a), + (traits::VtableParam)(a), + (traits::VtableBuiltin)(a), + (traits::VtableObject)(a), + } where N: TypeFoldable<'tcx> +} + +BraceStructTypeFoldableImpl! 
{ + impl<'tcx, T> TypeFoldable<'tcx> for Normalized<'tcx, T> { + value, + obligations + } where T: TypeFoldable<'tcx> +} + +impl<'tcx> fmt::Display for traits::WhereClause<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::WhereClause::*; + + match self { + Implemented(trait_ref) => write!(fmt, "Implemented({})", trait_ref), + ProjectionEq(projection) => write!(fmt, "ProjectionEq({})", projection), + RegionOutlives(predicate) => write!(fmt, "RegionOutlives({})", predicate), + TypeOutlives(predicate) => write!(fmt, "TypeOutlives({})", predicate), + } } } -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableGeneratorData<'tcx, N> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableGeneratorData { - closure_def_id: self.closure_def_id, - substs: self.substs.fold_with(folder), - nested: self.nested.fold_with(folder), - } - } +impl<'tcx> fmt::Display for traits::WellFormed<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::WellFormed::*; - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) || self.nested.visit_with(visitor) + match self { + Trait(trait_ref) => write!(fmt, "WellFormed({})", trait_ref), + Ty(ty) => write!(fmt, "WellFormed({})", ty), + } } } -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableClosureData { - closure_def_id: self.closure_def_id, - substs: self.substs.fold_with(folder), - nested: self.nested.fold_with(folder), - } - } +impl<'tcx> fmt::Display for traits::FromEnv<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::FromEnv::*; - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) || self.nested.visit_with(visitor) + match self { + Trait(trait_ref) => 
write!(fmt, "FromEnv({})", trait_ref), + Ty(ty) => write!(fmt, "FromEnv({})", ty), + } } } -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableAutoImplData { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableAutoImplData { - trait_def_id: self.trait_def_id, - nested: self.nested.fold_with(folder), - } - } +impl<'tcx> fmt::Display for traits::DomainGoal<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::DomainGoal::*; - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.nested.visit_with(visitor) + match self { + Holds(wc) => write!(fmt, "{}", wc), + WellFormed(wf) => write!(fmt, "{}", wf), + FromEnv(from_env) => write!(fmt, "{}", from_env), + Normalize(projection) => write!(fmt, "Normalize({})", projection), + } } } -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableBuiltinData { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableBuiltinData { - nested: self.nested.fold_with(folder), - } - } +impl fmt::Display for traits::QuantifierKind { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::QuantifierKind::*; - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.nested.visit_with(visitor) + match self { + Universal => write!(fmt, "forall"), + Existential => write!(fmt, "exists"), + } } } -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx, N> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableObjectData { - upcast_trait_ref: self.upcast_trait_ref.fold_with(folder), - vtable_base: self.vtable_base, - nested: self.nested.fold_with(folder), - } - } +impl<'tcx> fmt::Display for traits::Goal<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::Goal::*; - fn super_visit_with>(&self, visitor: &mut V) -> bool { - 
self.upcast_trait_ref.visit_with(visitor) || self.nested.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableFnPointerData<'tcx, N> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::VtableFnPointerData { - fn_ty: self.fn_ty.fold_with(folder), - nested: self.nested.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.fn_ty.visit_with(visitor) || self.nested.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)), - traits::VtableAutoImpl(ref t) => traits::VtableAutoImpl(t.fold_with(folder)), - traits::VtableGenerator(ref d) => { - traits::VtableGenerator(d.fold_with(folder)) + match self { + Implies(hypotheses, goal) => { + write!(fmt, "if (")?; + for (index, hyp) in hypotheses.iter().enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{}", hyp)?; + } + write!(fmt, ") {{ {} }}", goal) } - traits::VtableClosure(ref d) => { - traits::VtableClosure(d.fold_with(folder)) + And(goal1, goal2) => write!(fmt, "({} && {})", goal1, goal2), + Not(goal) => write!(fmt, "not {{ {} }}", goal), + DomainGoal(goal) => write!(fmt, "{}", goal), + Quantified(qkind, goal) => { + // FIXME: appropriate binder names + write!(fmt, "{}<> {{ {} }}", qkind, goal.skip_binder()) } - traits::VtableFnPointer(ref d) => { - traits::VtableFnPointer(d.fold_with(folder)) - } - traits::VtableParam(ref n) => traits::VtableParam(n.fold_with(folder)), - traits::VtableBuiltin(ref d) => traits::VtableBuiltin(d.fold_with(folder)), - traits::VtableObject(ref d) => traits::VtableObject(d.fold_with(folder)), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - traits::VtableImpl(ref v) => 
v.visit_with(visitor), - traits::VtableAutoImpl(ref t) => t.visit_with(visitor), - traits::VtableGenerator(ref d) => d.visit_with(visitor), - traits::VtableClosure(ref d) => d.visit_with(visitor), - traits::VtableFnPointer(ref d) => d.visit_with(visitor), - traits::VtableParam(ref n) => n.visit_with(visitor), - traits::VtableBuiltin(ref d) => d.visit_with(visitor), - traits::VtableObject(ref d) => d.visit_with(visitor), + CannotProve => write!(fmt, "CannotProve"), } } } -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - Normalized { - value: self.value.fold_with(folder), - obligations: self.obligations.fold_with(folder), +impl<'tcx> fmt::Display for traits::ProgramClause<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let traits::ProgramClause { goal, hypotheses } = self; + write!(fmt, "{}", goal)?; + if !hypotheses.is_empty() { + write!(fmt, " :- ")?; + for (index, condition) in hypotheses.iter().enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{}", condition)?; + } } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.value.visit_with(visitor) || self.obligations.visit_with(visitor) + write!(fmt, ".") } } -impl<'tcx> TypeFoldable<'tcx> for traits::ObligationCauseCode<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - super::ExprAssignable | - super::MatchExpressionArm { arm_span: _, source: _ } | - super::IfExpression | - super::IfExpressionWithNoElse | - super::EquatePredicate | - super::MainFunctionType | - super::StartFunctionType | - super::IntrinsicType | - super::MethodReceiver | - super::MiscObligation | - super::SliceOrArrayElem | - super::TupleElem | - super::ItemObligation(_) | - super::AssignmentLhsSized | - super::TupleInitializerSized | - super::StructInitializerSized | - super::VariableType(_) | - 
super::ReturnType(_) | - super::SizedReturnType | - super::ReturnNoExpression | - super::RepeatVec | - super::FieldSized(_) | - super::ConstSized | - super::SharedStatic | - super::BlockTailExpression(_) | - super::CompareImplMethodObligation { .. } => self.clone(), +impl<'tcx> fmt::Display for traits::Clause<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use traits::Clause::*; - super::ProjectionWf(proj) => super::ProjectionWf(proj.fold_with(folder)), - super::ReferenceOutlivesReferent(ty) => { - super::ReferenceOutlivesReferent(ty.fold_with(folder)) - } - super::ObjectTypeBound(ty, r) => { - super::ObjectTypeBound(ty.fold_with(folder), r.fold_with(folder)) - } - super::ObjectCastObligation(ty) => { - super::ObjectCastObligation(ty.fold_with(folder)) - } - super::BuiltinDerivedObligation(ref cause) => { - super::BuiltinDerivedObligation(cause.fold_with(folder)) - } - super::ImplDerivedObligation(ref cause) => { - super::ImplDerivedObligation(cause.fold_with(folder)) + match self { + Implies(clause) => write!(fmt, "{}", clause), + ForAll(clause) => { + // FIXME: appropriate binder names + write!(fmt, "forall<> {{ {} }}", clause.skip_binder()) } } } +} - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - super::ExprAssignable | - super::MatchExpressionArm { arm_span: _, source: _ } | - super::IfExpression | - super::IfExpressionWithNoElse | - super::EquatePredicate | - super::MainFunctionType | - super::StartFunctionType | - super::IntrinsicType | - super::MethodReceiver | - super::MiscObligation | - super::SliceOrArrayElem | - super::TupleElem | - super::ItemObligation(_) | - super::AssignmentLhsSized | - super::TupleInitializerSized | - super::StructInitializerSized | - super::VariableType(_) | - super::ReturnType(_) | - super::SizedReturnType | - super::ReturnNoExpression | - super::RepeatVec | - super::FieldSized(_) | - super::ConstSized | - super::SharedStatic | - super::BlockTailExpression(_) | - 
super::CompareImplMethodObligation { .. } => false, - - super::ProjectionWf(proj) => proj.visit_with(visitor), - super::ReferenceOutlivesReferent(ty) => ty.visit_with(visitor), - super::ObjectTypeBound(ty, r) => ty.visit_with(visitor) || r.visit_with(visitor), - super::ObjectCastObligation(ty) => ty.visit_with(visitor), - super::BuiltinDerivedObligation(ref cause) => cause.visit_with(visitor), - super::ImplDerivedObligation(ref cause) => cause.visit_with(visitor) - } +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for traits::WhereClause<'tcx> { + (traits::WhereClause::Implemented)(trait_ref), + (traits::WhereClause::ProjectionEq)(projection), + (traits::WhereClause::TypeOutlives)(ty_outlives), + (traits::WhereClause::RegionOutlives)(region_outlives), } } -impl<'tcx> TypeFoldable<'tcx> for traits::DerivedObligationCause<'tcx> { +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::WhereClause<'a> { + type Lifted = traits::WhereClause<'tcx>; + (traits::WhereClause::Implemented)(trait_ref), + (traits::WhereClause::ProjectionEq)(projection), + (traits::WhereClause::TypeOutlives)(ty_outlives), + (traits::WhereClause::RegionOutlives)(region_outlives), + } +} + +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for traits::WellFormed<'tcx> { + (traits::WellFormed::Trait)(trait_ref), + (traits::WellFormed::Ty)(ty), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::WellFormed<'a> { + type Lifted = traits::WellFormed<'tcx>; + (traits::WellFormed::Trait)(trait_ref), + (traits::WellFormed::Ty)(ty), + } +} + +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for traits::FromEnv<'tcx> { + (traits::FromEnv::Trait)(trait_ref), + (traits::FromEnv::Ty)(ty), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::FromEnv<'a> { + type Lifted = traits::FromEnv<'tcx>; + (traits::FromEnv::Trait)(trait_ref), + (traits::FromEnv::Ty)(ty), + } +} + +EnumTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for traits::DomainGoal<'tcx> { + (traits::DomainGoal::Holds)(wc), + (traits::DomainGoal::WellFormed)(wf), + (traits::DomainGoal::FromEnv)(from_env), + (traits::DomainGoal::Normalize)(projection), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::DomainGoal<'a> { + type Lifted = traits::DomainGoal<'tcx>; + (traits::DomainGoal::Holds)(wc), + (traits::DomainGoal::WellFormed)(wf), + (traits::DomainGoal::FromEnv)(from_env), + (traits::DomainGoal::Normalize)(projection), + } +} + +CloneTypeFoldableAndLiftImpls! { + traits::QuantifierKind, +} + +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for traits::Goal<'tcx> { + (traits::Goal::Implies)(hypotheses, goal), + (traits::Goal::And)(goal1, goal2), + (traits::Goal::Not)(goal), + (traits::Goal::DomainGoal)(domain_goal), + (traits::Goal::Quantified)(qkind, goal), + (traits::Goal::CannotProve), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for traits::Goal<'a> { + type Lifted = traits::Goal<'tcx>; + (traits::Goal::Implies)(hypotheses, goal), + (traits::Goal::And)(goal1, goal2), + (traits::Goal::Not)(goal), + (traits::Goal::DomainGoal)(domain_goal), + (traits::Goal::Quantified)(kind, goal), + (traits::Goal::CannotProve), + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::DerivedObligationCause { - parent_trait_ref: self.parent_trait_ref.fold_with(folder), - parent_code: self.parent_code.fold_with(folder) - } + let v = self.iter() + .map(|t| t.fold_with(folder)) + .collect::>(); + folder.tcx().intern_goals(&v) } fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.parent_trait_ref.visit_with(visitor) || self.parent_code.visit_with(visitor) + self.iter().any(|t| t.visit_with(visitor)) } } -impl<'tcx> TypeFoldable<'tcx> for traits::ObligationCause<'tcx> { +impl<'tcx> TypeFoldable<'tcx> for &'tcx traits::Goal<'tcx> { fn super_fold_with<'gcx: 
'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - traits::ObligationCause { - span: self.span, - body_id: self.body_id, - code: self.code.fold_with(folder), - } + let v = (**self).fold_with(folder); + folder.tcx().mk_goal(v) } fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.code.visit_with(visitor) + (**self).visit_with(visitor) } } + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for traits::ProgramClause<'tcx> { + goal, + hypotheses + } +} + +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for traits::Clause<'tcx> { + (traits::Clause::Implies)(clause), + (traits::Clause::ForAll)(clause), + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let v = self.iter() + .map(|t| t.fold_with(folder)) + .collect::>(); + folder.tcx().intern_clauses(&v) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.iter().any(|t| t.visit_with(visitor)) + } +} + +impl<'tcx, C> TypeFoldable<'tcx> for chalk_engine::ExClause +where + C: traits::ExClauseFold<'tcx>, + C::Substitution: Clone, + C::RegionConstraint: Clone, +{ + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ::fold_ex_clause_with( + self, + folder, + ) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + ::visit_ex_clause_with( + self, + visitor, + ) + } +} + +impl<'tcx, C> Lift<'tcx> for chalk_engine::ExClause +where + C: chalk_engine::context::Context + Clone, + C: traits::ExClauseLift<'tcx>, +{ + type Lifted = C::LiftedExClause; + + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + ::lift_ex_clause_to_tcx(self, tcx) + } +} + +EnumTypeFoldableImpl! 
{ + impl<'tcx, C> TypeFoldable<'tcx> for chalk_engine::DelayedLiteral { + (chalk_engine::DelayedLiteral::CannotProve)(a), + (chalk_engine::DelayedLiteral::Negative)(a), + (chalk_engine::DelayedLiteral::Positive)(a, b), + } where + C: chalk_engine::context::Context + Clone, + C::CanonicalConstrainedSubst: TypeFoldable<'tcx>, +} + +EnumTypeFoldableImpl! { + impl<'tcx, C> TypeFoldable<'tcx> for chalk_engine::Literal { + (chalk_engine::Literal::Negative)(a), + (chalk_engine::Literal::Positive)(a), + } where + C: chalk_engine::context::Context + Clone, + C::GoalInEnvironment: Clone + TypeFoldable<'tcx>, +} + +CloneTypeFoldableAndLiftImpls! { + chalk_engine::TableIndex, +} diff --git a/src/librustc/traits/trans/mod.rs b/src/librustc/traits/trans/mod.rs deleted file mode 100644 index c873580e3ad6..000000000000 --- a/src/librustc/traits/trans/mod.rs +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This file contains various trait resolution methods used by trans. -// They all assume regions can be erased and monomorphic types. It -// seems likely that they should eventually be merged into more -// general routines. - -use dep_graph::{DepKind, DepTrackingMapConfig}; -use infer::TransNormalize; -use std::marker::PhantomData; -use syntax_pos::DUMMY_SP; -use hir::def_id::DefId; -use traits::{FulfillmentContext, Obligation, ObligationCause, SelectionContext, Vtable}; -use ty::{self, Ty, TyCtxt}; -use ty::subst::{Subst, Substs}; -use ty::fold::{TypeFoldable, TypeFolder}; - -/// Attempts to resolve an obligation to a vtable.. 
The result is -/// a shallow vtable resolution -- meaning that we do not -/// (necessarily) resolve all nested obligations on the impl. Note -/// that type check should guarantee to us that all nested -/// obligations *could be* resolved if we wanted to. -/// Assumes that this is run after the entire crate has been successfully type-checked. -pub fn trans_fulfill_obligation<'a, 'tcx>(ty: TyCtxt<'a, 'tcx, 'tcx>, - (param_env, trait_ref): - (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) - -> Vtable<'tcx, ()> -{ - // Remove any references to regions; this helps improve caching. - let trait_ref = ty.erase_regions(&trait_ref); - - debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})", - (param_env, trait_ref), trait_ref.def_id()); - - // Do the initial selection for the obligation. This yields the - // shallow result we are looking for -- that is, what specific impl. - ty.infer_ctxt().enter(|infcx| { - let mut selcx = SelectionContext::new(&infcx); - - let obligation_cause = ObligationCause::dummy(); - let obligation = Obligation::new(obligation_cause, - param_env, - trait_ref.to_poly_trait_predicate()); - - let selection = match selcx.select(&obligation) { - Ok(Some(selection)) => selection, - Ok(None) => { - // Ambiguity can happen when monomorphizing during trans - // expands to some humongo type that never occurred - // statically -- this humongo type can then overflow, - // leading to an ambiguous result. So report this as an - // overflow bug, since I believe this is the only case - // where ambiguity can result. - bug!("Encountered ambiguity selecting `{:?}` during trans, \ - presuming due to overflow", - trait_ref) - } - Err(e) => { - bug!("Encountered error `{:?}` selecting `{:?}` during trans", - e, trait_ref) - } - }; - - debug!("fulfill_obligation: selection={:?}", selection); - - // Currently, we use a fulfillment context to completely resolve - // all nested obligations. 
This is because they can inform the - // inference of the impl's type parameters. - let mut fulfill_cx = FulfillmentContext::new(); - let vtable = selection.map(|predicate| { - debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate); - fulfill_cx.register_predicate_obligation(&infcx, predicate); - }); - let vtable = infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable); - - info!("Cache miss: {:?} => {:?}", trait_ref, vtable); - vtable - }) -} - -impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { - /// Monomorphizes a type from the AST by first applying the in-scope - /// substitutions and then normalizing any associated types. - pub fn trans_apply_param_substs(self, - param_substs: &Substs<'tcx>, - value: &T) - -> T - where T: TransNormalize<'tcx> - { - debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value); - let substituted = value.subst(self, param_substs); - let substituted = self.erase_regions(&substituted); - AssociatedTypeNormalizer::new(self).fold(&substituted) - } - - pub fn trans_apply_param_substs_env( - self, - param_substs: &Substs<'tcx>, - param_env: ty::ParamEnv<'tcx>, - value: &T, - ) -> T - where - T: TransNormalize<'tcx>, - { - debug!( - "apply_param_substs_env(param_substs={:?}, value={:?}, param_env={:?})", - param_substs, - value, - param_env, - ); - let substituted = value.subst(self, param_substs); - let substituted = self.erase_regions(&substituted); - AssociatedTypeNormalizerEnv::new(self, param_env).fold(&substituted) - } - - pub fn trans_impl_self_ty(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) - -> Ty<'tcx> - { - self.trans_apply_param_substs(substs, &self.type_of(def_id)) - } -} - -struct AssociatedTypeNormalizer<'a, 'gcx: 'a> { - tcx: TyCtxt<'a, 'gcx, 'gcx>, -} - -impl<'a, 'gcx> AssociatedTypeNormalizer<'a, 'gcx> { - fn new(tcx: TyCtxt<'a, 'gcx, 'gcx>) -> Self { - AssociatedTypeNormalizer { tcx } - } - - fn fold>(&mut self, value: &T) -> T { - if !value.has_projections() { - 
value.clone() - } else { - value.fold_with(self) - } - } -} - -impl<'a, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'gcx> { - fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> { - self.tcx - } - - fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> { - if !ty.has_projections() { - ty - } else { - debug!("AssociatedTypeNormalizer: ty={:?}", ty); - self.tcx.fully_normalize_monormophic_ty(ty) - } - } -} - -struct AssociatedTypeNormalizerEnv<'a, 'gcx: 'a> { - tcx: TyCtxt<'a, 'gcx, 'gcx>, - param_env: ty::ParamEnv<'gcx>, -} - -impl<'a, 'gcx> AssociatedTypeNormalizerEnv<'a, 'gcx> { - fn new(tcx: TyCtxt<'a, 'gcx, 'gcx>, param_env: ty::ParamEnv<'gcx>) -> Self { - Self { tcx, param_env } - } - - fn fold>(&mut self, value: &T) -> T { - if !value.has_projections() { - value.clone() - } else { - value.fold_with(self) - } - } -} - -impl<'a, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizerEnv<'a, 'gcx> { - fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> { - self.tcx - } - - fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> { - if !ty.has_projections() { - ty - } else { - debug!("AssociatedTypeNormalizerEnv: ty={:?}", ty); - self.tcx.normalize_associated_type_in_env(&ty, self.param_env) - } - } -} - -// Implement DepTrackingMapConfig for `trait_cache` -pub struct TraitSelectionCache<'tcx> { - data: PhantomData<&'tcx ()> -} - -impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> { - type Key = (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>); - type Value = Vtable<'tcx, ()>; - fn to_dep_kind() -> DepKind { - DepKind::TraitSelect - } -} - -// # Global Cache - -pub struct ProjectionCache<'gcx> { - data: PhantomData<&'gcx ()> -} - -impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> { - type Key = Ty<'gcx>; - type Value = Ty<'gcx>; - fn to_dep_kind() -> DepKind { - DepKind::TraitSelect - } -} diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs index 898accb90215..40f13ac06f56 100644 --- a/src/librustc/traits/util.rs +++ 
b/src/librustc/traits/util.rs @@ -9,7 +9,7 @@ // except according to those terms. use hir::def_id::DefId; -use ty::subst::{Subst, Substs}; +use ty::subst::{Kind, Subst, Substs}; use ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef}; use ty::outlives::Component; use util::nodemap::FxHashSet; @@ -25,9 +25,6 @@ fn anonymize_predicate<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::Predicate::Trait(ref data) => ty::Predicate::Trait(tcx.anonymize_late_bound_regions(data)), - ty::Predicate::Equate(ref data) => - ty::Predicate::Equate(tcx.anonymize_late_bound_regions(data)), - ty::Predicate::RegionOutlives(ref data) => ty::Predicate::RegionOutlives(tcx.anonymize_late_bound_regions(data)), @@ -163,11 +160,6 @@ impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { // Currently, we do not elaborate object-safe // predicates. } - ty::Predicate::Equate(..) => { - // Currently, we do not "elaborate" predicates like - // `X == Y`, though conceivably we might. For example, - // `&X == &Y` implies that `X == Y`. - } ty::Predicate::Subtype(..) => { // Currently, we do not "elaborate" predicates like `X // <: Y`, though conceivably we might. 
@@ -217,13 +209,13 @@ impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { None } else { Some(ty::Predicate::RegionOutlives( - ty::Binder(ty::OutlivesPredicate(r, r_min)))) + ty::Binder::dummy(ty::OutlivesPredicate(r, r_min)))) }, Component::Param(p) => { - let ty = tcx.mk_param(p.idx, p.name); + let ty = tcx.mk_ty_param(p.idx, p.name); Some(ty::Predicate::TypeOutlives( - ty::Binder(ty::OutlivesPredicate(ty, r_min)))) + ty::Binder::dummy(ty::OutlivesPredicate(ty, r_min)))) }, Component::UnresolvedInferenceVariable(_) => { @@ -247,6 +239,10 @@ impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { impl<'cx, 'gcx, 'tcx> Iterator for Elaborator<'cx, 'gcx, 'tcx> { type Item = ty::Predicate<'tcx>; + fn size_hint(&self) -> (usize, Option) { + (self.stack.len(), None) + } + fn next(&mut self) -> Option> { // Extract next item from top-most stack frame, if any. let next_predicate = match self.stack.pop() { @@ -355,6 +351,11 @@ impl<'tcx,I:Iterator>> Iterator for FilterToTraits { } } } + + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.base_iterator.size_hint(); + (0, upper) + } } /////////////////////////////////////////////////////////////////////////// @@ -433,13 +434,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { cause: ObligationCause<'tcx>, trait_def_id: DefId, recursion_depth: usize, - param_ty: Ty<'tcx>, - ty_params: &[Ty<'tcx>]) + self_ty: Ty<'tcx>, + params: &[Kind<'tcx>]) -> PredicateObligation<'tcx> { let trait_ref = ty::TraitRef { def_id: trait_def_id, - substs: self.mk_substs_trait(param_ty, ty_params) + substs: self.mk_substs_trait(self_ty, params) }; predicate_for_trait_ref(cause, param_env, trait_ref, recursion_depth) } @@ -511,13 +512,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let arguments_tuple = match tuple_arguments { TupleArgumentsFlag::No => sig.skip_binder().inputs()[0], TupleArgumentsFlag::Yes => - self.intern_tup(sig.skip_binder().inputs(), false), + self.intern_tup(sig.skip_binder().inputs()), }; let trait_ref = 
ty::TraitRef { def_id: fn_trait_def_id, - substs: self.mk_substs_trait(self_ty, &[arguments_tuple]), + substs: self.mk_substs_trait(self_ty, &[arguments_tuple.into()]), }; - ty::Binder((trait_ref, sig.skip_binder().output())) + ty::Binder::bind((trait_ref, sig.skip_binder().output())) } pub fn generator_trait_ref_and_outputs(self, @@ -530,14 +531,14 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { def_id: fn_trait_def_id, substs: self.mk_substs_trait(self_ty, &[]), }; - ty::Binder((trait_ref, sig.skip_binder().yield_ty, sig.skip_binder().return_ty)) + ty::Binder::bind((trait_ref, sig.skip_binder().yield_ty, sig.skip_binder().return_ty)) } pub fn impl_is_default(self, node_item_def_id: DefId) -> bool { match self.hir.as_local_node_id(node_item_def_id) { Some(node_id) => { let item = self.hir.expect_item(node_id); - if let hir::ItemImpl(_, _, defaultness, ..) = item.node { + if let hir::ItemKind::Impl(_, _, defaultness, ..) = item.node { defaultness.is_default() } else { false diff --git a/src/librustc/ty/README.md b/src/librustc/ty/README.md deleted file mode 100644 index 3fd956ecfb87..000000000000 --- a/src/librustc/ty/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# Types and the Type Context - -The `ty` module defines how the Rust compiler represents types -internally. It also defines the *typing context* (`tcx` or `TyCtxt`), -which is the central data structure in the compiler. - -## The tcx and how it uses lifetimes - -The `tcx` ("typing context") is the central data structure in the -compiler. It is the context that you use to perform all manner of -queries. The struct `TyCtxt` defines a reference to this shared context: - -```rust -tcx: TyCtxt<'a, 'gcx, 'tcx> -// -- ---- ---- -// | | | -// | | innermost arena lifetime (if any) -// | "global arena" lifetime -// lifetime of this reference -``` - -As you can see, the `TyCtxt` type takes three lifetime parameters. -These lifetimes are perhaps the most complex thing to understand about -the tcx. 
During Rust compilation, we allocate most of our memory in -**arenas**, which are basically pools of memory that get freed all at -once. When you see a reference with a lifetime like `'tcx` or `'gcx`, -you know that it refers to arena-allocated data (or data that lives as -long as the arenas, anyhow). - -We use two distinct levels of arenas. The outer level is the "global -arena". This arena lasts for the entire compilation: so anything you -allocate in there is only freed once compilation is basically over -(actually, when we shift to executing LLVM). - -To reduce peak memory usage, when we do type inference, we also use an -inner level of arena. These arenas get thrown away once type inference -is over. This is done because type inference generates a lot of -"throw-away" types that are not particularly interesting after type -inference completes, so keeping around those allocations would be -wasteful. - -Often, we wish to write code that explicitly asserts that it is not -taking place during inference. In that case, there is no "local" -arena, and all the types that you can access are allocated in the -global arena. To express this, the idea is to use the same lifetime -for the `'gcx` and `'tcx` parameters of `TyCtxt`. Just to be a touch -confusing, we tend to use the name `'tcx` in such contexts. Here is an -example: - -```rust -fn not_in_inference<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { - // ---- ---- - // Using the same lifetime here asserts - // that the innermost arena accessible through - // this reference *is* the global arena. -} -``` - -In contrast, if we want to code that can be usable during type inference, then you -need to declare a distinct `'gcx` and `'tcx` lifetime parameter: - -```rust -fn maybe_in_inference<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) { - // ---- ---- - // Using different lifetimes here means that - // the innermost arena *may* be distinct - // from the global arena (but doesn't have to be). 
-} -``` - -### Allocating and working with types - -Rust types are represented using the `Ty<'tcx>` defined in the `ty` -module (not to be confused with the `Ty` struct from [the HIR]). This -is in fact a simple type alias for a reference with `'tcx` lifetime: - -```rust -pub type Ty<'tcx> = &'tcx TyS<'tcx>; -``` - -[the HIR]: ../hir/README.md - -You can basically ignore the `TyS` struct -- you will basically never -access it explicitly. We always pass it by reference using the -`Ty<'tcx>` alias -- the only exception I think is to define inherent -methods on types. Instances of `TyS` are only ever allocated in one of -the rustc arenas (never e.g. on the stack). - -One common operation on types is to **match** and see what kinds of -types they are. This is done by doing `match ty.sty`, sort of like this: - -```rust -fn test_type<'tcx>(ty: Ty<'tcx>) { - match ty.sty { - ty::TyArray(elem_ty, len) => { ... } - ... - } -} -``` - -The `sty` field (the origin of this name is unclear to me; perhaps -structural type?) is of type `TypeVariants<'tcx>`, which is an enum -defining all of the different kinds of types in the compiler. - -> NB: inspecting the `sty` field on types during type inference can be -> risky, as there may be inference variables and other things to -> consider, or sometimes types are not yet known that will become -> known later.). - -To allocate a new type, you can use the various `mk_` methods defined -on the `tcx`. These have names that correpond mostly to the various kinds -of type variants. For example: - -```rust -let array_ty = tcx.mk_array(elem_ty, len * 2); -``` - -These methods all return a `Ty<'tcx>` -- note that the lifetime you -get back is the lifetime of the innermost arena that this `tcx` has -access to. 
In fact, types are always canonicalized and interned (so we -never allocate exactly the same type twice) and are always allocated -in the outermost arena where they can be (so, if they do not contain -any inference variables or other "temporary" types, they will be -allocated in the global arena). However, the lifetime `'tcx` is always -a safe approximation, so that is what you get back. - -> NB. Because types are interned, it is possible to compare them for -> equality efficiently using `==` -- however, this is almost never what -> you want to do unless you happen to be hashing and looking for -> duplicates. This is because often in Rust there are multiple ways to -> represent the same type, particularly once inference is involved. If -> you are going to be testing for type equality, you probably need to -> start looking into the inference code to do it right. - -You can also find various common types in the `tcx` itself by accessing -`tcx.types.bool`, `tcx.types.char`, etc (see `CommonTypes` for more). - -### Beyond types: Other kinds of arena-allocated data structures - -In addition to types, there are a number of other arena-allocated data -structures that you can allocate, and which are found in this -module. Here are a few examples: - -- `Substs`, allocated with `mk_substs` -- this will intern a slice of types, often used to - specify the values to be substituted for generics (e.g., `HashMap` - would be represented as a slice `&'tcx [tcx.types.i32, tcx.types.u32]`). -- `TraitRef`, typically passed by value -- a **trait reference** - consists of a reference to a trait along with its various type - parameters (including `Self`), like `i32: Display` (here, the def-id - would reference the `Display` trait, and the substs would contain - `i32`). -- `Predicate` defines something the trait system has to prove (see `traits` module). 
- -### Import conventions - -Although there is no hard and fast rule, the `ty` module tends to be used like so: - -```rust -use ty::{self, Ty, TyCtxt}; -``` - -In particular, since they are so common, the `Ty` and `TyCtxt` types -are imported directly. Other types are often referenced with an -explicit `ty::` prefix (e.g., `ty::TraitRef<'tcx>`). But some modules -choose to import a larger or smaller set of names explicitly. diff --git a/src/librustc/ty/_match.rs b/src/librustc/ty/_match.rs index f86c1cf0dd6a..047bfcc8c6f5 100644 --- a/src/librustc/ty/_match.rs +++ b/src/librustc/ty/_match.rs @@ -92,6 +92,6 @@ impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Match<'a, 'gcx, 'tcx> { -> RelateResult<'tcx, ty::Binder> where T: Relate<'tcx> { - Ok(ty::Binder(self.relate(a.skip_binder(), b.skip_binder())?)) + Ok(ty::Binder::bind(self.relate(a.skip_binder(), b.skip_binder())?)) } } diff --git a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs index 6df6bb9df232..3263da8fda36 100644 --- a/src/librustc/ty/adjustment.rs +++ b/src/librustc/ty/adjustment.rs @@ -77,7 +77,7 @@ pub enum Adjust<'tcx> { /// Go from a mut raw pointer to a const raw pointer. MutToConstPointer, - /// Dereference once, producing an lvalue. + /// Dereference once, producing a place. Deref(Option>), /// Take the address and produce either a `&` or `*` pointer. @@ -91,8 +91,8 @@ pub enum Adjust<'tcx> { /// pointers. We don't store the details of how the transform is /// done (in fact, we don't know that, because it might depend on /// the precise type parameters). We just store the target - /// type. Trans figures out what has to be done at monomorphization - /// time based on the precise source/target type at hand. + /// type. Codegen backends and miri figure out what has to be done + /// based on the precise source/target type at hand. 
Unsize, } @@ -119,10 +119,43 @@ impl<'a, 'gcx, 'tcx> OverloadedDeref<'tcx> { } } +/// At least for initial deployment, we want to limit two-phase borrows to +/// only a few specific cases. Right now, those mostly "things that desugar" +/// into method calls +/// - using x.some_method() syntax, where some_method takes &mut self +/// - using Foo::some_method(&mut x, ...) syntax +/// - binary assignment operators (+=, -=, *=, etc.) +/// Anything else should be rejected until generalized two phase borrow support +/// is implemented. Right now, dataflow can't handle the general case where there +/// is more than one use of a mutable borrow, and we don't want to accept too much +/// new code via two-phase borrows, so we try to limit where we create two-phase +/// capable mutable borrows. +/// See #49434 for tracking. +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +pub enum AllowTwoPhase { + Yes, + No +} + +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +pub enum AutoBorrowMutability { + Mutable { allow_two_phase_borrow: AllowTwoPhase }, + Immutable, +} + +impl From for hir::Mutability { + fn from(m: AutoBorrowMutability) -> Self { + match m { + AutoBorrowMutability::Mutable { .. } => hir::MutMutable, + AutoBorrowMutability::Immutable => hir::MutImmutable, + } + } +} + #[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] pub enum AutoBorrow<'tcx> { /// Convert from T to &T. - Ref(ty::Region<'tcx>, hir::Mutability), + Ref(ty::Region<'tcx>, AutoBorrowMutability), /// Convert from T to *T. RawPtr(hir::Mutability), diff --git a/src/librustc/ty/binding.rs b/src/librustc/ty/binding.rs index 3db61b76cc55..971b3c3d14ae 100644 --- a/src/librustc/ty/binding.rs +++ b/src/librustc/ty/binding.rs @@ -18,6 +18,8 @@ pub enum BindingMode { BindByValue(Mutability), } +CloneTypeFoldableAndLiftImpls! 
{ BindingMode, } + impl BindingMode { pub fn convert(ba: BindingAnnotation) -> BindingMode { match ba { diff --git a/src/librustc/ty/cast.rs b/src/librustc/ty/cast.rs index c118b7a4692e..7593d4ed24e7 100644 --- a/src/librustc/ty/cast.rs +++ b/src/librustc/ty/cast.rs @@ -9,7 +9,7 @@ // except according to those terms. // Helpers for handling cast expressions, used in both -// typeck and trans. +// typeck and codegen. use ty::{self, Ty}; @@ -20,7 +20,6 @@ use syntax::ast; pub enum IntTy { U(ast::UintTy), I, - Ivar, CEnum, Bool, Char @@ -37,9 +36,9 @@ pub enum CastTy<'tcx> { /// Function Pointers FnPtr, /// Raw pointers - Ptr(&'tcx ty::TypeAndMut<'tcx>), + Ptr(ty::TypeAndMut<'tcx>), /// References - RPtr(&'tcx ty::TypeAndMut<'tcx>), + RPtr(ty::TypeAndMut<'tcx>), } /// Cast Kind. See RFC 401 (or librustc_typeck/check/cast.rs) @@ -64,14 +63,14 @@ impl<'tcx> CastTy<'tcx> { ty::TyBool => Some(CastTy::Int(IntTy::Bool)), ty::TyChar => Some(CastTy::Int(IntTy::Char)), ty::TyInt(_) => Some(CastTy::Int(IntTy::I)), - ty::TyInfer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::Ivar)), + ty::TyInfer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)), ty::TyInfer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float), ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))), ty::TyFloat(_) => Some(CastTy::Float), ty::TyAdt(d,_) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)), - ty::TyRawPtr(ref mt) => Some(CastTy::Ptr(mt)), - ty::TyRef(_, ref mt) => Some(CastTy::RPtr(mt)), + ty::TyRawPtr(mt) => Some(CastTy::Ptr(mt)), + ty::TyRef(_, ty, mutbl) => Some(CastTy::RPtr(ty::TypeAndMut { ty, mutbl })), ty::TyFnPtr(..) => Some(CastTy::FnPtr), _ => None, } diff --git a/src/librustc/ty/codec.rs b/src/librustc/ty/codec.rs index fbb14f39ade3..967a3324cfb2 100644 --- a/src/librustc/ty/codec.rs +++ b/src/librustc/ty/codec.rs @@ -17,13 +17,14 @@ // persisting to incr. comp. caches. 
use hir::def_id::{DefId, CrateNum}; -use middle::const_val::ByteArray; +use infer::canonical::{CanonicalVarInfo, CanonicalVarInfos}; use rustc_data_structures::fx::FxHashMap; use rustc_serialize::{Decodable, Decoder, Encoder, Encodable, opaque}; use std::hash::Hash; use std::intrinsics; use ty::{self, Ty, TyCtxt}; use ty::subst::Substs; +use mir::interpret::Allocation; /// The shorthand encoding uses an enum's variant index `usize` /// and is offset by this value so it never matches a real variant. @@ -53,7 +54,7 @@ pub trait TyEncoder: Encoder { fn position(&self) -> usize; } -impl<'buf> TyEncoder for opaque::Encoder<'buf> { +impl TyEncoder for opaque::Encoder { #[inline] fn position(&self) -> usize { self.position() @@ -241,14 +242,16 @@ pub fn decode_existential_predicate_slice<'a, 'tcx, D>(decoder: &mut D) } #[inline] -pub fn decode_byte_array<'a, 'tcx, D>(decoder: &mut D) - -> Result, D::Error> +pub fn decode_canonical_var_infos<'a, 'tcx, D>(decoder: &mut D) + -> Result, D::Error> where D: TyDecoder<'a, 'tcx>, 'tcx: 'a, { - Ok(ByteArray { - data: decoder.tcx().alloc_byte_array(&Vec::decode(decoder)?) - }) + let len = decoder.read_usize()?; + let interned: Result, _> = (0..len).map(|_| Decodable::decode(decoder)) + .collect(); + Ok(decoder.tcx() + .intern_canonical_var_infos(interned?.as_slice())) } #[inline] @@ -260,6 +263,15 @@ pub fn decode_const<'a, 'tcx, D>(decoder: &mut D) Ok(decoder.tcx().mk_const(Decodable::decode(decoder)?)) } +#[inline] +pub fn decode_allocation<'a, 'tcx, D>(decoder: &mut D) + -> Result<&'tcx Allocation, D::Error> + where D: TyDecoder<'a, 'tcx>, + 'tcx: 'a, +{ + Ok(decoder.tcx().intern_const_alloc(Decodable::decode(decoder)?)) +} + #[macro_export] macro_rules! __impl_decoder_methods { ($($name:ident -> $ty:ty;)*) => { @@ -274,11 +286,11 @@ macro_rules! 
implement_ty_decoder { ($DecoderName:ident <$($typaram:tt),*>) => { mod __ty_decoder_impl { use super::$DecoderName; + use $crate::infer::canonical::CanonicalVarInfos; use $crate::ty; use $crate::ty::codec::*; use $crate::ty::subst::Substs; use $crate::hir::def_id::{CrateNum}; - use $crate::middle::const_val::ByteArray; use rustc_serialize::{Decoder, SpecializedDecoder}; use std::borrow::Cow; @@ -377,10 +389,11 @@ macro_rules! implement_ty_decoder { } } - impl<$($typaram),*> SpecializedDecoder> - for $DecoderName<$($typaram),*> { - fn specialized_decode(&mut self) -> Result, Self::Error> { - decode_byte_array(self) + impl<$($typaram),*> SpecializedDecoder> + for $DecoderName<$($typaram),*> { + fn specialized_decode(&mut self) + -> Result, Self::Error> { + decode_canonical_var_infos(self) } } @@ -390,6 +403,15 @@ macro_rules! implement_ty_decoder { decode_const(self) } } + + impl<$($typaram),*> SpecializedDecoder<&'tcx $crate::mir::interpret::Allocation> + for $DecoderName<$($typaram),*> { + fn specialized_decode( + &mut self + ) -> Result<&'tcx $crate::mir::interpret::Allocation, Self::Error> { + decode_allocation(self) + } + } } } } diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 87742fe91627..42948a3f5f18 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -14,7 +14,8 @@ use dep_graph::DepGraph; use dep_graph::{DepNode, DepConstructor}; use errors::DiagnosticBuilder; use session::Session; -use session::config::OutputFilenames; +use session::config::{BorrowckMode, OutputFilenames}; +use session::config::CrateType; use middle; use hir::{TraitCandidate, HirId, ItemLocalId}; use hir::def::{Def, Export}; @@ -23,69 +24,73 @@ use hir::map as hir_map; use hir::map::DefPathHash; use lint::{self, Lint}; use ich::{StableHashingContext, NodeIdHashingMode}; +use infer::canonical::{CanonicalVarInfo, CanonicalVarInfos}; use infer::outlives::free_region_map::FreeRegionMap; -use middle::const_val::ConstVal; -use 
middle::cstore::{CrateStore, LinkMeta}; +use middle::cstore::{CrateStoreDyn, LinkMeta}; use middle::cstore::EncodedMetadata; use middle::lang_items; use middle::resolve_lifetime::{self, ObjectLifetimeDefault}; use middle::stability; -use mir::{Mir, interpret}; -use ty::subst::{Kind, Substs}; +use mir::{self, Mir, interpret}; +use mir::interpret::Allocation; +use ty::subst::{Kind, Substs, Subst}; use ty::ReprOptions; -use ty::Instance; use traits; +use traits::{Clause, Clauses, Goal, Goals}; use ty::{self, Ty, TypeAndMut}; use ty::{TyS, TypeVariants, Slice}; -use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorInterior, Region, Const}; +use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorSubsts, Region, Const}; use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate}; use ty::RegionKind; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TypeVariants::*; +use ty::GenericParamDefKind; use ty::layout::{LayoutDetails, TargetDataLayout}; -use ty::maps; +use ty::query; use ty::steal::Steal; use ty::BindingMode; -use util::nodemap::{NodeMap, NodeSet, DefIdSet, ItemLocalMap}; +use ty::CanonicalTy; +use util::nodemap::{DefIdSet, ItemLocalMap}; use util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::accumulate_vec::AccumulateVec; use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap, StableHasher, StableHasherResult, StableVec}; -use arena::{TypedArena, DroplessArena}; -use rustc_const_math::{ConstInt, ConstUsize}; +use arena::{TypedArena, SyncDroplessArena}; use rustc_data_structures::indexed_vec::IndexVec; +use rustc_data_structures::sync::{self, Lrc, Lock, WorkerLocal}; use std::any::Any; use std::borrow::Borrow; -use std::cell::{Cell, RefCell}; use std::cmp::Ordering; use std::collections::hash_map::{self, Entry}; use std::hash::{Hash, Hasher}; +use std::fmt; use std::mem; use std::ops::Deref; use std::iter; -use std::rc::Rc; use std::sync::mpsc; use std::sync::Arc; -use syntax::abi; -use 
syntax::ast::{self, Name, NodeId}; +use rustc_target::spec::abi; +use syntax::ast::{self, NodeId}; use syntax::attr; use syntax::codemap::MultiSpan; -use syntax::symbol::{Symbol, keywords}; +use syntax::edition::Edition; +use syntax::feature_gate; +use syntax::symbol::{Symbol, keywords, InternedString}; use syntax_pos::Span; use hir; pub struct AllArenas<'tcx> { - pub global: GlobalArenas<'tcx>, - pub interner: DroplessArena, + pub global: WorkerLocal>, + pub interner: SyncDroplessArena, } impl<'tcx> AllArenas<'tcx> { pub fn new() -> Self { AllArenas { - global: GlobalArenas::new(), - interner: DroplessArena::new(), + global: WorkerLocal::new(|_| GlobalArenas::new()), + interner: SyncDroplessArena::new(), } } } @@ -121,95 +126,102 @@ impl<'tcx> GlobalArenas<'tcx> { } } +type InternedSet<'tcx, T> = Lock>>; + pub struct CtxtInterners<'tcx> { /// The arena that types, regions, etc are allocated from - arena: &'tcx DroplessArena, + arena: &'tcx SyncDroplessArena, /// Specifically use a speedy hash algorithm for these hash sets, /// they're accessed quite often. 
- type_: RefCell>>>, - type_list: RefCell>>>>, - substs: RefCell>>>, - region: RefCell>>, - existential_predicates: RefCell>>>>, - predicates: RefCell>>>>, - const_: RefCell>>>, + type_: InternedSet<'tcx, TyS<'tcx>>, + type_list: InternedSet<'tcx, Slice>>, + substs: InternedSet<'tcx, Substs<'tcx>>, + canonical_var_infos: InternedSet<'tcx, Slice>, + region: InternedSet<'tcx, RegionKind>, + existential_predicates: InternedSet<'tcx, Slice>>, + predicates: InternedSet<'tcx, Slice>>, + const_: InternedSet<'tcx, Const<'tcx>>, + clauses: InternedSet<'tcx, Slice>>, + goals: InternedSet<'tcx, Slice>>, } impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { - fn new(arena: &'tcx DroplessArena) -> CtxtInterners<'tcx> { + fn new(arena: &'tcx SyncDroplessArena) -> CtxtInterners<'tcx> { CtxtInterners { arena, - type_: RefCell::new(FxHashSet()), - type_list: RefCell::new(FxHashSet()), - substs: RefCell::new(FxHashSet()), - region: RefCell::new(FxHashSet()), - existential_predicates: RefCell::new(FxHashSet()), - predicates: RefCell::new(FxHashSet()), - const_: RefCell::new(FxHashSet()), + type_: Default::default(), + type_list: Default::default(), + substs: Default::default(), + region: Default::default(), + existential_predicates: Default::default(), + canonical_var_infos: Default::default(), + predicates: Default::default(), + const_: Default::default(), + clauses: Default::default(), + goals: Default::default(), } } - /// Intern a type. global_interners is Some only if this is - /// a local interner and global_interners is its counterpart. 
- fn intern_ty(&self, st: TypeVariants<'tcx>, - global_interners: Option<&CtxtInterners<'gcx>>) - -> Ty<'tcx> { - let ty = { - let mut interner = self.type_.borrow_mut(); - let global_interner = global_interners.map(|interners| { - interners.type_.borrow_mut() - }); + /// Intern a type + fn intern_ty( + local: &CtxtInterners<'tcx>, + global: &CtxtInterners<'gcx>, + st: TypeVariants<'tcx> + ) -> Ty<'tcx> { + let flags = super::flags::FlagComputation::for_sty(&st); + + // HACK(eddyb) Depend on flags being accurate to + // determine that all contents are in the global tcx. + // See comments on Lift for why we can't use that. + if flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) { + let mut interner = local.type_.borrow_mut(); if let Some(&Interned(ty)) = interner.get(&st) { return ty; } - if let Some(ref interner) = global_interner { - if let Some(&Interned(ty)) = interner.get(&st) { - return ty; - } - } - let flags = super::flags::FlagComputation::for_sty(&st); let ty_struct = TyS { sty: st, flags: flags.flags, - region_depth: flags.depth, + outer_exclusive_binder: flags.outer_exclusive_binder, }; - // HACK(eddyb) Depend on flags being accurate to - // determine that all contents are in the global tcx. - // See comments on Lift for why we can't use that. - if !flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) { - if let Some(interner) = global_interners { - let ty_struct: TyS<'gcx> = unsafe { - mem::transmute(ty_struct) - }; - let ty: Ty<'gcx> = interner.arena.alloc(ty_struct); - global_interner.unwrap().insert(Interned(ty)); - return ty; - } - } else { - // Make sure we don't end up with inference - // types/regions in the global tcx. 
- if global_interners.is_none() { - drop(interner); - bug!("Attempted to intern `{:?}` which contains \ - inference types/regions in the global type context", - &ty_struct); - } + // Make sure we don't end up with inference + // types/regions in the global interner + if local as *const _ as usize == global as *const _ as usize { + bug!("Attempted to intern `{:?}` which contains \ + inference types/regions in the global type context", + &ty_struct); } // Don't be &mut TyS. - let ty: Ty<'tcx> = self.arena.alloc(ty_struct); + let ty: Ty<'tcx> = local.arena.alloc(ty_struct); interner.insert(Interned(ty)); ty - }; + } else { + let mut interner = global.type_.borrow_mut(); + if let Some(&Interned(ty)) = interner.get(&st) { + return ty; + } - debug!("Interned type: {:?} Pointer: {:?}", - ty, ty as *const TyS); - ty + let ty_struct = TyS { + sty: st, + flags: flags.flags, + outer_exclusive_binder: flags.outer_exclusive_binder, + }; + + // This is safe because all the types the ty_struct can point to + // already is in the global arena + let ty_struct: TyS<'gcx> = unsafe { + mem::transmute(ty_struct) + }; + + // Don't be &mut TyS. + let ty: Ty<'gcx> = global.arena.alloc(ty_struct); + interner.insert(Interned(ty)); + ty + } } - } pub struct CommonTypes<'tcx> { @@ -256,9 +268,7 @@ fn validate_hir_id_for_typeck_tables(local_id_root: Option, if let Some(local_id_root) = local_id_root { if hir_id.owner != local_id_root.index { ty::tls::with(|tcx| { - let node_id = tcx.hir - .definitions() - .find_node_for_hir_id(hir_id); + let node_id = tcx.hir.hir_to_node_id(hir_id); bug!("node {} with HirId::owner {:?} cannot be placed in \ TypeckTables with local_id_root {:?}", @@ -340,6 +350,16 @@ pub struct TypeckTables<'tcx> { /// method calls, including those of overloaded operators. type_dependent_defs: ItemLocalMap, + /// Resolved field indices for field accesses in expressions (`S { field }`, `obj.field`) + /// or patterns (`S { field }`). 
The index is often useful by itself, but to learn more + /// about the field you also need definition of the variant to which the field + /// belongs, but it may not exist if it's a tuple field (`tuple.0`). + field_indices: ItemLocalMap, + + /// Stores the canonicalized types provided by the user. See also `UserAssertTy` statement in + /// MIR. + user_provided_tys: ItemLocalMap>, + /// Stores the types for various nodes in the AST. Note that this table /// is not guaranteed to be populated until after typeck. See /// typeck::check::fn_ctxt for details. @@ -381,7 +401,7 @@ pub struct TypeckTables<'tcx> { /// For each fn, records the "liberated" types of its arguments /// and return type. Liberated means that all bound regions /// (including late-bound regions) are replaced with free - /// equivalents. This table is not used in trans (since regions + /// equivalents. This table is not used in codegen (since regions /// are erased there) and hence is not serialized to metadata. liberated_fn_sigs: ItemLocalMap>, @@ -397,9 +417,9 @@ pub struct TypeckTables<'tcx> { /// Set of trait imports actually used in the method resolution. /// This is used for warning unused imports. During type - /// checking, this `Rc` should not be cloned: it must have a ref-count + /// checking, this `Lrc` should not be cloned: it must have a ref-count /// of 1 so that we can insert things into the set mutably. - pub used_trait_imports: Rc, + pub used_trait_imports: Lrc, /// If any errors occurred while type-checking this body, /// this field will be set to `true`. @@ -409,6 +429,10 @@ pub struct TypeckTables<'tcx> { /// its where clauses and parameter types. These are then /// read-again by borrowck. 
pub free_region_map: FreeRegionMap<'tcx>, + + /// All the existential types that are restricted to concrete types + /// by this function + pub concrete_existential_types: FxHashMap>, } impl<'tcx> TypeckTables<'tcx> { @@ -416,6 +440,8 @@ impl<'tcx> TypeckTables<'tcx> { TypeckTables { local_id_root, type_dependent_defs: ItemLocalMap(), + field_indices: ItemLocalMap(), + user_provided_tys: ItemLocalMap(), node_types: ItemLocalMap(), node_substs: ItemLocalMap(), adjustments: ItemLocalMap(), @@ -426,9 +452,10 @@ impl<'tcx> TypeckTables<'tcx> { liberated_fn_sigs: ItemLocalMap(), fru_field_types: ItemLocalMap(), cast_kinds: ItemLocalMap(), - used_trait_imports: Rc::new(DefIdSet()), + used_trait_imports: Lrc::new(DefIdSet()), tainted_by_errors: false, free_region_map: FreeRegionMap::new(), + concrete_existential_types: FxHashMap(), } } @@ -457,6 +484,34 @@ impl<'tcx> TypeckTables<'tcx> { } } + pub fn field_indices(&self) -> LocalTableInContext { + LocalTableInContext { + local_id_root: self.local_id_root, + data: &self.field_indices + } + } + + pub fn field_indices_mut(&mut self) -> LocalTableInContextMut { + LocalTableInContextMut { + local_id_root: self.local_id_root, + data: &mut self.field_indices + } + } + + pub fn user_provided_tys(&self) -> LocalTableInContext> { + LocalTableInContext { + local_id_root: self.local_id_root, + data: &self.user_provided_tys + } + } + + pub fn user_provided_tys_mut(&mut self) -> LocalTableInContextMut> { + LocalTableInContextMut { + local_id_root: self.local_id_root, + data: &mut self.user_provided_tys + } + } + pub fn node_types(&self) -> LocalTableInContext> { LocalTableInContext { local_id_root: self.local_id_root, @@ -477,7 +532,7 @@ impl<'tcx> TypeckTables<'tcx> { None => { bug!("node_id_to_type: no type for node `{}`", tls::with(|tcx| { - let id = tcx.hir.definitions().find_node_for_hir_id(id); + let id = tcx.hir.hir_to_node_id(id); tcx.hir.node_to_string(id) })) } @@ -573,7 +628,7 @@ impl<'tcx> TypeckTables<'tcx> { pub fn 
is_method_call(&self, expr: &hir::Expr) -> bool { // Only paths and method calls/overloaded operators have // entries in type_dependent_defs, ignore the former here. - if let hir::ExprPath(_) = expr.node { + if let hir::ExprKind::Path(_) = expr.node { return false; } @@ -674,13 +729,15 @@ impl<'tcx> TypeckTables<'tcx> { } } -impl<'gcx> HashStable> for TypeckTables<'gcx> { +impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let ty::TypeckTables { local_id_root, ref type_dependent_defs, + ref field_indices, + ref user_provided_tys, ref node_types, ref node_substs, ref adjustments, @@ -696,10 +753,13 @@ impl<'gcx> HashStable> for TypeckTables<'gcx> { ref used_trait_imports, tainted_by_errors, ref free_region_map, + ref concrete_existential_types, } = *self; hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { type_dependent_defs.hash_stable(hcx, hasher); + field_indices.hash_stable(hcx, hasher); + user_provided_tys.hash_stable(hcx, hasher); node_types.hash_stable(hcx, hasher); node_substs.hash_stable(hcx, hasher); adjustments.hash_stable(hcx, hasher); @@ -734,13 +794,20 @@ impl<'gcx> HashStable> for TypeckTables<'gcx> { used_trait_imports.hash_stable(hcx, hasher); tainted_by_errors.hash_stable(hcx, hasher); free_region_map.hash_stable(hcx, hasher); + concrete_existential_types.hash_stable(hcx, hasher); }) } } impl<'tcx> CommonTypes<'tcx> { fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> { - let mk = |sty| interners.intern_ty(sty, None); + // Ensure our type representation does not grow + #[cfg(target_pointer_width = "64")] + assert!(mem::size_of::() <= 24); + #[cfg(target_pointer_width = "64")] + assert!(mem::size_of::() <= 32); + + let mk = |sty| CtxtInterners::intern_ty(interners, interners, sty); let mk_region = |r| { if let Some(r) = interners.region.borrow().get(&r) { return r.0; @@ -779,11 +846,11 @@ 
impl<'tcx> CommonTypes<'tcx> { /// The central data structure of the compiler. It stores references /// to the various **arenas** and also houses the results of the /// various **compiler queries** that have been performed. See the -/// module-level [README] for more details. +/// [rustc guide] for more details. /// -/// [README]: https://github.com/rust-lang/rust/blob/master/src/librustc/ty/README.md +/// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/ty.html #[derive(Copy, Clone)] -pub struct TyCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { +pub struct TyCtxt<'a, 'gcx: 'tcx, 'tcx: 'a> { gcx: &'a GlobalCtxt<'gcx>, interners: &'a CtxtInterners<'tcx> } @@ -796,31 +863,26 @@ impl<'a, 'gcx, 'tcx> Deref for TyCtxt<'a, 'gcx, 'tcx> { } pub struct GlobalCtxt<'tcx> { - global_arenas: &'tcx GlobalArenas<'tcx>, + global_arenas: &'tcx WorkerLocal>, global_interners: CtxtInterners<'tcx>, - cstore: &'tcx CrateStore, + cstore: &'tcx CrateStoreDyn, pub sess: &'tcx Session, pub dep_graph: DepGraph, - /// This provides access to the incr. comp. on-disk cache for query results. - /// Do not access this directly. It is only meant to be used by - /// `DepGraph::try_mark_green()` and the query infrastructure in `ty::maps`. - pub(crate) on_disk_query_result_cache: maps::OnDiskCache<'tcx>, - /// Common types, pre-interned for your convenience. pub types: CommonTypes<'tcx>, /// Map indicating what traits are in scope for places where this /// is relevant; generated by resolve. trait_map: FxHashMap>>>>, + Lrc>>>>, /// Export map produced by name resolution. - export_map: FxHashMap>>, + export_map: FxHashMap>>, pub hir: hir_map::Map<'tcx>, @@ -828,19 +890,19 @@ pub struct GlobalCtxt<'tcx> { /// as well as all upstream crates. Only populated in incremental mode. pub def_path_hash_to_def_id: Option>, - pub maps: maps::Maps<'tcx>, + pub(crate) queries: query::Queries<'tcx>, // Records the free variables refrenced by every closure // expression. 
Do not track deps for this, just recompute it from // scratch every time. - freevars: FxHashMap>>, + freevars: FxHashMap>>, maybe_unused_trait_imports: FxHashSet, maybe_unused_extern_crates: Vec<(DefId, Span)>, // Internal cache for metadata decoding. No need to track deps on this. - pub rcache: RefCell>>, + pub rcache: Lock>>, /// Caches the results of trait selection. This cache is used /// for things that do not have to do with the parameters in scope. @@ -858,143 +920,36 @@ pub struct GlobalCtxt<'tcx> { /// Data layout specification for the current target. pub data_layout: TargetDataLayout, - /// Used to prevent layout from recursing too deeply. - pub layout_depth: Cell, + stability_interner: Lock>, - /// Map from function to the `#[derive]` mode that it's defining. Only used - /// by `proc-macro` crates. - pub derive_macros: RefCell>, + /// Stores the value of constants (and deduplicates the actual memory) + allocation_interner: Lock>, - stability_interner: RefCell>, + pub alloc_map: Lock>, - pub interpret_interner: RefCell>, - - layout_interner: RefCell>, - - /// A vector of every trait accessible in the whole crate - /// (i.e. including those from subcrates). This is used only for - /// error reporting, and so is lazily initialized and generally - /// shouldn't taint the common path (hence the RefCell). - pub all_traits: RefCell>>, + layout_interner: Lock>, /// A general purpose channel to throw data out the back towards LLVM worker /// threads. /// - /// This is intended to only get used during the trans phase of the compiler + /// This is intended to only get used during the codegen phase of the compiler /// when satisfying the query for a particular codegen unit. Internally in /// the query it'll send data along this channel to get processed later. 
- pub tx_to_llvm_workers: mpsc::Sender>, + pub tx_to_llvm_workers: Lock>>, output_filenames: Arc, } -/// Everything needed to efficiently work with interned allocations -#[derive(Debug, Default)] -pub struct InterpretInterner<'tcx> { - /// Stores the value of constants (and deduplicates the actual memory) - allocs: FxHashSet<&'tcx interpret::Allocation>, - - /// Allows obtaining function instance handles via a unique identifier - functions: FxHashMap>, - - /// Inverse map of `interpret_functions`. - /// Used so we don't allocate a new pointer every time we need one - function_cache: FxHashMap, interpret::AllocId>, - - /// Allows obtaining const allocs via a unique identifier - alloc_by_id: FxHashMap, - - /// The AllocId to assign to the next new regular allocation. - /// Always incremented, never gets smaller. - next_id: interpret::AllocId, - - /// Allows checking whether a constant already has an allocation - alloc_cache: FxHashMap, interpret::AllocId>, - - /// A cache for basic byte allocations keyed by their contents. This is used to deduplicate - /// allocations for string and bytestring literals. 
- literal_alloc_cache: FxHashMap, interpret::AllocId>, -} - -impl<'tcx> InterpretInterner<'tcx> { - pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> interpret::AllocId { - if let Some(&alloc_id) = self.function_cache.get(&instance) { - return alloc_id; - } - let id = self.reserve(); - debug!("creating fn ptr: {}", id); - self.functions.insert(id, instance); - self.function_cache.insert(instance, id); - id - } - - pub fn get_fn( - &self, - id: interpret::AllocId, - ) -> Option> { - self.functions.get(&id).cloned() - } - - pub fn get_alloc( - &self, - id: interpret::AllocId, - ) -> Option<&'tcx interpret::Allocation> { - self.alloc_by_id.get(&id).cloned() - } - - pub fn get_cached( - &self, - global_id: interpret::GlobalId<'tcx>, - ) -> Option { - self.alloc_cache.get(&global_id).cloned() - } - - pub fn cache( - &mut self, - global_id: interpret::GlobalId<'tcx>, - ptr: interpret::AllocId, - ) { - if let Some(old) = self.alloc_cache.insert(global_id, ptr) { - bug!("tried to cache {:?}, but was already existing as {:#?}", global_id, old); - } - } - - pub fn intern_at_reserved( - &mut self, - id: interpret::AllocId, - alloc: &'tcx interpret::Allocation, - ) { - if let Some(old) = self.alloc_by_id.insert(id, alloc) { - bug!("tried to intern allocation at {}, but was already existing as {:#?}", id, old); - } - } - - /// obtains a new allocation ID that can be referenced but does not - /// yet have an allocation backing it. - pub fn reserve( - &mut self, - ) -> interpret::AllocId { - let next = self.next_id; - self.next_id.0 = self.next_id.0 - .checked_add(1) - .expect("You overflowed a u64 by incrementing by 1... \ - You've just earned yourself a free drink if we ever meet. \ - Seriously, how did you do that?!"); - next - } -} - -impl<'tcx> GlobalCtxt<'tcx> { - /// Get the global TyCtxt. 
- pub fn global_tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { - TyCtxt { - gcx: self, - interners: &self.global_interners - } - } -} - impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Get the global TyCtxt. + #[inline] + pub fn global_tcx(self) -> TyCtxt<'a, 'gcx, 'gcx> { + TyCtxt { + gcx: self.gcx, + interners: &self.gcx.global_interners, + } + } + pub fn alloc_generics(self, generics: ty::Generics) -> &'gcx ty::Generics { self.global_arenas.generics.alloc(generics) } @@ -1053,58 +1008,49 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn intern_const_alloc( self, - alloc: interpret::Allocation, - ) -> &'gcx interpret::Allocation { - if let Some(alloc) = self.interpret_interner.borrow().allocs.get(&alloc) { + alloc: Allocation, + ) -> &'gcx Allocation { + let allocs = &mut self.allocation_interner.borrow_mut(); + if let Some(alloc) = allocs.get(&alloc) { return alloc; } let interned = self.global_arenas.const_allocs.alloc(alloc); - if let Some(prev) = self.interpret_interner.borrow_mut().allocs.replace(interned) { + if let Some(prev) = allocs.replace(interned) { bug!("Tried to overwrite interned Allocation: {:#?}", prev) } interned } /// Allocates a byte or string literal for `mir::interpret` - pub fn allocate_cached(self, bytes: &[u8]) -> interpret::AllocId { - // check whether we already allocated this literal or a constant with the same memory - if let Some(&alloc_id) = self.interpret_interner.borrow().literal_alloc_cache.get(bytes) { - return alloc_id; - } + pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId { // create an allocation that just contains these bytes - let alloc = interpret::Allocation::from_bytes(bytes); + let alloc = interpret::Allocation::from_byte_aligned_bytes(bytes); let alloc = self.intern_const_alloc(alloc); - - let mut int = self.interpret_interner.borrow_mut(); - // the next unique id - let id = int.reserve(); - // make the allocation identifiable - int.alloc_by_id.insert(id, alloc); - // cache it for the future - 
int.literal_alloc_cache.insert(bytes.to_owned(), id); - id + self.alloc_map.lock().allocate(alloc) } pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability { - if let Some(st) = self.stability_interner.borrow().get(&stab) { + let mut stability_interner = self.stability_interner.borrow_mut(); + if let Some(st) = stability_interner.get(&stab) { return st; } let interned = self.global_interners.arena.alloc(stab); - if let Some(prev) = self.stability_interner.borrow_mut().replace(interned) { + if let Some(prev) = stability_interner.replace(interned) { bug!("Tried to overwrite interned Stability: {:?}", prev) } interned } pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails { - if let Some(layout) = self.layout_interner.borrow().get(&layout) { + let mut layout_interner = self.layout_interner.borrow_mut(); + if let Some(layout) = layout_interner.get(&layout) { return layout; } let interned = self.global_arenas.layout.alloc(layout); - if let Some(prev) = self.layout_interner.borrow_mut().replace(interned) { + if let Some(prev) = layout_interner.replace(interned) { bug!("Tried to overwrite interned Layout: {:?}", prev) } interned @@ -1131,20 +1077,22 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// value (types, substs, etc.) can only be used while `ty::tls` has a valid /// reference to the context, to allow formatting values that need it. 
pub fn create_and_enter(s: &'tcx Session, - cstore: &'tcx CrateStore, - local_providers: ty::maps::Providers<'tcx>, - extern_providers: ty::maps::Providers<'tcx>, + cstore: &'tcx CrateStoreDyn, + local_providers: ty::query::Providers<'tcx>, + extern_providers: ty::query::Providers<'tcx>, arenas: &'tcx AllArenas<'tcx>, resolutions: ty::Resolutions, hir: hir_map::Map<'tcx>, - on_disk_query_result_cache: maps::OnDiskCache<'tcx>, + on_disk_query_result_cache: query::OnDiskCache<'tcx>, crate_name: &str, - tx: mpsc::Sender>, + tx: mpsc::Sender>, output_filenames: &OutputFilenames, f: F) -> R where F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'tcx>) -> R { - let data_layout = TargetDataLayout::parse(s); + let data_layout = TargetDataLayout::parse(&s.target.target).unwrap_or_else(|err| { + s.fatal(&err); + }); let interners = CtxtInterners::new(&arenas.interner); let common_types = CommonTypes::new(&interners); let dep_graph = hir.dep_graph.clone(); @@ -1153,7 +1101,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { providers[LOCAL_CRATE] = local_providers; let def_path_hash_to_def_id = if s.opts.build_dep_graph() { - let upstream_def_path_tables: Vec<(CrateNum, Rc<_>)> = cstore + let upstream_def_path_tables: Vec<(CrateNum, Lrc<_>)> = cstore .crates_untracked() .iter() .map(|&cnum| (cnum, cstore.def_path_table(cnum))) @@ -1184,30 +1132,28 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { None }; - let mut trait_map = FxHashMap(); + let mut trait_map: FxHashMap<_, Lrc>> = FxHashMap(); for (k, v) in resolutions.trait_map { let hir_id = hir.node_to_hir_id(k); - let map = trait_map.entry(hir_id.owner) - .or_insert_with(|| Rc::new(FxHashMap())); - Rc::get_mut(map).unwrap() + let map = trait_map.entry(hir_id.owner).or_default(); + Lrc::get_mut(map).unwrap() .insert(hir_id.local_id, - Rc::new(StableVec::new(v))); + Lrc::new(StableVec::new(v))); } - tls::enter_global(GlobalCtxt { + let gcx = &GlobalCtxt { sess: s, cstore, global_arenas: &arenas.global, global_interners: interners, 
dep_graph: dep_graph.clone(), - on_disk_query_result_cache, types: common_types, trait_map, export_map: resolutions.export_map.into_iter().map(|(k, v)| { - (k, Rc::new(v)) + (k, Lrc::new(v)) }).collect(), freevars: resolutions.freevars.into_iter().map(|(k, v)| { - (hir.local_def_id(k), Rc::new(v)) + (hir.local_def_id(k), Lrc::new(v)) }).collect(), maybe_unused_trait_imports: resolutions.maybe_unused_trait_imports @@ -1221,21 +1167,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { .collect(), hir, def_path_hash_to_def_id, - maps: maps::Maps::new(providers), - rcache: RefCell::new(FxHashMap()), + queries: query::Queries::new(providers, on_disk_query_result_cache), + rcache: Lock::new(FxHashMap()), selection_cache: traits::SelectionCache::new(), evaluation_cache: traits::EvaluationCache::new(), crate_name: Symbol::intern(crate_name), data_layout, - layout_interner: RefCell::new(FxHashSet()), - layout_depth: Cell::new(0), - derive_macros: RefCell::new(NodeMap()), - stability_interner: RefCell::new(FxHashSet()), - interpret_interner: Default::default(), - all_traits: RefCell::new(None), - tx_to_llvm_workers: tx, + layout_interner: Lock::new(FxHashSet()), + stability_interner: Lock::new(FxHashSet()), + allocation_interner: Lock::new(FxHashSet()), + alloc_map: Lock::new(interpret::AllocMap::new()), + tx_to_llvm_workers: Lock::new(tx), output_filenames: Arc::new(output_filenames.clone()), - }, f) + }; + + sync::assert_send_val(&gcx); + + tls::enter_global(gcx, f) } pub fn consider_optimizing String>(&self, msg: T) -> bool { @@ -1243,18 +1191,60 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.sess.consider_optimizing(&cname, msg) } - pub fn lang_items(self) -> Rc { + pub fn lib_features(self) -> Lrc { + self.get_lib_features(LOCAL_CRATE) + } + + pub fn lang_items(self) -> Lrc { self.get_lang_items(LOCAL_CRATE) } - pub fn stability(self) -> Rc> { + /// Due to missing llvm support for lowering 128 bit math to software emulation + /// (on some targets), the lowering 
can be done in MIR. + /// + /// This function only exists until said support is implemented. + pub fn is_binop_lang_item(&self, def_id: DefId) -> Option<(mir::BinOp, bool)> { + let items = self.lang_items(); + let def_id = Some(def_id); + if items.i128_add_fn() == def_id { Some((mir::BinOp::Add, false)) } + else if items.u128_add_fn() == def_id { Some((mir::BinOp::Add, false)) } + else if items.i128_sub_fn() == def_id { Some((mir::BinOp::Sub, false)) } + else if items.u128_sub_fn() == def_id { Some((mir::BinOp::Sub, false)) } + else if items.i128_mul_fn() == def_id { Some((mir::BinOp::Mul, false)) } + else if items.u128_mul_fn() == def_id { Some((mir::BinOp::Mul, false)) } + else if items.i128_div_fn() == def_id { Some((mir::BinOp::Div, false)) } + else if items.u128_div_fn() == def_id { Some((mir::BinOp::Div, false)) } + else if items.i128_rem_fn() == def_id { Some((mir::BinOp::Rem, false)) } + else if items.u128_rem_fn() == def_id { Some((mir::BinOp::Rem, false)) } + else if items.i128_shl_fn() == def_id { Some((mir::BinOp::Shl, false)) } + else if items.u128_shl_fn() == def_id { Some((mir::BinOp::Shl, false)) } + else if items.i128_shr_fn() == def_id { Some((mir::BinOp::Shr, false)) } + else if items.u128_shr_fn() == def_id { Some((mir::BinOp::Shr, false)) } + else if items.i128_addo_fn() == def_id { Some((mir::BinOp::Add, true)) } + else if items.u128_addo_fn() == def_id { Some((mir::BinOp::Add, true)) } + else if items.i128_subo_fn() == def_id { Some((mir::BinOp::Sub, true)) } + else if items.u128_subo_fn() == def_id { Some((mir::BinOp::Sub, true)) } + else if items.i128_mulo_fn() == def_id { Some((mir::BinOp::Mul, true)) } + else if items.u128_mulo_fn() == def_id { Some((mir::BinOp::Mul, true)) } + else if items.i128_shlo_fn() == def_id { Some((mir::BinOp::Shl, true)) } + else if items.u128_shlo_fn() == def_id { Some((mir::BinOp::Shl, true)) } + else if items.i128_shro_fn() == def_id { Some((mir::BinOp::Shr, true)) } + else if items.u128_shro_fn() == def_id { 
Some((mir::BinOp::Shr, true)) } + else { None } + } + + pub fn stability(self) -> Lrc> { self.stability_index(LOCAL_CRATE) } - pub fn crates(self) -> Rc> { + pub fn crates(self) -> Lrc> { self.all_crate_nums(LOCAL_CRATE) } + pub fn features(self) -> Lrc { + self.features_query(LOCAL_CRATE) + } + pub fn def_key(self, id: DefId) -> hir_map::DefKey { if id.is_local() { self.hir.def_key(id) @@ -1312,11 +1302,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // Note that this is *untracked* and should only be used within the query // system if the result is otherwise tracked through queries - pub fn crate_data_as_rc_any(self, cnum: CrateNum) -> Rc { + pub fn crate_data_as_rc_any(self, cnum: CrateNum) -> Lrc { self.cstore.crate_data_as_rc_any(cnum) } - pub fn create_stable_hashing_context(self) -> StableHashingContext<'gcx> { + pub fn create_stable_hashing_context(self) -> StableHashingContext<'a> { let krate = self.dep_graph.with_ignore(|| self.gcx.hir.krate()); StableHashingContext::new(self.sess, @@ -1359,26 +1349,180 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { -> Result<(), E::Error> where E: ty::codec::TyEncoder { - self.on_disk_query_result_cache.serialize(self.global_tcx(), encoder) + self.queries.on_disk_cache.serialize(self.global_tcx(), encoder) } + /// If true, we should use a naive AST walk to determine if match + /// guard could perform bad mutations (or mutable-borrows). + pub fn check_for_mutation_in_guard_via_ast_walk(self) -> bool { + !self.sess.opts.debugging_opts.disable_ast_check_for_mutation_in_guard + } + + /// If true, we should use the AST-based borrowck (we may *also* use + /// the MIR-based borrowck). + pub fn use_ast_borrowck(self) -> bool { + self.borrowck_mode().use_ast() + } + + /// If true, we should use the MIR-based borrowck (we may *also* use + /// the AST-based borrowck). 
+ pub fn use_mir_borrowck(self) -> bool { + self.borrowck_mode().use_mir() + } + + /// If true, we should use the MIR-based borrow check, but also + /// fall back on the AST borrow check if the MIR-based one errors. + pub fn migrate_borrowck(self) -> bool { + self.borrowck_mode().migrate() + } + + /// If true, make MIR codegen for `match` emit a temp that holds a + /// borrow of the input to the match expression. + pub fn generate_borrow_of_any_match_input(&self) -> bool { + self.emit_read_for_match() + } + + /// If true, make MIR codegen for `match` emit ReadForMatch + /// statements (which simulate the maximal effect of executing the + /// patterns in a match arm). + pub fn emit_read_for_match(&self) -> bool { + self.use_mir_borrowck() && !self.sess.opts.debugging_opts.nll_dont_emit_read_for_match + } + + /// If true, pattern variables for use in guards on match arms + /// will be bound as references to the data, and occurrences of + /// those variables in the guard expression will implicitly + /// dereference those bindings. (See rust-lang/rust#27282.) + pub fn all_pat_vars_are_implicit_refs_within_guards(self) -> bool { + self.borrowck_mode().use_mir() + } + + /// If true, we should enable two-phase borrows checks. This is + /// done with either: `-Ztwo-phase-borrows`, `#![feature(nll)]`, + /// or by opting into an edition after 2015. + pub fn two_phase_borrows(self) -> bool { + if self.features().nll || self.sess.opts.debugging_opts.two_phase_borrows { + return true; + } + + match self.sess.edition() { + Edition::Edition2015 => false, + Edition::Edition2018 => true, + _ => true, + } + } + + /// What mode(s) of borrowck should we run? AST? MIR? both? + /// (Also considers the `#![feature(nll)]` setting.) + pub fn borrowck_mode(&self) -> BorrowckMode { + // Here are the main constraints we need to deal with: + // + // 1. An opts.borrowck_mode of `BorrowckMode::Ast` is + // synonymous with no `-Z borrowck=...` flag at all. 
+ // (This is arguably a historical accident.) + // + // 2. `BorrowckMode::Migrate` is the limited migration to + // NLL that we are deploying with the 2018 edition. + // + // 3. We want to allow developers on the Nightly channel + // to opt back into the "hard error" mode for NLL, + // (which they can do via specifying `#![feature(nll)]` + // explicitly in their crate). + // + // So, this precedence list is how pnkfelix chose to work with + // the above constraints: + // + // * `#![feature(nll)]` *always* means use NLL with hard + // errors. (To simplify the code here, it now even overrides + // a user's attempt to specify `-Z borrowck=compare`, which + // we arguably do not need anymore and should remove.) + // + // * Otherwise, if no `-Z borrowck=...` flag was given (or + // if `borrowck=ast` was specified), then use the default + // as required by the edition. + // + // * Otherwise, use the behavior requested via `-Z borrowck=...` + + if self.features().nll { return BorrowckMode::Mir; } + + match self.sess.opts.borrowck_mode { + mode @ BorrowckMode::Mir | + mode @ BorrowckMode::Compare | + mode @ BorrowckMode::Migrate => mode, + + BorrowckMode::Ast => match self.sess.edition() { + Edition::Edition2015 => BorrowckMode::Ast, + Edition::Edition2018 => BorrowckMode::Migrate, + + // For now, future editions mean Migrate. (But it + // would make a lot of sense for it to be changed to + // `BorrowckMode::Mir`, depending on how we plan to + // time the forcing of full migration to NLL.) + _ => BorrowckMode::Migrate, + }, + } + } + + /// Should we emit EndRegion MIR statements? These are consumed by + /// MIR borrowck, but not when NLL is used. They are also consumed + /// by the validation stuff. 
+ pub fn emit_end_regions(self) -> bool { + self.sess.opts.debugging_opts.emit_end_regions || + self.sess.opts.debugging_opts.mir_emit_validate > 0 || + self.use_mir_borrowck() + } + + #[inline] + pub fn local_crate_exports_generics(self) -> bool { + debug_assert!(self.sess.opts.share_generics()); + + self.sess.crate_types.borrow().iter().any(|crate_type| { + match crate_type { + CrateType::Executable | + CrateType::Staticlib | + CrateType::ProcMacro | + CrateType::Cdylib => false, + CrateType::Rlib | + CrateType::Dylib => true, + } + }) + } } impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { - pub fn encode_metadata(self, link_meta: &LinkMeta, reachable: &NodeSet) + pub fn encode_metadata(self, link_meta: &LinkMeta) -> EncodedMetadata { - self.cstore.encode_metadata(self, link_meta, reachable) + self.cstore.encode_metadata(self, link_meta) } } impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { /// Call the closure with a local `TyCtxt` using the given arena. - pub fn enter_local(&self, arena: &'tcx DroplessArena, f: F) -> R - where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + pub fn enter_local( + &self, + arena: &'tcx SyncDroplessArena, + f: F + ) -> R + where + F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { let interners = CtxtInterners::new(arena); - tls::enter(self, &interners, f) + let tcx = TyCtxt { + gcx: self, + interners: &interners, + }; + ty::tls::with_related_context(tcx.global_tcx(), |icx| { + let new_icx = ty::tls::ImplicitCtxt { + tcx, + query: icx.query.clone(), + layout_depth: icx.layout_depth, + task: icx.task, + }; + ty::tls::enter_context(&new_icx, |new_icx| { + f(new_icx.tcx) + }) + }) } } @@ -1399,8 +1543,8 @@ impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { /// contain the TypeVariants key or if the address of the interned /// pointer differs. The latter case is possible if a primitive type, /// e.g. `()` or `u8`, was interned in a different context. 
-pub trait Lift<'tcx> { - type Lifted; +pub trait Lift<'tcx>: fmt::Debug { + type Lifted: fmt::Debug + 'tcx; fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option; } @@ -1434,6 +1578,57 @@ impl<'a, 'tcx> Lift<'tcx> for Region<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for &'a Goal<'a> { + type Lifted = &'tcx Goal<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Goal<'tcx>> { + if tcx.interners.arena.in_arena(*self as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { + type Lifted = &'tcx Slice>; + fn lift_to_tcx<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'tcx>, + ) -> Option<&'tcx Slice>> { + if tcx.interners.arena.in_arena(*self as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { + type Lifted = &'tcx Slice>; + fn lift_to_tcx<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'tcx>, + ) -> Option<&'tcx Slice>> { + if tcx.interners.arena.in_arena(*self as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. 
+ if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + impl<'a, 'tcx> Lift<'tcx> for &'a Const<'a> { type Lifted = &'tcx Const<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Const<'tcx>> { @@ -1524,83 +1719,292 @@ impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { } } +impl<'a, 'tcx> Lift<'tcx> for &'a Slice { + type Lifted = &'tcx Slice; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + if self.len() == 0 { + return Some(Slice::empty()); + } + if tcx.interners.arena.in_arena(*self as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + pub mod tls { - use super::{CtxtInterners, GlobalCtxt, TyCtxt}; + use super::{GlobalCtxt, TyCtxt}; - use std::cell::Cell; use std::fmt; + use std::mem; use syntax_pos; + use ty::query; + use errors::{Diagnostic, TRACK_DIAGNOSTICS}; + use rustc_data_structures::OnDrop; + use rustc_data_structures::sync::{self, Lrc, Lock}; + use dep_graph::OpenTask; - /// Marker types used for the scoped TLS slot. - /// The type context cannot be used directly because the scoped TLS - /// in libstd doesn't allow types generic over lifetimes. - enum ThreadLocalGlobalCtxt {} - enum ThreadLocalInterners {} + #[cfg(not(parallel_queries))] + use std::cell::Cell; - thread_local! { - static TLS_TCX: Cell> = Cell::new(None) + #[cfg(parallel_queries)] + use rayon_core; + + /// This is the implicit state of rustc. It contains the current + /// TyCtxt and query. It is updated when creating a local interner or + /// executing a new query. Whenever there's a TyCtxt value available + /// you should also have access to an ImplicitCtxt through the functions + /// in this module. + #[derive(Clone)] + pub struct ImplicitCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + /// The current TyCtxt. 
Initially created by `enter_global` and updated + /// by `enter_local` with a new local interner + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + + /// The current query job, if any. This is updated by start_job in + /// ty::query::plumbing when executing a query + pub query: Option>>, + + /// Used to prevent layout from recursing too deeply. + pub layout_depth: usize, + + /// The current dep graph task. This is used to add dependencies to queries + /// when executing them + pub task: &'a OpenTask, } + /// Sets Rayon's thread local variable which is preserved for Rayon jobs + /// to `value` during the call to `f`. It is restored to its previous value after. + /// This is used to set the pointer to the new ImplicitCtxt. + #[cfg(parallel_queries)] + fn set_tlv R, R>(value: usize, f: F) -> R { + rayon_core::tlv::with(value, f) + } + + /// Gets Rayon's thread local variable which is preserved for Rayon jobs. + /// This is used to get the pointer to the current ImplicitCtxt. + #[cfg(parallel_queries)] + fn get_tlv() -> usize { + rayon_core::tlv::get() + } + + /// A thread local variable which stores a pointer to the current ImplicitCtxt + #[cfg(not(parallel_queries))] + thread_local!(static TLV: Cell = Cell::new(0)); + + /// Sets TLV to `value` during the call to `f`. + /// It is restored to its previous value after. + /// This is used to set the pointer to the new ImplicitCtxt. + #[cfg(not(parallel_queries))] + fn set_tlv R, R>(value: usize, f: F) -> R { + let old = get_tlv(); + let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old))); + TLV.with(|tlv| tlv.set(value)); + f() + } + + /// This is used to get the pointer to the current ImplicitCtxt. 
+ #[cfg(not(parallel_queries))] + fn get_tlv() -> usize { + TLV.with(|tlv| tlv.get()) + } + + /// This is a callback from libsyntax as it cannot access the implicit state + /// in librustc otherwise fn span_debug(span: syntax_pos::Span, f: &mut fmt::Formatter) -> fmt::Result { with(|tcx| { write!(f, "{}", tcx.sess.codemap().span_to_string(span)) }) } - pub fn enter_global<'gcx, F, R>(gcx: GlobalCtxt<'gcx>, f: F) -> R - where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R + /// This is a callback from libsyntax as it cannot access the implicit state + /// in librustc otherwise. It is used to when diagnostic messages are + /// emitted and stores them in the current query, if there is one. + fn track_diagnostic(diagnostic: &Diagnostic) { + with_context_opt(|icx| { + if let Some(icx) = icx { + if let Some(ref query) = icx.query { + query.diagnostics.lock().push(diagnostic.clone()); + } + } + }) + } + + /// Sets up the callbacks from libsyntax on the current thread + pub fn with_thread_locals(f: F) -> R + where F: FnOnce() -> R { syntax_pos::SPAN_DEBUG.with(|span_dbg| { let original_span_debug = span_dbg.get(); span_dbg.set(span_debug); - let result = enter(&gcx, &gcx.global_interners, f); - span_dbg.set(original_span_debug); - result - }) - } - pub fn enter<'a, 'gcx: 'tcx, 'tcx, F, R>(gcx: &'a GlobalCtxt<'gcx>, - interners: &'a CtxtInterners<'tcx>, - f: F) -> R - where F: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R - { - let gcx_ptr = gcx as *const _ as *const ThreadLocalGlobalCtxt; - let interners_ptr = interners as *const _ as *const ThreadLocalInterners; - TLS_TCX.with(|tls| { - let prev = tls.get(); - tls.set(Some((gcx_ptr, interners_ptr))); - let ret = f(TyCtxt { - gcx, - interners, + let _on_drop = OnDrop(move || { + span_dbg.set(original_span_debug); }); - tls.set(prev); - ret - }) - } - pub fn with(f: F) -> R - where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R - { - TLS_TCX.with(|tcx| { - let (gcx, interners) = tcx.get().unwrap(); - let gcx = unsafe { 
&*(gcx as *const GlobalCtxt) }; - let interners = unsafe { &*(interners as *const CtxtInterners) }; - f(TyCtxt { - gcx, - interners, + TRACK_DIAGNOSTICS.with(|current| { + let original = current.get(); + current.set(track_diagnostic); + + let _on_drop = OnDrop(move || { + current.set(original); + }); + + f() }) }) } + /// Sets `context` as the new current ImplicitCtxt for the duration of the function `f` + pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>, + f: F) -> R + where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R + { + set_tlv(context as *const _ as usize, || { + f(&context) + }) + } + + /// Enters GlobalCtxt by setting up libsyntax callbacks and + /// creating a initial TyCtxt and ImplicitCtxt. + /// This happens once per rustc session and TyCtxts only exists + /// inside the `f` function. + pub fn enter_global<'gcx, F, R>(gcx: &GlobalCtxt<'gcx>, f: F) -> R + where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R + { + with_thread_locals(|| { + // Update GCX_PTR to indicate there's a GlobalCtxt available + GCX_PTR.with(|lock| { + *lock.lock() = gcx as *const _ as usize; + }); + // Set GCX_PTR back to 0 when we exit + let _on_drop = OnDrop(move || { + GCX_PTR.with(|lock| *lock.lock() = 0); + }); + + let tcx = TyCtxt { + gcx, + interners: &gcx.global_interners, + }; + let icx = ImplicitCtxt { + tcx, + query: None, + layout_depth: 0, + task: &OpenTask::Ignore, + }; + enter_context(&icx, |_| { + f(tcx) + }) + }) + } + + /// Stores a pointer to the GlobalCtxt if one is available. + /// This is used to access the GlobalCtxt in the deadlock handler + /// given to Rayon. + scoped_thread_local!(pub static GCX_PTR: Lock); + + /// Creates a TyCtxt and ImplicitCtxt based on the GCX_PTR thread local. + /// This is used in the deadlock handler. 
+ pub unsafe fn with_global(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + let gcx = GCX_PTR.with(|lock| *lock.lock()); + assert!(gcx != 0); + let gcx = &*(gcx as *const GlobalCtxt<'_>); + let tcx = TyCtxt { + gcx, + interners: &gcx.global_interners, + }; + let icx = ImplicitCtxt { + query: None, + tcx, + layout_depth: 0, + task: &OpenTask::Ignore, + }; + enter_context(&icx, |_| f(tcx)) + } + + /// Allows access to the current ImplicitCtxt in a closure if one is available + pub fn with_context_opt(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R + { + let context = get_tlv(); + if context == 0 { + f(None) + } else { + // We could get a ImplicitCtxt pointer from another thread. + // Ensure that ImplicitCtxt is Sync + sync::assert_sync::(); + + unsafe { f(Some(&*(context as *const ImplicitCtxt))) } + } + } + + /// Allows access to the current ImplicitCtxt. + /// Panics if there is no ImplicitCtxt available + pub fn with_context(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R + { + with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls"))) + } + + /// Allows access to the current ImplicitCtxt whose tcx field has the same global + /// interner as the tcx argument passed in. This means the closure is given an ImplicitCtxt + /// with the same 'gcx lifetime as the TyCtxt passed in. + /// This will panic if you pass it a TyCtxt which has a different global interner from + /// the current ImplicitCtxt's tcx field. 
+ pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R + where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R + { + with_context(|context| { + unsafe { + let gcx = tcx.gcx as *const _ as usize; + assert!(context.tcx.gcx as *const _ as usize == gcx); + let context: &ImplicitCtxt = mem::transmute(context); + f(context) + } + }) + } + + /// Allows access to the current ImplicitCtxt whose tcx field has the same global + /// interner and local interner as the tcx argument passed in. This means the closure + /// is given an ImplicitCtxt with the same 'tcx and 'gcx lifetimes as the TyCtxt passed in. + /// This will panic if you pass it a TyCtxt which has a different global interner or + /// a different local interner from the current ImplicitCtxt's tcx field. + pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R + where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R + { + with_context(|context| { + unsafe { + let gcx = tcx.gcx as *const _ as usize; + let interners = tcx.interners as *const _ as usize; + assert!(context.tcx.gcx as *const _ as usize == gcx); + assert!(context.tcx.interners as *const _ as usize == interners); + let context: &ImplicitCtxt = mem::transmute(context); + f(context) + } + }) + } + + /// Allows access to the TyCtxt in the current ImplicitCtxt. + /// Panics if there is no ImplicitCtxt available + pub fn with(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + with_context(|context| f(context.tcx)) + } + + /// Allows access to the TyCtxt in the current ImplicitCtxt. 
+ /// The closure is passed None if there is no ImplicitCtxt available pub fn with_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option>) -> R { - if TLS_TCX.with(|tcx| tcx.get().is_some()) { - with(|v| f(Some(v))) - } else { - f(None) - } + with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx))) } } @@ -1672,13 +2076,14 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { pub fn print_debug_stats(self) { sty_debug_print!( self, - TyAdt, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr, TyGenerator, TyForeign, - TyDynamic, TyClosure, TyTuple, TyParam, TyInfer, TyProjection, TyAnon); + TyAdt, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, TyFnPtr, + TyGenerator, TyGeneratorWitness, TyDynamic, TyClosure, TyTuple, + TyParam, TyInfer, TyProjection, TyAnon, TyForeign); println!("Substs interner: #{}", self.interners.substs.borrow().len()); println!("Region interner: #{}", self.interners.region.borrow().len()); println!("Stability interner: #{}", self.stability_interner.borrow().len()); - println!("Interpret interner: #{}", self.interpret_interner.borrow().allocs.len()); + println!("Allocation interner: #{}", self.allocation_interner.borrow().len()); println!("Layout interner: #{}", self.layout_interner.borrow().len()); } } @@ -1729,6 +2134,12 @@ impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, Slice>> { } } +impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, Slice> { + fn borrow<'a>(&'a self) -> &'a [CanonicalVarInfo] { + &self.0[..] + } +} + impl<'tcx: 'lcx, 'lcx> Borrow<[Kind<'lcx>]> for Interned<'tcx, Substs<'tcx>> { fn borrow<'a>(&'a self) -> &'a [Kind<'lcx>] { &self.0[..] @@ -1761,39 +2172,38 @@ impl<'tcx: 'lcx, 'lcx> Borrow> for Interned<'tcx, Const<'tcx>> { } } +impl<'tcx: 'lcx, 'lcx> Borrow<[Clause<'lcx>]> +for Interned<'tcx, Slice>> { + fn borrow<'a>(&'a self) -> &'a [Clause<'lcx>] { + &self.0[..] 
+ } +} + +impl<'tcx: 'lcx, 'lcx> Borrow<[Goal<'lcx>]> +for Interned<'tcx, Slice>> { + fn borrow<'a>(&'a self) -> &'a [Goal<'lcx>] { + &self.0[..] + } +} + macro_rules! intern_method { ($lt_tcx:tt, $name:ident: $method:ident($alloc:ty, - $alloc_method:ident, + $alloc_method:expr, $alloc_to_key:expr, - $alloc_to_ret:expr, - $needs_infer:expr) -> $ty:ty) => { + $keep_in_local_tcx:expr) -> $ty:ty) => { impl<'a, 'gcx, $lt_tcx> TyCtxt<'a, 'gcx, $lt_tcx> { pub fn $method(self, v: $alloc) -> &$lt_tcx $ty { - { - let key = ($alloc_to_key)(&v); - if let Some(i) = self.interners.$name.borrow().get(key) { - return i.0; - } - if !self.is_global() { - if let Some(i) = self.global_interners.$name.borrow().get(key) { - return i.0; - } - } - } + let key = ($alloc_to_key)(&v); // HACK(eddyb) Depend on flags being accurate to // determine that all contents are in the global tcx. // See comments on Lift for why we can't use that. - if !($needs_infer)(&v) { - if !self.is_global() { - let v = unsafe { - mem::transmute(v) - }; - let i = ($alloc_to_ret)(self.global_interners.arena.$alloc_method(v)); - self.global_interners.$name.borrow_mut().insert(Interned(i)); - return i; + if ($keep_in_local_tcx)(&v) { + let mut interner = self.interners.$name.borrow_mut(); + if let Some(&Interned(v)) = interner.get(key) { + return v; } - } else { + // Make sure we don't end up with inference // types/regions in the global tcx. if self.is_global() { @@ -1801,18 +2211,33 @@ macro_rules! 
intern_method { inference types/regions in the global type context", v); } - } - let i = ($alloc_to_ret)(self.interners.arena.$alloc_method(v)); - self.interners.$name.borrow_mut().insert(Interned(i)); - i + let i = $alloc_method(&self.interners.arena, v); + interner.insert(Interned(i)); + i + } else { + let mut interner = self.global_interners.$name.borrow_mut(); + if let Some(&Interned(v)) = interner.get(key) { + return v; + } + + // This transmutes $alloc<'tcx> to $alloc<'gcx> + let v = unsafe { + mem::transmute(v) + }; + let i: &$lt_tcx $ty = $alloc_method(&self.global_interners.arena, v); + // Cast to 'gcx + let i = unsafe { mem::transmute(i) }; + interner.insert(Interned(i)); + i + } } } } } macro_rules! direct_interners { - ($lt_tcx:tt, $($name:ident: $method:ident($needs_infer:expr) -> $ty:ty),+) => { + ($lt_tcx:tt, $($name:ident: $method:ident($keep_in_local_tcx:expr) -> $ty:ty),+) => { $(impl<$lt_tcx> PartialEq for Interned<$lt_tcx, $ty> { fn eq(&self, other: &Self) -> bool { self.0 == other.0 @@ -1827,7 +2252,12 @@ macro_rules! direct_interners { } } - intern_method!($lt_tcx, $name: $method($ty, alloc, |x| x, |x| x, $needs_infer) -> $ty);)+ + intern_method!( + $lt_tcx, + $name: $method($ty, + |a: &$lt_tcx SyncDroplessArena, v| -> &$lt_tcx $ty { a.alloc(v) }, + |x| x, + $keep_in_local_tcx) -> $ty);)+ } } @@ -1836,21 +2266,17 @@ pub fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool { } direct_interners!('tcx, - region: mk_region(|r| { - match r { - &ty::ReVar(_) | &ty::ReSkolemized(..) => true, - _ => false - } - }) -> RegionKind, + region: mk_region(|r: &RegionKind| r.keep_in_local_tcx()) -> RegionKind, const_: mk_const(|c: &Const| keep_local(&c.ty) || keep_local(&c.val)) -> Const<'tcx> ); macro_rules! 
slice_interners { ($($field:ident: $method:ident($ty:ident)),+) => ( - $(intern_method!('tcx, $field: $method(&[$ty<'tcx>], alloc_slice, Deref::deref, - |xs: &[$ty]| -> &Slice<$ty> { - unsafe { mem::transmute(xs) } - }, |xs: &[$ty]| xs.iter().any(keep_local)) -> Slice<$ty<'tcx>>);)+ + $(intern_method!( 'tcx, $field: $method( + &[$ty<'tcx>], + |a, v| Slice::from_arena(a, v), + Deref::deref, + |xs: &[$ty]| xs.iter().any(keep_local)) -> Slice<$ty<'tcx>>);)+ ) } @@ -1858,9 +2284,26 @@ slice_interners!( existential_predicates: _intern_existential_predicates(ExistentialPredicate), predicates: _intern_predicates(Predicate), type_list: _intern_type_list(Ty), - substs: _intern_substs(Kind) + substs: _intern_substs(Kind), + clauses: _intern_clauses(Clause), + goals: _intern_goals(Goal) ); +// This isn't a perfect fit: CanonicalVarInfo slices are always +// allocated in the global arena, so this `intern_method!` macro is +// overly general. But we just return false for the code that checks +// whether they belong in the thread-local arena, so no harm done, and +// seems better than open-coding the rest. +intern_method! 
{ + 'tcx, + canonical_var_infos: _intern_canonical_var_infos( + &[CanonicalVarInfo], + |a, v| Slice::from_arena(a, v), + Deref::deref, + |_xs: &[CanonicalVarInfo]| -> bool { false } + ) -> Slice +} + impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Given a `fn` type, returns an equivalent `unsafe fn` type; /// that is, a `fn` type that is equivalent in every way for being @@ -1880,7 +2323,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn coerce_closure_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> { let converted_sig = sig.map_bound(|s| { let params_iter = match s.inputs()[0].sty { - ty::TyTuple(params, _) => { + ty::TyTuple(params) => { params.into_iter().cloned() } _ => bug!(), @@ -1897,15 +2340,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_fn_ptr(converted_sig) } - // Interns a type/name combination, stores the resulting box in cx.interners, - // and returns the box as cast to an unsafe ptr (see comments for Ty above). - pub fn mk_ty(self, st: TypeVariants<'tcx>) -> Ty<'tcx> { - let global_interners = if !self.is_global() { - Some(&self.global_interners) - } else { - None - }; - self.interners.intern_ty(st, global_interners) + pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> { + CtxtInterners::intern_ty(&self.interners, &self.global_interners, st) } pub fn mk_mach_int(self, tm: ast::IntTy) -> Ty<'tcx> { @@ -1957,7 +2393,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> { let def_id = self.require_lang_item(lang_items::OwnedBoxLangItem); let adt_def = self.adt_def(def_id); - let substs = self.mk_substs(iter::once(Kind::from(ty))); + let substs = Substs::for_item(self, def_id, |param, substs| { + match param.kind { + GenericParamDefKind::Lifetime => bug!(), + GenericParamDefKind::Type { has_default, .. 
} => { + if param.index == 0 { + ty.into() + } else { + assert!(has_default); + self.type_of(param.def_id).subst(self, substs).into() + } + } + } + }); self.mk_ty(TyAdt(adt_def, substs)) } @@ -1966,7 +2414,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> { - self.mk_ty(TyRef(r, tm)) + self.mk_ty(TyRef(r, tm.ty, tm.mutbl)) } pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> { @@ -1990,39 +2438,30 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> { - let n = ConstUsize::new(n, self.sess.target.usize_ty).unwrap(); - self.mk_array_const_usize(ty, n) - } - - pub fn mk_array_const_usize(self, ty: Ty<'tcx>, n: ConstUsize) -> Ty<'tcx> { - self.mk_ty(TyArray(ty, self.mk_const(ty::Const { - val: ConstVal::Integral(ConstInt::Usize(n)), - ty: self.types.usize - }))) + self.mk_ty(TyArray(ty, ty::Const::from_usize(self, n))) } pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> { self.mk_ty(TySlice(ty)) } - pub fn intern_tup(self, ts: &[Ty<'tcx>], defaulted: bool) -> Ty<'tcx> { - self.mk_ty(TyTuple(self.intern_type_list(ts), defaulted)) + pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> { + self.mk_ty(TyTuple(self.intern_type_list(ts))) } - pub fn mk_tup], Ty<'tcx>>>(self, iter: I, - defaulted: bool) -> I::Output { - iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts), defaulted))) + pub fn mk_tup], Ty<'tcx>>>(self, iter: I) -> I::Output { + iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts)))) } pub fn mk_nil(self) -> Ty<'tcx> { - self.intern_tup(&[], false) + self.intern_tup(&[]) } pub fn mk_diverging_default(self) -> Ty<'tcx> { - if self.sess.features.borrow().never_type { + if self.features().never_type { self.types.never } else { - self.intern_tup(&[], true) + self.intern_tup(&[]) } } @@ -2057,26 +2496,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { })) } - pub fn mk_closure(self, - 
closure_id: DefId, - substs: ClosureSubsts<'tcx>) - -> Ty<'tcx> { - self.mk_closure_from_closure_substs(closure_id, substs) - } - - pub fn mk_closure_from_closure_substs(self, - closure_id: DefId, - closure_substs: ClosureSubsts<'tcx>) + pub fn mk_closure(self, closure_id: DefId, closure_substs: ClosureSubsts<'tcx>) -> Ty<'tcx> { self.mk_ty(TyClosure(closure_id, closure_substs)) } pub fn mk_generator(self, id: DefId, - closure_substs: ClosureSubsts<'tcx>, - interior: GeneratorInterior<'tcx>) + generator_substs: GeneratorSubsts<'tcx>, + movability: hir::GeneratorMovability) -> Ty<'tcx> { - self.mk_ty(TyGenerator(id, closure_substs, interior)) + self.mk_ty(TyGenerator(id, generator_substs, movability)) + } + + pub fn mk_generator_witness(self, types: ty::Binder<&'tcx Slice>>) -> Ty<'tcx> { + self.mk_ty(TyGeneratorWitness(types)) } pub fn mk_var(self, v: TyVid) -> Ty<'tcx> { @@ -2095,18 +2529,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(TyInfer(it)) } - pub fn mk_param(self, + pub fn mk_ty_param(self, index: u32, - name: Name) -> Ty<'tcx> { + name: InternedString) -> Ty<'tcx> { self.mk_ty(TyParam(ParamTy { idx: index, name: name })) } pub fn mk_self_type(self) -> Ty<'tcx> { - self.mk_param(0, keywords::SelfType.name()) + self.mk_ty_param(0, keywords::SelfType.name().as_interned_str()) } - pub fn mk_param_from_def(self, def: &ty::TypeParameterDef) -> Ty<'tcx> { - self.mk_param(def.index, def.name) + pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> Kind<'tcx> { + match param.kind { + GenericParamDefKind::Lifetime => { + self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into() + } + GenericParamDefKind::Type {..} => self.mk_ty_param(param.index, param.name).into(), + } } pub fn mk_anon(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { @@ -2116,7 +2555,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn intern_existential_predicates(self, eps: &[ExistentialPredicate<'tcx>]) -> &'tcx Slice> { 
assert!(!eps.is_empty()); - assert!(eps.windows(2).all(|w| w[0].cmp(self, &w[1]) != Ordering::Greater)); + assert!(eps.windows(2).all(|w| w[0].stable_cmp(self, &w[1]) != Ordering::Greater)); self._intern_existential_predicates(eps) } @@ -2149,6 +2588,30 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } + pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'gcx> { + if ts.len() == 0 { + Slice::empty() + } else { + self.global_tcx()._intern_canonical_var_infos(ts) + } + } + + pub fn intern_clauses(self, ts: &[Clause<'tcx>]) -> Clauses<'tcx> { + if ts.len() == 0 { + Slice::empty() + } else { + self._intern_clauses(ts) + } + } + + pub fn intern_goals(self, ts: &[Goal<'tcx>]) -> Goals<'tcx> { + if ts.len() == 0 { + Slice::empty() + } else { + self._intern_goals(ts) + } + } + pub fn mk_fn_sig(self, inputs: I, output: I::Item, @@ -2188,11 +2651,31 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn mk_substs_trait(self, - s: Ty<'tcx>, - t: &[Ty<'tcx>]) + self_ty: Ty<'tcx>, + rest: &[Kind<'tcx>]) -> &'tcx Substs<'tcx> { - self.mk_substs(iter::once(s).chain(t.into_iter().cloned()).map(Kind::from)) + self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned())) + } + + pub fn mk_clauses], Clauses<'tcx>>>(self, iter: I) -> I::Output { + iter.intern_with(|xs| self.intern_clauses(xs)) + } + + pub fn mk_goals], Goals<'tcx>>>(self, iter: I) -> I::Output { + iter.intern_with(|xs| self.intern_goals(xs)) + } + + pub fn mk_goal(self, goal: Goal<'tcx>) -> &'tcx Goal { + &self.intern_goals(&[goal])[0] + } + + pub fn lint_hir>(self, + lint: &'static Lint, + hir_id: HirId, + span: S, + msg: &str) { + self.struct_span_lint_hir(lint, hir_id, span.into(), msg).emit() } pub fn lint_node>(self, @@ -2203,6 +2686,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.struct_span_lint_node(lint, id, span.into(), msg).emit() } + pub fn lint_hir_note>(self, + lint: &'static Lint, + hir_id: HirId, + span: S, + msg: &str, + note: &str) { + let 
mut err = self.struct_span_lint_hir(lint, hir_id, span.into(), msg); + err.note(note); + err.emit() + } + pub fn lint_node_note>(self, lint: &'static Lint, id: NodeId, @@ -2229,7 +2723,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let sets = self.lint_levels(LOCAL_CRATE); loop { let hir_id = self.hir.definitions().node_to_hir_id(id); - if let Some(pair) = sets.level_and_source(lint, hir_id) { + if let Some(pair) = sets.level_and_source(lint, hir_id, self.sess) { return pair } let next = self.hir.get_parent_node(id); @@ -2241,6 +2735,18 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }) } + pub fn struct_span_lint_hir>(self, + lint: &'static Lint, + hir_id: HirId, + span: S, + msg: &str) + -> DiagnosticBuilder<'tcx> + { + let node_id = self.hir.hir_to_node_id(hir_id); + let (level, src) = self.lint_level_at_node(lint, node_id); + lint::struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg) + } + pub fn struct_span_lint_node>(self, lint: &'static Lint, id: NodeId, @@ -2259,7 +2765,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { lint::struct_lint_level(self.sess, lint, level, src, None, msg) } - pub fn in_scope_traits(self, id: HirId) -> Option>> { + pub fn in_scope_traits(self, id: HirId) -> Option>> { self.in_scope_traits_map(id.owner) .and_then(|map| map.get(&id.local_id).cloned()) } @@ -2276,7 +2782,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn object_lifetime_defaults(self, id: HirId) - -> Option>> + -> Option>> { self.object_lifetime_defaults_map(id.owner) .and_then(|map| map.get(&id.local_id).cloned()) @@ -2327,7 +2833,7 @@ impl InternIteratorElement for Result { } } -pub fn provide(providers: &mut ty::maps::Providers) { +pub fn provide(providers: &mut ty::query::Providers) { // FIXME(#44234) - almost all of these queries have no sub-queries and // therefore no actual inputs, they're just reading tables calculated in // resolve! Does this work? Unsure! 
That's what the issue is about @@ -2337,17 +2843,13 @@ pub fn provide(providers: &mut ty::maps::Providers) { assert_eq!(id, LOCAL_CRATE); tcx.crate_name }; + providers.get_lib_features = |tcx, id| { + assert_eq!(id, LOCAL_CRATE); + Lrc::new(middle::lib_features::collect(tcx)) + }; providers.get_lang_items = |tcx, id| { assert_eq!(id, LOCAL_CRATE); - // FIXME(#42293) Right now we insert a `with_ignore` node in the dep - // graph here to ignore the fact that `get_lang_items` below depends on - // the entire crate. For now this'll prevent false positives of - // recompiling too much when anything changes. - // - // Once red/green incremental compilation lands we should be able to - // remove this because while the crate changes often the lint level map - // will change rarely. - tcx.dep_graph.with_ignore(|| Rc::new(middle::lang_items::collect(tcx))) + Lrc::new(middle::lang_items::collect(tcx)) }; providers.freevars = |tcx, id| tcx.gcx.freevars.get(&id).cloned(); providers.maybe_unused_trait_import = |tcx, id| { @@ -2355,12 +2857,12 @@ pub fn provide(providers: &mut ty::maps::Providers) { }; providers.maybe_unused_extern_crates = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); - Rc::new(tcx.maybe_unused_extern_crates.clone()) + Lrc::new(tcx.maybe_unused_extern_crates.clone()) }; providers.stability_index = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); - Rc::new(stability::Index::new(tcx)) + Lrc::new(stability::Index::new(tcx)) }; providers.lookup_stability = |tcx, id| { assert_eq!(id.krate, LOCAL_CRATE); @@ -2378,25 +2880,26 @@ pub fn provide(providers: &mut ty::maps::Providers) { }; providers.all_crate_nums = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); - Rc::new(tcx.cstore.crates_untracked()) + Lrc::new(tcx.cstore.crates_untracked()) }; providers.postorder_cnums = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); - Rc::new(tcx.cstore.postorder_cnums_untracked()) + Lrc::new(tcx.cstore.postorder_cnums_untracked()) }; providers.output_filenames = |tcx, cnum| { assert_eq!(cnum, 
LOCAL_CRATE); tcx.output_filenames.clone() }; - providers.has_copy_closures = |tcx, cnum| { + providers.features_query = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); - tcx.sess.features.borrow().copy_closures + Lrc::new(tcx.sess.features_untracked().clone()) }; - providers.has_clone_closures = |tcx, cnum| { + providers.is_panic_runtime = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); - tcx.sess.features.borrow().clone_closures + attr::contains_name(tcx.hir.krate_attrs(), "panic_runtime") }; - providers.fully_normalize_monormophic_ty = |tcx, ty| { - tcx.fully_normalize_associated_types_in(&ty) + providers.is_compiler_builtins = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + attr::contains_name(tcx.hir.krate_attrs(), "compiler_builtins") }; } diff --git a/src/librustc/ty/erase_regions.rs b/src/librustc/ty/erase_regions.rs index 4f8fca67949b..2fb2154ce6ba 100644 --- a/src/librustc/ty/erase_regions.rs +++ b/src/librustc/ty/erase_regions.rs @@ -11,8 +11,8 @@ use ty::{self, Ty, TyCtxt}; use ty::fold::{TypeFolder, TypeFoldable}; -pub(super) fn provide(providers: &mut ty::maps::Providers) { - *providers = ty::maps::Providers { +pub(super) fn provide(providers: &mut ty::query::Providers) { + *providers = ty::query::Providers { erase_regions_ty, ..*providers }; @@ -68,7 +68,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionEraserVisitor<'a, 'gcx, 't // // Note that we *CAN* replace early-bound regions -- the // type system never "sees" those, they get substituted - // away. In trans, they will always be erased to 'erased + // away. In codegen, they will always be erased to 'erased // whenever a substitution occurs. match *r { ty::ReLateBound(..) 
=> r, @@ -76,4 +76,3 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionEraserVisitor<'a, 'gcx, 't } } } - diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index d2152024cff0..49fffaa375b2 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -9,18 +9,13 @@ // except according to those terms. use hir::def_id::DefId; -use infer::type_variable; -use middle::const_val::ConstVal; -use ty::{self, BoundRegion, DefIdTree, Region, Ty, TyCtxt}; - +use ty::{self, BoundRegion, Region, Ty, TyCtxt}; use std::fmt; -use syntax::abi; +use rustc_target::spec::abi; use syntax::ast; -use errors::DiagnosticBuilder; +use errors::{Applicability, DiagnosticBuilder}; use syntax_pos::Span; -use rustc_const_math::ConstInt; - use hir; #[derive(Clone, Copy, Debug)] @@ -56,7 +51,6 @@ pub enum TypeError<'tcx> { CyclicTy(Ty<'tcx>), ProjectionMismatched(ExpectedFound), ProjectionBoundsLength(ExpectedFound), - TyParamDefaultMismatch(ExpectedFound>), ExistentialMismatch(ExpectedFound<&'tcx ty::Slice>>), OldStyleLUB(Box>), @@ -167,11 +161,6 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { values.expected, values.found) }, - TyParamDefaultMismatch(ref values) => { - write!(f, "conflicting type parameter defaults `{}` and `{}`", - values.expected.ty, - values.found.ty) - } ExistentialMismatch(ref values) => { report_maybe_different(f, format!("trait `{}`", values.expected), format!("trait `{}`", values.found)) @@ -188,38 +177,34 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { match self.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => self.to_string(), - ty::TyTuple(ref tys, _) if tys.is_empty() => self.to_string(), + ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(), ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)), ty::TyForeign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)), ty::TyArray(_, n) => { - if let ConstVal::Integral(ConstInt::Usize(n)) 
= n.val { - format!("array of {} elements", n) - } else { - "array".to_string() + match n.assert_usize(tcx) { + Some(n) => format!("array of {} elements", n), + None => "array".to_string(), } } ty::TySlice(_) => "slice".to_string(), ty::TyRawPtr(_) => "*-ptr".to_string(), - ty::TyRef(region, tymut) => { + ty::TyRef(region, ty, mutbl) => { + let tymut = ty::TypeAndMut { ty, mutbl }; let tymut_string = tymut.to_string(); if tymut_string == "_" || //unknown type name, tymut_string.len() > 10 || //name longer than saying "reference", region.to_string() != "" //... or a complex type { - match tymut { - ty::TypeAndMut{mutbl, ..} => { - format!("{}reference", match mutbl { - hir::Mutability::MutMutable => "mutable ", - _ => "" - }) - } - } + format!("{}reference", match mutbl { + hir::Mutability::MutMutable => "mutable ", + _ => "" + }) } else { format!("&{}", tymut_string) } } - ty::TyFnDef(..) => format!("fn item"), + ty::TyFnDef(..) => "fn item".to_string(), ty::TyFnPtr(_) => "fn pointer".to_string(), ty::TyDynamic(ref inner, ..) => { inner.principal().map_or_else(|| "trait".to_string(), @@ -227,10 +212,12 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { } ty::TyClosure(..) => "closure".to_string(), ty::TyGenerator(..) => "generator".to_string(), + ty::TyGeneratorWitness(..) => "generator witness".to_string(), ty::TyTuple(..) 
=> "tuple".to_string(), ty::TyInfer(ty::TyVar(_)) => "inferred type".to_string(), ty::TyInfer(ty::IntVar(_)) => "integral variable".to_string(), ty::TyInfer(ty::FloatVar(_)) => "floating-point variable".to_string(), + ty::TyInfer(ty::CanonicalTy(_)) | ty::TyInfer(ty::FreshTy(_)) => "skolemized type".to_string(), ty::TyInfer(ty::FreshIntTy(_)) => "skolemized integral type".to_string(), ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(), @@ -263,43 +250,22 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { db.note("no two closures, even if identical, have the same type"); db.help("consider boxing your closure and/or using it as a trait object"); } + match (&values.found.sty, &values.expected.sty) { // Issue #53280 + (ty::TyInfer(ty::IntVar(_)), ty::TyFloat(_)) => { + if let Ok(snippet) = self.sess.codemap().span_to_snippet(sp) { + if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') { + db.span_suggestion_with_applicability( + sp, + "use a float literal", + format!("{}.0", snippet), + Applicability::MachineApplicable + ); + } + } + }, + _ => {} + } }, - TyParamDefaultMismatch(values) => { - let expected = values.expected; - let found = values.found; - db.span_note(sp, &format!("conflicting type parameter defaults `{}` and `{}`", - expected.ty, - found.ty)); - - match self.hir.span_if_local(expected.def_id) { - Some(span) => { - db.span_note(span, "a default was defined here..."); - } - None => { - let item_def_id = self.parent(expected.def_id).unwrap(); - db.note(&format!("a default is defined on `{}`", - self.item_path_str(item_def_id))); - } - } - - db.span_note( - expected.origin_span, - "...that was applied to an unconstrained type variable here"); - - match self.hir.span_if_local(found.def_id) { - Some(span) => { - db.span_note(span, "a second default was defined here..."); - } - None => { - let item_def_id = self.parent(found.def_id).unwrap(); - db.note(&format!("a second default is defined on `{}`", - 
self.item_path_str(item_def_id))); - } - } - - db.span_note(found.origin_span, - "...that also applies to the same type variable here"); - } OldStyleLUB(err) => { db.note("this was previously accepted by the compiler but has been phased out"); db.note("for more information, see https://github.com/rust-lang/rust/issues/45852"); diff --git a/src/librustc/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs index 138f6af77c65..cf5e55a59f71 100644 --- a/src/librustc/ty/fast_reject.rs +++ b/src/librustc/ty/fast_reject.rs @@ -28,7 +28,7 @@ pub type SimplifiedType = SimplifiedTypeGen; /// because we sometimes need to use SimplifiedTypeGen values as stable sorting /// keys (in which case we use a DefPathHash as id-type) but in the general case /// the non-stable but fast to construct DefId-version is the better choice. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, RustcEncodable, RustcDecodable)] pub enum SimplifiedTypeGen where D: Copy + Debug + Ord + Eq + Hash { @@ -46,6 +46,7 @@ pub enum SimplifiedTypeGen TraitSimplifiedType(D), ClosureSimplifiedType(D), GeneratorSimplifiedType(D), + GeneratorWitnessSimplifiedType(usize), AnonSimplifiedType(D), FunctionSimplifiedType(usize), ParameterSimplifiedType, @@ -79,11 +80,11 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyDynamic(ref trait_info, ..) 
=> { trait_info.principal().map(|p| TraitSimplifiedType(p.def_id())) } - ty::TyRef(_, mt) => { + ty::TyRef(_, ty, _) => { // since we introduce auto-refs during method lookup, we // just treat &T and T as equivalent from the point of // view of possibly unifying - simplify_type(tcx, mt.ty, can_simplify_params) + simplify_type(tcx, ty, can_simplify_params) } ty::TyFnDef(def_id, _) | ty::TyClosure(def_id, _) => { @@ -92,8 +93,11 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::TyGenerator(def_id, _, _) => { Some(GeneratorSimplifiedType(def_id)) } + ty::TyGeneratorWitness(ref tys) => { + Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len())) + } ty::TyNever => Some(NeverSimplifiedType), - ty::TyTuple(ref tys, _) => { + ty::TyTuple(ref tys) => { Some(TupleSimplifiedType(tys.len())) } ty::TyFnPtr(ref f) => { @@ -141,6 +145,7 @@ impl SimplifiedTypeGen { TraitSimplifiedType(d) => TraitSimplifiedType(map(d)), ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)), GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)), + GeneratorWitnessSimplifiedType(n) => GeneratorWitnessSimplifiedType(n), AnonSimplifiedType(d) => AnonSimplifiedType(map(d)), FunctionSimplifiedType(n) => FunctionSimplifiedType(n), ParameterSimplifiedType => ParameterSimplifiedType, @@ -149,12 +154,12 @@ impl SimplifiedTypeGen { } } -impl<'gcx, D> HashStable> for SimplifiedTypeGen +impl<'a, 'gcx, D> HashStable> for SimplifiedTypeGen where D: Copy + Debug + Ord + Eq + Hash + - HashStable>, + HashStable>, { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -175,6 +180,7 @@ impl<'gcx, D> HashStable> for SimplifiedTypeGen TraitSimplifiedType(d) => d.hash_stable(hcx, hasher), ClosureSimplifiedType(d) => d.hash_stable(hcx, hasher), GeneratorSimplifiedType(d) => d.hash_stable(hcx, hasher), + GeneratorWitnessSimplifiedType(n) => 
n.hash_stable(hcx, hasher), AnonSimplifiedType(d) => d.hash_stable(hcx, hasher), FunctionSimplifiedType(n) => n.hash_stable(hcx, hasher), ForeignSimplifiedType(d) => d.hash_stable(hcx, hasher), diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs index 63c646dbd231..3718c436b3a0 100644 --- a/src/librustc/ty/flags.rs +++ b/src/librustc/ty/flags.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::const_val::{ConstVal, ConstAggregate}; +use mir::interpret::ConstValue; use ty::subst::Substs; use ty::{self, Ty, TypeFlags, TypeFoldable}; @@ -16,13 +16,16 @@ use ty::{self, Ty, TypeFlags, TypeFoldable}; pub struct FlagComputation { pub flags: TypeFlags, - // maximum depth of any bound region that we have seen thus far - pub depth: u32, + // see `TyS::outer_exclusive_binder` for details + pub outer_exclusive_binder: ty::DebruijnIndex, } impl FlagComputation { fn new() -> FlagComputation { - FlagComputation { flags: TypeFlags::empty(), depth: 0 } + FlagComputation { + flags: TypeFlags::empty(), + outer_exclusive_binder: ty::INNERMOST, + } } pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation { @@ -35,10 +38,17 @@ impl FlagComputation { self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS); } - fn add_depth(&mut self, depth: u32) { - if depth > self.depth { - self.depth = depth; - } + /// indicates that `self` refers to something at binding level `binder` + fn add_binder(&mut self, binder: ty::DebruijnIndex) { + let exclusive_binder = binder.shifted_in(1); + self.add_exclusive_binder(exclusive_binder); + } + + /// indicates that `self` refers to something *inside* binding + /// level `binder` -- not bound by `binder`, but bound by the next + /// binder internal to it + fn add_exclusive_binder(&mut self, exclusive_binder: ty::DebruijnIndex) { + self.outer_exclusive_binder = self.outer_exclusive_binder.max(exclusive_binder); } /// Adds the flags/depth from a set of types 
that appear within the current type, but within a @@ -49,9 +59,11 @@ impl FlagComputation { // The types that contributed to `computation` occurred within // a region binder, so subtract one from the region depth // within when adding the depth to `self`. - let depth = computation.depth; - if depth > 0 { - self.add_depth(depth - 1); + let outer_exclusive_binder = computation.outer_exclusive_binder; + if outer_exclusive_binder > ty::INNERMOST { + self.add_exclusive_binder(outer_exclusive_binder.shifted_out(1)); + } else { + // otherwise, this binder captures nothing } } @@ -79,7 +91,7 @@ impl FlagComputation { } &ty::TyParam(ref p) => { - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); + self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); if p.is_self() { self.add_flags(TypeFlags::HAS_SELF); } else { @@ -87,27 +99,40 @@ impl FlagComputation { } } - &ty::TyGenerator(_, ref substs, ref interior) => { + &ty::TyGenerator(_, ref substs, _) => { self.add_flags(TypeFlags::HAS_TY_CLOSURE); - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); + self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); self.add_substs(&substs.substs); - self.add_ty(interior.witness); + } + + &ty::TyGeneratorWitness(ref ts) => { + let mut computation = FlagComputation::new(); + computation.add_tys(&ts.skip_binder()[..]); + self.add_bound_computation(&computation); } &ty::TyClosure(_, ref substs) => { self.add_flags(TypeFlags::HAS_TY_CLOSURE); - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); + self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); self.add_substs(&substs.substs); } &ty::TyInfer(infer) => { - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); // it might, right? + self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES); // it might, right? 
self.add_flags(TypeFlags::HAS_TY_INFER); match infer { ty::FreshTy(_) | ty::FreshIntTy(_) | - ty::FreshFloatTy(_) => {} - _ => self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX) + ty::FreshFloatTy(_) | + ty::CanonicalTy(_) => { + self.add_flags(TypeFlags::HAS_CANONICAL_VARS); + } + + ty::TyVar(_) | + ty::IntVar(_) | + ty::FloatVar(_) => { + self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX) + } } } @@ -160,15 +185,12 @@ impl FlagComputation { self.add_ty(m.ty); } - &ty::TyRef(r, ref m) => { + &ty::TyRef(r, ty, _) => { self.add_region(r); - self.add_ty(m.ty); + self.add_ty(ty); } - &ty::TyTuple(ref ts, is_default) => { - if is_default { - self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX); - } + &ty::TyTuple(ref ts) => { self.add_tys(&ts[..]); } @@ -184,7 +206,7 @@ impl FlagComputation { fn add_ty(&mut self, ty: Ty) { self.add_flags(ty.flags); - self.add_depth(ty.region_depth); + self.add_exclusive_binder(ty.outer_exclusive_binder); } fn add_tys(&mut self, tys: &[Ty]) { @@ -205,41 +227,15 @@ impl FlagComputation { fn add_region(&mut self, r: ty::Region) { self.add_flags(r.type_flags()); if let ty::ReLateBound(debruijn, _) = *r { - self.add_depth(debruijn.depth); + self.add_binder(debruijn); } } fn add_const(&mut self, constant: &ty::Const) { self.add_ty(constant.ty); - match constant.val { - ConstVal::Integral(_) | - ConstVal::Float(_) | - ConstVal::Str(_) | - ConstVal::ByteStr(_) | - ConstVal::Bool(_) | - ConstVal::Char(_) | - ConstVal::Variant(_) => {} - ConstVal::Function(_, substs) => { - self.add_substs(substs); - } - ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { - for &(_, v) in fields { - self.add_const(v); - } - } - ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | - ConstVal::Aggregate(ConstAggregate::Array(fields)) => { - for v in fields { - self.add_const(v); - } - } - ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { - self.add_const(v); - } - ConstVal::Unevaluated(_, substs) => { - self.add_flags(TypeFlags::HAS_PROJECTION); - self.add_substs(substs); - 
} + if let ConstValue::Unevaluated(_, substs) = constant.val { + self.add_flags(TypeFlags::HAS_PROJECTION); + self.add_substs(substs); } } diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs index c5b82730e488..e4484041b065 100644 --- a/src/librustc/ty/fold.rs +++ b/src/librustc/ty/fold.rs @@ -39,16 +39,19 @@ //! These methods return true to indicate that the visitor has found what it is looking for //! and does not need to visit anything else. -use middle::const_val::ConstVal; +use mir::interpret::ConstValue; use hir::def_id::DefId; use ty::{self, Binder, Ty, TyCtxt, TypeFlags}; -use std::fmt; use std::collections::BTreeMap; +use std::fmt; use util::nodemap::FxHashSet; /// The TypeFoldable trait is implemented for every type that can be folded. /// Basically, every type that has a corresponding method in TypeFolder. +/// +/// To implement this conveniently, use the +/// `BraceStructTypeFoldableImpl` etc macros found in `macros.rs`. pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self; fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { @@ -60,11 +63,22 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { self.super_visit_with(visitor) } - fn has_regions_escaping_depth(&self, depth: u32) -> bool { - self.visit_with(&mut HasEscapingRegionsVisitor { depth: depth }) + /// True if `self` has any late-bound regions that are either + /// bound by `binder` or bound by some binder outside of `binder`. + /// If `binder` is `ty::INNERMOST`, this indicates whether + /// there are any late-bound regions that appear free. + fn has_regions_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool { + self.visit_with(&mut HasEscapingRegionsVisitor { outer_index: binder }) } + + /// True if this `self` has any regions that escape `binder` (and + /// hence are not bound by it). 
+ fn has_regions_bound_above(&self, binder: ty::DebruijnIndex) -> bool { + self.has_regions_bound_at_or_above(binder.shifted_in(1)) + } + fn has_escaping_regions(&self) -> bool { - self.has_regions_escaping_depth(0) + self.has_regions_bound_at_or_above(ty::INNERMOST) } fn has_type_flags(&self, flags: TypeFlags) -> bool { @@ -88,6 +102,9 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { fn needs_infer(&self) -> bool { self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER) } + fn has_skol(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_RE_SKOL) + } fn needs_subst(&self) -> bool { self.has_type_flags(TypeFlags::NEEDS_SUBST) } @@ -108,21 +125,30 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { self.has_type_flags(TypeFlags::HAS_FREE_REGIONS) } - fn is_normalized_for_trans(&self) -> bool { - !self.has_type_flags(TypeFlags::HAS_RE_INFER | - TypeFlags::HAS_FREE_REGIONS | - TypeFlags::HAS_TY_INFER | - TypeFlags::HAS_PARAMS | - TypeFlags::HAS_NORMALIZABLE_PROJECTION | - TypeFlags::HAS_TY_ERR | - TypeFlags::HAS_SELF) - } /// Indicates whether this value references only 'global' /// types/lifetimes that are the same regardless of what fn we are - /// in. This is used for caching. Errs on the side of returning - /// false. + /// in. This is used for caching. fn is_global(&self) -> bool { - !self.has_type_flags(TypeFlags::HAS_LOCAL_NAMES) + !self.has_type_flags(TypeFlags::HAS_FREE_LOCAL_NAMES) + } + + /// True if there are any late-bound regions + fn has_late_bound_regions(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND) + } + + /// A visitor that does not recurse into types, works like `fn walk_shallow` in `Ty`. 
+ fn visit_tys_shallow(&self, visit: impl FnMut(Ty<'tcx>) -> bool) -> bool { + + pub struct Visitor(F); + + impl<'tcx, F: FnMut(Ty<'tcx>) -> bool> TypeVisitor<'tcx> for Visitor { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { + self.0(ty) + } + } + + self.visit_with(&mut Visitor(visit)) } } @@ -174,15 +200,18 @@ pub trait TypeVisitor<'tcx> : Sized { /////////////////////////////////////////////////////////////////////////// // Some sample folders -pub struct BottomUpFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a, F> - where F: FnMut(Ty<'tcx>) -> Ty<'tcx> +pub struct BottomUpFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a, F, G> + where F: FnMut(Ty<'tcx>) -> Ty<'tcx>, + G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>, { pub tcx: TyCtxt<'a, 'gcx, 'tcx>, pub fldop: F, + pub reg_op: G, } -impl<'a, 'gcx, 'tcx, F> TypeFolder<'gcx, 'tcx> for BottomUpFolder<'a, 'gcx, 'tcx, F> +impl<'a, 'gcx, 'tcx, F, G> TypeFolder<'gcx, 'tcx> for BottomUpFolder<'a, 'gcx, 'tcx, F, G> where F: FnMut(Ty<'tcx>) -> Ty<'tcx>, + G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>, { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } @@ -190,6 +219,11 @@ impl<'a, 'gcx, 'tcx, F> TypeFolder<'gcx, 'tcx> for BottomUpFolder<'a, 'gcx, 'tcx let t1 = ty.super_fold_with(self); (self.fldop)(t1) } + + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + let r = r.super_fold_with(self); + (self.reg_op)(r) + } } /////////////////////////////////////////////////////////////////////////// @@ -206,7 +240,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { { let mut have_bound_regions = false; self.fold_regions(value, &mut have_bound_regions, |r, d| { - region_set.insert(self.mk_region(r.from_depth(d))); + region_set.insert(self.mk_region(r.shifted_out_to_binder(d))); r }); have_bound_regions @@ -215,50 +249,98 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Folds the escaping and free regions in `value` using `f`, and /// sets `skipped_regions` to true if any late-bound region was found /// and skipped. 
- pub fn fold_regions(self, + pub fn fold_regions( + self, value: &T, skipped_regions: &mut bool, - mut f: F) - -> T - where F : FnMut(ty::Region<'tcx>, u32) -> ty::Region<'tcx>, - T : TypeFoldable<'tcx>, + mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>, + ) -> T + where + T : TypeFoldable<'tcx>, { value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f)) } - pub fn for_each_free_region(self, - value: &T, - callback: F) - where F: FnMut(ty::Region<'tcx>), - T: TypeFoldable<'tcx>, - { - value.visit_with(&mut RegionVisitor { current_depth: 0, callback }); + /// Invoke `callback` on every region appearing free in `value`. + pub fn for_each_free_region( + self, + value: &impl TypeFoldable<'tcx>, + mut callback: impl FnMut(ty::Region<'tcx>), + ) { + self.any_free_region_meets(value, |r| { + callback(r); + false + }); + } + + /// True if `callback` returns true for every region appearing free in `value`. + pub fn all_free_regions_meet( + self, + value: &impl TypeFoldable<'tcx>, + mut callback: impl FnMut(ty::Region<'tcx>) -> bool, + ) -> bool { + !self.any_free_region_meets(value, |r| !callback(r)) + } + + /// True if `callback` returns true for some region appearing free in `value`. + pub fn any_free_region_meets( + self, + value: &impl TypeFoldable<'tcx>, + callback: impl FnMut(ty::Region<'tcx>) -> bool, + ) -> bool { + return value.visit_with(&mut RegionVisitor { + outer_index: ty::INNERMOST, + callback + }); struct RegionVisitor { - current_depth: u32, + /// The index of a binder *just outside* the things we have + /// traversed. If we encounter a bound region bound by this + /// binder or one outer to it, it appears free. 
Example: + /// + /// ``` + /// for<'a> fn(for<'b> fn(), T) + /// ^ ^ ^ ^ + /// | | | | here, would be shifted in 1 + /// | | | here, would be shifted in 2 + /// | | here, would be INNERMOST shifted in by 1 + /// | here, initially, binder would be INNERMOST + /// ``` + /// + /// You see that, initially, *any* bound value is free, + /// because we've not traversed any binders. As we pass + /// through a binder, we shift the `outer_index` by 1 to + /// account for the new binder that encloses us. + outer_index: ty::DebruijnIndex, callback: F, } impl<'tcx, F> TypeVisitor<'tcx> for RegionVisitor - where F : FnMut(ty::Region<'tcx>) + where F: FnMut(ty::Region<'tcx>) -> bool { fn visit_binder>(&mut self, t: &Binder) -> bool { - self.current_depth += 1; - t.skip_binder().visit_with(self); - self.current_depth -= 1; - - false // keep visiting + self.outer_index.shift_in(1); + let result = t.skip_binder().visit_with(self); + self.outer_index.shift_out(1); + result } fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { match *r { - ty::ReLateBound(debruijn, _) if debruijn.depth <= self.current_depth => { - /* ignore bound regions */ + ty::ReLateBound(debruijn, _) if debruijn < self.outer_index => { + false // ignore bound regions, keep visiting } _ => (self.callback)(r), } + } - false // keep visiting + fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { + // We're only interested in types involving regions + if ty.flags.intersects(TypeFlags::HAS_FREE_REGIONS) { + ty.super_visit_with(self) + } else { + false // keep visiting + } } } } @@ -276,21 +358,32 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub struct RegionFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, skipped_regions: &'a mut bool, - current_depth: u32, - fld_r: &'a mut (FnMut(ty::Region<'tcx>, u32) -> ty::Region<'tcx> + 'a), + + /// Stores the index of a binder *just outside* the stuff we have + /// visited. 
So this begins as INNERMOST; when we pass through a + /// binder, it is incremented (via `shift_in`). + current_index: ty::DebruijnIndex, + + /// Callback invokes for each free region. The `DebruijnIndex` + /// points to the binder *just outside* the ones we have passed + /// through. + fold_region_fn: &'a mut (dyn FnMut( + ty::Region<'tcx>, + ty::DebruijnIndex, + ) -> ty::Region<'tcx> + 'a), } impl<'a, 'gcx, 'tcx> RegionFolder<'a, 'gcx, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, - skipped_regions: &'a mut bool, - fld_r: &'a mut F) -> RegionFolder<'a, 'gcx, 'tcx> - where F : FnMut(ty::Region<'tcx>, u32) -> ty::Region<'tcx> - { + pub fn new( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + skipped_regions: &'a mut bool, + fold_region_fn: &'a mut dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>, + ) -> RegionFolder<'a, 'gcx, 'tcx> { RegionFolder { tcx, skipped_regions, - current_depth: 1, - fld_r, + current_index: ty::INNERMOST, + fold_region_fn, } } } @@ -299,24 +392,24 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { - self.current_depth += 1; + self.current_index.shift_in(1); let t = t.super_fold_with(self); - self.current_depth -= 1; + self.current_index.shift_out(1); t } fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { match *r { - ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => { - debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})", - r, self.current_depth); + ty::ReLateBound(debruijn, _) if debruijn < self.current_index => { + debug!("RegionFolder.fold_region({:?}) skipped bound region (current index={:?})", + r, self.current_index); *self.skipped_regions = true; r } _ => { - debug!("RegionFolder.fold_region({:?}) folding free region (current_depth={})", - r, self.current_depth); - (self.fld_r)(r, self.current_depth) + 
debug!("RegionFolder.fold_region({:?}) folding free region (current_index={:?})", + r, self.current_index); + (self.fold_region_fn)(r, self.current_index) } } } @@ -329,8 +422,12 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> { struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, - current_depth: u32, - fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a), + + /// As with `RegionFolder`, represents the index of a binder *just outside* + /// the ones we have visited. + current_index: ty::DebruijnIndex, + + fld_r: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a), map: BTreeMap> } @@ -371,27 +468,29 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }).0 } - /// Flattens two binding levels into one. So `for<'a> for<'b> Foo` + /// Flattens multiple binding levels into one. So `for<'a> for<'b> Foo` /// becomes `for<'a,'b> Foo`. pub fn flatten_late_bound_regions(self, bound2_value: &Binder>) -> Binder where T: TypeFoldable<'tcx> { let bound0_value = bound2_value.skip_binder().skip_binder(); - let value = self.fold_regions(bound0_value, &mut false, - |region, current_depth| { + let value = self.fold_regions(bound0_value, &mut false, |region, current_depth| { match *region { - ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => { - // should be true if no escaping regions from bound2_value - assert!(debruijn.depth - current_depth <= 1); - self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br)) + ty::ReLateBound(debruijn, br) => { + // We assume no regions bound *outside* of the + // binders in `bound2_value` (nmatsakis added in + // the course of this PR; seems like a reasonable + // sanity check though). 
+ assert!(debruijn == current_depth); + self.mk_region(ty::ReLateBound(current_depth, br)) } _ => { region } } }); - Binder(value) + Binder::bind(value) } /// Returns a set of all late-bound regions that are constrained @@ -423,7 +522,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { collector.regions } - /// Replace any late-bound regions bound in `value` with `'erased`. Useful in trans but also + /// Replace any late-bound regions bound in `value` with `'erased`. Useful in codegen but also /// method lookup and a few other places where precise region relationships are not required. pub fn erase_late_bound_regions(self, value: &Binder) -> T where T : TypeFoldable<'tcx> @@ -443,9 +542,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { where T : TypeFoldable<'tcx>, { let mut counter = 0; - Binder(self.replace_late_bound_regions(sig, |_| { + Binder::bind(self.replace_late_bound_regions(sig, |_| { counter += 1; - self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter))) + self.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BrAnon(counter))) }).0) } } @@ -457,7 +556,7 @@ impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> { { RegionReplacer { tcx, - current_depth: 1, + current_index: ty::INNERMOST, fld_r, map: BTreeMap::default() } @@ -468,14 +567,14 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { - self.current_depth += 1; + self.current_index.shift_in(1); let t = t.super_fold_with(self); - self.current_depth -= 1; + self.current_index.shift_out(1); t } fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.has_regions_escaping_depth(self.current_depth-1) { + if !t.has_regions_bound_at_or_above(self.current_index) { return t; } @@ -484,14 +583,15 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { fn fold_region(&mut self, r: ty::Region<'tcx>) -> 
ty::Region<'tcx> { match *r { - ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { + ty::ReLateBound(debruijn, br) if debruijn == self.current_index => { let fld_r = &mut self.fld_r; let region = *self.map.entry(br).or_insert_with(|| fld_r(br)); if let ty::ReLateBound(debruijn1, br) = *region { // If the callback returns a late-bound region, - // that region should always use depth 1. Then we - // adjust it to the correct depth. - assert_eq!(debruijn1.depth, 1); + // that region should always use the INNERMOST + // debruijn index. Then we adjust it to the + // correct depth. + assert_eq!(debruijn1, ty::INNERMOST); self.tcx.mk_region(ty::ReLateBound(debruijn, br)) } else { region @@ -514,7 +614,7 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { pub fn shift_region(region: ty::RegionKind, amount: u32) -> ty::RegionKind { match region { ty::ReLateBound(debruijn, br) => { - ty::ReLateBound(debruijn.shifted(amount), br) + ty::ReLateBound(debruijn.shifted_in(amount), br) } _ => { region @@ -530,7 +630,7 @@ pub fn shift_region_ref<'a, 'gcx, 'tcx>( { match region { &ty::ReLateBound(debruijn, br) if amount > 0 => { - tcx.mk_region(ty::ReLateBound(debruijn.shifted(amount), br)) + tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), br)) } _ => { region @@ -574,23 +674,32 @@ pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>, /// represent the scope to which it is attached, etc. An escaping region represents a bound region /// for which this processing has not yet been done. 
struct HasEscapingRegionsVisitor { - depth: u32, + /// Anything bound by `outer_index` or "above" is escaping + outer_index: ty::DebruijnIndex, } impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor { fn visit_binder>(&mut self, t: &Binder) -> bool { - self.depth += 1; + self.outer_index.shift_in(1); let result = t.super_visit_with(self); - self.depth -= 1; + self.outer_index.shift_out(1); result } fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { - t.region_depth > self.depth + // If the outer-exclusive-binder is *strictly greater* than + // `outer_index`, that means that `t` contains some content + // bound at `outer_index` or above (because + // `outer_exclusive_binder` is always 1 higher than the + // content in `t`). Therefore, `t` has some escaping regions. + t.outer_exclusive_binder > self.outer_index } fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { - r.escapes_depth(self.depth) + // If the region is bound by `outer_index` or anything outside + // of outer index, then it escapes the binders we have + // visited. + r.bound_at_or_above_binder(self.outer_index) } } @@ -611,7 +720,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { } fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { - if let ConstVal::Unevaluated(..) = c.val { + if let ConstValue::Unevaluated(..) = c.val { let projection_flags = TypeFlags::HAS_NORMALIZABLE_PROJECTION | TypeFlags::HAS_PROJECTION; if projection_flags.intersects(self.flags) { @@ -622,17 +731,26 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { } } -/// Collects all the late-bound regions it finds into a hash set. +/// Collects all the late-bound regions at the innermost binding level +/// into a hash set. struct LateBoundRegionsCollector { - current_depth: u32, + current_index: ty::DebruijnIndex, regions: FxHashSet, + + /// If true, we only want regions that are known to be + /// "constrained" when you equate this type with another type. In + /// partcular, if you have e.g. 
`&'a u32` and `&'b u32`, equating + /// them constraints `'a == 'b`. But if you have `<&'a u32 as + /// Trait>::Foo` and `<&'b u32 as Trait>::Foo`, normalizing those + /// types may mean that `'a` and `'b` don't appear in the results, + /// so they are not considered *constrained*. just_constrained: bool, } impl LateBoundRegionsCollector { fn new(just_constrained: bool) -> Self { LateBoundRegionsCollector { - current_depth: 1, + current_index: ty::INNERMOST, regions: FxHashSet(), just_constrained, } @@ -641,9 +759,9 @@ impl LateBoundRegionsCollector { impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { fn visit_binder>(&mut self, t: &Binder) -> bool { - self.current_depth += 1; + self.current_index.shift_in(1); let result = t.super_visit_with(self); - self.current_depth -= 1; + self.current_index.shift_out(1); result } @@ -663,7 +781,7 @@ impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { match *r { - ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { + ty::ReLateBound(debruijn, br) if debruijn == self.current_index => { self.regions.insert(br); } _ => { } diff --git a/src/librustc/ty/inhabitedness/mod.rs b/src/librustc/ty/inhabitedness/mod.rs index 0072512464a0..0ace44dca77b 100644 --- a/src/librustc/ty/inhabitedness/mod.rs +++ b/src/librustc/ty/inhabitedness/mod.rs @@ -228,7 +228,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { match self.sty { TyAdt(def, substs) => { { - let substs_set = visited.entry(def.did).or_insert(FxHashSet::default()); + let substs_set = visited.entry(def.did).or_default(); if !substs_set.insert(substs) { // We are already calculating the inhabitedness of this type. // The type must contain a reference to itself. 
Break the @@ -256,20 +256,21 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { }, TyNever => DefIdForest::full(tcx), - TyTuple(ref tys, _) => { + TyTuple(ref tys) => { DefIdForest::union(tcx, tys.iter().map(|ty| { ty.uninhabited_from(visited, tcx) })) }, TyArray(ty, len) => { - if len.val.to_const_int().and_then(|i| i.to_u64()) == Some(0) { - DefIdForest::empty() - } else { - ty.uninhabited_from(visited, tcx) + match len.assert_usize(tcx) { + // If the array is definitely non-empty, it's uninhabited if + // the type of its elements is uninhabited. + Some(n) if n != 0 => ty.uninhabited_from(visited, tcx), + _ => DefIdForest::empty() } } - TyRef(_, ref tm) => { - tm.ty.uninhabited_from(visited, tcx) + TyRef(_, ty, _) => { + ty.uninhabited_from(visited, tcx) } _ => DefIdForest::empty(), diff --git a/src/librustc/ty/instance.rs b/src/librustc/ty/instance.rs index 63bf52a9bdf7..7329f4832f2e 100644 --- a/src/librustc/ty/instance.rs +++ b/src/librustc/ty/instance.rs @@ -10,20 +10,19 @@ use hir::def_id::DefId; use ty::{self, Ty, TypeFoldable, Substs, TyCtxt}; -use ty::subst::Kind; use traits; -use syntax::abi::Abi; +use rustc_target::spec::abi::Abi; use util::ppaux; use std::fmt; -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Instance<'tcx> { pub def: InstanceDef<'tcx>, pub substs: &'tcx Substs<'tcx>, } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum InstanceDef<'tcx> { Item(DefId), Intrinsic(DefId), @@ -51,7 +50,11 @@ impl<'a, 'tcx> Instance<'tcx> { -> Ty<'tcx> { let ty = tcx.type_of(self.def.def_id()); - tcx.trans_apply_param_substs(self.substs, &ty) + tcx.subst_and_normalize_erasing_regions( + self.substs, + ty::ParamEnv::reveal_all(), + &ty, + ) } } @@ -96,18 +99,17 @@ impl<'tcx> InstanceDef<'tcx> { &self, tcx: TyCtxt<'a, 'tcx, 'tcx> ) -> bool { - use syntax::attr::requests_inline; if 
self.is_inline(tcx) { return true } if let ty::InstanceDef::DropGlue(..) = *self { - // Drop glue wants to be instantiated at every translation + // Drop glue wants to be instantiated at every codegen // unit, but without an #[inline] hint. We should make this // available to normal end-users. return true } - requests_inline(&self.attrs(tcx)[..]) || - tcx.is_const_fn(self.def_id()) + let codegen_fn_attrs = tcx.codegen_fn_attrs(self.def_id()); + codegen_fn_attrs.requests_inline() || tcx.is_const_fn(self.def_id()) } } @@ -142,7 +144,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> Instance<'tcx> { assert!(!substs.has_escaping_regions(), - "substs of instance {:?} not normalized for trans: {:?}", + "substs of instance {:?} not normalized for codegen: {:?}", def_id, substs); Instance { def: InstanceDef::Item(def_id), substs: substs } } @@ -172,7 +174,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { /// `RevealMode` in the parameter environment.) /// /// Presuming that coherence and type-check have succeeded, if this method is invoked - /// in a monomorphic context (i.e., like during trans), then it is guaranteed to return + /// in a monomorphic context (i.e., like during codegen), then it is guaranteed to return /// `Some`. pub fn resolve(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, @@ -185,7 +187,11 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { resolve_associated_item(tcx, &item, param_env, trait_def_id, substs) } else { let ty = tcx.type_of(def_id); - let item_type = tcx.trans_apply_param_substs_env(substs, param_env, &ty); + let item_type = tcx.subst_and_normalize_erasing_regions( + substs, + param_env, + &ty, + ); let def = match item_type.sty { ty::TyFnDef(..) 
if { @@ -200,7 +206,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { _ => { if Some(def_id) == tcx.lang_items().drop_in_place_fn() { let ty = substs.type_at(0); - if ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) { + if ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) { debug!(" => nontrivial drop glue"); ty::InstanceDef::DropGlue(def_id, Some(ty)) } else { @@ -252,7 +258,7 @@ fn resolve_associated_item<'a, 'tcx>( def_id, trait_id, rcvr_substs); let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); - let vtbl = tcx.trans_fulfill_obligation((param_env, ty::Binder(trait_ref))); + let vtbl = tcx.codegen_fulfill_obligation((param_env, ty::Binder::bind(trait_ref))); // Now that we know which impl is being used, we can dispatch to // the actual function: @@ -263,10 +269,10 @@ fn resolve_associated_item<'a, 'tcx>( let substs = tcx.erase_regions(&substs); Some(ty::Instance::new(def_id, substs)) } - traits::VtableGenerator(closure_data) => { + traits::VtableGenerator(generator_data) => { Some(Instance { - def: ty::InstanceDef::Item(closure_data.closure_def_id), - substs: closure_data.substs.substs + def: ty::InstanceDef::Item(generator_data.generator_def_id), + substs: generator_data.substs.substs }) } traits::VtableClosure(closure_data) => { @@ -288,7 +294,7 @@ fn resolve_associated_item<'a, 'tcx>( }) } traits::VtableBuiltin(..) => { - if let Some(_) = tcx.lang_items().clone_trait() { + if tcx.lang_items().clone_trait().is_some() { Some(Instance { def: ty::InstanceDef::CloneShim(def_id, trait_ref.self_ty()), substs: rcvr_substs @@ -314,7 +320,7 @@ fn needs_fn_once_adapter_shim<'a, 'tcx>(actual_closure_kind: ty::ClosureKind, } (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => { // The closure fn `llfn` is a `fn(&self, ...)`. We want a - // `fn(&mut self, ...)`. In fact, at trans time, these are + // `fn(&mut self, ...)`. In fact, at codegen time, these are // basically the same thing, so we can just return llfn. 
Ok(false) } @@ -327,7 +333,7 @@ fn needs_fn_once_adapter_shim<'a, 'tcx>(actual_closure_kind: ty::ClosureKind, // fn call_once(self, ...) { call_mut(&self, ...) } // fn call_once(mut self, ...) { call_mut(&mut self, ...) } // - // These are both the same at trans time. + // These are both the same at codegen time. Ok(true) } (ty::ClosureKind::FnMut, _) | @@ -349,16 +355,12 @@ fn fn_once_adapter_instance<'a, 'tcx>( .unwrap().def_id; let def = ty::InstanceDef::ClosureOnceShim { call_once }; - let self_ty = tcx.mk_closure_from_closure_substs( - closure_did, substs); + let self_ty = tcx.mk_closure(closure_did, substs); let sig = substs.closure_sig(closure_did, tcx); - let sig = tcx.erase_late_bound_regions_and_normalize(&sig); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); assert_eq!(sig.inputs().len(), 1); - let substs = tcx.mk_substs([ - Kind::from(self_ty), - Kind::from(sig.inputs()[0]), - ].iter().cloned()); + let substs = tcx.mk_substs_trait(self_ty, &[sig.inputs()[0].into()]); debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig); Instance { def, substs } diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs index 0c920a6f13e5..c44b7327a083 100644 --- a/src/librustc/ty/item_path.rs +++ b/src/librustc/ty/item_path.rs @@ -11,9 +11,10 @@ use hir::map::DefPathData; use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use ty::{self, Ty, TyCtxt}; +use middle::cstore::{ExternCrate, ExternCrateSource}; use syntax::ast; use syntax::symbol::Symbol; -use syntax::symbol::InternedString; +use syntax::symbol::LocalInternedString; use std::cell::Cell; @@ -95,21 +96,20 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // `extern crate` manually, we put the `extern // crate` as the parent. So you wind up with // something relative to the current crate. - // 2. for an indirect crate, where there is no extern - // crate, we just prepend the crate name. + // 2. 
for an extern inferred from a path or an indirect crate, + // where there is no explicit `extern crate`, we just prepend + // the crate name. // // Returns `None` for the local crate. if cnum != LOCAL_CRATE { let opt_extern_crate = self.extern_crate(cnum.as_def_id()); - let opt_extern_crate = opt_extern_crate.and_then(|extern_crate| { - if extern_crate.direct { - Some(extern_crate.def_id) - } else { - None - } - }); - if let Some(extern_crate_def_id) = opt_extern_crate { - self.push_item_path(buffer, extern_crate_def_id); + if let Some(ExternCrate { + src: ExternCrateSource::Extern(def_id), + direct: true, + .. + }) = *opt_extern_crate + { + self.push_item_path(buffer, def_id); } else { buffer.push(&self.crate_name(cnum).as_str()); } @@ -131,20 +131,24 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { { let visible_parent_map = self.visible_parent_map(LOCAL_CRATE); - let (mut cur_def, mut cur_path) = (external_def_id, Vec::::new()); + let (mut cur_def, mut cur_path) = (external_def_id, Vec::::new()); loop { // If `cur_def` is a direct or injected extern crate, push the path to the crate // followed by the path to the item within the crate and return. if cur_def.index == CRATE_DEF_INDEX { match *self.extern_crate(cur_def) { - Some(ref extern_crate) if extern_crate.direct => { - self.push_item_path(buffer, extern_crate.def_id); - cur_path.iter().rev().map(|segment| buffer.push(&segment)).count(); + Some(ExternCrate { + src: ExternCrateSource::Extern(def_id), + direct: true, + .. 
+ }) => { + self.push_item_path(buffer, def_id); + cur_path.iter().rev().for_each(|segment| buffer.push(&segment)); return true; } None => { buffer.push(&self.crate_name(cur_def.krate).as_str()); - cur_path.iter().rev().map(|segment| buffer.push(&segment)).count(); + cur_path.iter().rev().for_each(|segment| buffer.push(&segment)); return true; } _ => {}, @@ -164,8 +168,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } let data = cur_def_key.disambiguated_data.data; - let symbol = - data.get_opt_name().unwrap_or_else(|| Symbol::intern("").as_str()); + let symbol = data.get_opt_name().map(|n| n.as_str()).unwrap_or_else(|| { + if let DefPathData::CrateRoot = data { // reexported `extern crate` (#43189) + self.original_crate_name(cur_def.krate).as_str() + } else { + Symbol::intern("").as_str() + } + }); cur_path.push(symbol); match visible_parent_map.get(&cur_def) { @@ -200,21 +209,24 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // finer-grained distinctions, e.g. between enum/struct). data @ DefPathData::Misc | data @ DefPathData::TypeNs(..) | + data @ DefPathData::Trait(..) | + data @ DefPathData::AssocTypeInTrait(..) | + data @ DefPathData::AssocTypeInImpl(..) | + data @ DefPathData::AssocExistentialInImpl(..) | data @ DefPathData::ValueNs(..) | data @ DefPathData::Module(..) | data @ DefPathData::TypeParam(..) | - data @ DefPathData::LifetimeDef(..) | + data @ DefPathData::LifetimeParam(..) | data @ DefPathData::EnumVariant(..) | data @ DefPathData::Field(..) | - data @ DefPathData::Initializer | + data @ DefPathData::AnonConst | data @ DefPathData::MacroDef(..) | data @ DefPathData::ClosureExpr | data @ DefPathData::ImplTrait | - data @ DefPathData::Typeof | data @ DefPathData::GlobalMetaData(..) 
=> { let parent_def_id = self.parent_def_id(def_id).unwrap(); self.push_item_path(buffer, parent_def_id); - buffer.push(&data.as_interned_str()); + buffer.push(&data.as_interned_str().as_symbol().as_str()); } DefPathData::StructCtor => { // present `X` instead of `X::{{constructor}}` let parent_def_id = self.parent_def_id(def_id).unwrap(); @@ -303,7 +315,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr => { - buffer.push(&format!("{}", self_ty)); + buffer.push(&self_ty.to_string()); } _ => { @@ -352,12 +364,13 @@ pub fn characteristic_def_id_of_type(ty: Ty) -> Option { ty::TyArray(subty, _) | ty::TySlice(subty) => characteristic_def_id_of_type(subty), - ty::TyRawPtr(mt) | - ty::TyRef(_, mt) => characteristic_def_id_of_type(mt.ty), + ty::TyRawPtr(mt) => characteristic_def_id_of_type(mt.ty), - ty::TyTuple(ref tys, _) => tys.iter() - .filter_map(|ty| characteristic_def_id_of_type(ty)) - .next(), + ty::TyRef(_, ty, _) => characteristic_def_id_of_type(ty), + + ty::TyTuple(ref tys) => tys.iter() + .filter_map(|ty| characteristic_def_id_of_type(ty)) + .next(), ty::TyFnDef(def_id, _) | ty::TyClosure(def_id, _) | @@ -375,6 +388,7 @@ pub fn characteristic_def_id_of_type(ty: Ty) -> Option { ty::TyAnon(..) | ty::TyInfer(_) | ty::TyError | + ty::TyGeneratorWitness(..) | ty::TyNever | ty::TyFloat(_) => None, } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 50efb7300373..0da4d5ddea2f 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -8,13 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-pub use self::Integer::*; -pub use self::Primitive::*; +use session::{self, DataTypeKind}; +use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions}; -use session::{self, DataTypeKind, Session}; -use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions, ReprFlags}; - -use syntax::ast::{self, FloatTy, IntTy, UintTy}; +use syntax::ast::{self, IntTy, UintTy}; use syntax::attr; use syntax_pos::DUMMY_SP; @@ -23,430 +20,26 @@ use std::fmt; use std::i128; use std::iter; use std::mem; -use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; -/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout) -/// for a target, which contains everything needed to compute layouts. -pub struct TargetDataLayout { - pub endian: Endian, - pub i1_align: Align, - pub i8_align: Align, - pub i16_align: Align, - pub i32_align: Align, - pub i64_align: Align, - pub i128_align: Align, - pub f32_align: Align, - pub f64_align: Align, - pub pointer_size: Size, - pub pointer_align: Align, - pub aggregate_align: Align, +pub use rustc_target::abi::*; - /// Alignments for vector types. - pub vector_align: Vec<(Size, Align)> +pub trait IntegerExt { + fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>; + fn from_attr(cx: C, ity: attr::IntType) -> Integer; + fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + repr: &ReprOptions, + min: i128, + max: i128) + -> (Integer, bool); } -impl Default for TargetDataLayout { - /// Creates an instance of `TargetDataLayout`. 
- fn default() -> TargetDataLayout { - TargetDataLayout { - endian: Endian::Big, - i1_align: Align::from_bits(8, 8).unwrap(), - i8_align: Align::from_bits(8, 8).unwrap(), - i16_align: Align::from_bits(16, 16).unwrap(), - i32_align: Align::from_bits(32, 32).unwrap(), - i64_align: Align::from_bits(32, 64).unwrap(), - i128_align: Align::from_bits(32, 64).unwrap(), - f32_align: Align::from_bits(32, 32).unwrap(), - f64_align: Align::from_bits(64, 64).unwrap(), - pointer_size: Size::from_bits(64), - pointer_align: Align::from_bits(64, 64).unwrap(), - aggregate_align: Align::from_bits(0, 64).unwrap(), - vector_align: vec![ - (Size::from_bits(64), Align::from_bits(64, 64).unwrap()), - (Size::from_bits(128), Align::from_bits(128, 128).unwrap()) - ] - } - } -} - -impl TargetDataLayout { - pub fn parse(sess: &Session) -> TargetDataLayout { - // Parse a bit count from a string. - let parse_bits = |s: &str, kind: &str, cause: &str| { - s.parse::().unwrap_or_else(|err| { - sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}", - kind, s, cause, err)); - 0 - }) - }; - - // Parse a size string. - let size = |s: &str, cause: &str| { - Size::from_bits(parse_bits(s, "size", cause)) - }; - - // Parse an alignment string. - let align = |s: &[&str], cause: &str| { - if s.is_empty() { - sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause)); - } - let abi = parse_bits(s[0], "alignment", cause); - let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause)); - Align::from_bits(abi, pref).unwrap_or_else(|err| { - sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}", - cause, err)); - Align::from_bits(8, 8).unwrap() - }) - }; - - let mut dl = TargetDataLayout::default(); - let mut i128_align_src = 64; - for spec in sess.target.target.data_layout.split("-") { - match &spec.split(":").collect::>()[..] { - &["e"] => dl.endian = Endian::Little, - &["E"] => dl.endian = Endian::Big, - &["a", ref a..] 
=> dl.aggregate_align = align(a, "a"), - &["f32", ref a..] => dl.f32_align = align(a, "f32"), - &["f64", ref a..] => dl.f64_align = align(a, "f64"), - &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => { - dl.pointer_size = size(s, p); - dl.pointer_align = align(a, p); - } - &[s, ref a..] if s.starts_with("i") => { - let bits = match s[1..].parse::() { - Ok(bits) => bits, - Err(_) => { - size(&s[1..], "i"); // For the user error. - continue; - } - }; - let a = align(a, s); - match bits { - 1 => dl.i1_align = a, - 8 => dl.i8_align = a, - 16 => dl.i16_align = a, - 32 => dl.i32_align = a, - 64 => dl.i64_align = a, - _ => {} - } - if bits >= i128_align_src && bits <= 128 { - // Default alignment for i128 is decided by taking the alignment of - // largest-sized i{64...128}. - i128_align_src = bits; - dl.i128_align = a; - } - } - &[s, ref a..] if s.starts_with("v") => { - let v_size = size(&s[1..], "v"); - let a = align(a, s); - if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) { - v.1 = a; - continue; - } - // No existing entry, add a new one. - dl.vector_align.push((v_size, a)); - } - _ => {} // Ignore everything else. - } - } - - // Perform consistency checks against the Target information. - let endian_str = match dl.endian { - Endian::Little => "little", - Endian::Big => "big" - }; - if endian_str != sess.target.target.target_endian { - sess.err(&format!("inconsistent target specification: \"data-layout\" claims \ - architecture is {}-endian, while \"target-endian\" is `{}`", - endian_str, sess.target.target.target_endian)); - } - - if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width { - sess.err(&format!("inconsistent target specification: \"data-layout\" claims \ - pointers are {}-bit, while \"target-pointer-width\" is `{}`", - dl.pointer_size.bits(), sess.target.target.target_pointer_width)); - } - - dl - } - - /// Return exclusive upper bound on object size. 
- /// - /// The theoretical maximum object size is defined as the maximum positive `isize` value. - /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly - /// index every address within an object along with one byte past the end, along with allowing - /// `isize` to store the difference between any two pointers into an object. - /// - /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer - /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is - /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable - /// address space on 64-bit ARMv8 and x86_64. - pub fn obj_size_bound(&self) -> u64 { - match self.pointer_size.bits() { - 16 => 1 << 15, - 32 => 1 << 31, - 64 => 1 << 47, - bits => bug!("obj_size_bound: unknown pointer bit size {}", bits) - } - } - - pub fn ptr_sized_integer(&self) -> Integer { - match self.pointer_size.bits() { - 16 => I16, - 32 => I32, - 64 => I64, - bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits) - } - } - - pub fn vector_align(&self, vec_size: Size) -> Align { - for &(size, align) in &self.vector_align { - if size == vec_size { - return align; - } - } - // Default to natural alignment, which is what LLVM does. - // That is, use the size, rounded up to a power of 2. - let align = vec_size.bytes().next_power_of_two(); - Align::from_bytes(align, align).unwrap() - } -} - -pub trait HasDataLayout: Copy { - fn data_layout(&self) -> &TargetDataLayout; -} - -impl<'a> HasDataLayout for &'a TargetDataLayout { - fn data_layout(&self) -> &TargetDataLayout { - self - } -} - -/// Endianness of the target, which must match cfg(target-endian). -#[derive(Copy, Clone)] -pub enum Endian { - Little, - Big -} - -/// Size of a type in bytes. 
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub struct Size { - raw: u64 -} - -impl Size { - pub fn from_bits(bits: u64) -> Size { - // Avoid potential overflow from `bits + 7`. - Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8) - } - - pub fn from_bytes(bytes: u64) -> Size { - if bytes >= (1 << 61) { - bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes) - } - Size { - raw: bytes - } - } - - pub fn bytes(self) -> u64 { - self.raw - } - - pub fn bits(self) -> u64 { - self.bytes() * 8 - } - - pub fn abi_align(self, align: Align) -> Size { - let mask = align.abi() - 1; - Size::from_bytes((self.bytes() + mask) & !mask) - } - - pub fn is_abi_aligned(self, align: Align) -> bool { - let mask = align.abi() - 1; - self.bytes() & mask == 0 - } - - pub fn checked_add(self, offset: Size, cx: C) -> Option { - let dl = cx.data_layout(); - - // Each Size is less than dl.obj_size_bound(), so the sum is - // also less than 1 << 62 (and therefore can't overflow). - let bytes = self.bytes() + offset.bytes(); - - if bytes < dl.obj_size_bound() { - Some(Size::from_bytes(bytes)) - } else { - None - } - } - - pub fn checked_mul(self, count: u64, cx: C) -> Option { - let dl = cx.data_layout(); - - match self.bytes().checked_mul(count) { - Some(bytes) if bytes < dl.obj_size_bound() => { - Some(Size::from_bytes(bytes)) - } - _ => None - } - } -} - -// Panicking addition, subtraction and multiplication for convenience. -// Avoid during layout computation, return `LayoutError` instead. - -impl Add for Size { - type Output = Size; - fn add(self, other: Size) -> Size { - // Each Size is less than 1 << 61, so the sum is - // less than 1 << 62 (and therefore can't overflow). 
- Size::from_bytes(self.bytes() + other.bytes()) - } -} - -impl Sub for Size { - type Output = Size; - fn sub(self, other: Size) -> Size { - // Each Size is less than 1 << 61, so an underflow - // would result in a value larger than 1 << 61, - // which Size::from_bytes will catch for us. - Size::from_bytes(self.bytes() - other.bytes()) - } -} - -impl Mul for Size { - type Output = Size; - fn mul(self, count: u64) -> Size { - match self.bytes().checked_mul(count) { - Some(bytes) => Size::from_bytes(bytes), - None => { - bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count) - } - } - } -} - -impl AddAssign for Size { - fn add_assign(&mut self, other: Size) { - *self = *self + other; - } -} - -/// Alignment of a type in bytes, both ABI-mandated and preferred. -/// Each field is a power of two, giving the alignment a maximum -/// value of 2(28 - 1), which is limited by LLVM to a i32, with -/// a maximum capacity of 231 - 1 or 2147483647. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct Align { - abi: u8, - pref: u8, -} - -impl Align { - pub fn from_bits(abi: u64, pref: u64) -> Result { - Align::from_bytes(Size::from_bits(abi).bytes(), - Size::from_bits(pref).bytes()) - } - - pub fn from_bytes(abi: u64, pref: u64) -> Result { - let log2 = |align: u64| { - // Treat an alignment of 0 bytes like 1-byte alignment. 
- if align == 0 { - return Ok(0); - } - - let mut bytes = align; - let mut pow: u8 = 0; - while (bytes & 1) == 0 { - pow += 1; - bytes >>= 1; - } - if bytes != 1 { - Err(format!("`{}` is not a power of 2", align)) - } else if pow > 30 { - Err(format!("`{}` is too large", align)) - } else { - Ok(pow) - } - }; - - Ok(Align { - abi: log2(abi)?, - pref: log2(pref)?, - }) - } - - pub fn abi(self) -> u64 { - 1 << self.abi - } - - pub fn pref(self) -> u64 { - 1 << self.pref - } - - pub fn abi_bits(self) -> u64 { - self.abi() * 8 - } - - pub fn pref_bits(self) -> u64 { - self.pref() * 8 - } - - pub fn min(self, other: Align) -> Align { - Align { - abi: cmp::min(self.abi, other.abi), - pref: cmp::min(self.pref, other.pref), - } - } - - pub fn max(self, other: Align) -> Align { - Align { - abi: cmp::max(self.abi, other.abi), - pref: cmp::max(self.pref, other.pref), - } - } -} - -/// Integers, also used for enum discriminants. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub enum Integer { - I8, - I16, - I32, - I64, - I128, -} - -impl<'a, 'tcx> Integer { - pub fn size(&self) -> Size { - match *self { - I8 => Size::from_bytes(1), - I16 => Size::from_bytes(2), - I32 => Size::from_bytes(4), - I64 => Size::from_bytes(8), - I128 => Size::from_bytes(16), - } - } - - pub fn align(&self, cx: C) -> Align { - let dl = cx.data_layout(); - - match *self { - I8 => dl.i8_align, - I16 => dl.i16_align, - I32 => dl.i32_align, - I64 => dl.i64_align, - I128 => dl.i128_align, - } - } - - pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { +impl IntegerExt for Integer { + fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { match (*self, signed) { (I8, false) => tcx.types.u8, (I16, false) => tcx.types.u16, @@ -461,57 +54,8 @@ impl<'a, 'tcx> Integer { } } - /// Find the smallest Integer type which can represent the signed value. 
- pub fn fit_signed(x: i128) -> Integer { - match x { - -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8, - -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16, - -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32, - -0x8000_0000_0000_0000...0x7fff_ffff_ffff_ffff => I64, - _ => I128 - } - } - - /// Find the smallest Integer type which can represent the unsigned value. - pub fn fit_unsigned(x: u128) -> Integer { - match x { - 0...0x0000_0000_0000_00ff => I8, - 0...0x0000_0000_0000_ffff => I16, - 0...0x0000_0000_ffff_ffff => I32, - 0...0xffff_ffff_ffff_ffff => I64, - _ => I128, - } - } - - /// Find the smallest integer with the given alignment. - pub fn for_abi_align(cx: C, align: Align) -> Option { - let dl = cx.data_layout(); - - let wanted = align.abi(); - for &candidate in &[I8, I16, I32, I64, I128] { - if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() { - return Some(candidate); - } - } - None - } - - /// Find the largest integer with the given alignment or less. - pub fn approximate_abi_align(cx: C, align: Align) -> Integer { - let dl = cx.data_layout(); - - let wanted = align.abi(); - // FIXME(eddyb) maybe include I128 in the future, when it works everywhere. - for &candidate in &[I64, I32, I16] { - if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() { - return candidate; - } - } - I8 - } - /// Get the Integer type from an attr::IntType. - pub fn from_attr(cx: C, ity: attr::IntType) -> Integer { + fn from_attr(cx: C, ity: attr::IntType) -> Integer { let dl = cx.data_layout(); match ity { @@ -530,7 +74,7 @@ impl<'a, 'tcx> Integer { /// signed discriminant range and #[repr] attribute. /// N.B.: u128 values above i128::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. 
- fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, repr: &ReprOptions, min: i128, @@ -578,78 +122,21 @@ impl<'a, 'tcx> Integer { } } -/// Fundamental unit of memory access and layout. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum Primitive { - /// The `bool` is the signedness of the `Integer` type. - /// - /// One would think we would not care about such details this low down, - /// but some ABIs are described in terms of C types and ISAs where the - /// integer arithmetic is done on {sign,zero}-extended registers, e.g. - /// a negative integer passed by zero-extension will appear positive in - /// the callee, and most operations on it will produce the wrong values. - Int(Integer, bool), - F32, - F64, - Pointer +pub trait PrimitiveExt { + fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>; } -impl<'a, 'tcx> Primitive { - pub fn size(self, cx: C) -> Size { - let dl = cx.data_layout(); - - match self { - Int(i, _) => i.size(), - F32 => Size::from_bits(32), - F64 => Size::from_bits(64), - Pointer => dl.pointer_size - } - } - - pub fn align(self, cx: C) -> Align { - let dl = cx.data_layout(); - - match self { - Int(i, _) => i.align(dl), - F32 => dl.f32_align, - F64 => dl.f64_align, - Pointer => dl.pointer_align - } - } - - pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { +impl PrimitiveExt for Primitive { + fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { match *self { Int(i, signed) => i.to_ty(tcx, signed), - F32 => tcx.types.f32, - F64 => tcx.types.f64, + Float(FloatTy::F32) => tcx.types.f32, + Float(FloatTy::F64) => tcx.types.f64, Pointer => tcx.mk_mut_ptr(tcx.mk_nil()), } } } -/// Information about one scalar component of a Rust type. 
-#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct Scalar { - pub value: Primitive, - - /// Inclusive wrap-around range of valid values, that is, if - /// min > max, it represents min..=u128::MAX followed by 0..=max. - // FIXME(eddyb) always use the shortest range, e.g. by finding - // the largest space between two consecutive valid values and - // taking everything else as the (shortest) valid range. - pub valid_range: RangeInclusive, -} - -impl Scalar { - pub fn is_bool(&self) -> bool { - if let Int(I8, _) = self.value { - self.valid_range == (0..=1) - } else { - false - } - } -} - /// The first half of a fat pointer. /// /// - For a trait object, this is the address of the box. @@ -662,173 +149,7 @@ pub const FAT_PTR_ADDR: usize = 0; /// - For a slice, this is the length. pub const FAT_PTR_EXTRA: usize = 1; -/// Describes how the fields of a type are located in memory. -#[derive(PartialEq, Eq, Hash, Debug)] -pub enum FieldPlacement { - /// All fields start at no offset. The `usize` is the field count. - Union(usize), - - /// Array/vector-like placement, with all fields of identical types. - Array { - stride: Size, - count: u64 - }, - - /// Struct-like placement, with precomputed offsets. - /// - /// Fields are guaranteed to not overlap, but note that gaps - /// before, between and after all the fields are NOT always - /// padding, and as such their contents may not be discarded. - /// For example, enum variants leave a gap at the start, - /// where the discriminant field in the enum layout goes. - Arbitrary { - /// Offsets for the first byte of each field, - /// ordered to match the source definition order. - /// This vector does not go in increasing order. - // FIXME(eddyb) use small vector optimization for the common case. - offsets: Vec, - - /// Maps source order field indices to memory order indices, - /// depending how fields were permuted. - // FIXME(camlorn) also consider small vector optimization here. 
- memory_index: Vec - } -} - -impl FieldPlacement { - pub fn count(&self) -> usize { - match *self { - FieldPlacement::Union(count) => count, - FieldPlacement::Array { count, .. } => { - let usize_count = count as usize; - assert_eq!(usize_count as u64, count); - usize_count - } - FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len() - } - } - - pub fn offset(&self, i: usize) -> Size { - match *self { - FieldPlacement::Union(_) => Size::from_bytes(0), - FieldPlacement::Array { stride, count } => { - let i = i as u64; - assert!(i < count); - stride * i - } - FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i] - } - } - - pub fn memory_index(&self, i: usize) -> usize { - match *self { - FieldPlacement::Union(_) | - FieldPlacement::Array { .. } => i, - FieldPlacement::Arbitrary { ref memory_index, .. } => { - let r = memory_index[i]; - assert_eq!(r as usize as u32, r); - r as usize - } - } - } - - /// Get source indices of the fields by increasing offsets. - #[inline] - pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator+'a { - let mut inverse_small = [0u8; 64]; - let mut inverse_big = vec![]; - let use_small = self.count() <= inverse_small.len(); - - // We have to write this logic twice in order to keep the array small. - if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self { - if use_small { - for i in 0..self.count() { - inverse_small[memory_index[i] as usize] = i as u8; - } - } else { - inverse_big = vec![0; self.count()]; - for i in 0..self.count() { - inverse_big[memory_index[i] as usize] = i as u32; - } - } - } - - (0..self.count()).map(move |i| { - match *self { - FieldPlacement::Union(_) | - FieldPlacement::Array { .. } => i, - FieldPlacement::Arbitrary { .. } => { - if use_small { inverse_small[i] as usize } - else { inverse_big[i] as usize } - } - } - }) - } -} - -/// Describes how values of the type are passed by target ABIs, -/// in terms of categories of C types there are ABI rules for. 
-#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum Abi { - Uninhabited, - Scalar(Scalar), - ScalarPair(Scalar, Scalar), - Vector { - element: Scalar, - count: u64 - }, - Aggregate { - /// If true, the size is exact, otherwise it's only a lower bound. - sized: bool, - } -} - -impl Abi { - /// Returns true if the layout corresponds to an unsized type. - pub fn is_unsized(&self) -> bool { - match *self { - Abi::Uninhabited | - Abi::Scalar(_) | - Abi::ScalarPair(..) | - Abi::Vector { .. } => false, - Abi::Aggregate { sized } => !sized - } - } -} - -#[derive(PartialEq, Eq, Hash, Debug)] -pub enum Variants { - /// Single enum variants, structs/tuples, unions, and all non-ADTs. - Single { - index: usize - }, - - /// General-case enums: for each case there is a struct, and they all have - /// all space reserved for the discriminant, and their first field starts - /// at a non-0 offset, after where the discriminant would go. - Tagged { - discr: Scalar, - variants: Vec, - }, - - /// Multiple cases distinguished by a niche (values invalid for a type): - /// the variant `dataful_variant` contains a niche at an arbitrary - /// offset (field 0 of the enum), which for a variant with discriminant - /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`. - /// - /// For example, `Option<(usize, &T)>` is represented such that - /// `None` has a null pointer for the second tuple field, and - /// `Some` is the identity function (with a non-null reference). 
- NicheFilling { - dataful_variant: usize, - niche_variants: RangeInclusive, - niche: Scalar, - niche_start: u128, - variants: Vec, - } -} - -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] pub enum LayoutError<'tcx> { Unknown(Ty<'tcx>), SizeOverflow(Ty<'tcx>) @@ -847,74 +168,51 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct LayoutDetails { - pub variants: Variants, - pub fields: FieldPlacement, - pub abi: Abi, - pub align: Align, - pub size: Size -} - -impl LayoutDetails { - fn scalar(cx: C, scalar: Scalar) -> Self { - let size = scalar.value.size(cx); - let align = scalar.value.align(cx); - LayoutDetails { - variants: Variants::Single { index: 0 }, - fields: FieldPlacement::Union(0), - abi: Abi::Scalar(scalar), - size, - align, - } - } - - fn uninhabited(field_count: usize) -> Self { - let align = Align::from_bytes(1, 1).unwrap(); - LayoutDetails { - variants: Variants::Single { index: 0 }, - fields: FieldPlacement::Union(field_count), - abi: Abi::Uninhabited, - align, - size: Size::from_bytes(0) - } - } -} - fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { - let (param_env, ty) = query.into_parts(); + ty::tls::with_related_context(tcx, move |icx| { + let rec_limit = *tcx.sess.recursion_limit.get(); + let (param_env, ty) = query.into_parts(); - let rec_limit = tcx.sess.recursion_limit.get(); - let depth = tcx.layout_depth.get(); - if depth > rec_limit { - tcx.sess.fatal( - &format!("overflow representing the type `{}`", ty)); - } + if icx.layout_depth > rec_limit { + tcx.sess.fatal( + &format!("overflow representing the type `{}`", ty)); + } - tcx.layout_depth.set(depth+1); - let layout = LayoutDetails::compute_uncached(tcx, param_env, ty); - tcx.layout_depth.set(depth); + // Update the ImplicitCtxt to increase the layout_depth + let icx = ty::tls::ImplicitCtxt { + 
layout_depth: icx.layout_depth + 1, + ..icx.clone() + }; - layout + ty::tls::enter_context(&icx, |_| { + let cx = LayoutCx { tcx, param_env }; + cx.layout_raw_uncached(ty) + }) + }) } -pub fn provide(providers: &mut ty::maps::Providers) { - *providers = ty::maps::Providers { +pub fn provide(providers: &mut ty::query::Providers) { + *providers = ty::query::Providers { layout_raw, ..*providers }; } -impl<'a, 'tcx> LayoutDetails { - fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>) - -> Result<&'tcx Self, LayoutError<'tcx>> { - let cx = (tcx, param_env); - let dl = cx.data_layout(); +#[derive(Copy, Clone)] +pub struct LayoutCx<'tcx, C> { + pub tcx: C, + pub param_env: ty::ParamEnv<'tcx> +} + +impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { + fn layout_raw_uncached(self, ty: Ty<'tcx>) + -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { + let tcx = self.tcx; + let param_env = self.param_env; + let dl = self.data_layout(); let scalar_unit = |value: Primitive| { let bits = value.size(dl).bits(); assert!(bits <= 128); @@ -924,7 +222,7 @@ impl<'a, 'tcx> LayoutDetails { } }; let scalar = |value: Primitive| { - tcx.intern_layout(LayoutDetails::scalar(cx, scalar_unit(value))) + tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value))) }; let scalar_pair = |a: Scalar, b: Scalar| { let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); @@ -933,7 +231,7 @@ impl<'a, 'tcx> LayoutDetails { LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0), b_offset], + offsets: vec![Size::ZERO, b_offset], memory_index: vec![0, 1] }, abi: Abi::ScalarPair(a, b), @@ -957,6 +255,11 @@ impl<'a, 'tcx> LayoutDetails { bug!("struct cannot be packed and aligned"); } + let pack = { + let pack = repr.pack as u64; + Align::from_bytes(pack, pack).unwrap() + }; + let mut align = if packed { dl.i8_align } else { @@ -964,11 +267,10 @@ impl<'a, 'tcx> 
LayoutDetails { }; let mut sized = true; - let mut offsets = vec![Size::from_bytes(0); fields.len()]; + let mut offsets = vec![Size::ZERO; fields.len()]; let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); - // Anything with repr(C) or repr(packed) doesn't optimize. - let mut optimize = (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); + let mut optimize = !repr.inhibit_struct_field_reordering_opt(); if let StructKind::Prefixed(_, align) = kind { optimize &= align.abi() == 1; } @@ -980,6 +282,9 @@ impl<'a, 'tcx> LayoutDetails { fields.len() }; let optimizing = &mut inverse_memory_index[..end]; + let field_align = |f: &TyLayout| { + if packed { f.align.min(pack).abi() } else { f.align.abi() } + }; match kind { StructKind::AlwaysSized | StructKind::MaybeUnsized => { @@ -987,11 +292,11 @@ impl<'a, 'tcx> LayoutDetails { // Place ZSTs first to avoid "interesting offsets", // especially with only one or two non-ZST fields. let f = &fields[x as usize]; - (!f.is_zst(), cmp::Reverse(f.align.abi())) - }) + (!f.is_zst(), cmp::Reverse(field_align(f))) + }); } StructKind::Prefixed(..) => { - optimizing.sort_by_key(|&x| fields[x as usize].align.abi()); + optimizing.sort_by_key(|&x| field_align(&fields[x as usize])); } } } @@ -1002,10 +307,13 @@ impl<'a, 'tcx> LayoutDetails { // field 5 with offset 0 puts 0 in offsets[5]. // At the bottom of this function, we use inverse_memory_index to produce memory_index. 
- let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; if let StructKind::Prefixed(prefix_size, prefix_align) = kind { - if !packed { + if packed { + let prefix_align = prefix_align.min(pack); + align = align.max(prefix_align); + } else { align = align.max(prefix_align); } offset = prefix_size.abi_align(prefix_align); @@ -1018,16 +326,17 @@ impl<'a, 'tcx> LayoutDetails { offsets.len(), ty); } - if field.abi == Abi::Uninhabited { - return Ok(LayoutDetails::uninhabited(fields.len())); - } - if field.is_unsized() { sized = false; } // Invariant: offset < dl.obj_size_bound() <= 1<<61 - if !packed { + if packed { + let field_pack = field.align.min(pack); + offset = offset.abi_align(field_pack); + align = align.max(field_pack); + } + else { offset = offset.abi_align(field.align); align = align.max(field.align); } @@ -1139,6 +448,10 @@ impl<'a, 'tcx> LayoutDetails { } } + if sized && fields.iter().any(|f| f.abi == Abi::Uninhabited) { + abi = Abi::Uninhabited; + } + Ok(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { @@ -1153,18 +466,18 @@ impl<'a, 'tcx> LayoutDetails { let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| { Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?)) }; - assert!(!ty.has_infer_types()); + debug_assert!(!ty.has_infer_types()); Ok(match ty.sty { // Basic scalars. 
ty::TyBool => { - tcx.intern_layout(LayoutDetails::scalar(cx, Scalar { + tcx.intern_layout(LayoutDetails::scalar(self, Scalar { value: Int(I8, false), valid_range: 0..=1 })) } ty::TyChar => { - tcx.intern_layout(LayoutDetails::scalar(cx, Scalar { + tcx.intern_layout(LayoutDetails::scalar(self, Scalar { value: Int(I32, false), valid_range: 0..=0x10FFFF })) @@ -1175,43 +488,48 @@ impl<'a, 'tcx> LayoutDetails { ty::TyUint(ity) => { scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)) } - ty::TyFloat(FloatTy::F32) => scalar(F32), - ty::TyFloat(FloatTy::F64) => scalar(F64), + ty::TyFloat(fty) => scalar(Float(fty)), ty::TyFnPtr(_) => { let mut ptr = scalar_unit(Pointer); - ptr.valid_range.start = 1; - tcx.intern_layout(LayoutDetails::scalar(cx, ptr)) + ptr.valid_range = 1..=*ptr.valid_range.end(); + tcx.intern_layout(LayoutDetails::scalar(self, ptr)) } // The never type. ty::TyNever => { - tcx.intern_layout(LayoutDetails::uninhabited(0)) + tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Union(0), + abi: Abi::Uninhabited, + align: dl.i8_align, + size: Size::ZERO + }) } // Potentially-fat pointers. - ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | + ty::TyRef(_, pointee, _) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { let mut data_ptr = scalar_unit(Pointer); if !ty.is_unsafe_ptr() { - data_ptr.valid_range.start = 1; + data_ptr.valid_range = 1..=*data_ptr.valid_range.end(); } - let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); - if pointee.is_sized(tcx, param_env, DUMMY_SP) { - return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr))); + let pointee = tcx.normalize_erasing_regions(param_env, pointee); + if pointee.is_sized(tcx.at(DUMMY_SP), param_env) { + return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr))); } let unsized_part = tcx.struct_tail(pointee); let metadata = match unsized_part.sty { ty::TyForeign(..) 
=> { - return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr))); + return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr))); } ty::TySlice(_) | ty::TyStr => { scalar_unit(Int(dl.ptr_sized_integer(), false)) } ty::TyDynamic(..) => { let mut vtable = scalar_unit(Pointer); - vtable.valid_range.start = 1; + vtable.valid_range = 1..=*vtable.valid_range.end(); vtable } _ => return Err(LayoutError::Unknown(unsized_part)) @@ -1224,14 +542,14 @@ impl<'a, 'tcx> LayoutDetails { // Arrays and slices. ty::TyArray(element, mut count) => { if count.has_projections() { - count = tcx.normalize_associated_type_in_env(&count, param_env); + count = tcx.normalize_erasing_regions(param_env, count); if count.has_projections() { return Err(LayoutError::Unknown(ty)); } } - let element = cx.layout_of(element)?; - let count = count.val.to_const_int().unwrap().to_u64().unwrap(); + let element = self.layout_of(element)?; + let count = count.unwrap_usize(tcx); let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; @@ -1247,7 +565,7 @@ impl<'a, 'tcx> LayoutDetails { }) } ty::TySlice(element) => { - let element = cx.layout_of(element)?; + let element = self.layout_of(element)?; tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Array { @@ -1256,7 +574,7 @@ impl<'a, 'tcx> LayoutDetails { }, abi: Abi::Aggregate { sized: false }, align: element.align, - size: Size::from_bytes(0) + size: Size::ZERO }) } ty::TyStr => { @@ -1268,7 +586,7 @@ impl<'a, 'tcx> LayoutDetails { }, abi: Abi::Aggregate { sized: false }, align: dl.i8_align, - size: Size::from_bytes(0) + size: Size::ZERO }) } @@ -1289,32 +607,32 @@ impl<'a, 'tcx> LayoutDetails { // Tuples, generators and closures. 
ty::TyGenerator(def_id, ref substs, _) => { let tys = substs.field_tys(def_id, tcx); - univariant(&tys.map(|ty| cx.layout_of(ty)).collect::, _>>()?, + univariant(&tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), StructKind::AlwaysSized)? } ty::TyClosure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); - univariant(&tys.map(|ty| cx.layout_of(ty)).collect::, _>>()?, + univariant(&tys.map(|ty| self.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), StructKind::AlwaysSized)? } - ty::TyTuple(tys, _) => { + ty::TyTuple(tys) => { let kind = if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized }; - univariant(&tys.iter().map(|ty| cx.layout_of(ty)).collect::, _>>()?, + univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), kind)? } // SIMD vector types. ty::TyAdt(def, ..) if def.repr.simd() => { - let element = cx.layout_of(ty.simd_type(tcx))?; + let element = self.layout_of(ty.simd_type(tcx))?; let count = ty.simd_size(tcx) as u64; assert!(count > 0); let scalar = match element.abi { @@ -1350,7 +668,7 @@ impl<'a, 'tcx> LayoutDetails { // Cache the field layouts. 
let variants = def.variants.iter().map(|v| { v.fields.iter().map(|field| { - cx.layout_of(field.ty(tcx, substs)) + self.layout_of(field.ty(tcx, substs)) }).collect::, _>>() }).collect::, _>>()?; @@ -1360,7 +678,12 @@ impl<'a, 'tcx> LayoutDetails { bug!("Union cannot be packed and aligned"); } - let mut align = if def.repr.packed() { + let pack = { + let pack = def.repr.pack as u64; + Align::from_bytes(pack, pack).unwrap() + }; + + let mut align = if packed { dl.i8_align } else { dl.aggregate_align @@ -1372,11 +695,14 @@ impl<'a, 'tcx> LayoutDetails { Align::from_bytes(repr_align, repr_align).unwrap()); } - let mut size = Size::from_bytes(0); + let mut size = Size::ZERO; for field in &variants[0] { assert!(!field.is_unsized()); - if !packed { + if packed { + let field_pack = field.align.min(pack); + align = align.max(field_pack); + } else { align = align.max(field.align); } size = cmp::max(size, field.size); @@ -1391,38 +717,44 @@ impl<'a, 'tcx> LayoutDetails { })); } - let (inh_first, inh_second) = { - let mut inh_variants = (0..variants.len()).filter(|&v| { - variants[v].iter().all(|f| f.abi != Abi::Uninhabited) - }); - (inh_variants.next(), inh_variants.next()) + // A variant is absent if it's uninhabited and only has ZST fields. + // Present uninhabited variants only require space for their fields, + // but *not* an encoding of the discriminant (e.g. a tag value). + // See issue #49298 for more details on the need to leave space + // for non-ZST uninhabited data (mostly partial initialization). + let absent = |fields: &[TyLayout]| { + let uninhabited = fields.iter().any(|f| f.abi == Abi::Uninhabited); + let is_zst = fields.iter().all(|f| f.is_zst()); + uninhabited && is_zst }; - if inh_first.is_none() { - // Uninhabited because it has no variants, or only uninhabited ones. 
- return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0))); + let (present_first, present_second) = { + let mut present_variants = (0..variants.len()).filter(|&v| { + !absent(&variants[v]) + }); + (present_variants.next(), present_variants.next()) + }; + if present_first.is_none() { + // Uninhabited because it has no variants, or only absent ones. + return tcx.layout_raw(param_env.and(tcx.types.never)); } let is_struct = !def.is_enum() || - // Only one variant is inhabited. - (inh_second.is_none() && + // Only one variant is present. + (present_second.is_none() && // Representation optimizations are allowed. - !def.repr.inhibit_enum_layout_opt() && - // Inhabited variant either has data ... - (!variants[inh_first.unwrap()].is_empty() || - // ... or there other, uninhabited, variants. - variants.len() > 1)); + !def.repr.inhibit_enum_layout_opt()); if is_struct { // Struct, or univariant enum equivalent to a struct. // (Typechecking will reject discriminant-sizing attrs.) - let v = inh_first.unwrap(); + let v = present_first.unwrap(); let kind = if def.is_enum() || variants[v].len() == 0 { StructKind::AlwaysSized } else { let param_env = tcx.param_env(def.did); let last_field = def.variants[v].fields.last().unwrap(); let always_sized = tcx.type_of(last_field.did) - .is_sized(tcx, param_env, DUMMY_SP); + .is_sized(tcx.at(DUMMY_SP), param_env); if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized } }; @@ -1430,12 +762,12 @@ impl<'a, 'tcx> LayoutDetails { let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?; st.variants = Variants::Single { index: v }; // Exclude 0 from the range of a newtype ABI NonZero. 
- if Some(def.did) == cx.tcx().lang_items().non_zero() { + if Some(def.did) == self.tcx.lang_items().non_zero() { match st.abi { Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { - if scalar.valid_range.start == 0 { - scalar.valid_range.start = 1; + if *scalar.valid_range.start() == 0 { + scalar.valid_range = 1..=*scalar.valid_range.end(); } } _ => {} @@ -1444,6 +776,9 @@ impl<'a, 'tcx> LayoutDetails { return Ok(tcx.intern_layout(st)); } + // The current code for niche-filling relies on variant indices + // instead of actual discriminants, so dataful enums with + // explicit discriminants (RFC #2363) would misbehave. let no_explicit_discriminants = def.variants.iter().enumerate() .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i)); @@ -1454,10 +789,10 @@ impl<'a, 'tcx> LayoutDetails { // Find one non-ZST variant. 'variants: for (v, fields) in variants.iter().enumerate() { + if absent(fields) { + continue 'variants; + } for f in fields { - if f.abi == Abi::Uninhabited { - continue 'variants; - } if !f.is_zst() { if dataful_variant.is_none() { dataful_variant = Some(v); @@ -1468,24 +803,25 @@ impl<'a, 'tcx> LayoutDetails { } } } - if niche_variants.start > v { - niche_variants.start = v; - } - niche_variants.end = v; + niche_variants = *niche_variants.start().min(&v)..=v; } - if niche_variants.start > niche_variants.end { + if niche_variants.start() > niche_variants.end() { dataful_variant = None; } if let Some(i) = dataful_variant { - let count = (niche_variants.end - niche_variants.start + 1) as u128; - for (field_index, field) in variants[i].iter().enumerate() { - let (offset, niche, niche_start) = - match field.find_niche(cx, count)? { - Some(niche) => niche, - None => continue - }; + let count = (niche_variants.end() - niche_variants.start() + 1) as u128; + for (field_index, &field) in variants[i].iter().enumerate() { + let niche = match self.find_niche(field)? 
{ + Some(niche) => niche, + _ => continue, + }; + let (niche_start, niche_scalar) = match niche.reserve(self, count) { + Some(pair) => pair, + None => continue, + }; + let mut align = dl.aggregate_align; let st = variants.iter().enumerate().map(|(j, v)| { let mut st = univariant_uninterned(v, @@ -1497,20 +833,41 @@ impl<'a, 'tcx> LayoutDetails { Ok(st) }).collect::, _>>()?; - let offset = st[i].fields.offset(field_index) + offset; + let offset = st[i].fields.offset(field_index) + niche.offset; let size = st[i].size; - let abi = if offset.bytes() == 0 && niche.value.size(dl) == size { - Abi::Scalar(niche.clone()) - } else { - Abi::Aggregate { sized: true } + let mut abi = match st[i].abi { + Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()), + Abi::ScalarPair(ref first, ref second) => { + // We need to use scalar_unit to reset the + // valid range to the maximal one for that + // primitive, because only the niche is + // guaranteed to be initialised, not the + // other primitive. + if offset.bytes() == 0 { + Abi::ScalarPair( + niche_scalar.clone(), + scalar_unit(second.value), + ) + } else { + Abi::ScalarPair( + scalar_unit(first.value), + niche_scalar.clone(), + ) + } + } + _ => Abi::Aggregate { sized: true }, }; + if st.iter().all(|v| v.abi == Abi::Uninhabited) { + abi = Abi::Uninhabited; + } + return Ok(tcx.intern_layout(LayoutDetails { variants: Variants::NicheFilling { dataful_variant: i, niche_variants, - niche, + niche: niche_scalar, niche_start, variants: st, }, @@ -1527,19 +884,30 @@ impl<'a, 'tcx> LayoutDetails { } let (mut min, mut max) = (i128::max_value(), i128::min_value()); + let discr_type = def.repr.discr_type(); + let bits = Integer::from_attr(tcx, discr_type).size().bits(); for (i, discr) in def.discriminants(tcx).enumerate() { if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) { continue; } - let x = discr.to_u128_unchecked() as i128; + let mut x = discr.val as i128; + if discr_type.is_signed() { + // sign extend the raw representation to 
be an i128 + x = (x << (128 - bits)) >> (128 - bits); + } if x < min { min = x; } if x > max { max = x; } } + // We might have no inhabited variants, so pretend there's at least one. + if (min, max) == (i128::max_value(), i128::min_value()) { + min = 0; + max = 0; + } assert!(min <= max, "discriminant range is {}...{}", min, max); let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); let mut align = dl.aggregate_align; - let mut size = Size::from_bytes(0); + let mut size = Size::ZERO; // We're interested in the smallest alignment, so start large. let mut start_align = Align::from_bytes(256, 256).unwrap(); @@ -1560,7 +928,7 @@ impl<'a, 'tcx> LayoutDetails { } // Create the set of structs that represent each variant. - let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| { + let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| { let mut st = univariant_uninterned(&field_layouts, &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?; st.variants = Variants::Single { index: i }; @@ -1591,14 +959,14 @@ impl<'a, 'tcx> LayoutDetails { // because this discriminant will be loaded, and then stored into variable of // type calculated by typeck. Consider such case (a bug): typeck decided on // byte-sized discriminant, but layout thinks we need a 16-bit to store all - // discriminant values. That would be a bug, because then, in trans, in order + // discriminant values. That would be a bug, because then, in codegen, in order // to store this 16-bit discriminant into 8-bit sized temporary some of the // space necessary to represent would have to be discarded (or layout is wrong // on thinking it needs 16 bits) bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})", min_ity, typeck_ity); // However, it is fine to make discr type however large (as an optimisation) - // after this point – we’ll just truncate the value we load in trans. 
+ // after this point – we’ll just truncate the value we load in codegen. } // Check to see if we should use a different type for the @@ -1607,11 +975,15 @@ impl<'a, 'tcx> LayoutDetails { // We increase the size of the discriminant to avoid LLVM copying // padding when it doesn't need to. This normally causes unaligned // load/stores and excessive memcpy/memset operations. By using a - // bigger integer size, LLVM can be sure about it's contents and + // bigger integer size, LLVM can be sure about its contents and // won't be so conservative. // Use the initial field alignment - let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity); + let mut ity = if def.repr.c() || def.repr.int.is_some() { + min_ity + } else { + Integer::for_abi_align(dl, start_align).unwrap_or(min_ity) + }; // If the alignment is not larger than the chosen discriminant size, // don't use the alignment as the final size. @@ -1621,10 +993,7 @@ impl<'a, 'tcx> LayoutDetails { // Patch up the variants' first few fields. let old_ity_size = min_ity.size(); let new_ity_size = ity.size(); - for variant in &mut variants { - if variant.abi == Abi::Uninhabited { - continue; - } + for variant in &mut layout_variants { match variant.fields { FieldPlacement::Arbitrary { ref mut offsets, .. } => { for i in offsets { @@ -1643,22 +1012,88 @@ impl<'a, 'tcx> LayoutDetails { } } - let discr = Scalar { + let tag_mask = !0u128 >> (128 - ity.size().bits()); + let tag = Scalar { value: Int(ity, signed), - valid_range: (min as u128)..=(max as u128) + valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask), }; - let abi = if discr.value.size(dl) == size { - Abi::Scalar(discr.clone()) + let mut abi = Abi::Aggregate { sized: true }; + if tag.value.size(dl) == size { + abi = Abi::Scalar(tag.clone()); } else { - Abi::Aggregate { sized: true } - }; + // Try to use a ScalarPair for all tagged enums. 
+ let mut common_prim = None; + for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) { + let offsets = match layout_variant.fields { + FieldPlacement::Arbitrary { ref offsets, .. } => offsets, + _ => bug!(), + }; + let mut fields = field_layouts + .iter() + .zip(offsets) + .filter(|p| !p.0.is_zst()); + let (field, offset) = match (fields.next(), fields.next()) { + (None, None) => continue, + (Some(pair), None) => pair, + _ => { + common_prim = None; + break; + } + }; + let prim = match field.details.abi { + Abi::Scalar(ref scalar) => scalar.value, + _ => { + common_prim = None; + break; + } + }; + if let Some(pair) = common_prim { + // This is pretty conservative. We could go fancier + // by conflating things like i32 and u32, or even + // realising that (u8, u8) could just cohabit with + // u16 or even u32. + if pair != (prim, offset) { + common_prim = None; + break; + } + } else { + common_prim = Some((prim, offset)); + } + } + if let Some((prim, offset)) = common_prim { + let pair = scalar_pair(tag.clone(), scalar_unit(prim)); + let pair_offsets = match pair.fields { + FieldPlacement::Arbitrary { + ref offsets, + ref memory_index + } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!() + }; + if pair_offsets[0] == Size::ZERO && + pair_offsets[1] == *offset && + align == pair.align && + size == pair.size { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). + abi = pair.abi; + } + } + } + + if layout_variants.iter().all(|v| v.abi == Abi::Uninhabited) { + abi = Abi::Uninhabited; + } + tcx.intern_layout(LayoutDetails { variants: Variants::Tagged { - discr, - variants + tag, + variants: layout_variants, }, fields: FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0)], + offsets: vec![Size::ZERO], memory_index: vec![0] }, abi, @@ -1669,86 +1104,81 @@ impl<'a, 'tcx> LayoutDetails { // Types with no meaningful known layout. ty::TyProjection(_) | ty::TyAnon(..) 
=> { - let normalized = tcx.normalize_associated_type_in_env(&ty, param_env); + let normalized = tcx.normalize_erasing_regions(param_env, ty); if ty == normalized { return Err(LayoutError::Unknown(ty)); } tcx.layout_raw(param_env.and(normalized))? } - ty::TyParam(_) => { - return Err(LayoutError::Unknown(ty)); - } - ty::TyInfer(_) | ty::TyError => { + ty::TyGeneratorWitness(..) | ty::TyInfer(_) => { bug!("LayoutDetails::compute: unexpected type `{}`", ty) } + ty::TyParam(_) | ty::TyError => { + return Err(LayoutError::Unknown(ty)); + } }) } /// This is invoked by the `layout_raw` query to record the final /// layout of each type. #[inline] - fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - param_env: ty::ParamEnv<'tcx>, - layout: TyLayout<'tcx>) { + fn record_layout_for_printing(self, layout: TyLayout<'tcx>) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants - // to see the stuff resulting from the final trans session. + // to see the stuff resulting from the final codegen session. 
if - !tcx.sess.opts.debugging_opts.print_type_sizes || - ty.has_param_types() || - ty.has_self_ty() || - !param_env.caller_bounds.is_empty() + !self.tcx.sess.opts.debugging_opts.print_type_sizes || + layout.ty.has_param_types() || + layout.ty.has_self_ty() || + !self.param_env.caller_bounds.is_empty() { return; } - Self::record_layout_for_printing_outlined(tcx, ty, param_env, layout) + self.record_layout_for_printing_outlined(layout) } - fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - param_env: ty::ParamEnv<'tcx>, - layout: TyLayout<'tcx>) { - let cx = (tcx, param_env); + fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) { // (delay format until we actually need it) - let record = |kind, opt_discr_size, variants| { - let type_desc = format!("{:?}", ty); - tcx.sess.code_stats.borrow_mut().record_type_size(kind, - type_desc, - layout.align, - layout.size, - opt_discr_size, - variants); + let record = |kind, packed, opt_discr_size, variants| { + let type_desc = format!("{:?}", layout.ty); + self.tcx.sess.code_stats.borrow_mut().record_type_size(kind, + type_desc, + layout.align, + layout.size, + packed, + opt_discr_size, + variants); }; - let adt_def = match ty.sty { + let adt_def = match layout.ty.sty { ty::TyAdt(ref adt_def, _) => { - debug!("print-type-size t: `{:?}` process adt", ty); + debug!("print-type-size t: `{:?}` process adt", layout.ty); adt_def } ty::TyClosure(..) 
=> { - debug!("print-type-size t: `{:?}` record closure", ty); - record(DataTypeKind::Closure, None, vec![]); + debug!("print-type-size t: `{:?}` record closure", layout.ty); + record(DataTypeKind::Closure, false, None, vec![]); return; } _ => { - debug!("print-type-size t: `{:?}` skip non-nominal", ty); + debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty); return; } }; let adt_kind = adt_def.adt_kind(); + let adt_packed = adt_def.repr.packed(); let build_variant_info = |n: Option, flds: &[ast::Name], layout: TyLayout<'tcx>| { - let mut min_size = Size::from_bytes(0); + let mut min_size = Size::ZERO; let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| { - match layout.field(cx, i) { + match layout.field(self, i) { Err(err) => { bug!("no layout found for field {}: `{:?}`", name, err); } @@ -1792,8 +1222,9 @@ impl<'a, 'tcx> LayoutDetails { if !adt_def.variants.is_empty() { let variant_def = &adt_def.variants[index]; let fields: Vec<_> = - variant_def.fields.iter().map(|f| f.name).collect(); + variant_def.fields.iter().map(|f| f.ident.name).collect(); record(adt_kind.into(), + adt_packed, None, vec![build_variant_info(Some(variant_def.name), &fields, @@ -1801,25 +1232,25 @@ impl<'a, 'tcx> LayoutDetails { } else { // (This case arises for *empty* enums; so give it // zero variants.) - record(adt_kind.into(), None, vec![]); + record(adt_kind.into(), adt_packed, None, vec![]); } } Variants::NicheFilling { .. } | Variants::Tagged { .. 
} => { debug!("print-type-size `{:#?}` adt general variants def {}", - ty, adt_def.variants.len()); + layout.ty, adt_def.variants.len()); let variant_infos: Vec<_> = adt_def.variants.iter().enumerate().map(|(i, variant_def)| { let fields: Vec<_> = - variant_def.fields.iter().map(|f| f.name).collect(); + variant_def.fields.iter().map(|f| f.ident.name).collect(); build_variant_info(Some(variant_def.name), &fields, - layout.for_variant(cx, i)) + layout.for_variant(self, i)) }) .collect(); - record(adt_kind.into(), match layout.variants { - Variants::Tagged { ref discr, .. } => Some(discr.value.size(tcx)), + record(adt_kind.into(), adt_packed, match layout.variants { + Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)), _ => None }, variant_infos); } @@ -1852,10 +1283,10 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Result, LayoutError<'tcx>> { - assert!(!ty.has_infer_types()); + debug_assert!(!ty.has_infer_types()); // First try computing a static layout. - let err = match (tcx, param_env).layout_of(ty) { + let err = match tcx.layout_of(param_env.and(ty)) { Ok(layout) => { return Ok(SizeSkeleton::Known(layout.size)); } @@ -1863,13 +1294,13 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { }; match ty.sty { - ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | + ty::TyRef(_, pointee, _) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { let non_zero = !ty.is_unsafe_ptr(); let tail = tcx.struct_tail(pointee); match tail.sty { ty::TyParam(_) | ty::TyProjection(_) => { - assert!(tail.has_param_types() || tail.has_self_ty()); + debug_assert!(tail.has_param_types() || tail.has_self_ty()); Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(&tail) @@ -1943,7 +1374,7 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { } ty::TyProjection(_) | ty::TyAnon(..) 
=> { - let normalized = tcx.normalize_associated_type_in_env(&ty, param_env); + let normalized = tcx.normalize_erasing_regions(param_env, ty); if ty == normalized { Err(err) } else { @@ -1965,26 +1396,6 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> { } } -/// The details of the layout of a type, alongside the type itself. -/// Provides various type traversal APIs (e.g. recursing into fields). -/// -/// Note that the details are NOT guaranteed to always be identical -/// to those obtained from `layout_of(ty)`, as we need to produce -/// layouts for which Rust types do not exist, such as enum variants -/// or synthetic fields of enums (i.e. discriminants) and fat pointers. -#[derive(Copy, Clone, Debug)] -pub struct TyLayout<'tcx> { - pub ty: Ty<'tcx>, - details: &'tcx LayoutDetails -} - -impl<'tcx> Deref for TyLayout<'tcx> { - type Target = &'tcx LayoutDetails; - fn deref(&self) -> &&'tcx LayoutDetails { - &self.details - } -} - pub trait HasTyCtxt<'tcx>: HasDataLayout { fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>; } @@ -2001,15 +1412,15 @@ impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> { } } -impl<'a, 'gcx, 'tcx, T: Copy> HasDataLayout for (TyCtxt<'a, 'gcx, 'tcx>, T) { +impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> { fn data_layout(&self) -> &TargetDataLayout { - self.0.data_layout() + self.tcx.data_layout() } } -impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) { +impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { - self.0.tcx() + self.tcx.tcx() } } @@ -2036,23 +1447,18 @@ impl MaybeResult for Result { } } -pub trait LayoutOf { - type TyLayout; +pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>; - fn layout_of(self, ty: T) -> Self::TyLayout; -} - -impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) { +impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { + type Ty = Ty<'tcx>; 
type TyLayout = Result, LayoutError<'tcx>>; /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. - #[inline] fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - let (tcx, param_env) = self; - - let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); - let details = tcx.layout_raw(param_env.reveal_all().and(ty))?; + let param_env = self.param_env.with_reveal_all(); + let ty = self.tcx.normalize_erasing_regions(param_env, ty); + let details = self.tcx.layout_raw(param_env.and(ty))?; let layout = TyLayout { ty, details @@ -2062,26 +1468,24 @@ impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx // can however trigger recursive invocations of `layout_of`. // Therefore, we execute it *after* the main query has // completed, to avoid problems around recursive structures - // and the like. (Admitedly, I wasn't able to reproduce a problem + // and the like. (Admittedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - LayoutDetails::record_layout_for_printing(tcx, ty, param_env, layout); + self.record_layout_for_printing(layout); Ok(layout) } } -impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, - ty::ParamEnv<'tcx>) { +impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> { + type Ty = Ty<'tcx>; type TyLayout = Result, LayoutError<'tcx>>; /// Computes the layout of a type. Note that this implicitly /// executes in "reveal all" mode. 
- #[inline] fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - let (tcx_at, param_env) = self; - - let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); - let details = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; + let param_env = self.param_env.with_reveal_all(); + let ty = self.tcx.normalize_erasing_regions(param_env, ty); + let details = self.tcx.layout_raw(param_env.and(ty))?; let layout = TyLayout { ty, details @@ -2091,36 +1495,74 @@ impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, // can however trigger recursive invocations of `layout_of`. // Therefore, we execute it *after* the main query has // completed, to avoid problems around recursive structures - // and the like. (Admitedly, I wasn't able to reproduce a problem + // and the like. (Admittedly, I wasn't able to reproduce a problem // here, but it seems like the right thing to do. -nmatsakis) - LayoutDetails::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); + let cx = LayoutCx { + tcx: *self.tcx, + param_env: self.param_env + }; + cx.record_layout_for_printing(layout); Ok(layout) } } -impl<'a, 'tcx> TyLayout<'tcx> { - pub fn for_variant(&self, cx: C, variant_index: usize) -> Self - where C: LayoutOf> + HasTyCtxt<'tcx>, - C::TyLayout: MaybeResult> - { - let details = match self.variants { - Variants::Single { index } if index == variant_index => self.details, +// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users. +impl TyCtxt<'a, 'tcx, '_> { + /// Computes the layout of a type. Note that this implicitly + /// executes in "reveal all" mode. + #[inline] + pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) + -> Result, LayoutError<'tcx>> { + let cx = LayoutCx { + tcx: self.global_tcx(), + param_env: param_env_and_ty.param_env + }; + cx.layout_of(param_env_and_ty.value) + } +} + +impl ty::query::TyCtxtAt<'a, 'tcx, '_> { + /// Computes the layout of a type. 
Note that this implicitly + /// executes in "reveal all" mode. + #[inline] + pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) + -> Result, LayoutError<'tcx>> { + let cx = LayoutCx { + tcx: self.global_tcx().at(self.span), + param_env: param_env_and_ty.param_env + }; + cx.layout_of(param_env_and_ty.value) + } +} + +impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> + where C: LayoutOf> + HasTyCtxt<'tcx>, + C::TyLayout: MaybeResult> +{ + fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> { + let details = match this.variants { + Variants::Single { index } if index == variant_index => this.details, Variants::Single { index } => { // Deny calling for_variant more than once for non-Single enums. - cx.layout_of(self.ty).map_same(|layout| { + cx.layout_of(this.ty).map_same(|layout| { assert_eq!(layout.variants, Variants::Single { index }); layout }); - let fields = match self.ty.sty { + let fields = match this.ty.sty { ty::TyAdt(def, _) => def.variants[variant_index].fields.len(), _ => bug!() }; - let mut details = LayoutDetails::uninhabited(fields); - details.variants = Variants::Single { index: variant_index }; - cx.tcx().intern_layout(details) + let tcx = cx.tcx(); + tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: variant_index }, + fields: FieldPlacement::Union(fields), + abi: Abi::Uninhabited, + align: tcx.data_layout.i8_align, + size: Size::ZERO + }) } Variants::NicheFilling { ref variants, .. 
} | @@ -2132,17 +1574,14 @@ impl<'a, 'tcx> TyLayout<'tcx> { assert_eq!(details.variants, Variants::Single { index: variant_index }); TyLayout { - ty: self.ty, + ty: this.ty, details } } - pub fn field(&self, cx: C, i: usize) -> C::TyLayout - where C: LayoutOf> + HasTyCtxt<'tcx>, - C::TyLayout: MaybeResult> - { + fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout { let tcx = cx.tcx(); - cx.layout_of(match self.ty.sty { + cx.layout_of(match this.ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | @@ -2151,15 +1590,16 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyFnPtr(_) | ty::TyNever | ty::TyFnDef(..) | - ty::TyDynamic(..) | - ty::TyForeign(..) => { - bug!("TyLayout::field_type({:?}): not applicable", self) + ty::TyGeneratorWitness(..) | + ty::TyForeign(..) | + ty::TyDynamic(..) => { + bug!("TyLayout::field_type({:?}): not applicable", this) } // Potentially-fat pointers. - ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | + ty::TyRef(_, pointee, _) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - assert!(i < 2); + assert!(i < this.fields.count()); // Reuse the fat *T type as its own thin pointer data field. // This provides information about e.g. DST struct pointees @@ -2167,13 +1607,13 @@ impl<'a, 'tcx> TyLayout<'tcx> { // as the `Abi` or `FieldPlacement` is checked by users. if i == 0 { let nil = tcx.mk_nil(); - let ptr_ty = if self.ty.is_unsafe_ptr() { + let ptr_ty = if this.ty.is_unsafe_ptr() { tcx.mk_mut_ptr(nil) } else { tcx.mk_mut_ref(tcx.types.re_static, nil) }; return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| { - ptr_layout.ty = self.ty; + ptr_layout.ty = this.ty; ptr_layout }); } @@ -2181,12 +1621,27 @@ impl<'a, 'tcx> TyLayout<'tcx> { match tcx.struct_tail(pointee).sty { ty::TySlice(_) | ty::TyStr => tcx.types.usize, - ty::TyDynamic(..) => { - // FIXME(eddyb) use an usize/fn() array with - // the correct number of vtables slots. 
- tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil()) + ty::TyDynamic(data, _) => { + let trait_def_id = data.principal().unwrap().def_id(); + let num_fns: u64 = crate::traits::supertrait_def_ids(tcx, trait_def_id) + .map(|trait_def_id| { + tcx.associated_items(trait_def_id) + .filter(|item| item.kind == ty::AssociatedKind::Method) + .count() as u64 + }) + .sum(); + tcx.mk_imm_ref( + tcx.types.re_static, + tcx.mk_array(tcx.types.usize, 3 + num_fns), + ) + /* FIXME use actual fn pointers + tcx.mk_tup(&[ + tcx.mk_array(tcx.types.usize, 3), + tcx.mk_array(Option), + ]) + */ } - _ => bug!("TyLayout::field_type({:?}): not applicable", self) + _ => bug!("TyLayout::field_type({:?}): not applicable", this) } } @@ -2204,22 +1659,22 @@ impl<'a, 'tcx> TyLayout<'tcx> { substs.field_tys(def_id, tcx).nth(i).unwrap() } - ty::TyTuple(tys, _) => tys[i], + ty::TyTuple(tys) => tys[i], // SIMD vector types. ty::TyAdt(def, ..) if def.repr.simd() => { - self.ty.simd_type(tcx) + this.ty.simd_type(tcx) } // ADTs. ty::TyAdt(def, substs) => { - match self.variants { + match this.variants { Variants::Single { index } => { def.variants[index].fields[i].ty(tcx, substs) } // Discriminant field for enums (where applicable). - Variants::Tagged { ref discr, .. } | + Variants::Tagged { tag: ref discr, .. } | Variants::NicheFilling { niche: ref discr, .. } => { assert_eq!(i, 0); let layout = LayoutDetails::scalar(tcx, discr.clone()); @@ -2233,117 +1688,123 @@ impl<'a, 'tcx> TyLayout<'tcx> { ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) | ty::TyInfer(_) | ty::TyError => { - bug!("TyLayout::field_type: unexpected type `{}`", self.ty) + bug!("TyLayout::field_type: unexpected type `{}`", this.ty) } }) } +} - /// Returns true if the layout corresponds to an unsized type. - pub fn is_unsized(&self) -> bool { - self.abi.is_unsized() - } +struct Niche { + offset: Size, + scalar: Scalar, + available: u128, +} - /// Returns true if the type is a ZST and not unsized. 
- pub fn is_zst(&self) -> bool { - match self.abi { - Abi::Uninhabited => true, - Abi::Scalar(_) | - Abi::ScalarPair(..) | - Abi::Vector { .. } => false, - Abi::Aggregate { sized } => sized && self.size.bytes() == 0 +impl Niche { + fn reserve<'a, 'tcx>( + &self, + cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>, + count: u128, + ) -> Option<(u128, Scalar)> { + if count > self.available { + return None; } + let Scalar { value, valid_range: ref v } = self.scalar; + let bits = value.size(cx).bits(); + assert!(bits <= 128); + let max_value = !0u128 >> (128 - bits); + let start = v.end().wrapping_add(1) & max_value; + let end = v.end().wrapping_add(count) & max_value; + Some((start, Scalar { value, valid_range: *v.start()..=end })) } +} - pub fn size_and_align(&self) -> (Size, Align) { - (self.size, self.align) - } - +impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// Find the offset of a niche leaf field, starting from - /// the given type and recursing through aggregates, which - /// has at least `count` consecutive invalid values. - /// The tuple is `(offset, scalar, niche_value)`. + /// the given type and recursing through aggregates. // FIXME(eddyb) traverse already optimized enums. - fn find_niche(&self, cx: C, count: u128) - -> Result, LayoutError<'tcx>> - where C: LayoutOf, TyLayout = Result>> + - HasTyCtxt<'tcx> - { - let scalar_component = |scalar: &Scalar, offset| { + fn find_niche(self, layout: TyLayout<'tcx>) -> Result, LayoutError<'tcx>> { + let scalar_niche = |scalar: &Scalar, offset| { let Scalar { value, valid_range: ref v } = *scalar; - let bits = value.size(cx).bits(); + let bits = value.size(self).bits(); assert!(bits <= 128); let max_value = !0u128 >> (128 - bits); // Find out how many values are outside the valid range. 
- let niches = if v.start <= v.end { - v.start + (max_value - v.end) + let available = if v.start() <= v.end() { + v.start() + (max_value - v.end()) } else { - v.start - v.end - 1 + v.start() - v.end() - 1 }; - // Give up if we can't fit `count` consecutive niches. - if count > niches { + // Give up if there is no niche value available. + if available == 0 { return None; } - let niche_start = v.end.wrapping_add(1) & max_value; - let niche_end = v.end.wrapping_add(count) & max_value; - Some((offset, Scalar { - value, - valid_range: v.start..=niche_end - }, niche_start)) + Some(Niche { offset, scalar: scalar.clone(), available }) }; // Locals variables which live across yields are stored // in the generator type as fields. These may be uninitialized // so we don't look for niches there. - if let ty::TyGenerator(..) = self.ty.sty { + if let ty::TyGenerator(..) = layout.ty.sty { return Ok(None); } - match self.abi { + match layout.abi { Abi::Scalar(ref scalar) => { - return Ok(scalar_component(scalar, Size::from_bytes(0))); + return Ok(scalar_niche(scalar, Size::ZERO)); } Abi::ScalarPair(ref a, ref b) => { - return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| { - scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx))) - })); + // HACK(nox): We iter on `b` and then `a` because `max_by_key` + // returns the last maximum. + let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self)))) + .chain(iter::once((a, Size::ZERO))) + .filter_map(|(scalar, offset)| scalar_niche(scalar, offset)) + .max_by_key(|niche| niche.available); + return Ok(niche); } Abi::Vector { ref element, .. } => { - return Ok(scalar_component(element, Size::from_bytes(0))); + return Ok(scalar_niche(element, Size::ZERO)); } _ => {} } // Perhaps one of the fields is non-zero, let's recurse and find out. 
- if let FieldPlacement::Union(_) = self.fields { + if let FieldPlacement::Union(_) = layout.fields { // Only Rust enums have safe-to-inspect fields // (a discriminant), other unions are unsafe. - if let Variants::Single { .. } = self.variants { + if let Variants::Single { .. } = layout.variants { return Ok(None); } } - if let FieldPlacement::Array { .. } = self.fields { - if self.fields.count() > 0 { - return self.field(cx, 0)?.find_niche(cx, count); + if let FieldPlacement::Array { .. } = layout.fields { + if layout.fields.count() > 0 { + return self.find_niche(layout.field(self, 0)?); + } else { + return Ok(None); } } - for i in 0..self.fields.count() { - let r = self.field(cx, i)?.find_niche(cx, count)?; - if let Some((offset, scalar, niche_value)) = r { - let offset = self.fields.offset(i) + offset; - return Ok(Some((offset, scalar, niche_value))); + let mut niche = None; + let mut available = 0; + for i in 0..layout.fields.count() { + if let Some(mut c) = self.find_niche(layout.field(self, i)?)? 
{ + if c.available > available { + available = c.available; + c.offset += layout.fields.offset(i); + niche = Some(c); + } } } - Ok(None) + Ok(niche) } } -impl<'gcx> HashStable> for Variants { +impl<'a> HashStable> for Variants { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use ty::layout::Variants::*; mem::discriminant(self).hash_stable(hcx, hasher); @@ -2353,22 +1814,22 @@ impl<'gcx> HashStable> for Variants { index.hash_stable(hcx, hasher); } Tagged { - ref discr, + ref tag, ref variants, } => { - discr.hash_stable(hcx, hasher); + tag.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); } NicheFilling { dataful_variant, - niche_variants: RangeInclusive { start, end }, + ref niche_variants, ref niche, niche_start, ref variants, } => { dataful_variant.hash_stable(hcx, hasher); - start.hash_stable(hcx, hasher); - end.hash_stable(hcx, hasher); + niche_variants.start().hash_stable(hcx, hasher); + niche_variants.end().hash_stable(hcx, hasher); niche.hash_stable(hcx, hasher); niche_start.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); @@ -2377,9 +1838,9 @@ impl<'gcx> HashStable> for Variants { } } -impl<'gcx> HashStable> for FieldPlacement { +impl<'a> HashStable> for FieldPlacement { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use ty::layout::FieldPlacement::*; mem::discriminant(self).hash_stable(hcx, hasher); @@ -2400,9 +1861,9 @@ impl<'gcx> HashStable> for FieldPlacement { } } -impl<'gcx> HashStable> for Abi { +impl<'a> HashStable> for Abi { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { use ty::layout::Abi::*; mem::discriminant(self).hash_stable(hcx, hasher); @@ -2427,14 +1888,14 @@ impl<'gcx> HashStable> for Abi { } } -impl<'gcx> HashStable> for Scalar { +impl<'a> HashStable> for Scalar { fn 
hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - let Scalar { value, valid_range: RangeInclusive { start, end } } = *self; + let Scalar { value, ref valid_range } = *self; value.hash_stable(hcx, hasher); - start.hash_stable(hcx, hasher); - end.hash_stable(hcx, hasher); + valid_range.start().hash_stable(hcx, hasher); + valid_range.end().hash_stable(hcx, hasher); } } @@ -2456,25 +1917,32 @@ impl_stable_hash_for!(enum ::ty::layout::Integer { impl_stable_hash_for!(enum ::ty::layout::Primitive { Int(integer, signed), - F32, - F64, + Float(fty), Pointer }); -impl_stable_hash_for!(struct ::ty::layout::Align { - abi, - pref -}); - -impl_stable_hash_for!(struct ::ty::layout::Size { - raw -}); - -impl<'gcx> HashStable> for LayoutError<'gcx> -{ +impl<'gcx> HashStable> for Align { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { + self.abi().hash_stable(hcx, hasher); + self.pref().hash_stable(hcx, hasher); + } +} + +impl<'gcx> HashStable> for Size { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + self.bytes().hash_stable(hcx, hasher); + } +} + +impl<'a, 'gcx> HashStable> for LayoutError<'gcx> +{ + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { use ty::layout::LayoutError::*; mem::discriminant(self).hash_stable(hcx, hasher); diff --git a/src/librustc/ty/maps/README.md b/src/librustc/ty/maps/README.md deleted file mode 100644 index 8207c18e6779..000000000000 --- a/src/librustc/ty/maps/README.md +++ /dev/null @@ -1,302 +0,0 @@ -# The Rust Compiler Query System - -The Compiler Query System is the key to our new demand-driven -organization. The idea is pretty simple. 
You have various queries -that compute things about the input -- for example, there is a query -called `type_of(def_id)` that, given the def-id of some item, will -compute the type of that item and return it to you. - -Query execution is **memoized** -- so the first time you invoke a -query, it will go do the computation, but the next time, the result is -returned from a hashtable. Moreover, query execution fits nicely into -**incremental computation**; the idea is roughly that, when you do a -query, the result **may** be returned to you by loading stored data -from disk (but that's a separate topic we won't discuss further here). - -The overall vision is that, eventually, the entire compiler -control-flow will be query driven. There will effectively be one -top-level query ("compile") that will run compilation on a crate; this -will in turn demand information about that crate, starting from the -*end*. For example: - -- This "compile" query might demand to get a list of codegen-units - (i.e., modules that need to be compiled by LLVM). -- But computing the list of codegen-units would invoke some subquery - that returns the list of all modules defined in the Rust source. -- That query in turn would invoke something asking for the HIR. -- This keeps going further and further back until we wind up doing the - actual parsing. - -However, that vision is not fully realized. Still, big chunks of the -compiler (for example, generating MIR) work exactly like this. - -### Invoking queries - -To invoke a query is simple. The tcx ("type context") offers a method -for each defined query. So, for example, to invoke the `type_of` -query, you would just do this: - -```rust -let ty = tcx.type_of(some_def_id); -``` - -### Cycles between queries - -Currently, cycles during query execution should always result in a -compilation error. 
Typically, they arise because of illegal programs -that contain cyclic references they shouldn't (though sometimes they -arise because of compiler bugs, in which case we need to factor our -queries in a more fine-grained fashion to avoid them). - -However, it is nonetheless often useful to *recover* from a cycle -(after reporting an error, say) and try to soldier on, so as to give a -better user experience. In order to recover from a cycle, you don't -get to use the nice method-call-style syntax. Instead, you invoke -using the `try_get` method, which looks roughly like this: - -```rust -use ty::maps::queries; -... -match queries::type_of::try_get(tcx, DUMMY_SP, self.did) { - Ok(result) => { - // no cycle occurred! You can use `result` - } - Err(err) => { - // A cycle occurred! The error value `err` is a `DiagnosticBuilder`, - // meaning essentially an "in-progress", not-yet-reported error message. - // See below for more details on what to do here. - } -} -``` - -So, if you get back an `Err` from `try_get`, then a cycle *did* occur. This means that -you must ensure that a compiler error message is reported. You can do that in two ways: - -The simplest is to invoke `err.emit()`. This will emit the cycle error to the user. - -However, often cycles happen because of an illegal program, and you -know at that point that an error either already has been reported or -will be reported due to this cycle by some other bit of code. In that -case, you can invoke `err.cancel()` to not emit any error. It is -traditional to then invoke: - -``` -tcx.sess.delay_span_bug(some_span, "some message") -``` - -`delay_span_bug()` is a helper that says: we expect a compilation -error to have happened or to happen in the future; so, if compilation -ultimately succeeds, make an ICE with the message `"some -message"`. This is basically just a precaution in case you are wrong. - -### How the compiler executes a query - -So you may be wondering what happens when you invoke a query -method. 
The answer is that, for each query, the compiler maintains a -cache -- if your query has already been executed, then, the answer is -simple: we clone the return value out of the cache and return it -(therefore, you should try to ensure that the return types of queries -are cheaply cloneable; insert a `Rc` if necessary). - -#### Providers - -If, however, the query is *not* in the cache, then the compiler will -try to find a suitable **provider**. A provider is a function that has -been defined and linked into the compiler somewhere that contains the -code to compute the result of the query. - -**Providers are defined per-crate.** The compiler maintains, -internally, a table of providers for every crate, at least -conceptually. Right now, there are really two sets: the providers for -queries about the **local crate** (that is, the one being compiled) -and providers for queries about **external crates** (that is, -dependencies of the local crate). Note that what determines the crate -that a query is targeting is not the *kind* of query, but the *key*. -For example, when you invoke `tcx.type_of(def_id)`, that could be a -local query or an external query, depending on what crate the `def_id` -is referring to (see the `self::keys::Key` trait for more information -on how that works). - -Providers always have the same signature: - -```rust -fn provider<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx, 'tcx>, - key: QUERY_KEY) - -> QUERY_RESULT -{ - ... -} -``` - -Providers take two arguments: the `tcx` and the query key. Note also -that they take the *global* tcx (i.e., they use the `'tcx` lifetime -twice), rather than taking a tcx with some active inference context. -They return the result of the query. - -#### How providers are setup - -When the tcx is created, it is given the providers by its creator using -the `Providers` struct. 
This struct is generate by the macros here, but it -is basically a big list of function pointers: - -```rust -struct Providers { - type_of: for<'cx, 'tcx> fn(TyCtxt<'cx, 'tcx, 'tcx>, DefId) -> Ty<'tcx>, - ... -} -``` - -At present, we have one copy of the struct for local crates, and one -for external crates, though the plan is that we may eventually have -one per crate. - -These `Provider` structs are ultimately created and populated by -`librustc_driver`, but it does this by distributing the work -throughout the other `rustc_*` crates. This is done by invoking -various `provide` functions. These functions tend to look something -like this: - -```rust -pub fn provide(providers: &mut Providers) { - *providers = Providers { - type_of, - ..*providers - }; -} -``` - -That is, they take an `&mut Providers` and mutate it in place. Usually -we use the formulation above just because it looks nice, but you could -as well do `providers.type_of = type_of`, which would be equivalent. -(Here, `type_of` would be a top-level function, defined as we saw -before.) So, if we want to add a provider for some other query, -let's call it `fubar`, into the crate above, we might modify the `provide()` -function like so: - -```rust -pub fn provide(providers: &mut Providers) { - *providers = Providers { - type_of, - fubar, - ..*providers - }; -} - -fn fubar<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx>, key: DefId) -> Fubar<'tcx> { .. } -``` - -NB. Most of the `rustc_*` crates only provide **local -providers**. Almost all **extern providers** wind up going through the -`rustc_metadata` crate, which loads the information from the crate -metadata. But in some cases there are crates that provide queries for -*both* local and external crates, in which case they define both a -`provide` and a `provide_extern` function that `rustc_driver` can -invoke. - -### Adding a new kind of query - -So suppose you want to add a new kind of query, how do you do so? -Well, defining a query takes place in two steps: - -1. 
first, you have to specify the query name and arguments; and then, -2. you have to supply query providers where needed. - -To specify the query name and arguments, you simply add an entry -to the big macro invocation in `mod.rs`. This will probably have changed -by the time you read this README, but at present it looks something -like: - -``` -define_maps! { <'tcx> - /// Records the type of every item. - [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, - - ... -} -``` - -Each line of the macro defines one query. The name is broken up like this: - -``` -[] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, -^^ ^^^^^^^ ^^^^^^^^^^ ^^^^^ ^^^^^^^^ -| | | | | -| | | | result type of query -| | | query key type -| | dep-node constructor -| name of query -query flags -``` - -Let's go over them one by one: - -- **Query flags:** these are largely unused right now, but the intention - is that we'll be able to customize various aspects of how the query is - processed. -- **Name of query:** the name of the query method - (`tcx.type_of(..)`). Also used as the name of a struct - (`ty::maps::queries::type_of`) that will be generated to represent - this query. -- **Dep-node constructor:** indicates the constructor function that - connects this query to incremental compilation. Typically, this is a - `DepNode` variant, which can be added by modifying the - `define_dep_nodes!` macro invocation in - `librustc/dep_graph/dep_node.rs`. - - However, sometimes we use a custom function, in which case the - name will be in snake case and the function will be defined at the - bottom of the file. This is typically used when the query key is - not a def-id, or just not the type that the dep-node expects. -- **Query key type:** the type of the argument to this query. - This type must implement the `ty::maps::keys::Key` trait, which - defines (for example) how to map it to a crate, and so forth. -- **Result type of query:** the type produced by this query. 
This type - should (a) not use `RefCell` or other interior mutability and (b) be - cheaply cloneable. Interning or using `Rc` or `Arc` is recommended for - non-trivial data types. - - The one exception to those rules is the `ty::steal::Steal` type, - which is used to cheaply modify MIR in place. See the definition - of `Steal` for more details. New uses of `Steal` should **not** be - added without alerting `@rust-lang/compiler`. - -So, to add a query: - -- Add an entry to `define_maps!` using the format above. -- Possibly add a corresponding entry to the dep-node macro. -- Link the provider by modifying the appropriate `provide` method; - or add a new one if needed and ensure that `rustc_driver` is invoking it. - -#### Query structs and descriptions - -For each kind, the `define_maps` macro will generate a "query struct" -named after the query. This struct is a kind of a place-holder -describing the query. Each such struct implements the -`self::config::QueryConfig` trait, which has associated types for the -key/value of that particular query. Basically the code generated looks something -like this: - -```rust -// Dummy struct representing a particular kind of query: -pub struct type_of<'tcx> { phantom: PhantomData<&'tcx ()> } - -impl<'tcx> QueryConfig for type_of<'tcx> { - type Key = DefId; - type Value = Ty<'tcx>; -} -``` - -There is an additional trait that you may wish to implement called -`self::config::QueryDescription`. This trait is used during cycle -errors to give a "human readable" name for the query, so that we can -summarize what was happening when the cycle occurred. Implementing -this trait is optional if the query key is `DefId`, but if you *don't* -implement it, you get a pretty generic error ("processing `foo`..."). -You can put new impls into the `config` module. 
They look something like this: - -```rust -impl<'tcx> QueryDescription for queries::type_of<'tcx> { - fn describe(tcx: TyCtxt, key: DefId) -> String { - format!("computing the type of `{}`", tcx.item_path_str(key)) - } -} -``` - diff --git a/src/librustc/ty/maps/config.rs b/src/librustc/ty/maps/config.rs deleted file mode 100644 index 8dedcb24c2fb..000000000000 --- a/src/librustc/ty/maps/config.rs +++ /dev/null @@ -1,664 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use dep_graph::SerializedDepNodeIndex; -use hir::def_id::{CrateNum, DefId, DefIndex}; -use ty::{self, Ty, TyCtxt}; -use ty::maps::queries; -use ty::subst::Substs; - -use std::hash::Hash; -use syntax_pos::symbol::InternedString; - -/// Query configuration and description traits. 
- -pub trait QueryConfig { - type Key: Eq + Hash + Clone; - type Value; -} - -pub(super) trait QueryDescription<'tcx>: QueryConfig { - fn describe(tcx: TyCtxt, key: Self::Key) -> String; - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - false - } - - fn try_load_from_disk(_: TyCtxt<'_, 'tcx, 'tcx>, - _: SerializedDepNodeIndex) - -> Option { - bug!("QueryDescription::load_from_disk() called for an unsupported query.") - } -} - -impl<'tcx, M: QueryConfig> QueryDescription<'tcx> for M { - default fn describe(tcx: TyCtxt, def_id: DefId) -> String { - if !tcx.sess.verbose() { - format!("processing `{}`", tcx.item_path_str(def_id)) - } else { - let name = unsafe { ::std::intrinsics::type_name::() }; - format!("processing `{}` applied to `{:?}`", name, def_id) - } - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_copy_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` is `Copy`", env.value) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_sized_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` is `Sized`", env.value) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_freeze_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` is freeze", env.value) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::needs_drop_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing whether `{}` needs drop", env.value) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::layout_raw<'tcx> { - fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { - format!("computing layout of `{}`", env.value) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::super_predicates_of<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("computing the 
supertraits of `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::erase_regions_ty<'tcx> { - fn describe(_tcx: TyCtxt, ty: Ty<'tcx>) -> String { - format!("erasing regions from `{:?}`", ty) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_param_predicates<'tcx> { - fn describe(tcx: TyCtxt, (_, def_id): (DefId, DefId)) -> String { - let id = tcx.hir.as_local_node_id(def_id).unwrap(); - format!("computing the bounds for type parameter `{}`", - tcx.hir.ty_param_name(id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::coherent_trait<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("coherence checking all impls of trait `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_inherent_impls<'tcx> { - fn describe(_: TyCtxt, k: CrateNum) -> String { - format!("all inherent impls defined in crate `{:?}`", k) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_inherent_impls_overlap_check<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("check for overlap between inherent impls defined in this crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_variances<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("computing the variances for items in this crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::mir_shims<'tcx> { - fn describe(tcx: TyCtxt, def: ty::InstanceDef<'tcx>) -> String { - format!("generating MIR shim for `{}`", - tcx.item_path_str(def.def_id())) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::privacy_access_levels<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("privacy access levels") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::typeck_item_bodies<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("type-checking all item bodies") - } -} - -impl<'tcx> QueryDescription<'tcx> for 
queries::reachable_set<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("reachability") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::const_eval<'tcx> { - fn describe(tcx: TyCtxt, key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) -> String { - format!("const-evaluating `{}`", tcx.item_path_str(key.value.0)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::mir_keys<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - format!("getting a list of all mir_keys") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::symbol_name<'tcx> { - fn describe(_tcx: TyCtxt, instance: ty::Instance<'tcx>) -> String { - format!("computing the symbol for `{}`", instance) - } - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.on_disk_query_result_cache.try_load_query_result(tcx, id) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::describe_def<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("describe_def") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::def_span<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("def_span") - } -} - - -impl<'tcx> QueryDescription<'tcx> for queries::lookup_stability<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("stability") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::lookup_deprecation_entry<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("deprecation") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::item_attrs<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("item_attrs") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_exported_symbol<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("is_exported_symbol") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::fn_arg_names<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - 
bug!("fn_arg_names") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::impl_parent<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("impl_parent") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::trait_of_item<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - bug!("trait_of_item") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::item_body_nested_bodies<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("nested item bodies of `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::const_is_rvalue_promotable_to_static<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("const checking if rvalue is promotable to static `{}`", - tcx.item_path_str(def_id)) - } - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.on_disk_query_result_cache.try_load_query_result(tcx, id) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::rvalue_promotable_map<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("checking which parts of `{}` are promotable to static", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_mir_available<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("checking if item is mir available: `{}`", - tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::trans_fulfill_obligation<'tcx> { - fn describe(tcx: TyCtxt, key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> String { - format!("checking if `{}` fulfills its obligations", tcx.item_path_str(key.1.def_id())) - } - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - 
tcx.on_disk_query_result_cache.try_load_query_result(tcx, id) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::trait_impls_of<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("trait impls of `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_object_safe<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("determine object safety of trait `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_const_fn<'tcx> { - fn describe(tcx: TyCtxt, def_id: DefId) -> String { - format!("checking if item is const fn: `{}`", tcx.item_path_str(def_id)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::dylib_dependency_formats<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - "dylib dependency formats of crate".to_string() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_panic_runtime<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - "checking if the crate is_panic_runtime".to_string() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_compiler_builtins<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - "checking if the crate is_compiler_builtins".to_string() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::has_global_allocator<'tcx> { - fn describe(_: TyCtxt, _: CrateNum) -> String { - "checking if the crate has_global_allocator".to_string() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::extern_crate<'tcx> { - fn describe(_: TyCtxt, _: DefId) -> String { - "getting crate's ExternCrateData".to_string() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::lint_levels<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("computing the lint levels for items in this crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::specializes<'tcx> { - fn describe(_tcx: TyCtxt, _: (DefId, DefId)) -> String { - format!("computing whether impls specialize one 
another") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::in_scope_traits_map<'tcx> { - fn describe(_tcx: TyCtxt, _: DefIndex) -> String { - format!("traits in scope at a block") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_no_builtins<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("test whether a crate has #![no_builtins]") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::panic_strategy<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("query a crate's configured panic strategy") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_profiler_runtime<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("query a crate is #![profiler_runtime]") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_sanitizer_runtime<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("query a crate is #![sanitizer_runtime]") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::exported_symbol_ids<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the exported symbols of a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::native_libraries<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the native libraries of a linked crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::plugin_registrar_fn<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the plugin registrar for a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::derive_registrar_fn<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the derive registrar for a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_disambiguator<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the disambiguator a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_hash<'tcx> { - fn 
describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the hash a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::original_crate_name<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the original name a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::implementations_of_trait<'tcx> { - fn describe(_tcx: TyCtxt, _: (CrateNum, DefId)) -> String { - format!("looking up implementations of a trait in a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::all_trait_implementations<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up all (?) trait implementations") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::link_args<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up link arguments for a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::resolve_lifetimes<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("resolving lifetimes") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::named_region_map<'tcx> { - fn describe(_tcx: TyCtxt, _: DefIndex) -> String { - format!("looking up a named region") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_late_bound_map<'tcx> { - fn describe(_tcx: TyCtxt, _: DefIndex) -> String { - format!("testing if a region is late boudn") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::object_lifetime_defaults_map<'tcx> { - fn describe(_tcx: TyCtxt, _: DefIndex) -> String { - format!("looking up lifetime defaults for a region") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::dep_kind<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("fetching what a dependency looks like") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_name<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("fetching what a crate is named") - } -} - -impl<'tcx> QueryDescription<'tcx> for 
queries::get_lang_items<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("calculating the lang items map") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::defined_lang_items<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("calculating the lang items defined in a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::missing_lang_items<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("calculating the missing lang items in a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::visible_parent_map<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("calculating the visible parent map") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::missing_extern_crate_item<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("seeing if we're missing an `extern crate` item for this crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::used_crate_source<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking at the source for a crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::postorder_cnums<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("generating a postorder list of CrateNums") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::maybe_unused_extern_crates<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up all possibly unused extern crates") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::stability_index<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("calculating the stability index for the local crate") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::all_crate_nums<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("fetching all foreign CrateNum instances") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::exported_symbols<'tcx> { - fn describe(_tcx: TyCtxt, _: 
CrateNum) -> String { - format!("exported_symbols") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::collect_and_partition_translation_items<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("collect_and_partition_translation_items") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::codegen_unit<'tcx> { - fn describe(_tcx: TyCtxt, _: InternedString) -> String { - format!("codegen_unit") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::compile_codegen_unit<'tcx> { - fn describe(_tcx: TyCtxt, _: InternedString) -> String { - format!("compile_codegen_unit") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::output_filenames<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("output_filenames") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::has_clone_closures<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("seeing if the crate has enabled `Clone` closures") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::vtable_methods<'tcx> { - fn describe(tcx: TyCtxt, key: ty::PolyTraitRef<'tcx> ) -> String { - format!("finding all methods for trait {}", tcx.item_path_str(key.def_id())) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::has_copy_closures<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("seeing if the crate has enabled `Copy` closures") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::fully_normalize_monormophic_ty<'tcx> { - fn describe(_tcx: TyCtxt, _: Ty) -> String { - format!("normalizing types") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::typeck_tables_of<'tcx> { - #[inline] - fn cache_on_disk(def_id: Self::Key) -> bool { - def_id.is_local() - } - - fn try_load_from_disk(tcx: TyCtxt<'_, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - let typeck_tables: Option> = tcx - .on_disk_query_result_cache - .try_load_query_result(tcx, id); - - typeck_tables.map(|tables| tcx.alloc_tables(tables)) - } -} - 
-impl<'tcx> QueryDescription<'tcx> for queries::optimized_mir<'tcx> { - #[inline] - fn cache_on_disk(def_id: Self::Key) -> bool { - def_id.is_local() - } - - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - let mir: Option<::mir::Mir<'tcx>> = tcx.on_disk_query_result_cache - .try_load_query_result(tcx, id); - mir.map(|x| tcx.alloc_mir(x)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::substitute_normalize_and_test_predicates<'tcx> { - fn describe(tcx: TyCtxt, key: (DefId, &'tcx Substs<'tcx>)) -> String { - format!("testing substituted normalized predicates:`{}`", tcx.item_path_str(key.0)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::target_features_whitelist<'tcx> { - fn describe(_tcx: TyCtxt, _: CrateNum) -> String { - format!("looking up the whitelist of target features") - } -} - -macro_rules! impl_disk_cacheable_query( - ($query_name:ident, |$key:tt| $cond:expr) => { - impl<'tcx> QueryDescription<'tcx> for queries::$query_name<'tcx> { - #[inline] - fn cache_on_disk($key: Self::Key) -> bool { - $cond - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.on_disk_query_result_cache.try_load_query_result(tcx, id) - } - } - } -); - -impl_disk_cacheable_query!(unsafety_check_result, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(borrowck, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(mir_borrowck, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(mir_const_qualif, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(check_match, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(contains_extern_indicator, |_| true); -impl_disk_cacheable_query!(def_symbol_name, |_| true); diff --git a/src/librustc/ty/maps/keys.rs b/src/librustc/ty/maps/keys.rs deleted file mode 100644 index b7b64c9761a8..000000000000 --- a/src/librustc/ty/maps/keys.rs +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 
2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Defines the set of legal keys that can be used in queries. - -use hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex}; -use ty::{self, Ty, TyCtxt}; -use ty::subst::Substs; -use ty::fast_reject::SimplifiedType; - -use std::fmt::Debug; -use std::hash::Hash; -use syntax_pos::{Span, DUMMY_SP}; -use syntax_pos::symbol::InternedString; - -/// The `Key` trait controls what types can legally be used as the key -/// for a query. -pub trait Key: Clone + Hash + Eq + Debug { - /// Given an instance of this key, what crate is it referring to? - /// This is used to find the provider. - fn map_crate(&self) -> CrateNum; - - /// In the event that a cycle occurs, if no explicit span has been - /// given for a query with key `self`, what span should we use? 
- fn default_span(&self, tcx: TyCtxt) -> Span; -} - -impl<'tcx> Key for ty::InstanceDef<'tcx> { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(self.def_id()) - } -} - -impl<'tcx> Key for ty::Instance<'tcx> { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(self.def_id()) - } -} - -impl Key for CrateNum { - fn map_crate(&self) -> CrateNum { - *self - } - fn default_span(&self, _: TyCtxt) -> Span { - DUMMY_SP - } -} - -impl Key for DefIndex { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - fn default_span(&self, _tcx: TyCtxt) -> Span { - DUMMY_SP - } -} - -impl Key for DefId { - fn map_crate(&self) -> CrateNum { - self.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(*self) - } -} - -impl Key for (DefId, DefId) { - fn map_crate(&self) -> CrateNum { - self.0.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.1.default_span(tcx) - } -} - -impl Key for (CrateNum, DefId) { - fn map_crate(&self) -> CrateNum { - self.0 - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.1.default_span(tcx) - } -} - -impl Key for (DefId, SimplifiedType) { - fn map_crate(&self) -> CrateNum { - self.0.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.0.default_span(tcx) - } -} - -impl<'tcx> Key for (DefId, &'tcx Substs<'tcx>) { - fn map_crate(&self) -> CrateNum { - self.0.krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.0.default_span(tcx) - } -} - -impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) { - fn map_crate(&self) -> CrateNum { - self.1.def_id().krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(self.1.def_id()) - } -} - -impl<'tcx> Key for ty::PolyTraitRef<'tcx>{ - fn map_crate(&self) -> CrateNum { - self.def_id().krate - } - fn default_span(&self, tcx: TyCtxt) -> Span { - tcx.def_span(self.def_id()) - } -} - -impl<'tcx> Key 
for Ty<'tcx> { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - fn default_span(&self, _: TyCtxt) -> Span { - DUMMY_SP - } -} - -impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> { - fn map_crate(&self) -> CrateNum { - self.value.map_crate() - } - fn default_span(&self, tcx: TyCtxt) -> Span { - self.value.default_span(tcx) - } -} - -impl Key for InternedString { - fn map_crate(&self) -> CrateNum { - LOCAL_CRATE - } - fn default_span(&self, _tcx: TyCtxt) -> Span { - DUMMY_SP - } -} diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs deleted file mode 100644 index e7e92b8a4288..000000000000 --- a/src/librustc/ty/maps/mod.rs +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use dep_graph::{DepConstructor, DepNode}; -use errors::DiagnosticBuilder; -use hir::def_id::{CrateNum, DefId, DefIndex}; -use hir::def::{Def, Export}; -use hir::{self, TraitCandidate, ItemLocalId}; -use hir::svh::Svh; -use lint; -use middle::borrowck::BorrowCheckResult; -use middle::const_val; -use middle::cstore::{ExternCrate, LinkagePreference, NativeLibrary, - ExternBodyNestedBodies}; -use middle::cstore::{NativeLibraryKind, DepKind, CrateSource, ExternConstBody}; -use middle::privacy::AccessLevels; -use middle::reachable::ReachableSet; -use middle::region; -use middle::resolve_lifetime::{ResolveLifetimes, Region, ObjectLifetimeDefault}; -use middle::stability::{self, DeprecationEntry}; -use middle::lang_items::{LanguageItems, LangItem}; -use middle::exported_symbols::SymbolExportLevel; -use mir::mono::{CodegenUnit, Stats}; -use mir; -use session::{CompileResult, CrateDisambiguator}; -use session::config::OutputFilenames; -use traits::Vtable; -use traits::specialization_graph; -use ty::{self, CrateInherentImpls, Ty, TyCtxt}; -use ty::steal::Steal; -use ty::subst::Substs; -use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet}; -use util::common::{profq_msg, ErrorReported, ProfileQueriesMsg}; - -use rustc_data_structures::indexed_set::IdxSetBuf; -use rustc_back::PanicStrategy; -use rustc_data_structures::indexed_vec::IndexVec; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_data_structures::stable_hasher::StableVec; - -use std::ops::Deref; -use std::rc::Rc; -use std::sync::Arc; -use syntax_pos::{Span, DUMMY_SP}; -use syntax_pos::symbol::InternedString; -use syntax::attr; -use syntax::ast; -use syntax::symbol::Symbol; - -#[macro_use] -mod plumbing; -use self::plumbing::*; -pub use self::plumbing::force_from_dep_node; - -mod keys; -pub use self::keys::Key; - -mod values; -use self::values::Value; - -mod config; -pub use self::config::QueryConfig; -use self::config::QueryDescription; - -mod on_disk_cache; -pub use 
self::on_disk_cache::OnDiskCache; - -// Each of these maps also corresponds to a method on a -// `Provider` trait for requesting a value of that type, -// and a method on `Maps` itself for doing that in a -// a way that memoizes and does dep-graph tracking, -// wrapping around the actual chain of providers that -// the driver creates (using several `rustc_*` crates). -define_maps! { <'tcx> - /// Records the type of every item. - [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, - - /// Maps from the def-id of an item (trait/struct/enum/fn) to its - /// associated generics and predicates. - [] fn generics_of: GenericsOfItem(DefId) -> &'tcx ty::Generics, - [] fn predicates_of: PredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, - - /// Maps from the def-id of a trait to the list of - /// super-predicates. This is a subset of the full list of - /// predicates. We store these in a separate map because we must - /// evaluate them even during type conversion, often before the - /// full predicates are available (note that supertraits have - /// additional acyclicity requirements). - [] fn super_predicates_of: SuperPredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, - - /// To avoid cycles within the predicates of a single item we compute - /// per-type-parameter predicates for resolving `T::AssocTy`. - [] fn type_param_predicates: type_param_predicates((DefId, DefId)) - -> ty::GenericPredicates<'tcx>, - - [] fn trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef, - [] fn adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef, - [] fn adt_destructor: AdtDestructor(DefId) -> Option, - [] fn adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>], - [] fn adt_dtorck_constraint: DtorckConstraint(DefId) -> ty::DtorckConstraint<'tcx>, - - /// True if this is a const fn - [] fn is_const_fn: IsConstFn(DefId) -> bool, - - /// True if this is a foreign item (i.e., linked via `extern { ... }`). 
- [] fn is_foreign_item: IsForeignItem(DefId) -> bool, - - /// Get a map with the variance of every item; use `item_variance` - /// instead. - [] fn crate_variances: crate_variances(CrateNum) -> Rc, - - /// Maps from def-id of a type or region parameter to its - /// (inferred) variance. - [] fn variances_of: ItemVariances(DefId) -> Rc>, - - /// Maps from def-id of a type to its (inferred) outlives. - [] fn inferred_outlives_of: InferredOutlivesOf(DefId) -> Vec>, - - /// Maps from an impl/trait def-id to a list of the def-ids of its items - [] fn associated_item_def_ids: AssociatedItemDefIds(DefId) -> Rc>, - - /// Maps from a trait item to the trait item "descriptor" - [] fn associated_item: AssociatedItems(DefId) -> ty::AssociatedItem, - - [] fn impl_trait_ref: ImplTraitRef(DefId) -> Option>, - [] fn impl_polarity: ImplPolarity(DefId) -> hir::ImplPolarity, - - /// Maps a DefId of a type to a list of its inherent impls. - /// Contains implementations of methods that are inherent to a type. - /// Methods in these implementations don't need to be exported. - [] fn inherent_impls: InherentImpls(DefId) -> Rc>, - - /// Set of all the def-ids in this crate that have MIR associated with - /// them. This includes all the body owners, but also things like struct - /// constructors. - [] fn mir_keys: mir_keys(CrateNum) -> Rc, - - /// Maps DefId's that have an associated Mir to the result - /// of the MIR qualify_consts pass. The actual meaning of - /// the value isn't known except to the pass itself. - [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Rc>), - - /// Fetch the MIR for a given def-id right after it's built - this includes - /// unreachable code. - [] fn mir_built: MirBuilt(DefId) -> &'tcx Steal>, - - /// Fetch the MIR for a given def-id up till the point where it is - /// ready for const evaluation. - /// - /// See the README for the `mir` module for details. 
- [] fn mir_const: MirConst(DefId) -> &'tcx Steal>, - - [] fn mir_validated: MirValidated(DefId) -> &'tcx Steal>, - - /// MIR after our optimization passes have run. This is MIR that is ready - /// for trans. This is also the only query that can fetch non-local MIR, at present. - [] fn optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>, - - /// The result of unsafety-checking this def-id. - [] fn unsafety_check_result: UnsafetyCheckResult(DefId) -> mir::UnsafetyCheckResult, - - /// HACK: when evaluated, this reports a "unsafe derive on repr(packed)" error - [] fn unsafe_derive_on_repr_packed: UnsafeDeriveOnReprPacked(DefId) -> (), - - /// The signature of functions and closures. - [] fn fn_sig: FnSignature(DefId) -> ty::PolyFnSig<'tcx>, - - /// Caches CoerceUnsized kinds for impls on custom types. - [] fn coerce_unsized_info: CoerceUnsizedInfo(DefId) - -> ty::adjustment::CoerceUnsizedInfo, - - [] fn typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult, - - [] fn typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>, - - [] fn used_trait_imports: UsedTraitImports(DefId) -> Rc, - - [] fn has_typeck_tables: HasTypeckTables(DefId) -> bool, - - [] fn coherent_trait: CoherenceCheckTrait(DefId) -> (), - - [] fn borrowck: BorrowCheck(DefId) -> Rc, - - /// Borrow checks the function body. If this is a closure, returns - /// additional requirements that the closure's creator must verify. - [] fn mir_borrowck: MirBorrowCheck(DefId) -> Option>, - - /// Gets a complete map from all types to their inherent impls. - /// Not meant to be used directly outside of coherence. - /// (Defined only for LOCAL_CRATE) - [] fn crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) -> CrateInherentImpls, - - /// Checks all types in the krate for overlap in their inherent impls. Reports errors. - /// Not meant to be used directly outside of coherence. 
- /// (Defined only for LOCAL_CRATE) - [] fn crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) -> (), - - /// Results of evaluating const items or constants embedded in - /// other items (such as enum variant explicit discriminants). - [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) - -> const_val::EvalResult<'tcx>, - - [] fn check_match: CheckMatch(DefId) - -> Result<(), ErrorReported>, - - /// Performs the privacy check and computes "access levels". - [] fn privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Rc, - - [] fn reachable_set: reachability_dep_node(CrateNum) -> ReachableSet, - - /// Per-body `region::ScopeTree`. The `DefId` should be the owner-def-id for the body; - /// in the case of closures, this will be redirected to the enclosing function. - [] fn region_scope_tree: RegionScopeTree(DefId) -> Rc, - - [] fn mir_shims: mir_shim_dep_node(ty::InstanceDef<'tcx>) -> &'tcx mir::Mir<'tcx>, - - [] fn def_symbol_name: SymbolName(DefId) -> ty::SymbolName, - [] fn symbol_name: symbol_name_dep_node(ty::Instance<'tcx>) -> ty::SymbolName, - - [] fn describe_def: DescribeDef(DefId) -> Option, - [] fn def_span: DefSpan(DefId) -> Span, - [] fn lookup_stability: LookupStability(DefId) -> Option<&'tcx attr::Stability>, - [] fn lookup_deprecation_entry: LookupDeprecationEntry(DefId) -> Option, - [] fn item_attrs: ItemAttrs(DefId) -> Rc<[ast::Attribute]>, - [] fn fn_arg_names: FnArgNames(DefId) -> Vec, - [] fn impl_parent: ImplParent(DefId) -> Option, - [] fn trait_of_item: TraitOfItem(DefId) -> Option, - [] fn is_exported_symbol: IsExportedSymbol(DefId) -> bool, - [] fn item_body_nested_bodies: ItemBodyNestedBodies(DefId) -> ExternBodyNestedBodies, - [] fn const_is_rvalue_promotable_to_static: ConstIsRvaluePromotableToStatic(DefId) -> bool, - [] fn rvalue_promotable_map: RvaluePromotableMap(DefId) -> Rc, - [] fn is_mir_available: IsMirAvailable(DefId) -> bool, - [] fn vtable_methods: 
vtable_methods_node(ty::PolyTraitRef<'tcx>) - -> Rc)>>>, - - [] fn trans_fulfill_obligation: fulfill_obligation_dep_node( - (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> Vtable<'tcx, ()>, - [] fn trait_impls_of: TraitImpls(DefId) -> Rc, - [] fn specialization_graph_of: SpecializationGraph(DefId) -> Rc, - [] fn is_object_safe: ObjectSafety(DefId) -> bool, - - // Get the ParameterEnvironment for a given item; this environment - // will be in "user-facing" mode, meaning that it is suitabe for - // type-checking etc, and it does not normalize specializable - // associated types. This is almost always what you want, - // unless you are doing MIR optimizations, in which case you - // might want to use `reveal_all()` method to change modes. - [] fn param_env: ParamEnv(DefId) -> ty::ParamEnv<'tcx>, - - // Trait selection queries. These are best used by invoking `ty.moves_by_default()`, - // `ty.is_copy()`, etc, since that will prune the environment where possible. - [] fn is_copy_raw: is_copy_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn is_sized_raw: is_sized_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx ty::layout::LayoutDetails, - ty::layout::LayoutError<'tcx>>, - - [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) - -> Rc>, - - [] fn is_panic_runtime: IsPanicRuntime(CrateNum) -> bool, - [] fn is_compiler_builtins: IsCompilerBuiltins(CrateNum) -> bool, - [] fn has_global_allocator: HasGlobalAllocator(CrateNum) -> bool, - [] fn is_sanitizer_runtime: IsSanitizerRuntime(CrateNum) -> bool, - [] fn is_profiler_runtime: IsProfilerRuntime(CrateNum) -> bool, - [] fn panic_strategy: GetPanicStrategy(CrateNum) -> PanicStrategy, - [] fn is_no_builtins: IsNoBuiltins(CrateNum) -> bool, - - [] fn 
extern_crate: ExternCrate(DefId) -> Rc>, - - [] fn specializes: specializes_node((DefId, DefId)) -> bool, - [] fn in_scope_traits_map: InScopeTraits(DefIndex) - -> Option>>>>, - [] fn module_exports: ModuleExports(DefId) -> Option>>, - [] fn lint_levels: lint_levels_node(CrateNum) -> Rc, - - [] fn impl_defaultness: ImplDefaultness(DefId) -> hir::Defaultness, - [] fn exported_symbol_ids: ExportedSymbolIds(CrateNum) -> Rc, - [] fn native_libraries: NativeLibraries(CrateNum) -> Rc>, - [] fn plugin_registrar_fn: PluginRegistrarFn(CrateNum) -> Option, - [] fn derive_registrar_fn: DeriveRegistrarFn(CrateNum) -> Option, - [] fn crate_disambiguator: CrateDisambiguator(CrateNum) -> CrateDisambiguator, - [] fn crate_hash: CrateHash(CrateNum) -> Svh, - [] fn original_crate_name: OriginalCrateName(CrateNum) -> Symbol, - - [] fn implementations_of_trait: implementations_of_trait_node((CrateNum, DefId)) - -> Rc>, - [] fn all_trait_implementations: AllTraitImplementations(CrateNum) - -> Rc>, - - [] fn is_dllimport_foreign_item: IsDllimportForeignItem(DefId) -> bool, - [] fn is_statically_included_foreign_item: IsStaticallyIncludedForeignItem(DefId) -> bool, - [] fn native_library_kind: NativeLibraryKind(DefId) - -> Option, - [] fn link_args: link_args_node(CrateNum) -> Rc>, - - // Lifetime resolution. See `middle::resolve_lifetimes`. 
- [] fn resolve_lifetimes: ResolveLifetimes(CrateNum) -> Rc, - [] fn named_region_map: NamedRegion(DefIndex) -> - Option>>, - [] fn is_late_bound_map: IsLateBound(DefIndex) -> - Option>>, - [] fn object_lifetime_defaults_map: ObjectLifetimeDefaults(DefIndex) - -> Option>>>>, - - [] fn visibility: Visibility(DefId) -> ty::Visibility, - [] fn dep_kind: DepKind(CrateNum) -> DepKind, - [] fn crate_name: CrateName(CrateNum) -> Symbol, - [] fn item_children: ItemChildren(DefId) -> Rc>, - [] fn extern_mod_stmt_cnum: ExternModStmtCnum(DefId) -> Option, - - [] fn get_lang_items: get_lang_items_node(CrateNum) -> Rc, - [] fn defined_lang_items: DefinedLangItems(CrateNum) -> Rc>, - [] fn missing_lang_items: MissingLangItems(CrateNum) -> Rc>, - [] fn extern_const_body: ExternConstBody(DefId) -> ExternConstBody<'tcx>, - [] fn visible_parent_map: visible_parent_map_node(CrateNum) - -> Rc>, - [] fn missing_extern_crate_item: MissingExternCrateItem(CrateNum) -> bool, - [] fn used_crate_source: UsedCrateSource(CrateNum) -> Rc, - [] fn postorder_cnums: postorder_cnums_node(CrateNum) -> Rc>, - - [] fn freevars: Freevars(DefId) -> Option>>, - [] fn maybe_unused_trait_import: MaybeUnusedTraitImport(DefId) -> bool, - [] fn maybe_unused_extern_crates: maybe_unused_extern_crates_node(CrateNum) - -> Rc>, - - [] fn stability_index: stability_index_node(CrateNum) -> Rc>, - [] fn all_crate_nums: all_crate_nums_node(CrateNum) -> Rc>, - - [] fn exported_symbols: ExportedSymbols(CrateNum) - -> Arc, SymbolExportLevel)>>, - [] fn collect_and_partition_translation_items: - collect_and_partition_translation_items_node(CrateNum) - -> (Arc, Arc>>>), - [] fn export_name: ExportName(DefId) -> Option, - [] fn contains_extern_indicator: ContainsExternIndicator(DefId) -> bool, - [] fn is_translated_function: IsTranslatedFunction(DefId) -> bool, - [] fn codegen_unit: CodegenUnit(InternedString) -> Arc>, - [] fn compile_codegen_unit: CompileCodegenUnit(InternedString) -> Stats, - [] fn output_filenames: 
output_filenames_node(CrateNum) - -> Arc, - - [] fn has_copy_closures: HasCopyClosures(CrateNum) -> bool, - [] fn has_clone_closures: HasCloneClosures(CrateNum) -> bool, - - // Erases regions from `ty` to yield a new type. - // Normally you would just use `tcx.erase_regions(&value)`, - // however, which uses this query as a kind of cache. - [] fn erase_regions_ty: erase_regions_ty(Ty<'tcx>) -> Ty<'tcx>, - [] fn fully_normalize_monormophic_ty: normalize_ty_node(Ty<'tcx>) -> Ty<'tcx>, - - [] fn substitute_normalize_and_test_predicates: - substitute_normalize_and_test_predicates_node((DefId, &'tcx Substs<'tcx>)) -> bool, - - [] fn target_features_whitelist: - target_features_whitelist_node(CrateNum) -> Rc>, - [] fn target_features_enabled: TargetFeaturesEnabled(DefId) -> Rc>, - -} - -////////////////////////////////////////////////////////////////////// -// These functions are little shims used to find the dep-node for a -// given query when there is not a *direct* mapping: - -fn erase_regions_ty<'tcx>(ty: Ty<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::EraseRegionsTy { ty } -} - -fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { - DepConstructor::TypeParamPredicates { - item_id, - param_id - } -} - -fn fulfill_obligation_dep_node<'tcx>((param_env, trait_ref): - (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> DepConstructor<'tcx> { - DepConstructor::FulfillObligation { - param_env, - trait_ref - } -} - -fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Coherence -} - -fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CoherenceInherentImplOverlapCheck -} - -fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Reachability -} - -fn mir_shim_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::MirShim { - instance_def - } -} - -fn 
symbol_name_dep_node<'tcx>(instance: ty::Instance<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::InstanceSymbolName { instance } -} - -fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::TypeckBodiesKrate -} - -fn const_eval_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) - -> DepConstructor<'tcx> { - DepConstructor::ConstEval { param_env } -} - -fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::MirKeys -} - -fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CrateVariances -} - -fn is_copy_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsCopy { param_env } -} - -fn is_sized_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsSized { param_env } -} - -fn is_freeze_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsFreeze { param_env } -} - -fn needs_drop_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::NeedsDrop { param_env } -} - -fn layout_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::Layout { param_env } -} - -fn lint_levels_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::LintLevels -} - -fn specializes_node<'tcx>((a, b): (DefId, DefId)) -> DepConstructor<'tcx> { - DepConstructor::Specializes { impl1: a, impl2: b } -} - -fn implementations_of_trait_node<'tcx>((krate, trait_id): (CrateNum, DefId)) - -> DepConstructor<'tcx> -{ - DepConstructor::ImplementationsOfTrait { krate, trait_id } -} - -fn link_args_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::LinkArgs -} - -fn get_lang_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::GetLangItems -} - -fn visible_parent_map_node<'tcx>(_: CrateNum) -> 
DepConstructor<'tcx> { - DepConstructor::VisibleParentMap -} - -fn postorder_cnums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::PostorderCnums -} - -fn maybe_unused_extern_crates_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::MaybeUnusedExternCrates -} - -fn stability_index_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::StabilityIndex -} - -fn all_crate_nums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::AllCrateNums -} - -fn collect_and_partition_translation_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CollectAndPartitionTranslationItems -} - -fn output_filenames_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::OutputFilenames -} - -fn vtable_methods_node<'tcx>(trait_ref: ty::PolyTraitRef<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::VtableMethods{ trait_ref } -} -fn normalize_ty_node<'tcx>(_: Ty<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::NormalizeTy -} - -fn substitute_normalize_and_test_predicates_node<'tcx>(key: (DefId, &'tcx Substs<'tcx>)) - -> DepConstructor<'tcx> { - DepConstructor::SubstituteNormalizeAndTestPredicates { key } -} - -fn target_features_whitelist_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::TargetFeaturesWhitelist -} diff --git a/src/librustc/ty/maps/plumbing.rs b/src/librustc/ty/maps/plumbing.rs deleted file mode 100644 index d670ecc2691a..000000000000 --- a/src/librustc/ty/maps/plumbing.rs +++ /dev/null @@ -1,985 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The implementation of the query system itself. Defines the macros -//! 
that generate the actual methods on tcx which find and execute the -//! provider, manage the caches, and so forth. - -use dep_graph::{DepNodeIndex, DepNode, DepKind, DepNodeColor}; -use errors::DiagnosticBuilder; -use ty::{TyCtxt}; -use ty::maps::Query; // NB: actually generated by the macros in this file -use ty::maps::config::QueryDescription; -use ty::item_path; - -use rustc_data_structures::fx::{FxHashMap}; -use std::cell::{Ref, RefMut}; -use std::marker::PhantomData; -use std::mem; -use syntax_pos::Span; - -pub(super) struct QueryMap<'tcx, D: QueryDescription<'tcx>> { - phantom: PhantomData<(D, &'tcx ())>, - pub(super) map: FxHashMap>, -} - -pub(super) struct QueryValue { - pub(super) value: T, - pub(super) index: DepNodeIndex, -} - -impl QueryValue { - pub(super) fn new(value: T, - dep_node_index: DepNodeIndex) - -> QueryValue { - QueryValue { - value, - index: dep_node_index, - } - } -} - -impl<'tcx, M: QueryDescription<'tcx>> QueryMap<'tcx, M> { - pub(super) fn new() -> QueryMap<'tcx, M> { - QueryMap { - phantom: PhantomData, - map: FxHashMap(), - } - } -} - -pub(super) trait GetCacheInternal<'tcx>: QueryDescription<'tcx> + Sized { - fn get_cache_internal<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Ref<'a, QueryMap<'tcx, Self>>; -} - -pub(super) struct CycleError<'a, 'tcx: 'a> { - span: Span, - cycle: RefMut<'a, [(Span, Query<'tcx>)]>, -} - -impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub(super) fn report_cycle(self, CycleError { span, cycle }: CycleError) - -> DiagnosticBuilder<'a> - { - // Subtle: release the refcell lock before invoking `describe()` - // below by dropping `cycle`. - let stack = cycle.to_vec(); - mem::drop(cycle); - - assert!(!stack.is_empty()); - - // Disable naming impls with types in this path, since that - // sometimes cycles itself, leading to extra cycle errors. - // (And cycle errors around impls tend to occur during the - // collect/coherence phases anyhow.) 
- item_path::with_forced_impl_filename_line(|| { - let span = self.sess.codemap().def_span(span); - let mut err = - struct_span_err!(self.sess, span, E0391, - "unsupported cyclic reference between types/traits detected"); - err.span_label(span, "cyclic reference"); - - err.span_note(self.sess.codemap().def_span(stack[0].0), - &format!("the cycle begins when {}...", stack[0].1.describe(self))); - - for &(span, ref query) in &stack[1..] { - err.span_note(self.sess.codemap().def_span(span), - &format!("...which then requires {}...", query.describe(self))); - } - - err.note(&format!("...which then again requires {}, completing the cycle.", - stack[0].1.describe(self))); - - return err - }) - } - - pub(super) fn cycle_check(self, span: Span, query: Query<'gcx>, compute: F) - -> Result> - where F: FnOnce() -> R - { - { - let mut stack = self.maps.query_stack.borrow_mut(); - if let Some((i, _)) = stack.iter().enumerate().rev() - .find(|&(_, &(_, ref q))| *q == query) { - return Err(CycleError { - span, - cycle: RefMut::map(stack, |stack| &mut stack[i..]) - }); - } - stack.push((span, query)); - } - - let result = compute(); - - self.maps.query_stack.borrow_mut().pop(); - - Ok(result) - } - - /// Try to read a node index for the node dep_node. - /// A node will have an index, when it's already been marked green, or when we can mark it - /// green. This function will mark the current task as a reader of the specified node, when - /// the a node index can be found for that node. - pub(super) fn try_mark_green_and_read(self, dep_node: &DepNode) -> Option { - match self.dep_graph.node_color(dep_node) { - Some(DepNodeColor::Green(dep_node_index)) => { - self.dep_graph.read_index(dep_node_index); - Some(dep_node_index) - } - Some(DepNodeColor::Red) => { - None - } - None => { - // try_mark_green (called below) will panic when full incremental - // compilation is disabled. If that's the case, we can't try to mark nodes - // as green anyway, so we can safely return None here. 
- if !self.dep_graph.is_fully_enabled() { - return None; - } - match self.dep_graph.try_mark_green(self.global_tcx(), &dep_node) { - Some(dep_node_index) => { - debug_assert!(self.dep_graph.is_green(dep_node_index)); - self.dep_graph.read_index(dep_node_index); - Some(dep_node_index) - } - None => { - None - } - } - } - } - } -} - -// If enabled, send a message to the profile-queries thread -macro_rules! profq_msg { - ($tcx:expr, $msg:expr) => { - if cfg!(debug_assertions) { - if $tcx.sess.profile_queries() { - profq_msg($msg) - } - } - } -} - -// If enabled, format a key using its debug string, which can be -// expensive to compute (in terms of time). -macro_rules! profq_key { - ($tcx:expr, $key:expr) => { - if cfg!(debug_assertions) { - if $tcx.sess.profile_queries_and_keys() { - Some(format!("{:?}", $key)) - } else { None } - } else { None } - } -} - -macro_rules! define_maps { - (<$tcx:tt> - $($(#[$attr:meta])* - [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { - - use dep_graph::DepNodeIndex; - use std::cell::RefCell; - - define_map_struct! 
{ - tcx: $tcx, - input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) - } - - impl<$tcx> Maps<$tcx> { - pub fn new(providers: IndexVec>) - -> Self { - Maps { - providers, - query_stack: RefCell::new(vec![]), - $($name: RefCell::new(QueryMap::new())),* - } - } - } - - #[allow(bad_style)] - #[derive(Copy, Clone, Debug, PartialEq, Eq)] - pub enum Query<$tcx> { - $($(#[$attr])* $name($K)),* - } - - #[allow(bad_style)] - #[derive(Clone, Debug, PartialEq, Eq)] - pub enum QueryMsg { - $($name(Option)),* - } - - impl<$tcx> Query<$tcx> { - pub fn describe(&self, tcx: TyCtxt) -> String { - let (r, name) = match *self { - $(Query::$name(key) => { - (queries::$name::describe(tcx, key), stringify!($name)) - })* - }; - if tcx.sess.verbose() { - format!("{} [{}]", r, name) - } else { - r - } - } - } - - pub mod queries { - use std::marker::PhantomData; - - $(#[allow(bad_style)] - pub struct $name<$tcx> { - data: PhantomData<&$tcx ()> - })* - } - - $(impl<$tcx> QueryConfig for queries::$name<$tcx> { - type Key = $K; - type Value = $V; - } - - impl<$tcx> GetCacheInternal<$tcx> for queries::$name<$tcx> { - fn get_cache_internal<'a>(tcx: TyCtxt<'a, $tcx, $tcx>) - -> ::std::cell::Ref<'a, QueryMap<$tcx, Self>> { - tcx.maps.$name.borrow() - } - } - - impl<'a, $tcx, 'lcx> queries::$name<$tcx> { - - #[allow(unused)] - fn to_dep_node(tcx: TyCtxt<'a, $tcx, 'lcx>, key: &$K) -> DepNode { - use dep_graph::DepConstructor::*; - - DepNode::new(tcx, $node(*key)) - } - - fn try_get_with(tcx: TyCtxt<'a, $tcx, 'lcx>, - mut span: Span, - key: $K) - -> Result<$V, CycleError<'a, $tcx>> - { - debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})", - stringify!($name), - key, - span); - - profq_msg!(tcx, - ProfileQueriesMsg::QueryBegin( - span.data(), - QueryMsg::$name(profq_key!(tcx, key)) - ) - ); - - if let Some(value) = tcx.maps.$name.borrow().map.get(&key) { - profq_msg!(tcx, ProfileQueriesMsg::CacheHit); - tcx.dep_graph.read_index(value.index); - return Ok((&value.value).clone()); - } - - // 
FIXME(eddyb) Get more valid Span's on queries. - // def_span guard is necessary to prevent a recursive loop, - // default_span calls def_span query internally. - if span == DUMMY_SP && stringify!($name) != "def_span" { - span = key.default_span(tcx) - } - - // Fast path for when incr. comp. is off. `to_dep_node` is - // expensive for some DepKinds. - if !tcx.dep_graph.is_fully_enabled() { - let null_dep_node = DepNode::new_no_params(::dep_graph::DepKind::Null); - return Self::force(tcx, key, span, null_dep_node) - .map(|(v, _)| v); - } - - let dep_node = Self::to_dep_node(tcx, &key); - - if dep_node.kind.is_anon() { - profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); - - let res = tcx.cycle_check(span, Query::$name(key), || { - tcx.sess.diagnostic().track_diagnostics(|| { - tcx.dep_graph.with_anon_task(dep_node.kind, || { - Self::compute_result(tcx.global_tcx(), key) - }) - }) - })?; - - profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); - let ((result, dep_node_index), diagnostics) = res; - - tcx.dep_graph.read_index(dep_node_index); - - tcx.on_disk_query_result_cache - .store_diagnostics_for_anon_node(dep_node_index, diagnostics); - - let value = QueryValue::new(result, dep_node_index); - - return Ok((&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value).clone()); - } - - if !dep_node.kind.is_input() { - if let Some(dep_node_index) = tcx.try_mark_green_and_read(&dep_node) { - profq_msg!(tcx, ProfileQueriesMsg::CacheHit); - return Self::load_from_disk_and_cache_in_memory(tcx, - key, - span, - dep_node_index, - &dep_node) - } - } - - match Self::force(tcx, key, span, dep_node) { - Ok((result, dep_node_index)) => { - tcx.dep_graph.read_index(dep_node_index); - Ok(result) - } - Err(e) => Err(e) - } - } - - /// Ensure that either this query has all green inputs or been executed. - /// Executing query::ensure(D) is considered a read of the dep-node D. 
- /// - /// This function is particularly useful when executing passes for their - /// side-effects -- e.g., in order to report errors for erroneous programs. - /// - /// Note: The optimization is only available during incr. comp. - pub fn ensure(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> () { - let dep_node = Self::to_dep_node(tcx, &key); - - // Ensuring an "input" or anonymous query makes no sense - assert!(!dep_node.kind.is_anon()); - assert!(!dep_node.kind.is_input()); - if tcx.try_mark_green_and_read(&dep_node).is_none() { - // A None return from `try_mark_green_and_read` means that this is either - // a new dep node or that the dep node has already been marked red. - // Either way, we can't call `dep_graph.read()` as we don't have the - // DepNodeIndex. We must invoke the query itself. The performance cost - // this introduces should be negligible as we'll immediately hit the - // in-memory cache, or another query down the line will. - let _ = tcx.$name(key); - } - } - - fn compute_result(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> $V { - let provider = tcx.maps.providers[key.map_crate()].$name; - provider(tcx.global_tcx(), key) - } - - fn load_from_disk_and_cache_in_memory(tcx: TyCtxt<'a, $tcx, 'lcx>, - key: $K, - span: Span, - dep_node_index: DepNodeIndex, - dep_node: &DepNode) - -> Result<$V, CycleError<'a, $tcx>> - { - debug_assert!(tcx.dep_graph.is_green(dep_node_index)); - - // First we try to load the result from the on-disk cache - let result = if Self::cache_on_disk(key) && - tcx.sess.opts.debugging_opts.incremental_queries { - let prev_dep_node_index = - tcx.dep_graph.prev_dep_node_index_of(dep_node); - let result = Self::try_load_from_disk(tcx.global_tcx(), - prev_dep_node_index); - - // We always expect to find a cached result for things that - // can be forced from DepNode. 
- debug_assert!(!dep_node.kind.can_reconstruct_query_key() || - result.is_some(), - "Missing on-disk cache entry for {:?}", - dep_node); - result - } else { - // Some things are never cached on disk. - None - }; - - let result = if let Some(result) = result { - result - } else { - // We could not load a result from the on-disk cache, so - // recompute. - let (result, _ ) = tcx.cycle_check(span, Query::$name(key), || { - // The diagnostics for this query have already been - // promoted to the current session during - // try_mark_green(), so we can ignore them here. - tcx.sess.diagnostic().track_diagnostics(|| { - // The dep-graph for this computation is already in - // place - tcx.dep_graph.with_ignore(|| { - Self::compute_result(tcx, key) - }) - }) - })?; - result - }; - - // If -Zincremental-verify-ich is specified, re-hash results from - // the cache and make sure that they have the expected fingerprint. - if tcx.sess.opts.debugging_opts.incremental_verify_ich { - use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; - use ich::Fingerprint; - - assert!(Some(tcx.dep_graph.fingerprint_of(dep_node_index)) == - tcx.dep_graph.prev_fingerprint_of(dep_node), - "Fingerprint for green query instance not loaded \ - from cache: {:?}", dep_node); - - debug!("BEGIN verify_ich({:?})", dep_node); - let mut hcx = tcx.create_stable_hashing_context(); - let mut hasher = StableHasher::new(); - - result.hash_stable(&mut hcx, &mut hasher); - - let new_hash: Fingerprint = hasher.finish(); - debug!("END verify_ich({:?})", dep_node); - - let old_hash = tcx.dep_graph.fingerprint_of(dep_node_index); - - assert!(new_hash == old_hash, "Found unstable fingerprints \ - for {:?}", dep_node); - } - - if tcx.sess.opts.debugging_opts.query_dep_graph { - tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true); - } - - let value = QueryValue::new(result, dep_node_index); - - Ok((&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value).clone()) - } - - 
fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, - key: $K, - span: Span, - dep_node: DepNode) - -> Result<($V, DepNodeIndex), CycleError<'a, $tcx>> { - debug_assert!(tcx.dep_graph.node_color(&dep_node).is_none()); - - profq_msg!(tcx, ProfileQueriesMsg::ProviderBegin); - let res = tcx.cycle_check(span, Query::$name(key), || { - tcx.sess.diagnostic().track_diagnostics(|| { - if dep_node.kind.is_eval_always() { - tcx.dep_graph.with_eval_always_task(dep_node, - tcx, - key, - Self::compute_result) - } else { - tcx.dep_graph.with_task(dep_node, - tcx, - key, - Self::compute_result) - } - }) - })?; - profq_msg!(tcx, ProfileQueriesMsg::ProviderEnd); - - let ((result, dep_node_index), diagnostics) = res; - - if tcx.sess.opts.debugging_opts.query_dep_graph { - tcx.dep_graph.mark_loaded_from_cache(dep_node_index, false); - } - - if dep_node.kind != ::dep_graph::DepKind::Null { - tcx.on_disk_query_result_cache - .store_diagnostics(dep_node_index, diagnostics); - } - - let value = QueryValue::new(result, dep_node_index); - - Ok(((&tcx.maps - .$name - .borrow_mut() - .map - .entry(key) - .or_insert(value) - .value).clone(), - dep_node_index)) - } - - pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) - -> Result<$V, DiagnosticBuilder<'a>> { - match Self::try_get_with(tcx, span, key) { - Ok(e) => Ok(e), - Err(e) => Err(tcx.report_cycle(e)), - } - } - })* - - #[derive(Copy, Clone)] - pub struct TyCtxtAt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'gcx, 'tcx>, - pub span: Span, - } - - impl<'a, 'gcx, 'tcx> Deref for TyCtxtAt<'a, 'gcx, 'tcx> { - type Target = TyCtxt<'a, 'gcx, 'tcx>; - fn deref(&self) -> &Self::Target { - &self.tcx - } - } - - impl<'a, $tcx, 'lcx> TyCtxt<'a, $tcx, 'lcx> { - /// Return a transparent wrapper for `TyCtxt` which uses - /// `span` as the location of queries performed through it. 
- pub fn at(self, span: Span) -> TyCtxtAt<'a, $tcx, 'lcx> { - TyCtxtAt { - tcx: self, - span - } - } - - $($(#[$attr])* - pub fn $name(self, key: $K) -> $V { - self.at(DUMMY_SP).$name(key) - })* - } - - impl<'a, $tcx, 'lcx> TyCtxtAt<'a, $tcx, 'lcx> { - $($(#[$attr])* - pub fn $name(self, key: $K) -> $V { - queries::$name::try_get(self.tcx, self.span, key).unwrap_or_else(|mut e| { - e.emit(); - Value::from_cycle_error(self.global_tcx()) - }) - })* - } - - define_provider_struct! { - tcx: $tcx, - input: ($(([$($modifiers)*] [$name] [$K] [$V]))*) - } - - impl<$tcx> Copy for Providers<$tcx> {} - impl<$tcx> Clone for Providers<$tcx> { - fn clone(&self) -> Self { *self } - } - } -} - -macro_rules! define_map_struct { - (tcx: $tcx:tt, - input: ($(([$(modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { - pub struct Maps<$tcx> { - providers: IndexVec>, - query_stack: RefCell)>>, - $($(#[$attr])* $name: RefCell>>,)* - } - }; -} - -macro_rules! define_provider_struct { - (tcx: $tcx:tt, - input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => { - pub struct Providers<$tcx> { - $(pub $name: for<'a> fn(TyCtxt<'a, $tcx, $tcx>, $K) -> $R,)* - } - - impl<$tcx> Default for Providers<$tcx> { - fn default() -> Self { - $(fn $name<'a, $tcx>(_: TyCtxt<'a, $tcx, $tcx>, key: $K) -> $R { - bug!("tcx.maps.{}({:?}) unsupported by its crate", - stringify!($name), key); - })* - Providers { $($name),* } - } - } - }; -} - - -/// The red/green evaluation system will try to mark a specific DepNode in the -/// dependency graph as green by recursively trying to mark the dependencies of -/// that DepNode as green. While doing so, it will sometimes encounter a DepNode -/// where we don't know if it is red or green and we therefore actually have -/// to recompute its value in order to find out. Since the only piece of -/// information that we have at that point is the DepNode we are trying to -/// re-evaluate, we need some way to re-run a query from just that. 
This is what -/// `force_from_dep_node()` implements. -/// -/// In the general case, a DepNode consists of a DepKind and an opaque -/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint -/// is usually constructed by computing a stable hash of the query-key that the -/// DepNode corresponds to. Consequently, it is not in general possible to go -/// back from hash to query-key (since hash functions are not reversible). For -/// this reason `force_from_dep_node()` is expected to fail from time to time -/// because we just cannot find out, from the DepNode alone, what the -/// corresponding query-key is and therefore cannot re-run the query. -/// -/// The system deals with this case letting `try_mark_green` fail which forces -/// the root query to be re-evaluated. -/// -/// Now, if force_from_dep_node() would always fail, it would be pretty useless. -/// Fortunately, we can use some contextual information that will allow us to -/// reconstruct query-keys for certain kinds of DepNodes. In particular, we -/// enforce by construction that the GUID/fingerprint of certain DepNodes is a -/// valid DefPathHash. Since we also always build a huge table that maps every -/// DefPathHash in the current codebase to the corresponding DefId, we have -/// everything we need to re-run the query. -/// -/// Take the `mir_validated` query as an example. Like many other queries, it -/// just has a single parameter: the DefId of the item it will compute the -/// validated MIR for. Now, when we call `force_from_dep_node()` on a dep-node -/// with kind `MirValidated`, we know that the GUID/fingerprint of the dep-node -/// is actually a DefPathHash, and can therefore just look up the corresponding -/// DefId in `tcx.def_path_hash_to_def_id`. -/// -/// When you implement a new query, it will likely have a corresponding new -/// DepKind, and you'll have to support it here in `force_from_dep_node()`. 
As -/// a rule of thumb, if your query takes a DefId or DefIndex as sole parameter, -/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just -/// add it to the "We don't have enough information to reconstruct..." group in -/// the match below. -pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, - dep_node: &DepNode) - -> bool { - use ty::maps::keys::Key; - use hir::def_id::LOCAL_CRATE; - - // We must avoid ever having to call force_from_dep_node() for a - // DepNode::CodegenUnit: - // Since we cannot reconstruct the query key of a DepNode::CodegenUnit, we - // would always end up having to evaluate the first caller of the - // `codegen_unit` query that *is* reconstructible. This might very well be - // the `compile_codegen_unit` query, thus re-translating the whole CGU just - // to re-trigger calling the `codegen_unit` query with the right key. At - // that point we would already have re-done all the work we are trying to - // avoid doing in the first place. - // The solution is simple: Just explicitly call the `codegen_unit` query for - // each CGU, right after partitioning. This way `try_mark_green` will always - // hit the cache instead of having to go through `force_from_dep_node`. - // This assertion makes sure, we actually keep applying the solution above. - debug_assert!(dep_node.kind != DepKind::CodegenUnit, - "calling force_from_dep_node() on DepKind::CodegenUnit"); - - if !dep_node.kind.can_reconstruct_query_key() { - return false - } - - macro_rules! def_id { - () => { - if let Some(def_id) = dep_node.extract_def_id(tcx) { - def_id - } else { - // return from the whole function - return false - } - } - }; - - macro_rules! krate { - () => { (def_id!()).krate } - }; - - macro_rules! force { - ($query:ident, $key:expr) => { - { - use $crate::util::common::{ProfileQueriesMsg, profq_msg}; - - // FIXME(eddyb) Get more valid Span's on queries. 
- // def_span guard is necessary to prevent a recursive loop, - // default_span calls def_span query internally. - let span = if stringify!($query) != "def_span" { - $key.default_span(tcx) - } else { - ::syntax_pos::DUMMY_SP - }; - - profq_msg!(tcx, - ProfileQueriesMsg::QueryBegin( - span.data(), - ::ty::maps::QueryMsg::$query(profq_key!(tcx, $key)) - ) - ); - - match ::ty::maps::queries::$query::force(tcx, $key, span, *dep_node) { - Ok(_) => {}, - Err(e) => { - tcx.report_cycle(e).emit(); - } - } - } - } - }; - - // FIXME(#45015): We should try move this boilerplate code into a macro - // somehow. - match dep_node.kind { - // These are inputs that are expected to be pre-allocated and that - // should therefore always be red or green already - DepKind::AllLocalTraitImpls | - DepKind::Krate | - DepKind::CrateMetadata | - DepKind::HirBody | - DepKind::Hir | - - // This are anonymous nodes - DepKind::TraitSelect | - - // We don't have enough information to reconstruct the query key of - // these - DepKind::IsCopy | - DepKind::IsSized | - DepKind::IsFreeze | - DepKind::NeedsDrop | - DepKind::Layout | - DepKind::ConstEval | - DepKind::InstanceSymbolName | - DepKind::MirShim | - DepKind::BorrowCheckKrate | - DepKind::Specializes | - DepKind::ImplementationsOfTrait | - DepKind::TypeParamPredicates | - DepKind::CodegenUnit | - DepKind::CompileCodegenUnit | - DepKind::FulfillObligation | - DepKind::VtableMethods | - DepKind::EraseRegionsTy | - DepKind::NormalizeTy | - DepKind::SubstituteNormalizeAndTestPredicates | - - // This one should never occur in this context - DepKind::Null => { - bug!("force_from_dep_node() - Encountered {:?}", dep_node) - } - - // These are not queries - DepKind::CoherenceCheckTrait | - DepKind::ItemVarianceConstraints => { - return false - } - - DepKind::RegionScopeTree => { force!(region_scope_tree, def_id!()); } - - DepKind::Coherence => { force!(crate_inherent_impls, LOCAL_CRATE); } - DepKind::CoherenceInherentImplOverlapCheck => { - 
force!(crate_inherent_impls_overlap_check, LOCAL_CRATE) - }, - DepKind::PrivacyAccessLevels => { force!(privacy_access_levels, LOCAL_CRATE); } - DepKind::MirBuilt => { force!(mir_built, def_id!()); } - DepKind::MirConstQualif => { force!(mir_const_qualif, def_id!()); } - DepKind::MirConst => { force!(mir_const, def_id!()); } - DepKind::MirValidated => { force!(mir_validated, def_id!()); } - DepKind::MirOptimized => { force!(optimized_mir, def_id!()); } - - DepKind::BorrowCheck => { force!(borrowck, def_id!()); } - DepKind::MirBorrowCheck => { force!(mir_borrowck, def_id!()); } - DepKind::UnsafetyCheckResult => { force!(unsafety_check_result, def_id!()); } - DepKind::UnsafeDeriveOnReprPacked => { force!(unsafe_derive_on_repr_packed, def_id!()); } - DepKind::Reachability => { force!(reachable_set, LOCAL_CRATE); } - DepKind::MirKeys => { force!(mir_keys, LOCAL_CRATE); } - DepKind::CrateVariances => { force!(crate_variances, LOCAL_CRATE); } - DepKind::AssociatedItems => { force!(associated_item, def_id!()); } - DepKind::TypeOfItem => { force!(type_of, def_id!()); } - DepKind::GenericsOfItem => { force!(generics_of, def_id!()); } - DepKind::PredicatesOfItem => { force!(predicates_of, def_id!()); } - DepKind::InferredOutlivesOf => { force!(inferred_outlives_of, def_id!()); } - DepKind::SuperPredicatesOfItem => { force!(super_predicates_of, def_id!()); } - DepKind::TraitDefOfItem => { force!(trait_def, def_id!()); } - DepKind::AdtDefOfItem => { force!(adt_def, def_id!()); } - DepKind::ImplTraitRef => { force!(impl_trait_ref, def_id!()); } - DepKind::ImplPolarity => { force!(impl_polarity, def_id!()); } - DepKind::FnSignature => { force!(fn_sig, def_id!()); } - DepKind::CoerceUnsizedInfo => { force!(coerce_unsized_info, def_id!()); } - DepKind::ItemVariances => { force!(variances_of, def_id!()); } - DepKind::IsConstFn => { force!(is_const_fn, def_id!()); } - DepKind::IsForeignItem => { force!(is_foreign_item, def_id!()); } - DepKind::SizedConstraint => { 
force!(adt_sized_constraint, def_id!()); } - DepKind::DtorckConstraint => { force!(adt_dtorck_constraint, def_id!()); } - DepKind::AdtDestructor => { force!(adt_destructor, def_id!()); } - DepKind::AssociatedItemDefIds => { force!(associated_item_def_ids, def_id!()); } - DepKind::InherentImpls => { force!(inherent_impls, def_id!()); } - DepKind::TypeckBodiesKrate => { force!(typeck_item_bodies, LOCAL_CRATE); } - DepKind::TypeckTables => { force!(typeck_tables_of, def_id!()); } - DepKind::UsedTraitImports => { force!(used_trait_imports, def_id!()); } - DepKind::HasTypeckTables => { force!(has_typeck_tables, def_id!()); } - DepKind::SymbolName => { force!(def_symbol_name, def_id!()); } - DepKind::SpecializationGraph => { force!(specialization_graph_of, def_id!()); } - DepKind::ObjectSafety => { force!(is_object_safe, def_id!()); } - DepKind::TraitImpls => { force!(trait_impls_of, def_id!()); } - DepKind::CheckMatch => { force!(check_match, def_id!()); } - - DepKind::ParamEnv => { force!(param_env, def_id!()); } - DepKind::DescribeDef => { force!(describe_def, def_id!()); } - DepKind::DefSpan => { force!(def_span, def_id!()); } - DepKind::LookupStability => { force!(lookup_stability, def_id!()); } - DepKind::LookupDeprecationEntry => { - force!(lookup_deprecation_entry, def_id!()); - } - DepKind::ItemBodyNestedBodies => { force!(item_body_nested_bodies, def_id!()); } - DepKind::ConstIsRvaluePromotableToStatic => { - force!(const_is_rvalue_promotable_to_static, def_id!()); - } - DepKind::RvaluePromotableMap => { force!(rvalue_promotable_map, def_id!()); } - DepKind::ImplParent => { force!(impl_parent, def_id!()); } - DepKind::TraitOfItem => { force!(trait_of_item, def_id!()); } - DepKind::IsExportedSymbol => { force!(is_exported_symbol, def_id!()); } - DepKind::IsMirAvailable => { force!(is_mir_available, def_id!()); } - DepKind::ItemAttrs => { force!(item_attrs, def_id!()); } - DepKind::FnArgNames => { force!(fn_arg_names, def_id!()); } - DepKind::DylibDepFormats => { 
force!(dylib_dependency_formats, krate!()); } - DepKind::IsPanicRuntime => { force!(is_panic_runtime, krate!()); } - DepKind::IsCompilerBuiltins => { force!(is_compiler_builtins, krate!()); } - DepKind::HasGlobalAllocator => { force!(has_global_allocator, krate!()); } - DepKind::ExternCrate => { force!(extern_crate, def_id!()); } - DepKind::LintLevels => { force!(lint_levels, LOCAL_CRATE); } - DepKind::InScopeTraits => { force!(in_scope_traits_map, def_id!().index); } - DepKind::ModuleExports => { force!(module_exports, def_id!()); } - DepKind::IsSanitizerRuntime => { force!(is_sanitizer_runtime, krate!()); } - DepKind::IsProfilerRuntime => { force!(is_profiler_runtime, krate!()); } - DepKind::GetPanicStrategy => { force!(panic_strategy, krate!()); } - DepKind::IsNoBuiltins => { force!(is_no_builtins, krate!()); } - DepKind::ImplDefaultness => { force!(impl_defaultness, def_id!()); } - DepKind::ExportedSymbolIds => { force!(exported_symbol_ids, krate!()); } - DepKind::NativeLibraries => { force!(native_libraries, krate!()); } - DepKind::PluginRegistrarFn => { force!(plugin_registrar_fn, krate!()); } - DepKind::DeriveRegistrarFn => { force!(derive_registrar_fn, krate!()); } - DepKind::CrateDisambiguator => { force!(crate_disambiguator, krate!()); } - DepKind::CrateHash => { force!(crate_hash, krate!()); } - DepKind::OriginalCrateName => { force!(original_crate_name, krate!()); } - - DepKind::AllTraitImplementations => { - force!(all_trait_implementations, krate!()); - } - - DepKind::IsDllimportForeignItem => { - force!(is_dllimport_foreign_item, def_id!()); - } - DepKind::IsStaticallyIncludedForeignItem => { - force!(is_statically_included_foreign_item, def_id!()); - } - DepKind::NativeLibraryKind => { force!(native_library_kind, def_id!()); } - DepKind::LinkArgs => { force!(link_args, LOCAL_CRATE); } - - DepKind::ResolveLifetimes => { force!(resolve_lifetimes, krate!()); } - DepKind::NamedRegion => { force!(named_region_map, def_id!().index); } - 
DepKind::IsLateBound => { force!(is_late_bound_map, def_id!().index); } - DepKind::ObjectLifetimeDefaults => { - force!(object_lifetime_defaults_map, def_id!().index); - } - - DepKind::Visibility => { force!(visibility, def_id!()); } - DepKind::DepKind => { force!(dep_kind, krate!()); } - DepKind::CrateName => { force!(crate_name, krate!()); } - DepKind::ItemChildren => { force!(item_children, def_id!()); } - DepKind::ExternModStmtCnum => { force!(extern_mod_stmt_cnum, def_id!()); } - DepKind::GetLangItems => { force!(get_lang_items, LOCAL_CRATE); } - DepKind::DefinedLangItems => { force!(defined_lang_items, krate!()); } - DepKind::MissingLangItems => { force!(missing_lang_items, krate!()); } - DepKind::ExternConstBody => { force!(extern_const_body, def_id!()); } - DepKind::VisibleParentMap => { force!(visible_parent_map, LOCAL_CRATE); } - DepKind::MissingExternCrateItem => { - force!(missing_extern_crate_item, krate!()); - } - DepKind::UsedCrateSource => { force!(used_crate_source, krate!()); } - DepKind::PostorderCnums => { force!(postorder_cnums, LOCAL_CRATE); } - DepKind::HasCloneClosures => { force!(has_clone_closures, krate!()); } - DepKind::HasCopyClosures => { force!(has_copy_closures, krate!()); } - - DepKind::Freevars => { force!(freevars, def_id!()); } - DepKind::MaybeUnusedTraitImport => { - force!(maybe_unused_trait_import, def_id!()); - } - DepKind::MaybeUnusedExternCrates => { force!(maybe_unused_extern_crates, LOCAL_CRATE); } - DepKind::StabilityIndex => { force!(stability_index, LOCAL_CRATE); } - DepKind::AllCrateNums => { force!(all_crate_nums, LOCAL_CRATE); } - DepKind::ExportedSymbols => { force!(exported_symbols, krate!()); } - DepKind::CollectAndPartitionTranslationItems => { - force!(collect_and_partition_translation_items, LOCAL_CRATE); - } - DepKind::ExportName => { force!(export_name, def_id!()); } - DepKind::ContainsExternIndicator => { - force!(contains_extern_indicator, def_id!()); - } - DepKind::IsTranslatedFunction => { 
force!(is_translated_function, def_id!()); } - DepKind::OutputFilenames => { force!(output_filenames, LOCAL_CRATE); } - - DepKind::TargetFeaturesWhitelist => { force!(target_features_whitelist, LOCAL_CRATE); } - DepKind::TargetFeaturesEnabled => { force!(target_features_enabled, def_id!()); } - } - - true -} - - -// FIXME(#45015): Another piece of boilerplate code that could be generated in -// a combined define_dep_nodes!()/define_maps!() macro. -macro_rules! impl_load_from_cache { - ($($dep_kind:ident => $query_name:ident,)*) => { - impl DepNode { - // Check whether the query invocation corresponding to the given - // DepNode is eligible for on-disk-caching. - pub fn cache_on_disk(&self, tcx: TyCtxt) -> bool { - use ty::maps::queries; - use ty::maps::QueryDescription; - - match self.kind { - $(DepKind::$dep_kind => { - let def_id = self.extract_def_id(tcx).unwrap(); - queries::$query_name::cache_on_disk(def_id) - })* - _ => false - } - } - - // This is method will execute the query corresponding to the given - // DepNode. It is only expected to work for DepNodes where the - // above `cache_on_disk` methods returns true. - // Also, as a sanity check, it expects that the corresponding query - // invocation has been marked as green already. 
- pub fn load_from_on_disk_cache(&self, tcx: TyCtxt) { - match self.kind { - $(DepKind::$dep_kind => { - debug_assert!(tcx.dep_graph - .node_color(self) - .map(|c| c.is_green()) - .unwrap_or(false)); - - let def_id = self.extract_def_id(tcx).unwrap(); - let _ = tcx.$query_name(def_id); - })* - _ => { - bug!() - } - } - } - } - } -} - -impl_load_from_cache!( - TypeckTables => typeck_tables_of, - MirOptimized => optimized_mir, - UnsafetyCheckResult => unsafety_check_result, - BorrowCheck => borrowck, - MirBorrowCheck => mir_borrowck, - MirConstQualif => mir_const_qualif, - SymbolName => def_symbol_name, - ConstIsRvaluePromotableToStatic => const_is_rvalue_promotable_to_static, - ContainsExternIndicator => contains_extern_indicator, - CheckMatch => check_match, -); diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 2b4d2c80c6f9..6c27d527ae89 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -12,47 +12,48 @@ pub use self::Variance::*; pub use self::AssociatedItemContainer::*; pub use self::BorrowKind::*; pub use self::IntVarValue::*; -pub use self::LvaluePreference::*; pub use self::fold::TypeFoldable; use hir::{map as hir_map, FreevarMap, TraitMap}; use hir::def::{Def, CtorKind, ExportMap}; -use hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE}; +use hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use hir::map::DefPathData; -use hir::svh::Svh; +use rustc_data_structures::svh::Svh; +use ich::Fingerprint; use ich::StableHashingContext; -use middle::const_val::ConstVal; +use infer::canonical::Canonical; use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; use middle::privacy::AccessLevels; use middle::resolve_lifetime::ObjectLifetimeDefault; use mir::Mir; +use mir::interpret::GlobalId; use mir::GeneratorLayout; use session::CrateDisambiguator; -use traits; +use traits::{self, Reveal}; use ty; use ty::subst::{Subst, Substs}; -use 
ty::util::IntTypeExt; +use ty::util::{IntTypeExt, Discr}; use ty::walk::TypeWalker; -use util::common::ErrorReported; -use util::nodemap::{NodeSet, DefIdMap, FxHashMap, FxHashSet}; +use util::captures::Captures; +use util::nodemap::{NodeSet, DefIdMap, FxHashMap}; +use arena::SyncDroplessArena; +use session::DataTypeKind; use serialize::{self, Encodable, Encoder}; -use std::collections::BTreeMap; -use std::cmp; +use std::cell::RefCell; +use std::cmp::{self, Ordering}; use std::fmt; use std::hash::{Hash, Hasher}; -use std::iter::FromIterator; use std::ops::Deref; -use std::rc::Rc; +use rustc_data_structures::sync::{self, Lrc, ParallelIterator, par_iter}; use std::slice; use std::vec::IntoIter; -use std::mem; +use std::{mem, ptr}; use syntax::ast::{self, DUMMY_NODE_ID, Name, Ident, NodeId}; use syntax::attr; -use syntax::ext::hygiene::{Mark, SyntaxContext}; -use syntax::symbol::{Symbol, InternedString}; +use syntax::ext::hygiene::Mark; +use syntax::symbol::{keywords, Symbol, LocalInternedString, InternedString}; use syntax_pos::{DUMMY_SP, Span}; -use rustc_const_math::ConstInt; use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter; use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, @@ -60,16 +61,16 @@ use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, use hir; -pub use self::sty::{Binder, DebruijnIndex}; +pub use self::sty::{Binder, CanonicalVar, DebruijnIndex, INNERMOST}; pub use self::sty::{FnSig, GenSig, PolyFnSig, PolyGenSig}; pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; -pub use self::sty::{ClosureSubsts, GeneratorInterior, TypeAndMut}; +pub use self::sty::{ClosureSubsts, GeneratorSubsts, UpvarSubsts, TypeAndMut}; pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef}; pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; pub use self::sty::{ExistentialProjection, PolyExistentialProjection, Const}; pub use self::sty::{BoundRegion, EarlyBoundRegion, 
FreeRegion, Region}; pub use self::sty::RegionKind; -pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid}; +pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid}; pub use self::sty::BoundRegion::*; pub use self::sty::InferTy::*; pub use self::sty::RegionKind::*; @@ -85,7 +86,7 @@ pub use self::instance::{Instance, InstanceDef}; pub use self::trait_def::TraitDef; -pub use self::maps::queries; +pub use self::query::queries; pub mod adjustment; pub mod binding; @@ -100,8 +101,8 @@ pub mod inhabitedness; pub mod item_path; pub mod layout; pub mod _match; -pub mod maps; pub mod outlives; +pub mod query; pub mod relate; pub mod steal; pub mod subst; @@ -119,13 +120,13 @@ mod sty; // Data types /// The complete set of all analyses described in this module. This is -/// produced by the driver and fed to trans and later passes. +/// produced by the driver and fed to codegen and later passes. /// /// NB: These contents are being migrated into queries using the /// *on-demand* infrastructure. #[derive(Clone)] pub struct CrateAnalysis { - pub access_levels: Rc, + pub access_levels: Lrc, pub name: String, pub glob_map: Option, } @@ -174,10 +175,10 @@ pub struct ImplHeader<'tcx> { pub predicates: Vec>, } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq)] pub struct AssociatedItem { pub def_id: DefId, - pub name: Name, + pub ident: Ident, pub kind: AssociatedKind, pub vis: Visibility, pub defaultness: hir::Defaultness, @@ -192,6 +193,7 @@ pub struct AssociatedItem { pub enum AssociatedKind { Const, Method, + Existential, Type } @@ -201,6 +203,7 @@ impl AssociatedItem { AssociatedKind::Const => Def::AssociatedConst(self.def_id), AssociatedKind::Method => Def::Method(self.def_id), AssociatedKind::Type => Def::AssociatedTy(self.def_id), + AssociatedKind::Existential => Def::AssociatedExistential(self.def_id), } } @@ -208,7 +211,8 @@ impl AssociatedItem { /// for ! 
pub fn relevant_for_never<'tcx>(&self) -> bool { match self.kind { - AssociatedKind::Const => true, + AssociatedKind::Existential | + AssociatedKind::Const | AssociatedKind::Type => true, // FIXME(canndrew): Be more thorough here, check if any argument is uninhabited. AssociatedKind::Method => !self.method_has_self_argument, @@ -222,11 +226,12 @@ impl AssociatedItem { // late-bound regions, and we don't want method signatures to show up // `as for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound // regions just fine, showing `fn(&MyType)`. - format!("{}", tcx.fn_sig(self.def_id).skip_binder()) + tcx.fn_sig(self.def_id).skip_binder().to_string() } - ty::AssociatedKind::Type => format!("type {};", self.name.to_string()), + ty::AssociatedKind::Type => format!("type {};", self.ident), + ty::AssociatedKind::Existential => format!("existential type {};", self.ident), ty::AssociatedKind::Const => { - format!("const {}: {:?};", self.name.to_string(), tcx.type_of(self.def_id)) + format!("const {}: {:?};", self.ident, tcx.type_of(self.def_id)) } } } @@ -268,16 +273,16 @@ impl<'a, 'gcx, 'tcx> DefIdTree for TyCtxt<'a, 'gcx, 'tcx> { impl Visibility { pub fn from_hir(visibility: &hir::Visibility, id: NodeId, tcx: TyCtxt) -> Self { - match *visibility { - hir::Public => Visibility::Public, - hir::Visibility::Crate => Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)), - hir::Visibility::Restricted { ref path, .. } => match path.def { + match visibility.node { + hir::VisibilityKind::Public => Visibility::Public, + hir::VisibilityKind::Crate(_) => Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)), + hir::VisibilityKind::Restricted { ref path, .. } => match path.def { // If there is no resolution, `resolve` will have already reported an error, so // assume that the visibility is public to avoid reporting more privacy errors. 
Def::Err => Visibility::Public, def => Visibility::Restricted(def.def_id()), }, - hir::Inherited => { + hir::VisibilityKind::Inherited => { Visibility::Restricted(tcx.hir.get_module_parent(id)) } } @@ -337,10 +342,10 @@ pub struct CrateVariancesMap { /// For each item with generics, maps to a vector of the variance /// of its generics. If an item has no generics, it will have no /// entry. - pub variances: FxHashMap>>, + pub variances: FxHashMap>>, /// An empty vector, useful for cloning. - pub empty_variance: Rc>, + pub empty_variance: Lrc>, } impl Variance { @@ -442,7 +447,7 @@ bitflags! { // true if there are "names" of types and regions and so forth // that are local to a particular fn - const HAS_LOCAL_NAMES = 1 << 10; + const HAS_FREE_LOCAL_NAMES = 1 << 10; // Present if the type belongs in a local type context. // Only set for TyInfer other than Fresh. @@ -452,6 +457,14 @@ bitflags! { // Currently we can't normalize projections w/ bound regions. const HAS_NORMALIZABLE_PROJECTION = 1 << 12; + // Set if this includes a "canonical" type or region var -- + // ought to be true only for the results of canonicalization. + const HAS_CANONICAL_VARS = 1 << 13; + + /// Does this have any `ReLateBound` regions? Used to check + /// if a global bound is safe to evaluate. + const HAS_RE_LATE_BOUND = 1 << 14; + const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits | TypeFlags::HAS_SELF.bits | TypeFlags::HAS_RE_EARLY_BOUND.bits; @@ -469,8 +482,10 @@ bitflags! { TypeFlags::HAS_TY_ERR.bits | TypeFlags::HAS_PROJECTION.bits | TypeFlags::HAS_TY_CLOSURE.bits | - TypeFlags::HAS_LOCAL_NAMES.bits | - TypeFlags::KEEP_IN_LOCAL_TCX.bits; + TypeFlags::HAS_FREE_LOCAL_NAMES.bits | + TypeFlags::KEEP_IN_LOCAL_TCX.bits | + TypeFlags::HAS_CANONICAL_VARS.bits | + TypeFlags::HAS_RE_LATE_BOUND.bits; } } @@ -478,15 +493,42 @@ pub struct TyS<'tcx> { pub sty: TypeVariants<'tcx>, pub flags: TypeFlags, - // the maximal depth of any bound regions appearing in this type. 
- region_depth: u32, + /// This is a kind of confusing thing: it stores the smallest + /// binder such that + /// + /// (a) the binder itself captures nothing but + /// (b) all the late-bound things within the type are captured + /// by some sub-binder. + /// + /// So, for a type without any late-bound things, like `u32`, this + /// will be INNERMOST, because that is the innermost binder that + /// captures nothing. But for a type `&'D u32`, where `'D` is a + /// late-bound region with debruijn index D, this would be D+1 -- + /// the binder itself does not capture D, but D is captured by an + /// inner binder. + /// + /// We call this concept an "exclusive" binder D (because all + /// debruijn indices within the type are contained within `0..D` + /// (exclusive)). + outer_exclusive_binder: ty::DebruijnIndex, +} + +impl<'tcx> Ord for TyS<'tcx> { + fn cmp(&self, other: &TyS<'tcx>) -> Ordering { + self.sty.cmp(&other.sty) + } +} + +impl<'tcx> PartialOrd for TyS<'tcx> { + fn partial_cmp(&self, other: &TyS<'tcx>) -> Option { + Some(self.sty.cmp(&other.sty)) + } } impl<'tcx> PartialEq for TyS<'tcx> { #[inline] fn eq(&self, other: &TyS<'tcx>) -> bool { - // (self as *const _) == (other as *const _) - (self as *const TyS<'tcx>) == (other as *const TyS<'tcx>) + ptr::eq(self, other) } } impl<'tcx> Eq for TyS<'tcx> {} @@ -509,7 +551,7 @@ impl<'tcx> TyS<'tcx> { TypeVariants::TyInfer(InferTy::FloatVar(_)) | TypeVariants::TyInfer(InferTy::FreshIntTy(_)) | TypeVariants::TyInfer(InferTy::FreshFloatTy(_)) => true, - TypeVariants::TyRef(_, x) => x.ty.is_primitive_ty(), + TypeVariants::TyRef(_, x, _) => x.is_primitive_ty(), _ => false, } } @@ -528,9 +570,9 @@ impl<'tcx> TyS<'tcx> { } } -impl<'gcx> HashStable> for ty::TyS<'gcx> { +impl<'a, 'gcx> HashStable> for ty::TyS<'gcx> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let ty::TyS { ref sty, @@ -538,7 +580,8 @@ impl<'gcx> HashStable> for 
ty::TyS<'gcx> { // The other fields just provide fast access to information that is // also contained in `sty`, so no need to hash them. flags: _, - region_depth: _, + + outer_exclusive_binder: _, } = *self; sty.hash_stable(hcx, hasher); @@ -550,38 +593,117 @@ pub type Ty<'tcx> = &'tcx TyS<'tcx>; impl<'tcx> serialize::UseSpecializedEncodable for Ty<'tcx> {} impl<'tcx> serialize::UseSpecializedDecodable for Ty<'tcx> {} +pub type CanonicalTy<'gcx> = Canonical<'gcx, Ty<'gcx>>; + +extern { + /// A dummy type used to force Slice to by unsized without requiring fat pointers + type OpaqueSliceContents; +} + /// A wrapper for slices with the additional invariant /// that the slice is interned and no other slice with /// the same contents can exist in the same context. -/// This means we can use pointer + length for both +/// This means we can use pointer for both /// equality comparisons and hashing. -#[derive(Debug, RustcEncodable)] -pub struct Slice([T]); +#[repr(C)] +pub struct Slice { + len: usize, + data: [T; 0], + opaque: OpaqueSliceContents, +} -impl PartialEq for Slice { +unsafe impl Sync for Slice {} + +impl Slice { #[inline] - fn eq(&self, other: &Slice) -> bool { - (&self.0 as *const [T]) == (&other.0 as *const [T]) + fn from_arena<'tcx>(arena: &'tcx SyncDroplessArena, slice: &[T]) -> &'tcx Slice { + assert!(!mem::needs_drop::()); + assert!(mem::size_of::() != 0); + assert!(slice.len() != 0); + + // Align up the size of the len (usize) field + let align = mem::align_of::(); + let align_mask = align - 1; + let offset = mem::size_of::(); + let offset = (offset + align_mask) & !align_mask; + + let size = offset + slice.len() * mem::size_of::(); + + let mem = arena.alloc_raw( + size, + cmp::max(mem::align_of::(), mem::align_of::())); + unsafe { + let result = &mut *(mem.as_mut_ptr() as *mut Slice); + // Write the length + result.len = slice.len(); + + // Write the elements + let arena_slice = slice::from_raw_parts_mut(result.data.as_mut_ptr(), result.len); + 
arena_slice.copy_from_slice(slice); + + result + } } } -impl Eq for Slice {} + +impl fmt::Debug for Slice { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + (**self).fmt(f) + } +} + +impl Encodable for Slice { + #[inline] + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + (**self).encode(s) + } +} + +impl Ord for Slice where T: Ord { + fn cmp(&self, other: &Slice) -> Ordering { + if self == other { Ordering::Equal } else { + <[T] as Ord>::cmp(&**self, &**other) + } + } +} + +impl PartialOrd for Slice where T: PartialOrd { + fn partial_cmp(&self, other: &Slice) -> Option { + if self == other { Some(Ordering::Equal) } else { + <[T] as PartialOrd>::partial_cmp(&**self, &**other) + } + } +} + +impl PartialEq for Slice { + #[inline] + fn eq(&self, other: &Slice) -> bool { + ptr::eq(self, other) + } +} +impl Eq for Slice {} impl Hash for Slice { + #[inline] fn hash(&self, s: &mut H) { - (self.as_ptr(), self.len()).hash(s) + (self as *const Slice).hash(s) } } impl Deref for Slice { type Target = [T]; + #[inline(always)] fn deref(&self) -> &[T] { - &self.0 + unsafe { + slice::from_raw_parts(self.data.as_ptr(), self.len) + } } } impl<'a, T> IntoIterator for &'a Slice { type Item = &'a T; type IntoIter = <&'a [T] as IntoIterator>::IntoIter; + #[inline(always)] fn into_iter(self) -> Self::IntoIter { self[..].iter() } @@ -590,9 +712,14 @@ impl<'a, T> IntoIterator for &'a Slice { impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice> {} impl Slice { + #[inline(always)] pub fn empty<'a>() -> &'a Slice { + #[repr(align(64), C)] + struct EmptySlice([u8; 64]); + static EMPTY_SLICE: EmptySlice = EmptySlice([0; 64]); + assert!(mem::align_of::() <= 64); unsafe { - mem::transmute(slice::from_raw_parts(0x1 as *const T, 0)) + &*(&EMPTY_SLICE as *const _ as *const Slice) } } } @@ -685,155 +812,164 @@ pub struct ClosureUpvar<'tcx> { pub ty: Ty<'tcx>, } -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Eq)] pub enum IntVarValue { 
IntType(ast::IntTy), UintType(ast::UintTy), } -#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] -pub struct TypeParameterDef { - pub name: Name, - pub def_id: DefId, - pub index: u32, - pub has_default: bool, - pub object_lifetime_default: ObjectLifetimeDefault, - - /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute - /// on generic parameter `T`, asserts data behind the parameter - /// `T` won't be accessed during the parent type's `Drop` impl. - pub pure_wrt_drop: bool, - - pub synthetic: Option, -} - -#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] -pub struct RegionParameterDef { - pub name: Name, - pub def_id: DefId, - pub index: u32, - - /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute - /// on generic parameter `'a`, asserts data of lifetime `'a` - /// won't be accessed during the parent type's `Drop` impl. - pub pure_wrt_drop: bool, -} - -impl RegionParameterDef { - pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion { - ty::EarlyBoundRegion { - def_id: self.def_id, - index: self.index, - name: self.name, - } - } - - pub fn to_bound_region(&self) -> ty::BoundRegion { - self.to_early_bound_region_data().to_bound_region() - } -} +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct FloatVarValue(pub ast::FloatTy); impl ty::EarlyBoundRegion { pub fn to_bound_region(&self) -> ty::BoundRegion { ty::BoundRegion::BrNamed(self.def_id, self.name) } + + /// Does this early bound region have a name? Early bound regions normally + /// always have names except when using anonymous lifetimes (`'_`). 
+ pub fn has_name(&self) -> bool { + self.name != keywords::UnderscoreLifetime.name().as_interned_str() + } +} + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub enum GenericParamDefKind { + Lifetime, + Type { + has_default: bool, + object_lifetime_default: ObjectLifetimeDefault, + synthetic: Option, + } +} + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct GenericParamDef { + pub name: InternedString, + pub def_id: DefId, + pub index: u32, + + /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute + /// on generic parameter `'a`/`T`, asserts data behind the parameter + /// `'a`/`T` won't be accessed during the parent type's `Drop` impl. + pub pure_wrt_drop: bool, + + pub kind: GenericParamDefKind, +} + +impl GenericParamDef { + pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion { + match self.kind { + GenericParamDefKind::Lifetime => { + ty::EarlyBoundRegion { + def_id: self.def_id, + index: self.index, + name: self.name, + } + } + _ => bug!("cannot convert a non-lifetime parameter def to an early bound region") + } + } + + pub fn to_bound_region(&self) -> ty::BoundRegion { + match self.kind { + GenericParamDefKind::Lifetime => { + self.to_early_bound_region_data().to_bound_region() + } + _ => bug!("cannot convert a non-lifetime parameter def to an early bound region") + } + } +} + +pub struct GenericParamCount { + pub lifetimes: usize, + pub types: usize, } /// Information about the formal type/lifetime parameters associated /// with an item or method. Analogous to hir::Generics. /// -/// Note that in the presence of a `Self` parameter, the ordering here -/// is different from the ordering in a Substs. 
Substs are ordered as -/// Self, *Regions, *Other Type Params, (...child generics) -/// while this struct is ordered as -/// regions = Regions -/// types = [Self, *Other Type Params] +/// The ordering of parameters is the same as in Subst (excluding child generics): +/// Self (optionally), Lifetime params..., Type params... #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct Generics { pub parent: Option, - pub parent_regions: u32, - pub parent_types: u32, - pub regions: Vec, - pub types: Vec, + pub parent_count: usize, + pub params: Vec, - /// Reverse map to each `TypeParameterDef`'s `index` field, from - /// `def_id.index` (`def_id.krate` is the same as the item's). - pub type_param_to_index: BTreeMap, + /// Reverse map to the `index` field of each `GenericParamDef` + pub param_def_id_to_index: FxHashMap, pub has_self: bool, pub has_late_bound_regions: Option, } impl<'a, 'gcx, 'tcx> Generics { - pub fn parent_count(&self) -> usize { - self.parent_regions as usize + self.parent_types as usize - } - - pub fn own_count(&self) -> usize { - self.regions.len() + self.types.len() - } - pub fn count(&self) -> usize { - self.parent_count() + self.own_count() + self.parent_count + self.params.len() + } + + pub fn own_counts(&self) -> GenericParamCount { + // We could cache this as a property of `GenericParamCount`, but + // the aim is to refactor this away entirely eventually and the + // presence of this method will be a constant reminder. 
+ let mut own_counts = GenericParamCount { + lifetimes: 0, + types: 0, + }; + + for param in &self.params { + match param.kind { + GenericParamDefKind::Lifetime => own_counts.lifetimes += 1, + GenericParamDefKind::Type {..} => own_counts.types += 1, + }; + } + + own_counts + } + + pub fn requires_monomorphization(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { + for param in &self.params { + match param.kind { + GenericParamDefKind::Type {..} => return true, + GenericParamDefKind::Lifetime => {} + } + } + if let Some(parent_def_id) = self.parent { + let parent = tcx.generics_of(parent_def_id); + parent.requires_monomorphization(tcx) + } else { + false + } } pub fn region_param(&'tcx self, param: &EarlyBoundRegion, tcx: TyCtxt<'a, 'gcx, 'tcx>) - -> &'tcx RegionParameterDef + -> &'tcx GenericParamDef { - if let Some(index) = param.index.checked_sub(self.parent_count() as u32) { - &self.regions[index as usize - self.has_self as usize] + if let Some(index) = param.index.checked_sub(self.parent_count as u32) { + let param = &self.params[index as usize]; + match param.kind { + ty::GenericParamDefKind::Lifetime => param, + _ => bug!("expected lifetime parameter, but found another generic parameter") + } } else { tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) .region_param(param, tcx) } } - /// Returns the `TypeParameterDef` associated with this `ParamTy`. + /// Returns the `GenericParamDef` associated with this `ParamTy`. pub fn type_param(&'tcx self, param: &ParamTy, tcx: TyCtxt<'a, 'gcx, 'tcx>) - -> &TypeParameterDef { - if let Some(idx) = param.idx.checked_sub(self.parent_count() as u32) { - // non-Self type parameters are always offset by exactly - // `self.regions.len()`. 
In the absence of a Self, this is obvious, - // but even in the presence of a `Self` we just have to "compensate" - // for the regions: - // - // Without a `Self` (or in a nested generics that doesn't have - // a `Self` in itself, even through it parent does), for example - // for `fn foo<'a, T1, T2>()`, the situation is: - // Substs: - // 0 1 2 - // 'a T1 T2 - // generics.types: - // 0 1 - // T1 T2 - // - // And with a `Self`, for example for `trait Foo<'a, 'b, T1, T2>`, the - // situation is: - // Substs: - // 0 1 2 3 4 - // Self 'a 'b T1 T2 - // generics.types: - // 0 1 2 - // Self T1 T2 - // - // And it can be seen that in both cases, to move from a substs - // offset to a generics offset you just have to offset by the - // number of regions. - let type_param_offset = self.regions.len(); - - let has_self = self.has_self && self.parent.is_none(); - let is_separated_self = type_param_offset != 0 && idx == 0 && has_self; - - if let Some(idx) = (idx as usize).checked_sub(type_param_offset) { - assert!(!is_separated_self, "found a Self after type_param_offset"); - &self.types[idx] - } else { - assert!(is_separated_self, "non-Self param before type_param_offset"); - &self.types[0] + -> &'tcx GenericParamDef { + if let Some(index) = param.idx.checked_sub(self.parent_count as u32) { + let param = &self.params[index as usize]; + match param.kind { + ty::GenericParamDefKind::Type {..} => param, + _ => bug!("expected type parameter, but found another generic parameter") } } else { tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) @@ -910,9 +1046,6 @@ pub enum Predicate<'tcx> { /// would be the type parameters. Trait(PolyTraitPredicate<'tcx>), - /// where `T1 == T2`. 
- Equate(PolyEquatePredicate<'tcx>), - /// where 'a : 'b RegionOutlives(PolyRegionOutlivesPredicate<'tcx>), @@ -941,6 +1074,22 @@ pub enum Predicate<'tcx> { ConstEvaluatable(DefId, &'tcx Substs<'tcx>), } +/// The crate outlives map is computed during typeck and contains the +/// outlives of every item in the local crate. You should not use it +/// directly, because to do so will make your pass dependent on the +/// HIR of every item in the local crate. Instead, use +/// `tcx.inferred_outlives_of()` to get the outlives for a *particular* +/// item. +pub struct CratePredicatesMap<'tcx> { + /// For each struct with outlive bounds, maps to a vector of the + /// predicate of its outlive bounds. If an item has no outlives + /// bounds, it will have no entry. + pub predicates: FxHashMap>>>, + + /// An empty vector, useful for cloning. + pub empty_predicate: Lrc>>, +} + impl<'tcx> AsRef> for Predicate<'tcx> { fn as_ref(&self) -> &Predicate<'tcx> { self @@ -1017,20 +1166,18 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { // from the substitution and the value being substituted into, and // this trick achieves that). 
- let substs = &trait_ref.0.substs; + let substs = &trait_ref.skip_binder().substs; match *self { - Predicate::Trait(ty::Binder(ref data)) => - Predicate::Trait(ty::Binder(data.subst(tcx, substs))), - Predicate::Equate(ty::Binder(ref data)) => - Predicate::Equate(ty::Binder(data.subst(tcx, substs))), - Predicate::Subtype(ty::Binder(ref data)) => - Predicate::Subtype(ty::Binder(data.subst(tcx, substs))), - Predicate::RegionOutlives(ty::Binder(ref data)) => - Predicate::RegionOutlives(ty::Binder(data.subst(tcx, substs))), - Predicate::TypeOutlives(ty::Binder(ref data)) => - Predicate::TypeOutlives(ty::Binder(data.subst(tcx, substs))), - Predicate::Projection(ty::Binder(ref data)) => - Predicate::Projection(ty::Binder(data.subst(tcx, substs))), + Predicate::Trait(ref binder) => + Predicate::Trait(binder.map_bound(|data| data.subst(tcx, substs))), + Predicate::Subtype(ref binder) => + Predicate::Subtype(binder.map_bound(|data| data.subst(tcx, substs))), + Predicate::RegionOutlives(ref binder) => + Predicate::RegionOutlives(binder.map_bound(|data| data.subst(tcx, substs))), + Predicate::TypeOutlives(ref binder) => + Predicate::TypeOutlives(binder.map_bound(|data| data.subst(tcx, substs))), + Predicate::Projection(ref binder) => + Predicate::Projection(binder.map_bound(|data| data.subst(tcx, substs))), Predicate::WellFormed(data) => Predicate::WellFormed(data.subst(tcx, substs)), Predicate::ObjectSafe(trait_def_id) => @@ -1066,20 +1213,19 @@ impl<'tcx> TraitPredicate<'tcx> { impl<'tcx> PolyTraitPredicate<'tcx> { pub fn def_id(&self) -> DefId { // ok to skip binder since trait def-id does not care about regions - self.0.def_id() + self.skip_binder().def_id() } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct EquatePredicate<'tcx>(pub Ty<'tcx>, pub Ty<'tcx>); // `0 == 1` -pub type PolyEquatePredicate<'tcx> = ty::Binder>; - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, 
Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct OutlivesPredicate(pub A, pub B); // `A : B` pub type PolyOutlivesPredicate = ty::Binder>; -pub type PolyRegionOutlivesPredicate<'tcx> = PolyOutlivesPredicate, - ty::Region<'tcx>>; -pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate, ty::Region<'tcx>>; +pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate, + ty::Region<'tcx>>; +pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate, + ty::Region<'tcx>>; +pub type PolyRegionOutlivesPredicate<'tcx> = ty::Binder>; +pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder>; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct SubtypePredicate<'tcx> { @@ -1110,17 +1256,31 @@ pub struct ProjectionPredicate<'tcx> { pub type PolyProjectionPredicate<'tcx> = Binder>; impl<'tcx> PolyProjectionPredicate<'tcx> { + /// Returns the def-id of the associated item being projected. + pub fn item_def_id(&self) -> DefId { + self.skip_binder().projection_ty.item_def_id + } + pub fn to_poly_trait_ref(&self, tcx: TyCtxt) -> PolyTraitRef<'tcx> { // Note: unlike with TraitRef::to_poly_trait_ref(), // self.0.trait_ref is permitted to have escaping regions. // This is because here `self` has a `Binder` and so does our // return value, so we are preserving the number of binding // levels. - ty::Binder(self.0.projection_ty.trait_ref(tcx)) + self.map_bound(|predicate| predicate.projection_ty.trait_ref(tcx)) } pub fn ty(&self) -> Binder> { - Binder(self.skip_binder().ty) // preserves binding levels + self.map_bound(|predicate| predicate.ty) + } + + /// The DefId of the TraitItem for the associated type. + /// + /// Note that this is not the DefId of the TraitRef containing this + /// associated type, which is in tcx.associated_item(projection_def_id()).container. 
+ pub fn projection_def_id(&self) -> DefId { + // ok to skip binder since trait def-id does not care about regions + self.skip_binder().projection_ty.item_def_id } } @@ -1130,8 +1290,7 @@ pub trait ToPolyTraitRef<'tcx> { impl<'tcx> ToPolyTraitRef<'tcx> for TraitRef<'tcx> { fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { - assert!(!self.has_escaping_regions()); - ty::Binder(self.clone()) + ty::Binder::dummy(self.clone()) } } @@ -1147,12 +1306,7 @@ pub trait ToPredicate<'tcx> { impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> { fn to_predicate(&self) -> Predicate<'tcx> { - // we're about to add a binder, so let's check that we don't - // accidentally capture anything, or else that might be some - // weird debruijn accounting. - assert!(!self.has_escaping_regions()); - - ty::Predicate::Trait(ty::Binder(ty::TraitPredicate { + ty::Predicate::Trait(ty::Binder::dummy(ty::TraitPredicate { trait_ref: self.clone() })) } @@ -1164,12 +1318,6 @@ impl<'tcx> ToPredicate<'tcx> for PolyTraitRef<'tcx> { } } -impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx> { - Predicate::Equate(self.clone()) - } -} - impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> { fn to_predicate(&self) -> Predicate<'tcx> { Predicate::RegionOutlives(self.clone()) @@ -1197,20 +1345,19 @@ impl<'tcx> Predicate<'tcx> { ty::Predicate::Trait(ref data) => { data.skip_binder().input_types().collect() } - ty::Predicate::Equate(ty::Binder(ref data)) => { - vec![data.0, data.1] - } - ty::Predicate::Subtype(ty::Binder(SubtypePredicate { a, b, a_is_expected: _ })) => { + ty::Predicate::Subtype(binder) => { + let SubtypePredicate { a, b, a_is_expected: _ } = binder.skip_binder(); vec![a, b] } - ty::Predicate::TypeOutlives(ty::Binder(ref data)) => { - vec![data.0] + ty::Predicate::TypeOutlives(binder) => { + vec![binder.skip_binder().0] } ty::Predicate::RegionOutlives(..) 
=> { vec![] } ty::Predicate::Projection(ref data) => { - data.0.projection_ty.substs.types().chain(Some(data.0.ty)).collect() + let inner = data.skip_binder(); + inner.projection_ty.substs.types().chain(Some(inner.ty)).collect() } ty::Predicate::WellFormed(data) => { vec![data] @@ -1240,7 +1387,6 @@ impl<'tcx> Predicate<'tcx> { Some(t.to_poly_trait_ref()) } Predicate::Projection(..) | - Predicate::Equate(..) | Predicate::Subtype(..) | Predicate::RegionOutlives(..) | Predicate::WellFormed(..) | @@ -1260,7 +1406,6 @@ impl<'tcx> Predicate<'tcx> { } Predicate::Trait(..) | Predicate::Projection(..) | - Predicate::Equate(..) | Predicate::Subtype(..) | Predicate::RegionOutlives(..) | Predicate::WellFormed(..) | @@ -1307,6 +1452,104 @@ impl<'tcx> InstantiatedPredicates<'tcx> { } } +/// "Universes" are used during type- and trait-checking in the +/// presence of `for<..>` binders to control what sets of names are +/// visible. Universes are arranged into a tree: the root universe +/// contains names that are always visible. But when you enter into +/// some subuniverse, then it may add names that are only visible +/// within that subtree (but it can still name the names of its +/// ancestor universes). +/// +/// To make this more concrete, consider this program: +/// +/// ``` +/// struct Foo { } +/// fn bar(x: T) { +/// let y: for<'a> fn(&'a u8, Foo) = ...; +/// } +/// ``` +/// +/// The struct name `Foo` is in the root universe U0. But the type +/// parameter `T`, introduced on `bar`, is in a subuniverse U1 -- +/// i.e., within `bar`, we can name both `T` and `Foo`, but outside of +/// `bar`, we cannot name `T`. Then, within the type of `y`, the +/// region `'a` is in a subuniverse U2 of U1, because we can name it +/// inside the fn type but not outside. +/// +/// Universes are related to **skolemization** -- which is a way of +/// doing type- and trait-checking around these "forall" binders (also +/// called **universal quantification**). 
The idea is that when, in +/// the body of `bar`, we refer to `T` as a type, we aren't referring +/// to any type in particular, but rather a kind of "fresh" type that +/// is distinct from all other types we have actually declared. This +/// is called a **skolemized** type, and we use universes to talk +/// about this. In other words, a type name in universe 0 always +/// corresponds to some "ground" type that the user declared, but a +/// type name in a non-zero universe is a skolemized type -- an +/// idealized representative of "types in general" that we use for +/// checking generic functions. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct UniverseIndex(u32); + +impl UniverseIndex { + /// The root universe, where things that the user defined are + /// visible. + pub const ROOT: Self = UniverseIndex(0); + + /// The "max universe" -- this isn't really a valid universe, but + /// it's useful sometimes as a "starting value" when you are + /// taking the minimum of a (non-empty!) set of universes. + pub const MAX: Self = UniverseIndex(::std::u32::MAX); + + /// Creates a universe index from the given integer. Not to be + /// used lightly lest you pick a bad value. But sometimes we + /// convert universe indicies into integers and back for various + /// reasons. + pub fn from_u32(index: u32) -> Self { + UniverseIndex(index) + } + + /// A "subuniverse" corresponds to being inside a `forall` quantifier. + /// So, for example, suppose we have this type in universe `U`: + /// + /// ``` + /// for<'a> fn(&'a u32) + /// ``` + /// + /// Once we "enter" into this `for<'a>` quantifier, we are in a + /// subuniverse of `U` -- in this new universe, we can name the + /// region `'a`, but that region was not nameable from `U` because + /// it was not in scope there. 
+ pub fn subuniverse(self) -> UniverseIndex { + UniverseIndex(self.0.checked_add(1).unwrap()) + } + + /// True if the names in this universe are a subset of the names in `other`. + pub fn is_subset_of(self, other: UniverseIndex) -> bool { + self.0 <= other.0 + } + + pub fn as_u32(&self) -> u32 { + self.0 + } + + pub fn as_usize(&self) -> usize { + self.0 as usize + } +} + +impl fmt::Debug for UniverseIndex { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "U{}", self.as_u32()) + } +} + +impl From for UniverseIndex { + fn from(index: u32) -> Self { + UniverseIndex(index) + } +} + /// When type checking, we use the `ParamEnv` to track /// details about the set of where-clauses that are in scope at this /// particular point. @@ -1317,39 +1560,91 @@ pub struct ParamEnv<'tcx> { /// into Obligations, and elaborated and normalized. pub caller_bounds: &'tcx Slice>, - /// Typically, this is `Reveal::UserFacing`, but during trans we + /// Typically, this is `Reveal::UserFacing`, but during codegen we /// want `Reveal::All` -- note that this is always paired with an /// empty environment. To get that, use `ParamEnv::reveal()`. pub reveal: traits::Reveal, } impl<'tcx> ParamEnv<'tcx> { - /// Creates a suitable environment in which to perform trait - /// queries on the given value. This will either be `self` *or* - /// the empty environment, depending on whether `value` references - /// type parameters that are in scope. (If it doesn't, then any - /// judgements should be completely independent of the context, - /// and hence we can safely use the empty environment so as to - /// enable more sharing across functions.) + /// Construct a trait environment suitable for contexts where + /// there are no where clauses in scope. Hidden types (like `impl + /// Trait`) are left hidden, so this is suitable for ordinary + /// type-checking. 
+ pub fn empty() -> Self { + Self::new(ty::Slice::empty(), Reveal::UserFacing) + } + + /// Construct a trait environment with no where clauses in scope + /// where the values of all `impl Trait` and other hidden types + /// are revealed. This is suitable for monomorphized, post-typeck + /// environments like codegen or doing optimizations. /// - /// NB: This is a mildly dubious thing to do, in that a function - /// (or other environment) might have wacky where-clauses like + /// NB. If you want to have predicates in scope, use `ParamEnv::new`, + /// or invoke `param_env.with_reveal_all()`. + pub fn reveal_all() -> Self { + Self::new(ty::Slice::empty(), Reveal::All) + } + + /// Construct a trait environment with the given set of predicates. + pub fn new(caller_bounds: &'tcx ty::Slice>, + reveal: Reveal) + -> Self { + ty::ParamEnv { caller_bounds, reveal } + } + + /// Returns a new parameter environment with the same clauses, but + /// which "reveals" the true results of projections in all cases + /// (even for associated types that are specializable). This is + /// the desired behavior during codegen and certain other special + /// contexts; normally though we want to use `Reveal::UserFacing`, + /// which is the default. + pub fn with_reveal_all(self) -> Self { + ty::ParamEnv { reveal: Reveal::All, ..self } + } + + /// Returns this same environment but with no caller bounds. + pub fn without_caller_bounds(self) -> Self { + ty::ParamEnv { caller_bounds: ty::Slice::empty(), ..self } + } + + /// Creates a suitable environment in which to perform trait + /// queries on the given value. When type-checking, this is simply + /// the pair of the environment plus value. But when reveal is set to + /// All, then if `value` does not reference any type parameters, we will + /// pair it with the empty environment. This improves caching and is generally + /// invisible. 
+ /// + /// NB: We preserve the environment when type-checking because it + /// is possible for the user to have wacky where-clauses like /// `where Box: Copy`, which are clearly never - /// satisfiable. The code will at present ignore these, - /// effectively, when type-checking the body of said - /// function. This preserves existing behavior in any - /// case. --nmatsakis + /// satisfiable. We generally want to behave as if they were true, + /// although the surrounding function is never reachable. pub fn and>(self, value: T) -> ParamEnvAnd<'tcx, T> { - assert!(!value.needs_infer()); - if value.has_param_types() || value.has_self_ty() { - ParamEnvAnd { - param_env: self, - value, + match self.reveal { + Reveal::UserFacing => { + ParamEnvAnd { + param_env: self, + value, + } } - } else { - ParamEnvAnd { - param_env: ParamEnv::empty(self.reveal), - value, + + Reveal::All => { + if value.has_skol() + || value.needs_infer() + || value.has_param_types() + || value.has_self_ty() + { + ParamEnvAnd { + param_env: self, + value, + } + } else { + ParamEnvAnd { + param_env: self.without_caller_bounds(), + value, + } + } } } } @@ -1367,11 +1662,11 @@ impl<'tcx, T> ParamEnvAnd<'tcx, T> { } } -impl<'gcx, T> HashStable> for ParamEnvAnd<'gcx, T> - where T: HashStable> +impl<'a, 'gcx, T> HashStable> for ParamEnvAnd<'gcx, T> + where T: HashStable> { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let ParamEnvAnd { ref param_env, @@ -1433,7 +1728,7 @@ pub enum VariantDiscr { #[derive(Debug)] pub struct FieldDef { pub did: DefId, - pub name: Name, + pub ident: Ident, pub vis: Visibility, } @@ -1448,10 +1743,24 @@ pub struct AdtDef { pub repr: ReprOptions, } +impl PartialOrd for AdtDef { + fn partial_cmp(&self, other: &AdtDef) -> Option { + Some(self.cmp(&other)) + } +} + +/// There should be only one AdtDef for each `did`, therefore +/// it is fine to implement `Ord` only based on `did`. 
+impl Ord for AdtDef { + fn cmp(&self, other: &AdtDef) -> Ordering { + self.did.cmp(&other.did) + } +} + impl PartialEq for AdtDef { // AdtDef are always interned and this is part of TyS equality #[inline] - fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ } + fn eq(&self, other: &Self) -> bool { ptr::eq(self, other) } } impl Eq for AdtDef {} @@ -1472,39 +1781,63 @@ impl<'tcx> serialize::UseSpecializedEncodable for &'tcx AdtDef { impl<'tcx> serialize::UseSpecializedDecodable for &'tcx AdtDef {} -impl<'gcx> HashStable> for AdtDef { +impl<'a> HashStable> for AdtDef { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - let ty::AdtDef { - did, - ref variants, - ref flags, - ref repr, - } = *self; + thread_local! { + static CACHE: RefCell> = + RefCell::new(FxHashMap()); + } - did.hash_stable(hcx, hasher); - variants.hash_stable(hcx, hasher); - flags.hash_stable(hcx, hasher); - repr.hash_stable(hcx, hasher); + let hash: Fingerprint = CACHE.with(|cache| { + let addr = self as *const AdtDef as usize; + *cache.borrow_mut().entry(addr).or_insert_with(|| { + let ty::AdtDef { + did, + ref variants, + ref flags, + ref repr, + } = *self; + + let mut hasher = StableHasher::new(); + did.hash_stable(hcx, &mut hasher); + variants.hash_stable(hcx, &mut hasher); + flags.hash_stable(hcx, &mut hasher); + repr.hash_stable(hcx, &mut hasher); + + hasher.finish() + }) + }); + + hash.hash_stable(hcx, hasher); } } -#[derive(Copy, Clone, Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum AdtKind { Struct, Union, Enum } +impl Into for AdtKind { + fn into(self) -> DataTypeKind { + match self { + AdtKind::Struct => DataTypeKind::Struct, + AdtKind::Union => DataTypeKind::Union, + AdtKind::Enum => DataTypeKind::Enum, + } + } +} + bitflags! 
{ #[derive(RustcEncodable, RustcDecodable, Default)] pub struct ReprFlags: u8 { const IS_C = 1 << 0; - const IS_PACKED = 1 << 1; - const IS_SIMD = 1 << 2; + const IS_SIMD = 1 << 1; + const IS_TRANSPARENT = 1 << 2; // Internal only for now. If true, don't reorder fields. const IS_LINEAR = 1 << 3; // Any of these flags being set prevent field reordering optimisation. const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits | - ReprFlags::IS_PACKED.bits | ReprFlags::IS_SIMD.bits | ReprFlags::IS_LINEAR.bits; } @@ -1521,11 +1854,13 @@ impl_stable_hash_for!(struct ReprFlags { pub struct ReprOptions { pub int: Option, pub align: u32, + pub pack: u32, pub flags: ReprFlags, } impl_stable_hash_for!(struct ReprOptions { align, + pack, int, flags }); @@ -1535,11 +1870,20 @@ impl ReprOptions { let mut flags = ReprFlags::empty(); let mut size = None; let mut max_align = 0; + let mut min_pack = 0; for attr in tcx.get_attrs(did).iter() { for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) { flags.insert(match r { attr::ReprC => ReprFlags::IS_C, - attr::ReprPacked => ReprFlags::IS_PACKED, + attr::ReprPacked(pack) => { + min_pack = if min_pack > 0 { + cmp::min(pack, min_pack) + } else { + pack + }; + ReprFlags::empty() + }, + attr::ReprTransparent => ReprFlags::IS_TRANSPARENT, attr::ReprSimd => ReprFlags::IS_SIMD, attr::ReprInt(i) => { size = Some(i); @@ -1557,7 +1901,7 @@ impl ReprOptions { if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.item_path_str(did))) { flags.insert(ReprFlags::IS_LINEAR); } - ReprOptions { int: size, align: max_align, flags: flags } + ReprOptions { int: size, align: max_align, pack: min_pack, flags: flags } } #[inline] @@ -1565,7 +1909,9 @@ impl ReprOptions { #[inline] pub fn c(&self) -> bool { self.flags.contains(ReprFlags::IS_C) } #[inline] - pub fn packed(&self) -> bool { self.flags.contains(ReprFlags::IS_PACKED) } + pub fn packed(&self) -> bool { self.pack > 0 } + #[inline] + pub fn transparent(&self) -> bool { 
self.flags.contains(ReprFlags::IS_TRANSPARENT) } #[inline] pub fn linear(&self) -> bool { self.flags.contains(ReprFlags::IS_LINEAR) } @@ -1579,6 +1925,12 @@ impl ReprOptions { pub fn inhibit_enum_layout_opt(&self) -> bool { self.c() || self.int.is_some() } + + /// Returns true if this `#[repr()]` should inhibit struct field reordering + /// optimizations, such as with repr(C) or repr(packed(1)). + pub fn inhibit_struct_field_reordering_opt(&self) -> bool { + !(self.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty() || (self.pack == 1) + } } impl<'a, 'gcx, 'tcx> AdtDef { @@ -1733,27 +2085,65 @@ impl<'a, 'gcx, 'tcx> AdtDef { } #[inline] - pub fn discriminants(&'a self, tcx: TyCtxt<'a, 'gcx, 'tcx>) - -> impl Iterator + 'a { - let param_env = ParamEnv::empty(traits::Reveal::UserFacing); + pub fn eval_explicit_discr( + &self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + expr_did: DefId, + ) -> Option> { + let param_env = ParamEnv::empty(); + let repr_type = self.repr.discr_type(); + let substs = Substs::identity_for_item(tcx.global_tcx(), expr_did); + let instance = ty::Instance::new(expr_did, substs); + let cid = GlobalId { + instance, + promoted: None + }; + match tcx.const_eval(param_env.and(cid)) { + Ok(val) => { + // FIXME: Find the right type and use it instead of `val.ty` here + if let Some(b) = val.assert_bits(tcx.global_tcx(), param_env.and(val.ty)) { + trace!("discriminants: {} ({:?})", b, repr_type); + Some(Discr { + val: b, + ty: val.ty, + }) + } else { + info!("invalid enum discriminant: {:#?}", val); + ::mir::interpret::struct_error( + tcx.at(tcx.def_span(expr_did)), + "constant evaluation of enum discriminant resulted in non-integer", + ).emit(); + None + } + } + Err(err) => { + err.report_as_error( + tcx.at(tcx.def_span(expr_did)), + "could not evaluate enum discriminant", + ); + if !expr_did.is_local() { + span_bug!(tcx.def_span(expr_did), + "variant discriminant evaluation succeeded \ + in its crate but failed locally"); + } + None + } + } + } + + #[inline] + pub fn 
discriminants( + &'a self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ) -> impl Iterator> + Captures<'gcx> + 'a { let repr_type = self.repr.discr_type(); let initial = repr_type.initial_discriminant(tcx.global_tcx()); - let mut prev_discr = None::; + let mut prev_discr = None::>; self.variants.iter().map(move |v| { - let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr()); + let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx)); if let VariantDiscr::Explicit(expr_did) = v.discr { - let substs = Substs::identity_for_item(tcx.global_tcx(), expr_did); - match tcx.const_eval(param_env.and((expr_did, substs))) { - Ok(&ty::Const { val: ConstVal::Integral(v), .. }) => { - discr = v; - } - err => { - if !expr_did.is_local() { - span_bug!(tcx.def_span(expr_did), - "variant discriminant evaluation succeeded \ - in its crate but failed locally: {:?}", err); - } - } + if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) { + discr = new_discr; } } prev_discr = Some(discr); @@ -1770,51 +2160,39 @@ impl<'a, 'gcx, 'tcx> AdtDef { pub fn discriminant_for_variant(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, variant_index: usize) - -> ConstInt { - let param_env = ParamEnv::empty(traits::Reveal::UserFacing); - let repr_type = self.repr.discr_type(); - let mut explicit_value = repr_type.initial_discriminant(tcx.global_tcx()); + -> Discr<'tcx> { + let (val, offset) = self.discriminant_def_for_variant(variant_index); + let explicit_value = val + .and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did)) + .unwrap_or_else(|| self.repr.discr_type().initial_discriminant(tcx.global_tcx())); + explicit_value.checked_add(tcx, offset as u128).0 + } + + /// Yields a DefId for the discriminant and an offset to add to it + /// Alternatively, if there is no explicit discriminant, returns the + /// inferred discriminant directly + pub fn discriminant_def_for_variant( + &self, + variant_index: usize, + ) -> (Option, usize) { let mut explicit_index = variant_index; + let expr_did; loop { 
match self.variants[explicit_index].discr { - ty::VariantDiscr::Relative(0) => break, + ty::VariantDiscr::Relative(0) => { + expr_did = None; + break; + }, ty::VariantDiscr::Relative(distance) => { explicit_index -= distance; } - ty::VariantDiscr::Explicit(expr_did) => { - let substs = Substs::identity_for_item(tcx.global_tcx(), expr_did); - match tcx.const_eval(param_env.and((expr_did, substs))) { - Ok(&ty::Const { val: ConstVal::Integral(v), .. }) => { - explicit_value = v; - break; - } - err => { - if !expr_did.is_local() { - span_bug!(tcx.def_span(expr_did), - "variant discriminant evaluation succeeded \ - in its crate but failed locally: {:?}", err); - } - if explicit_index == 0 { - break; - } - explicit_index -= 1; - } - } + ty::VariantDiscr::Explicit(did) => { + expr_did = Some(did); + break; } } } - let discr = explicit_value.to_u128_unchecked() - .wrapping_add((variant_index - explicit_index) as u128); - match repr_type { - attr::UnsignedInt(ty) => { - ConstInt::new_unsigned_truncating(discr, ty, - tcx.sess.target.usize_ty) - } - attr::SignedInt(ty) => { - ConstInt::new_signed_truncating(discr as i128, ty, - tcx.sess.target.isize_ty) - } - } + (expr_did, variant_index - explicit_index) } pub fn destructor(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { @@ -1832,7 +2210,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { /// Due to normalization being eager, this applies even if /// the associated type is behind a pointer, e.g. issue #31299. pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx [Ty<'tcx>] { - match queries::adt_sized_constraint::try_get(tcx, DUMMY_SP, self.did) { + match tcx.try_adt_sized_constraint(DUMMY_SP, self.did) { Ok(tys) => tys, Err(mut bug) => { debug!("adt_sized_constraint: {:?} is recursive", self); @@ -1859,12 +2237,17 @@ impl<'a, 'gcx, 'tcx> AdtDef { vec![] } - TyStr | TyDynamic(..) | TySlice(_) | TyForeign(..) | TyError => { + TyStr | + TyDynamic(..) | + TySlice(_) | + TyForeign(..) | + TyError | + TyGeneratorWitness(..) 
=> { // these are never sized - return the target type vec![ty] } - TyTuple(ref tys, _) => { + TyTuple(ref tys) => { match tys.last() { None => vec![], Some(ty) => self.sized_constraint_for_ty(tcx, ty) @@ -1897,7 +2280,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { Some(x) => x, _ => return vec![ty] }; - let sized_predicate = Binder(TraitRef { + let sized_predicate = Binder::dummy(TraitRef { def_id: sized_trait, substs: tcx.mk_substs_trait(ty, &[]) }).to_predicate(); @@ -1919,32 +2302,6 @@ impl<'a, 'gcx, 'tcx> AdtDef { } } -impl<'a, 'gcx, 'tcx> VariantDef { - #[inline] - pub fn find_field_named(&self, name: ast::Name) -> Option<&FieldDef> { - self.index_of_field_named(name).map(|index| &self.fields[index]) - } - - pub fn index_of_field_named(&self, name: ast::Name) -> Option { - if let Some(index) = self.fields.iter().position(|f| f.name == name) { - return Some(index); - } - let mut ident = name.to_ident(); - while ident.ctxt != SyntaxContext::empty() { - ident.ctxt.remove_mark(); - if let Some(field) = self.fields.iter().position(|f| f.name.to_ident() == ident) { - return Some(field); - } - } - None - } - - #[inline] - pub fn field_named(&self, name: ast::Name) -> &FieldDef { - self.find_field_named(name).unwrap() - } -} - impl<'a, 'gcx, 'tcx> FieldDef { pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> { tcx.type_of(self.did).subst(tcx, subst) @@ -2047,21 +2404,6 @@ impl<'tcx> TyS<'tcx> { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum LvaluePreference { - PreferMutLvalue, - NoPreference -} - -impl LvaluePreference { - pub fn from_mutbl(m: hir::Mutability) -> Self { - match m { - hir::MutMutable => PreferMutLvalue, - hir::MutImmutable => NoPreference, - } - } -} - impl BorrowKind { pub fn from_mutbl(m: hir::Mutability) -> BorrowKind { match m { @@ -2097,7 +2439,7 @@ impl BorrowKind { #[derive(Debug, Clone)] pub enum Attributes<'gcx> { - Owned(Rc<[ast::Attribute]>), + Owned(Lrc<[ast::Attribute]>), Borrowed(&'gcx [ast::Attribute]) } 
@@ -2120,13 +2462,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Returns an iterator of the def-ids for all body-owners in this /// crate. If you would prefer to iterate over the bodies /// themselves, you can do `self.hir.krate().body_ids.iter()`. - pub fn body_owners(self) -> impl Iterator + 'a { + pub fn body_owners( + self, + ) -> impl Iterator + Captures<'tcx> + Captures<'gcx> + 'a { self.hir.krate() .body_ids .iter() .map(move |&body_id| self.hir.body_owner_def_id(body_id)) } + pub fn par_body_owners(self, f: F) { + par_iter(&self.hir.krate().body_ids).for_each(|&body_id| { + f(self.hir.body_owner_def_id(body_id)) + }); + } + pub fn expr_span(self, id: NodeId) -> Span { match self.hir.find(id) { Some(hir_map::NodeExpr(e)) => { @@ -2141,60 +2491,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn expr_is_lval(self, expr: &hir::Expr) -> bool { - match expr.node { - hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { - match path.def { - Def::Local(..) | Def::Upvar(..) | Def::Static(..) | Def::Err => true, - _ => false, - } - } - - hir::ExprType(ref e, _) => { - self.expr_is_lval(e) - } - - hir::ExprUnary(hir::UnDeref, _) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprIndex(..) => { - true - } - - // Partially qualified paths in expressions can only legally - // refer to associated items which are always rvalues. - hir::ExprPath(hir::QPath::TypeRelative(..)) | - - hir::ExprCall(..) | - hir::ExprMethodCall(..) | - hir::ExprStruct(..) | - hir::ExprTup(..) | - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprClosure(..) | - hir::ExprBlock(..) | - hir::ExprRepeat(..) | - hir::ExprArray(..) | - hir::ExprBreak(..) | - hir::ExprAgain(..) | - hir::ExprRet(..) | - hir::ExprWhile(..) | - hir::ExprLoop(..) | - hir::ExprAssign(..) | - hir::ExprInlineAsm(..) | - hir::ExprAssignOp(..) | - hir::ExprLit(_) | - hir::ExprUnary(..) | - hir::ExprBox(..) | - hir::ExprAddrOf(..) | - hir::ExprBinary(..) | - hir::ExprYield(..) 
| - hir::ExprCast(..) => { - false - } - } - } - pub fn provided_trait_methods(self, id: DefId) -> Vec { self.associated_items(id) .filter(|item| item.kind == AssociatedKind::Method && item.defaultness.has_value()) @@ -2239,10 +2535,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { (ty::AssociatedKind::Method, has_self) } hir::AssociatedItemKind::Type => (ty::AssociatedKind::Type, false), + hir::AssociatedItemKind::Existential => bug!("only impls can have existentials"), }; AssociatedItem { - name: trait_item_ref.name, + ident: trait_item_ref.ident, kind, // Visibility of trait items is inherited from their traits. vis: Visibility::from_hir(parent_vis, trait_item_ref.id.node_id, self), @@ -2264,10 +2561,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { (ty::AssociatedKind::Method, has_self) } hir::AssociatedItemKind::Type => (ty::AssociatedKind::Type, false), + hir::AssociatedItemKind::Existential => (ty::AssociatedKind::Existential, false), }; - ty::AssociatedItem { - name: impl_item_ref.name, + AssociatedItem { + ident: impl_item_ref.ident, kind, // Visibility of trait impl items doesn't matter. vis: ty::Visibility::from_hir(&impl_item_ref.vis, impl_item_ref.id.node_id, self), @@ -2278,17 +2576,30 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - #[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. 
- pub fn associated_items(self, def_id: DefId) - -> impl Iterator + 'a { + pub fn field_index(self, node_id: NodeId, tables: &TypeckTables) -> usize { + let hir_id = self.hir.node_to_hir_id(node_id); + tables.field_indices().get(hir_id).cloned().expect("no index for a field") + } + + pub fn find_field_index(self, ident: Ident, variant: &VariantDef) -> Option { + variant.fields.iter().position(|field| { + self.adjust_ident(ident, variant.did, DUMMY_NODE_ID).0 == field.ident.modern() + }) + } + + pub fn associated_items( + self, + def_id: DefId, + ) -> impl Iterator + 'a { let def_ids = self.associated_item_def_ids(def_id); - (0..def_ids.len()).map(move |i| self.associated_item(def_ids[i])) + Box::new((0..def_ids.len()).map(move |i| self.associated_item(def_ids[i]))) + as Box + 'a> } /// Returns true if the impls are the same polarity and are implementing /// a trait which contains no items pub fn impls_are_allowed_to_overlap(self, def_id1: DefId, def_id2: DefId) -> bool { - if !self.sess.features.borrow().overlapping_marker_traits { + if !self.features().overlapping_marker_traits { return false; } let trait1_is_empty = self.impl_trait_ref(def_id1) @@ -2339,7 +2650,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn item_name(self, id: DefId) -> InternedString { if id.index == CRATE_DEF_INDEX { - self.original_crate_name(id.krate).as_str() + self.original_crate_name(id.krate).as_interned_str() } else { let def_key = self.def_key(id); // The name of a StructCtor is that of its struct parent. 
@@ -2427,15 +2738,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.opt_associated_item(def_id) }; - match item { - Some(trait_item) => { - match trait_item.container { - TraitContainer(_) => None, - ImplContainer(def_id) => Some(def_id), - } + item.and_then(|trait_item| + match trait_item.container { + TraitContainer(_) => None, + ImplContainer(def_id) => Some(def_id), } - None => None - } + ) } /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err` @@ -2452,21 +2760,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // Hygienically compare a use-site name (`use_name`) for a field or an associated item with its // supposed definition name (`def_name`). The method also needs `DefId` of the supposed // definition's parent/scope to perform comparison. - pub fn hygienic_eq(self, use_name: Name, def_name: Name, def_parent_def_id: DefId) -> bool { - self.adjust(use_name, def_parent_def_id, DUMMY_NODE_ID).0 == def_name.to_ident() - } - - pub fn adjust(self, name: Name, scope: DefId, block: NodeId) -> (Ident, DefId) { - self.adjust_ident(name.to_ident(), scope, block) + pub fn hygienic_eq(self, use_name: Ident, def_name: Ident, def_parent_def_id: DefId) -> bool { + self.adjust_ident(use_name, def_parent_def_id, DUMMY_NODE_ID).0 == def_name.modern() } pub fn adjust_ident(self, mut ident: Ident, scope: DefId, block: NodeId) -> (Ident, DefId) { - let expansion = match scope.krate { - LOCAL_CRATE => self.hir.definitions().expansion(scope.index), + ident = ident.modern(); + let target_expansion = match scope.krate { + LOCAL_CRATE => self.hir.definitions().expansion_that_defined(scope.index), _ => Mark::root(), }; - let scope = match ident.ctxt.adjust(expansion) { - Some(macro_def) => self.hir.definitions().macro_def_scope(macro_def), + let scope = match ident.span.adjust(target_expansion) { + Some(actual_expansion) => + self.hir.definitions().parent_module_of_macro_def(actual_expansion), None if block == DUMMY_NODE_ID => 
DefId::local(CRATE_DEF_INDEX), // Dummy DefId None => self.hir.get_module_parent(block), }; @@ -2494,7 +2800,7 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) let parent_def_id = tcx.hir.local_def_id(parent_id); let parent_item = tcx.hir.expect_item(parent_id); match parent_item.node { - hir::ItemImpl(.., ref impl_item_refs) => { + hir::ItemKind::Impl(.., ref impl_item_refs) => { if let Some(impl_item_ref) = impl_item_refs.iter().find(|i| i.id.node_id == id) { let assoc_item = tcx.associated_item_from_impl_item_ref(parent_def_id, impl_item_ref); @@ -2503,7 +2809,7 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) } } - hir::ItemTrait(.., ref trait_item_refs) => { + hir::ItemKind::Trait(.., ref trait_item_refs) => { if let Some(trait_item_ref) = trait_item_refs.iter().find(|i| i.id.node_id == id) { let assoc_item = tcx.associated_item_from_trait_item_ref(parent_def_id, &parent_item.vis, @@ -2535,71 +2841,39 @@ fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, -> &'tcx [Ty<'tcx>] { let def = tcx.adt_def(def_id); - let result = tcx.intern_type_list(&def.variants.iter().flat_map(|v| { + let result = tcx.mk_type_list(def.variants.iter().flat_map(|v| { v.fields.last() }).flat_map(|f| { def.sized_constraint_for_ty(tcx, tcx.type_of(f.did)) - }).collect::>()); + })); debug!("adt_sized_constraint: {:?} => {:?}", def, result); result } -/// Calculates the dtorck constraint for a type. 
-fn adt_dtorck_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - def_id: DefId) - -> DtorckConstraint<'tcx> { - let def = tcx.adt_def(def_id); - let span = tcx.def_span(def_id); - debug!("dtorck_constraint: {:?}", def); - - if def.is_phantom_data() { - let result = DtorckConstraint { - outlives: vec![], - dtorck_types: vec![ - tcx.mk_param_from_def(&tcx.generics_of(def_id).types[0]) - ] - }; - debug!("dtorck_constraint: {:?} => {:?}", def, result); - return result; - } - - let mut result = def.all_fields() - .map(|field| tcx.type_of(field.did)) - .map(|fty| tcx.dtorck_constraint_for_ty(span, fty, 0, fty)) - .collect::>() - .unwrap_or(DtorckConstraint::empty()); - result.outlives.extend(tcx.destructor_constraints(def)); - result.dedup(); - - debug!("dtorck_constraint: {:?} => {:?}", def, result); - - result -} - fn associated_item_def_ids<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> Rc> { + -> Lrc> { let id = tcx.hir.as_local_node_id(def_id).unwrap(); let item = tcx.hir.expect_item(id); let vec: Vec<_> = match item.node { - hir::ItemTrait(.., ref trait_item_refs) => { + hir::ItemKind::Trait(.., ref trait_item_refs) => { trait_item_refs.iter() .map(|trait_item_ref| trait_item_ref.id) .map(|id| tcx.hir.local_def_id(id.node_id)) .collect() } - hir::ItemImpl(.., ref impl_item_refs) => { + hir::ItemKind::Impl(.., ref impl_item_refs) => { impl_item_refs.iter() .map(|impl_item_ref| impl_item_ref.id) .map(|id| tcx.hir.local_def_id(id.node_id)) .collect() } - hir::ItemTraitAlias(..) => vec![], + hir::ItemKind::TraitAlias(..) 
=> vec![], _ => span_bug!(item.span, "associated_item_def_ids: not impl or trait") }; - Rc::new(vec) + Lrc::new(vec) } fn def_span<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Span { @@ -2619,14 +2893,31 @@ fn trait_of_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Option }) } +/// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition +pub fn is_impl_trait_defn(tcx: TyCtxt, def_id: DefId) -> Option { + if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { + if let hir::map::NodeItem(item) = tcx.hir.get(node_id) { + if let hir::ItemKind::Existential(ref exist_ty) = item.node { + return exist_ty.impl_trait_fn; + } + } + } + None +} + /// See `ParamEnv` struct def'n for details. fn param_env<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ParamEnv<'tcx> { + + // The param_env of an impl Trait type is its defining function's param_env + if let Some(parent) = is_impl_trait_defn(tcx, def_id) { + return param_env(tcx, parent); + } // Compute the bounds on Self and the type parameters. - let bounds = tcx.predicates_of(def_id).instantiate_identity(tcx); - let predicates = bounds.predicates; + let InstantiatedPredicates { predicates } = + tcx.predicates_of(def_id).instantiate_identity(tcx); // Finally, we have to normalize the bounds in the environment, in // case they contain any associated type projections. This process @@ -2669,16 +2960,29 @@ fn crate_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.hir.crate_hash } -pub fn provide(providers: &mut ty::maps::Providers) { +fn instance_def_size_estimate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance_def: InstanceDef<'tcx>) + -> usize { + match instance_def { + InstanceDef::Item(..) | + InstanceDef::DropGlue(..) => { + let mir = tcx.instance_mir(instance_def); + mir.basic_blocks().iter().map(|bb| bb.statements.len()).sum() + }, + // Estimate the size of other compiler-generated shims to be 1. 
+ _ => 1 + } +} + +pub fn provide(providers: &mut ty::query::Providers) { context::provide(providers); erase_regions::provide(providers); layout::provide(providers); util::provide(providers); - *providers = ty::maps::Providers { + *providers = ty::query::Providers { associated_item, associated_item_def_ids, adt_sized_constraint, - adt_dtorck_constraint, def_span, param_env, trait_of_item, @@ -2686,6 +2990,7 @@ pub fn provide(providers: &mut ty::maps::Providers) { original_crate_name, crate_hash, trait_impls_of: trait_def::trait_impls_of_provider, + instance_def_size_estimate, ..*providers }; } @@ -2697,53 +3002,10 @@ pub fn provide(providers: &mut ty::maps::Providers) { /// (constructing this map requires touching the entire crate). #[derive(Clone, Debug)] pub struct CrateInherentImpls { - pub inherent_impls: DefIdMap>>, + pub inherent_impls: DefIdMap>>, } -/// A set of constraints that need to be satisfied in order for -/// a type to be valid for destruction. -#[derive(Clone, Debug)] -pub struct DtorckConstraint<'tcx> { - /// Types that are required to be alive in order for this - /// type to be valid for destruction. - pub outlives: Vec>, - /// Types that could not be resolved: projections and params. 
- pub dtorck_types: Vec>, -} - -impl<'tcx> FromIterator> for DtorckConstraint<'tcx> -{ - fn from_iter>>(iter: I) -> Self { - let mut result = Self::empty(); - - for constraint in iter { - result.outlives.extend(constraint.outlives); - result.dtorck_types.extend(constraint.dtorck_types); - } - - result - } -} - - -impl<'tcx> DtorckConstraint<'tcx> { - fn empty() -> DtorckConstraint<'tcx> { - DtorckConstraint { - outlives: vec![], - dtorck_types: vec![] - } - } - - fn dedup<'a>(&mut self) { - let mut outlives = FxHashSet(); - let mut dtorck_types = FxHashSet(); - - self.outlives.retain(|&val| outlives.replace(val).is_none()); - self.dtorck_types.retain(|&val| dtorck_types.replace(val).is_none()); - } -} - -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, RustcEncodable, RustcDecodable)] pub struct SymbolName { // FIXME: we don't rely on interning or equality here - better have // this be a `&'tcx str`. @@ -2754,10 +3016,16 @@ impl_stable_hash_for!(struct self::SymbolName { name }); -impl Deref for SymbolName { - type Target = str; +impl SymbolName { + pub fn new(name: &str) -> SymbolName { + SymbolName { + name: Symbol::intern(name).as_interned_str() + } + } - fn deref(&self) -> &str { &self.name } + pub fn as_str(&self) -> LocalInternedString { + self.name.as_str() + } } impl fmt::Display for SymbolName { @@ -2765,3 +3033,9 @@ impl fmt::Display for SymbolName { fmt::Display::fmt(&self.name, fmt) } } + +impl fmt::Debug for SymbolName { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.name, fmt) + } +} diff --git a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs index 707137649d77..ff99a4b7ff63 100644 --- a/src/librustc/ty/outlives.rs +++ b/src/librustc/ty/outlives.rs @@ -79,16 +79,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - ty::TyGenerator(def_id, ref substs, ref interior) => { + ty::TyGenerator(def_id, ref substs, _) => { 
// Same as the closure case for upvar_ty in substs.upvar_tys(def_id, *self) { self.compute_components(upvar_ty, out); } - // But generators can have additional interior types - self.compute_components(interior.witness, out); + // We ignore regions in the generator interior as we don't + // want these to affect region inference } + // All regions are bound inside a witness + ty::TyGeneratorWitness(..) => (), + // OutlivesTypeParameterEnv -- the actual checking that `X:'a` // is implied by the environment is done in regionck. ty::TyParam(p) => { diff --git a/src/librustc/ty/query/README.md b/src/librustc/ty/query/README.md new file mode 100644 index 000000000000..ca6f0b77b664 --- /dev/null +++ b/src/librustc/ty/query/README.md @@ -0,0 +1,302 @@ +# The Rust Compiler Query System + +The Compiler Query System is the key to our new demand-driven +organization. The idea is pretty simple. You have various queries +that compute things about the input -- for example, there is a query +called `type_of(def_id)` that, given the def-id of some item, will +compute the type of that item and return it to you. + +Query execution is **memoized** -- so the first time you invoke a +query, it will go do the computation, but the next time, the result is +returned from a hashtable. Moreover, query execution fits nicely into +**incremental computation**; the idea is roughly that, when you do a +query, the result **may** be returned to you by loading stored data +from disk (but that's a separate topic we won't discuss further here). + +The overall vision is that, eventually, the entire compiler +control-flow will be query driven. There will effectively be one +top-level query ("compile") that will run compilation on a crate; this +will in turn demand information about that crate, starting from the +*end*. For example: + +- This "compile" query might demand to get a list of codegen-units + (i.e., modules that need to be compiled by LLVM). 
+- But computing the list of codegen-units would invoke some subquery + that returns the list of all modules defined in the Rust source. +- That query in turn would invoke something asking for the HIR. +- This keeps going further and further back until we wind up doing the + actual parsing. + +However, that vision is not fully realized. Still, big chunks of the +compiler (for example, generating MIR) work exactly like this. + +### Invoking queries + +To invoke a query is simple. The tcx ("type context") offers a method +for each defined query. So, for example, to invoke the `type_of` +query, you would just do this: + +```rust +let ty = tcx.type_of(some_def_id); +``` + +### Cycles between queries + +Currently, cycles during query execution should always result in a +compilation error. Typically, they arise because of illegal programs +that contain cyclic references they shouldn't (though sometimes they +arise because of compiler bugs, in which case we need to factor our +queries in a more fine-grained fashion to avoid them). + +However, it is nonetheless often useful to *recover* from a cycle +(after reporting an error, say) and try to soldier on, so as to give a +better user experience. In order to recover from a cycle, you don't +get to use the nice method-call-style syntax. Instead, you invoke +using the `try_get` method, which looks roughly like this: + +```rust +use ty::query::queries; +... +match queries::type_of::try_get(tcx, DUMMY_SP, self.did) { + Ok(result) => { + // no cycle occurred! You can use `result` + } + Err(err) => { + // A cycle occurred! The error value `err` is a `DiagnosticBuilder`, + // meaning essentially an "in-progress", not-yet-reported error message. + // See below for more details on what to do here. + } +} +``` + +So, if you get back an `Err` from `try_get`, then a cycle *did* occur. This means that +you must ensure that a compiler error message is reported. You can do that in two ways: + +The simplest is to invoke `err.emit()`. 
This will emit the cycle error to the user. + +However, often cycles happen because of an illegal program, and you +know at that point that an error either already has been reported or +will be reported due to this cycle by some other bit of code. In that +case, you can invoke `err.cancel()` to not emit any error. It is +traditional to then invoke: + +``` +tcx.sess.delay_span_bug(some_span, "some message") +``` + +`delay_span_bug()` is a helper that says: we expect a compilation +error to have happened or to happen in the future; so, if compilation +ultimately succeeds, make an ICE with the message `"some +message"`. This is basically just a precaution in case you are wrong. + +### How the compiler executes a query + +So you may be wondering what happens when you invoke a query +method. The answer is that, for each query, the compiler maintains a +cache -- if your query has already been executed, then, the answer is +simple: we clone the return value out of the cache and return it +(therefore, you should try to ensure that the return types of queries +are cheaply cloneable; insert a `Rc` if necessary). + +#### Providers + +If, however, the query is *not* in the cache, then the compiler will +try to find a suitable **provider**. A provider is a function that has +been defined and linked into the compiler somewhere that contains the +code to compute the result of the query. + +**Providers are defined per-crate.** The compiler maintains, +internally, a table of providers for every crate, at least +conceptually. Right now, there are really two sets: the providers for +queries about the **local crate** (that is, the one being compiled) +and providers for queries about **external crates** (that is, +dependencies of the local crate). Note that what determines the crate +that a query is targeting is not the *kind* of query, but the *key*. 
+For example, when you invoke `tcx.type_of(def_id)`, that could be a +local query or an external query, depending on what crate the `def_id` +is referring to (see the `self::keys::Key` trait for more information +on how that works). + +Providers always have the same signature: + +```rust +fn provider<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx, 'tcx>, + key: QUERY_KEY) + -> QUERY_RESULT +{ + ... +} +``` + +Providers take two arguments: the `tcx` and the query key. Note also +that they take the *global* tcx (i.e., they use the `'tcx` lifetime +twice), rather than taking a tcx with some active inference context. +They return the result of the query. + +#### How providers are setup + +When the tcx is created, it is given the providers by its creator using +the `Providers` struct. This struct is generate by the macros here, but it +is basically a big list of function pointers: + +```rust +struct Providers { + type_of: for<'cx, 'tcx> fn(TyCtxt<'cx, 'tcx, 'tcx>, DefId) -> Ty<'tcx>, + ... +} +``` + +At present, we have one copy of the struct for local crates, and one +for external crates, though the plan is that we may eventually have +one per crate. + +These `Provider` structs are ultimately created and populated by +`librustc_driver`, but it does this by distributing the work +throughout the other `rustc_*` crates. This is done by invoking +various `provide` functions. These functions tend to look something +like this: + +```rust +pub fn provide(providers: &mut Providers) { + *providers = Providers { + type_of, + ..*providers + }; +} +``` + +That is, they take an `&mut Providers` and mutate it in place. Usually +we use the formulation above just because it looks nice, but you could +as well do `providers.type_of = type_of`, which would be equivalent. +(Here, `type_of` would be a top-level function, defined as we saw +before.) 
So, if we want to add a provider for some other query, +let's call it `fubar`, into the crate above, we might modify the `provide()` +function like so: + +```rust +pub fn provide(providers: &mut Providers) { + *providers = Providers { + type_of, + fubar, + ..*providers + }; +} + +fn fubar<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx>, key: DefId) -> Fubar<'tcx> { .. } +``` + +NB. Most of the `rustc_*` crates only provide **local +providers**. Almost all **extern providers** wind up going through the +`rustc_metadata` crate, which loads the information from the crate +metadata. But in some cases there are crates that provide queries for +*both* local and external crates, in which case they define both a +`provide` and a `provide_extern` function that `rustc_driver` can +invoke. + +### Adding a new kind of query + +So suppose you want to add a new kind of query, how do you do so? +Well, defining a query takes place in two steps: + +1. first, you have to specify the query name and arguments; and then, +2. you have to supply query providers where needed. + +To specify the query name and arguments, you simply add an entry +to the big macro invocation in `mod.rs`. This will probably have changed +by the time you read this README, but at present it looks something +like: + +``` +define_queries! { <'tcx> + /// Records the type of every item. + [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, + + ... +} +``` + +Each line of the macro defines one query. The name is broken up like this: + +``` +[] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, +^^ ^^^^^^^ ^^^^^^^^^^ ^^^^^ ^^^^^^^^ +| | | | | +| | | | result type of query +| | | query key type +| | dep-node constructor +| name of query +query flags +``` + +Let's go over them one by one: + +- **Query flags:** these are largely unused right now, but the intention + is that we'll be able to customize various aspects of how the query is + processed. +- **Name of query:** the name of the query method + (`tcx.type_of(..)`). 
Also used as the name of a struct + (`ty::query::queries::type_of`) that will be generated to represent + this query. +- **Dep-node constructor:** indicates the constructor function that + connects this query to incremental compilation. Typically, this is a + `DepNode` variant, which can be added by modifying the + `define_dep_nodes!` macro invocation in + `librustc/dep_graph/dep_node.rs`. + - However, sometimes we use a custom function, in which case the + name will be in snake case and the function will be defined at the + bottom of the file. This is typically used when the query key is + not a def-id, or just not the type that the dep-node expects. +- **Query key type:** the type of the argument to this query. + This type must implement the `ty::query::keys::Key` trait, which + defines (for example) how to map it to a crate, and so forth. +- **Result type of query:** the type produced by this query. This type + should (a) not use `RefCell` or other interior mutability and (b) be + cheaply cloneable. Interning or using `Rc` or `Arc` is recommended for + non-trivial data types. + - The one exception to those rules is the `ty::steal::Steal` type, + which is used to cheaply modify MIR in place. See the definition + of `Steal` for more details. New uses of `Steal` should **not** be + added without alerting `@rust-lang/compiler`. + +So, to add a query: + +- Add an entry to `define_queries!` using the format above. +- Possibly add a corresponding entry to the dep-node macro. +- Link the provider by modifying the appropriate `provide` method; + or add a new one if needed and ensure that `rustc_driver` is invoking it. + +#### Query structs and descriptions + +For each kind, the `define_queries` macro will generate a "query struct" +named after the query. This struct is a kind of a place-holder +describing the query. Each such struct implements the +`self::config::QueryConfig` trait, which has associated types for the +key/value of that particular query. 
Basically the code generated looks something +like this: + +```rust +// Dummy struct representing a particular kind of query: +pub struct type_of<'tcx> { phantom: PhantomData<&'tcx ()> } + +impl<'tcx> QueryConfig for type_of<'tcx> { + type Key = DefId; + type Value = Ty<'tcx>; +} +``` + +There is an additional trait that you may wish to implement called +`self::config::QueryDescription`. This trait is used during cycle +errors to give a "human readable" name for the query, so that we can +summarize what was happening when the cycle occurred. Implementing +this trait is optional if the query key is `DefId`, but if you *don't* +implement it, you get a pretty generic error ("processing `foo`..."). +You can put new impls into the `config` module. They look something like this: + +```rust +impl<'tcx> QueryDescription for queries::type_of<'tcx> { + fn describe(tcx: TyCtxt, key: DefId) -> String { + format!("computing the type of `{}`", tcx.item_path_str(key)) + } +} +``` + diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs new file mode 100644 index 000000000000..ecf35c1b0da3 --- /dev/null +++ b/src/librustc/ty/query/config.rs @@ -0,0 +1,866 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use dep_graph::SerializedDepNodeIndex; +use dep_graph::DepNode; +use hir::def_id::{CrateNum, DefId, DefIndex}; +use mir::interpret::GlobalId; +use traits::query::{ + CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal, CanonicalTypeOpEqGoal, + CanonicalTypeOpNormalizeGoal, CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, +}; +use ty::{self, ParamEnvAnd, Ty, TyCtxt}; +use ty::subst::Substs; +use ty::query::queries; +use ty::query::Query; +use ty::query::QueryCache; +use util::profiling::ProfileCategory; + +use std::hash::Hash; +use std::fmt::Debug; +use syntax_pos::symbol::InternedString; +use rustc_data_structures::sync::Lock; +use rustc_data_structures::stable_hasher::HashStable; +use ich::StableHashingContext; + +// Query configuration and description traits. + +pub trait QueryConfig<'tcx> { + const NAME: &'static str; + const CATEGORY: ProfileCategory; + + type Key: Eq + Hash + Clone + Debug; + type Value: Clone + for<'a> HashStable>; +} + +pub(super) trait QueryAccessors<'tcx>: QueryConfig<'tcx> { + fn query(key: Self::Key) -> Query<'tcx>; + + // Don't use this method to access query results, instead use the methods on TyCtxt + fn query_cache<'a>(tcx: TyCtxt<'a, 'tcx, '_>) -> &'a Lock>; + + fn to_dep_node(tcx: TyCtxt<'_, 'tcx, '_>, key: &Self::Key) -> DepNode; + + // Don't use this method to compute query results, instead use the methods on TyCtxt + fn compute(tcx: TyCtxt<'_, 'tcx, '_>, key: Self::Key) -> Self::Value; + + fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value; +} + +pub(super) trait QueryDescription<'tcx>: QueryAccessors<'tcx> { + fn describe(tcx: TyCtxt, key: Self::Key) -> String; + + #[inline] + fn cache_on_disk(_: Self::Key) -> bool { + false + } + + fn try_load_from_disk(_: TyCtxt<'_, 'tcx, 'tcx>, + _: SerializedDepNodeIndex) + -> Option { + bug!("QueryDescription::load_from_disk() called for an unsupported query.") + } +} + +impl<'tcx, M: QueryAccessors<'tcx, Key=DefId>> QueryDescription<'tcx> for M { 
+ default fn describe(tcx: TyCtxt, def_id: DefId) -> String { + if !tcx.sess.verbose() { + format!("processing `{}`", tcx.item_path_str(def_id)) + } else { + let name = unsafe { ::std::intrinsics::type_name::() }; + format!("processing `{}` applied to `{:?}`", name, def_id) + } + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::normalize_projection_ty<'tcx> { + fn describe( + _tcx: TyCtxt, + goal: CanonicalProjectionGoal<'tcx>, + ) -> String { + format!("normalizing `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::implied_outlives_bounds<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTyGoal<'tcx>) -> String { + format!("computing implied outlives bounds for `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::dropck_outlives<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTyGoal<'tcx>) -> String { + format!("computing dropck types for `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::normalize_ty_after_erasing_regions<'tcx> { + fn describe(_tcx: TyCtxt, goal: ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("normalizing `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::evaluate_obligation<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalPredicateGoal<'tcx>) -> String { + format!("evaluating trait selection obligation `{}`", goal.value.value) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_eq<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTypeOpEqGoal<'tcx>) -> String { + format!("evaluating `type_op_eq` `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_subtype<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTypeOpSubtypeGoal<'tcx>) -> String { + format!("evaluating `type_op_subtype` `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_prove_predicate<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTypeOpProvePredicateGoal<'tcx>) -> String { + format!("evaluating 
`type_op_prove_predicate` `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_ty<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>) -> String { + format!("normalizing `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_predicate<'tcx> { + fn describe( + _tcx: TyCtxt, + goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>>, + ) -> String { + format!("normalizing `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_poly_fn_sig<'tcx> { + fn describe( + _tcx: TyCtxt, + goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>>, + ) -> String { + format!("normalizing `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_fn_sig<'tcx> { + fn describe(_tcx: TyCtxt, goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>>) -> String { + format!("normalizing `{:?}`", goal) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_copy_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` is `Copy`", env.value) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_sized_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` is `Sized`", env.value) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_freeze_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` is freeze", env.value) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::needs_drop_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + format!("computing whether `{}` needs drop", env.value) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::layout_raw<'tcx> { + fn describe(_tcx: TyCtxt, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> String { + 
format!("computing layout of `{}`", env.value) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::super_predicates_of<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("computing the supertraits of `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::const_value_to_allocation<'tcx> { + fn describe(_tcx: TyCtxt, val: &'tcx ty::Const<'tcx>) -> String { + format!("converting value `{:?}` to an allocation", val) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::erase_regions_ty<'tcx> { + fn describe(_tcx: TyCtxt, ty: Ty<'tcx>) -> String { + format!("erasing regions from `{:?}`", ty) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::type_param_predicates<'tcx> { + fn describe(tcx: TyCtxt, (_, def_id): (DefId, DefId)) -> String { + let id = tcx.hir.as_local_node_id(def_id).unwrap(); + format!("computing the bounds for type parameter `{}`", + tcx.hir.ty_param_name(id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::coherent_trait<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("coherence checking all impls of trait `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::upstream_monomorphizations<'tcx> { + fn describe(_: TyCtxt, k: CrateNum) -> String { + format!("collecting available upstream monomorphizations `{:?}`", k) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::crate_inherent_impls<'tcx> { + fn describe(_: TyCtxt, k: CrateNum) -> String { + format!("all inherent impls defined in crate `{:?}`", k) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::crate_inherent_impls_overlap_check<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "check for overlap between inherent impls defined in this crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::crate_variances<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "computing the variances for items in this 
crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::inferred_outlives_crate<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "computing the inferred outlives predicates for items in this crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::mir_shims<'tcx> { + fn describe(tcx: TyCtxt, def: ty::InstanceDef<'tcx>) -> String { + format!("generating MIR shim for `{}`", + tcx.item_path_str(def.def_id())) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::privacy_access_levels<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "privacy access levels".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::typeck_item_bodies<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "type-checking all item bodies".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::reachable_set<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "reachability".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::const_eval<'tcx> { + fn describe(tcx: TyCtxt, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) -> String { + format!("const-evaluating `{}`", tcx.item_path_str(key.value.instance.def.def_id())) + } + + #[inline] + fn cache_on_disk(_key: Self::Key) -> bool { + true + } + + #[inline] + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id).map(Ok) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::mir_keys<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "getting a list of all mir_keys".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::symbol_name<'tcx> { + fn describe(_tcx: TyCtxt, instance: ty::Instance<'tcx>) -> String { + format!("computing the symbol for `{}`", instance) + } + + #[inline] + fn cache_on_disk(_: Self::Key) -> bool { + true + } + + #[inline] + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: 
SerializedDepNodeIndex) + -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::describe_def<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("describe_def") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::def_span<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("def_span") + } +} + + +impl<'tcx> QueryDescription<'tcx> for queries::lookup_stability<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("stability") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::lookup_deprecation_entry<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("deprecation") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::item_attrs<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("item_attrs") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_reachable_non_generic<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("is_reachable_non_generic") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::fn_arg_names<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("fn_arg_names") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::impl_parent<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("impl_parent") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::trait_of_item<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + bug!("trait_of_item") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::const_is_rvalue_promotable_to_static<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("const checking if rvalue is promotable to static `{}`", + tcx.item_path_str(def_id)) + } + + #[inline] + fn cache_on_disk(_: Self::Key) -> bool { + true + } + + #[inline] + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id) + } +} + +impl<'tcx> QueryDescription<'tcx> 
for queries::rvalue_promotable_map<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("checking which parts of `{}` are promotable to static", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_mir_available<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("checking if item is mir available: `{}`", + tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::codegen_fulfill_obligation<'tcx> { + fn describe(tcx: TyCtxt, key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> String { + format!("checking if `{}` fulfills its obligations", tcx.item_path_str(key.1.def_id())) + } + + #[inline] + fn cache_on_disk(_: Self::Key) -> bool { + true + } + + #[inline] + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::trait_impls_of<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("trait impls of `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_object_safe<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("determine object safety of trait `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_const_fn<'tcx> { + fn describe(tcx: TyCtxt, def_id: DefId) -> String { + format!("checking if item is const fn: `{}`", tcx.item_path_str(def_id)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::dylib_dependency_formats<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "dylib dependency formats of crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_panic_runtime<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "checking if the crate is_panic_runtime".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for 
queries::is_compiler_builtins<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "checking if the crate is_compiler_builtins".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::has_global_allocator<'tcx> { + fn describe(_: TyCtxt, _: CrateNum) -> String { + "checking if the crate has_global_allocator".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::extern_crate<'tcx> { + fn describe(_: TyCtxt, _: DefId) -> String { + "getting crate's ExternCrateData".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::lint_levels<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "computing the lint levels for items in this crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::specializes<'tcx> { + fn describe(_tcx: TyCtxt, _: (DefId, DefId)) -> String { + "computing whether impls specialize one another".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::in_scope_traits_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + "traits in scope at a block".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_no_builtins<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "test whether a crate has #![no_builtins]".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::panic_strategy<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "query a crate's configured panic strategy".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_profiler_runtime<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "query a crate is #![profiler_runtime]".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_sanitizer_runtime<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "query a crate is #![sanitizer_runtime]".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::reachable_non_generics<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { 
+ "looking up the exported symbols of a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::native_libraries<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the native libraries of a linked crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::foreign_modules<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the foreign modules of a linked crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::plugin_registrar_fn<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the plugin registrar for a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::derive_registrar_fn<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the derive registrar for a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::crate_disambiguator<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the disambiguator a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::crate_hash<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the hash a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::original_crate_name<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the original name a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::extra_filename<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the extra filename for a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::implementations_of_trait<'tcx> { + fn describe(_tcx: TyCtxt, _: (CrateNum, DefId)) -> String { + "looking up implementations of a trait in a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::all_trait_implementations<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up all (?) 
trait implementations".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::link_args<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up link arguments for a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::resolve_lifetimes<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "resolving lifetimes".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::named_region_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + "looking up a named region".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::is_late_bound_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + "testing if a region is late bound".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::object_lifetime_defaults_map<'tcx> { + fn describe(_tcx: TyCtxt, _: DefIndex) -> String { + "looking up lifetime defaults for a region".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::dep_kind<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "fetching what a dependency looks like".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::crate_name<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "fetching what a crate is named".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::get_lib_features<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the lib features map") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::defined_lib_features<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + format!("calculating the lib features defined in a crate") + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::get_lang_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "calculating the lang items map".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::defined_lang_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) 
-> String { + "calculating the lang items defined in a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::missing_lang_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "calculating the missing lang items in a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::visible_parent_map<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "calculating the visible parent map".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::missing_extern_crate_item<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "seeing if we're missing an `extern crate` item for this crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::used_crate_source<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking at the source for a crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::postorder_cnums<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "generating a postorder list of CrateNums".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::maybe_unused_extern_crates<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up all possibly unused extern crates".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::stability_index<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "calculating the stability index for the local crate".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::all_traits<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "fetching all foreign and local traits".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::all_crate_nums<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "fetching all foreign CrateNum instances".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::exported_symbols<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + 
"exported_symbols".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::collect_and_partition_mono_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "collect_and_partition_mono_items".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::codegen_unit<'tcx> { + fn describe(_tcx: TyCtxt, _: InternedString) -> String { + "codegen_unit".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::compile_codegen_unit<'tcx> { + fn describe(_tcx: TyCtxt, _: InternedString) -> String { + "compile_codegen_unit".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::output_filenames<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "output_filenames".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::vtable_methods<'tcx> { + fn describe(tcx: TyCtxt, key: ty::PolyTraitRef<'tcx> ) -> String { + format!("finding all methods for trait {}", tcx.item_path_str(key.def_id())) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::features_query<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up enabled feature gates".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::typeck_tables_of<'tcx> { + #[inline] + fn cache_on_disk(def_id: Self::Key) -> bool { + def_id.is_local() + } + + fn try_load_from_disk(tcx: TyCtxt<'_, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + let typeck_tables: Option> = tcx + .queries.on_disk_cache + .try_load_query_result(tcx, id); + + typeck_tables.map(|tables| tcx.alloc_tables(tables)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::optimized_mir<'tcx> { + #[inline] + fn cache_on_disk(def_id: Self::Key) -> bool { + def_id.is_local() + } + + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + let mir: Option<::mir::Mir<'tcx>> = tcx.queries.on_disk_cache + .try_load_query_result(tcx, id); + mir.map(|x| tcx.alloc_mir(x)) + } +} + +impl<'tcx> 
QueryDescription<'tcx> for queries::substitute_normalize_and_test_predicates<'tcx> { + fn describe(tcx: TyCtxt, key: (DefId, &'tcx Substs<'tcx>)) -> String { + format!("testing substituted normalized predicates:`{}`", tcx.item_path_str(key.0)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::target_features_whitelist<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "looking up the whitelist of target features".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::instance_def_size_estimate<'tcx> { + fn describe(tcx: TyCtxt, def: ty::InstanceDef<'tcx>) -> String { + format!("estimating size for `{}`", tcx.item_path_str(def.def_id())) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::generics_of<'tcx> { + #[inline] + fn cache_on_disk(def_id: Self::Key) -> bool { + def_id.is_local() + } + + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + let generics: Option = tcx.queries.on_disk_cache + .try_load_query_result(tcx, id); + generics.map(|x| tcx.alloc_generics(x)) + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::program_clauses_for<'tcx> { + fn describe(_tcx: TyCtxt, _: DefId) -> String { + "generating chalk-style clauses".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::program_clauses_for_env<'tcx> { + fn describe(_tcx: TyCtxt, _: ty::ParamEnv<'tcx>) -> String { + "generating chalk-style clauses for param env".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::wasm_import_module_map<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "wasm import module map".to_string() + } +} + +impl<'tcx> QueryDescription<'tcx> for queries::dllimport_foreign_items<'tcx> { + fn describe(_tcx: TyCtxt, _: CrateNum) -> String { + "wasm import module map".to_string() + } +} + +macro_rules! 
impl_disk_cacheable_query( + ($query_name:ident, |$key:tt| $cond:expr) => { + impl<'tcx> QueryDescription<'tcx> for queries::$query_name<'tcx> { + #[inline] + fn cache_on_disk($key: Self::Key) -> bool { + $cond + } + + #[inline] + fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: SerializedDepNodeIndex) + -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id) + } + } + } +); + +impl_disk_cacheable_query!(unsafety_check_result, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(borrowck, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(mir_borrowck, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(mir_const_qualif, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(check_match, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(def_symbol_name, |_| true); +impl_disk_cacheable_query!(type_of, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(predicates_of, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(used_trait_imports, |def_id| def_id.is_local()); +impl_disk_cacheable_query!(codegen_fn_attrs, |_| true); +impl_disk_cacheable_query!(specialization_graph_of, |_| true); diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs new file mode 100644 index 000000000000..56a8c13a8d3b --- /dev/null +++ b/src/librustc/ty/query/job.rs @@ -0,0 +1,525 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(warnings)] + +use std::mem; +use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak}; +use rustc_data_structures::OnDrop; +use syntax_pos::Span; +use ty::tls; +use ty::query::Query; +use ty::query::plumbing::CycleError; +use ty::context::TyCtxt; +use errors::Diagnostic; +use std::process; +use std::{fmt, ptr}; +use std::collections::HashSet; +#[cfg(parallel_queries)] +use { + rayon_core, + parking_lot::{Mutex, Condvar}, + std::sync::atomic::Ordering, + std::thread, + std::iter, + std::iter::FromIterator, + syntax_pos::DUMMY_SP, + rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable}, +}; + +/// Indicates the state of a query for a given key in a query map +pub(super) enum QueryResult<'tcx> { + /// An already executing query. The query job can be used to await for its completion + Started(Lrc>), + + /// The query panicked. Queries trying to wait on this will raise a fatal error / silently panic + Poisoned, +} + +/// A span and a query key +#[derive(Clone, Debug)] +pub struct QueryInfo<'tcx> { + /// The span for a reason this query was required + pub span: Span, + pub query: Query<'tcx>, +} + +/// A object representing an active query job. +pub struct QueryJob<'tcx> { + pub info: QueryInfo<'tcx>, + + /// The parent query job which created this job and is implicitly waiting on it. + pub parent: Option>>, + + /// Diagnostic messages which are emitted while the query executes + pub diagnostics: Lock>, + + /// The latch which is used to wait on this job + #[cfg(parallel_queries)] + latch: QueryLatch<'tcx>, +} + +impl<'tcx> QueryJob<'tcx> { + /// Creates a new query job + pub fn new(info: QueryInfo<'tcx>, parent: Option>>) -> Self { + QueryJob { + diagnostics: Lock::new(Vec::new()), + info, + parent, + #[cfg(parallel_queries)] + latch: QueryLatch::new(), + } + } + + /// Awaits for the query job to complete. 
+ /// + /// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any + /// query that means that there is a query cycle, thus this always running a cycle error. + pub(super) fn await<'lcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'lcx>, + span: Span, + ) -> Result<(), CycleError<'tcx>> { + #[cfg(not(parallel_queries))] + { + self.find_cycle_in_stack(tcx, span) + } + + #[cfg(parallel_queries)] + { + tls::with_related_context(tcx, move |icx| { + let mut waiter = Lrc::new(QueryWaiter { + query: icx.query.clone(), + span, + cycle: Lock::new(None), + condvar: Condvar::new(), + }); + self.latch.await(&waiter); + + match Lrc::get_mut(&mut waiter).unwrap().cycle.get_mut().take() { + None => Ok(()), + Some(cycle) => Err(cycle) + } + }) + } + } + + #[cfg(not(parallel_queries))] + fn find_cycle_in_stack<'lcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'lcx>, + span: Span, + ) -> Result<(), CycleError<'tcx>> { + // Get the current executing query (waiter) and find the waitee amongst its parents + let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone()); + let mut cycle = Vec::new(); + + while let Some(job) = current_job { + cycle.insert(0, job.info.clone()); + + if ptr::eq(&*job, self) { + // This is the end of the cycle + // The span entry we included was for the usage + // of the cycle itself, and not part of the cycle + // Replace it with the span which caused the cycle to form + cycle[0].span = span; + // Find out why the cycle itself was used + let usage = job.parent.as_ref().map(|parent| { + (job.info.span, parent.info.query.clone()) + }); + return Err(CycleError { usage, cycle }); + } + + current_job = job.parent.clone(); + } + + panic!("did not find a cycle") + } + + /// Signals to waiters that the query is complete. 
+ /// + /// This does nothing for single threaded rustc, + /// as there are no concurrent jobs which could be waiting on us + pub fn signal_complete(&self) { + #[cfg(parallel_queries)] + self.latch.set(); + } + + fn as_ptr(&self) -> *const QueryJob<'tcx> { + self as *const _ + } +} + +#[cfg(parallel_queries)] +struct QueryWaiter<'tcx> { + query: Option>>, + condvar: Condvar, + span: Span, + cycle: Lock>>, +} + +#[cfg(parallel_queries)] +impl<'tcx> QueryWaiter<'tcx> { + fn notify(&self, registry: &rayon_core::Registry) { + rayon_core::mark_unblocked(registry); + self.condvar.notify_one(); + } +} + +#[cfg(parallel_queries)] +struct QueryLatchInfo<'tcx> { + complete: bool, + waiters: Vec>>, +} + +#[cfg(parallel_queries)] +struct QueryLatch<'tcx> { + info: Mutex>, +} + +#[cfg(parallel_queries)] +impl<'tcx> QueryLatch<'tcx> { + fn new() -> Self { + QueryLatch { + info: Mutex::new(QueryLatchInfo { + complete: false, + waiters: Vec::new(), + }), + } + } + + /// Awaits the caller on this latch by blocking the current thread. + fn await(&self, waiter: &Lrc>) { + let mut info = self.info.lock(); + if !info.complete { + // We push the waiter on to the `waiters` list. It can be accessed inside + // the `wait` call below, by 1) the `set` method or 2) by deadlock detection. + // Both of these will remove it from the `waiters` list before resuming + // this thread. + info.waiters.push(waiter.clone()); + + // If this detects a deadlock and the deadlock handler wants to resume this thread + // we have to be in the `wait` call. This is ensured by the deadlock handler + // getting the self.info lock. + rayon_core::mark_blocked(); + waiter.condvar.wait(&mut info); + } + } + + /// Sets the latch and resumes all waiters on it + fn set(&self) { + let mut info = self.info.lock(); + debug_assert!(!info.complete); + info.complete = true; + let registry = rayon_core::Registry::current(); + for waiter in info.waiters.drain(..) 
{ + waiter.notify(®istry); + } + } + + /// Remove a single waiter from the list of waiters. + /// This is used to break query cycles. + fn extract_waiter( + &self, + waiter: usize, + ) -> Lrc> { + let mut info = self.info.lock(); + debug_assert!(!info.complete); + // Remove the waiter from the list of waiters + info.waiters.remove(waiter) + } +} + +/// A resumable waiter of a query. The usize is the index into waiters in the query's latch +#[cfg(parallel_queries)] +type Waiter<'tcx> = (Lrc>, usize); + +/// Visits all the non-resumable and resumable waiters of a query. +/// Only waiters in a query are visited. +/// `visit` is called for every waiter and is passed a query waiting on `query_ref` +/// and a span indicating the reason the query waited on `query_ref`. +/// If `visit` returns Some, this function returns. +/// For visits of non-resumable waiters it returns the return value of `visit`. +/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the +/// required information to resume the waiter. +/// If all `visit` calls returns None, this function also returns None. +#[cfg(parallel_queries)] +fn visit_waiters<'tcx, F>(query: Lrc>, mut visit: F) -> Option>> +where + F: FnMut(Span, Lrc>) -> Option>> +{ + // Visit the parent query which is a non-resumable waiter since it's on the same stack + if let Some(ref parent) = query.parent { + if let Some(cycle) = visit(query.info.span, parent.clone()) { + return Some(cycle); + } + } + + // Visit the explict waiters which use condvars and are resumable + for (i, waiter) in query.latch.info.lock().waiters.iter().enumerate() { + if let Some(ref waiter_query) = waiter.query { + if visit(waiter.span, waiter_query.clone()).is_some() { + // Return a value which indicates that this waiter can be resumed + return Some(Some((query.clone(), i))); + } + } + } + None +} + +/// Look for query cycles by doing a depth first search starting at `query`. +/// `span` is the reason for the `query` to execute. 
This is initially DUMMY_SP. +/// If a cycle is detected, this initial value is replaced with the span causing +/// the cycle. +#[cfg(parallel_queries)] +fn cycle_check<'tcx>(query: Lrc>, + span: Span, + stack: &mut Vec<(Span, Lrc>)>, + visited: &mut HashSet<*const QueryJob<'tcx>> +) -> Option>> { + if visited.contains(&query.as_ptr()) { + return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) { + // We detected a query cycle, fix up the initial span and return Some + + // Remove previous stack entries + stack.splice(0..p, iter::empty()); + // Replace the span for the first query with the cycle cause + stack[0].0 = span; + Some(None) + } else { + None + } + } + + // Mark this query is visited and add it to the stack + visited.insert(query.as_ptr()); + stack.push((span, query.clone())); + + // Visit all the waiters + let r = visit_waiters(query, |span, successor| { + cycle_check(successor, span, stack, visited) + }); + + // Remove the entry in our stack if we didn't find a cycle + if r.is_none() { + stack.pop(); + } + + r +} + +/// Finds out if there's a path to the compiler root (aka. code which isn't in a query) +/// from `query` without going through any of the queries in `visited`. +/// This is achieved with a depth first search. +#[cfg(parallel_queries)] +fn connected_to_root<'tcx>( + query: Lrc>, + visited: &mut HashSet<*const QueryJob<'tcx>> +) -> bool { + // We already visited this or we're deliberately ignoring it + if visited.contains(&query.as_ptr()) { + return false; + } + + // This query is connected to the root (it has no query parent), return true + if query.parent.is_none() { + return true; + } + + visited.insert(query.as_ptr()); + + let mut connected = false; + + visit_waiters(query, |_, successor| { + if connected_to_root(successor, visited) { + Some(None) + } else { + None + } + }).is_some() +} + +/// Looks for query cycles starting from the last query in `jobs`. 
+/// If a cycle is found, all queries in the cycle is removed from `jobs` and +/// the function return true. +/// If a cycle was not found, the starting query is removed from `jobs` and +/// the function returns false. +#[cfg(parallel_queries)] +fn remove_cycle<'tcx>( + jobs: &mut Vec>>, + wakelist: &mut Vec>>, + tcx: TyCtxt<'_, 'tcx, '_> +) -> bool { + let mut visited = HashSet::new(); + let mut stack = Vec::new(); + // Look for a cycle starting with the last query in `jobs` + if let Some(waiter) = cycle_check(jobs.pop().unwrap(), + DUMMY_SP, + &mut stack, + &mut visited) { + // Reverse the stack so earlier entries require later entries + stack.reverse(); + + // Extract the spans and queries into separate arrays + let mut spans: Vec<_> = stack.iter().map(|e| e.0).collect(); + let queries = stack.into_iter().map(|e| e.1); + + // Shift the spans so that queries are matched with the span for their waitee + let last = spans.pop().unwrap(); + spans.insert(0, last); + + // Zip them back together + let mut stack: Vec<_> = spans.into_iter().zip(queries).collect(); + + // Remove the queries in our cycle from the list of jobs to look at + for r in &stack { + if let Some(pos) = jobs.iter().position(|j| j.as_ptr() == r.1.as_ptr()) { + jobs.remove(pos); + } + } + + // Find the queries in the cycle which are + // connected to queries outside the cycle + let entry_points: Vec>> = stack.iter().filter_map(|query| { + // Mark all the other queries in the cycle as already visited + let mut visited = HashSet::from_iter(stack.iter().filter_map(|q| { + if q.1.as_ptr() != query.1.as_ptr() { + Some(q.1.as_ptr()) + } else { + None + } + })); + + if connected_to_root(query.1.clone(), &mut visited) { + Some(query.1.clone()) + } else { + None + } + }).collect(); + + // Deterministically pick an entry point + // FIXME: Sort this instead + let mut hcx = tcx.create_stable_hashing_context(); + let entry_point = entry_points.iter().min_by_key(|q| { + let mut stable_hasher = StableHasher::::new(); 
+ q.info.query.hash_stable(&mut hcx, &mut stable_hasher); + stable_hasher.finish() + }).unwrap().as_ptr(); + + // Shift the stack until our entry point is first + while stack[0].1.as_ptr() != entry_point { + let last = stack.pop().unwrap(); + stack.insert(0, last); + } + + // Create the cycle error + let mut error = CycleError { + usage: None, + cycle: stack.iter().map(|&(s, ref q)| QueryInfo { + span: s, + query: q.info.query.clone(), + } ).collect(), + }; + + // We unwrap `waiter` here since there must always be one + // edge which is resumeable / waited using a query latch + let (waitee_query, waiter_idx) = waiter.unwrap(); + + // Extract the waiter we want to resume + let waiter = waitee_query.latch.extract_waiter(waiter_idx); + + // Set the cycle error so it will be picked up when resumed + *waiter.cycle.lock() = Some(error); + + // Put the waiter on the list of things to resume + wakelist.push(waiter); + + true + } else { + false + } +} + +/// Creates a new thread and forwards information in thread locals to it. +/// The new thread runs the deadlock handler. +/// Must only be called when a deadlock is about to happen. 
+#[cfg(parallel_queries)] +pub unsafe fn handle_deadlock() { + use syntax; + use syntax_pos; + + let registry = rayon_core::Registry::current(); + + let gcx_ptr = tls::GCX_PTR.with(|gcx_ptr| { + gcx_ptr as *const _ + }); + let gcx_ptr = &*gcx_ptr; + + let syntax_globals = syntax::GLOBALS.with(|syntax_globals| { + syntax_globals as *const _ + }); + let syntax_globals = &*syntax_globals; + + let syntax_pos_globals = syntax_pos::GLOBALS.with(|syntax_pos_globals| { + syntax_pos_globals as *const _ + }); + let syntax_pos_globals = &*syntax_pos_globals; + thread::spawn(move || { + tls::GCX_PTR.set(gcx_ptr, || { + syntax_pos::GLOBALS.set(syntax_pos_globals, || { + syntax_pos::GLOBALS.set(syntax_pos_globals, || { + tls::with_thread_locals(|| { + tls::with_global(|tcx| deadlock(tcx, ®istry)) + }) + }) + }) + }) + }); +} + +/// Detects query cycles by using depth first search over all active query jobs. +/// If a query cycle is found it will break the cycle by finding an edge which +/// uses a query latch and then resuming that waiter. +/// There may be multiple cycles involved in a deadlock, so this searches +/// all active queries for cycles before finally resuming all the waiters at once. +#[cfg(parallel_queries)] +fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { + let on_panic = OnDrop(|| { + eprintln!("deadlock handler panicked, aborting process"); + process::abort(); + }); + + let mut wakelist = Vec::new(); + let mut jobs: Vec<_> = tcx.queries.collect_active_jobs(); + + let mut found_cycle = false; + + while jobs.len() > 0 { + if remove_cycle(&mut jobs, &mut wakelist, tcx) { + found_cycle = true; + } + } + + // Check that a cycle was found. It is possible for a deadlock to occur without + // a query cycle if a query which can be waited on uses Rayon to do multithreading + // internally. Such a query (X) may be executing on 2 threads (A and B) and A may + // wait using Rayon on B. 
Rayon may then switch to executing another query (Y) + // which in turn will wait on X causing a deadlock. We have a false dependency from + // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here + // only considers the true dependency and won't detect a cycle. + assert!(found_cycle); + + // FIXME: Ensure this won't cause a deadlock before we return + for waiter in wakelist.into_iter() { + waiter.notify(registry); + } + + on_panic.disable(); +} diff --git a/src/librustc/ty/query/keys.rs b/src/librustc/ty/query/keys.rs new file mode 100644 index 000000000000..8423b02ee758 --- /dev/null +++ b/src/librustc/ty/query/keys.rs @@ -0,0 +1,206 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Defines the set of legal keys that can be used in queries. + +use infer::canonical::Canonical; +use hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex}; +use ty::{self, Ty, TyCtxt}; +use ty::subst::Substs; +use ty::fast_reject::SimplifiedType; +use mir; + +use std::fmt::Debug; +use std::hash::Hash; +use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::symbol::InternedString; + +/// The `Key` trait controls what types can legally be used as the key +/// for a query. +pub(super) trait Key: Clone + Hash + Eq + Debug { + /// Given an instance of this key, what crate is it referring to? + /// This is used to find the provider. + fn query_crate(&self) -> CrateNum; + + /// In the event that a cycle occurs, if no explicit span has been + /// given for a query with key `self`, what span should we use? 
+ fn default_span(&self, tcx: TyCtxt) -> Span; +} + +impl<'tcx> Key for ty::InstanceDef<'tcx> { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(self.def_id()) + } +} + +impl<'tcx> Key for ty::Instance<'tcx> { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(self.def_id()) + } +} + +impl<'tcx> Key for mir::interpret::GlobalId<'tcx> { + fn query_crate(&self) -> CrateNum { + self.instance.query_crate() + } + + fn default_span(&self, tcx: TyCtxt) -> Span { + self.instance.default_span(tcx) + } +} + +impl Key for CrateNum { + fn query_crate(&self) -> CrateNum { + *self + } + fn default_span(&self, _: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl Key for DefIndex { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _tcx: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl Key for DefId { + fn query_crate(&self) -> CrateNum { + self.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(*self) + } +} + +impl Key for (DefId, DefId) { + fn query_crate(&self) -> CrateNum { + self.0.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.1.default_span(tcx) + } +} + +impl Key for (CrateNum, DefId) { + fn query_crate(&self) -> CrateNum { + self.0 + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.1.default_span(tcx) + } +} + +impl Key for (DefId, SimplifiedType) { + fn query_crate(&self) -> CrateNum { + self.0.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.0.default_span(tcx) + } +} + +impl<'tcx> Key for (DefId, &'tcx Substs<'tcx>) { + fn query_crate(&self) -> CrateNum { + self.0.krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.0.default_span(tcx) + } +} + +impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) { + fn query_crate(&self) -> CrateNum { + self.1.def_id().krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + 
tcx.def_span(self.1.def_id()) + } +} + +impl<'tcx> Key for ty::PolyTraitRef<'tcx>{ + fn query_crate(&self) -> CrateNum { + self.def_id().krate + } + fn default_span(&self, tcx: TyCtxt) -> Span { + tcx.def_span(self.def_id()) + } +} + +impl<'tcx> Key for &'tcx ty::Const<'tcx> { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl<'tcx> Key for Ty<'tcx> { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl<'tcx> Key for ty::ParamEnv<'tcx> { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _: TyCtxt) -> Span { + DUMMY_SP + } +} + +impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> { + fn query_crate(&self) -> CrateNum { + self.value.query_crate() + } + fn default_span(&self, tcx: TyCtxt) -> Span { + self.value.default_span(tcx) + } +} + +impl Key for InternedString { + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + fn default_span(&self, _tcx: TyCtxt) -> Span { + DUMMY_SP + } +} + +/// Canonical query goals correspond to abstract trait operations that +/// are not tied to any crate in particular. +impl<'tcx, T> Key for Canonical<'tcx, T> +where + T: Debug + Hash + Clone + Eq, +{ + fn query_crate(&self) -> CrateNum { + LOCAL_CRATE + } + + fn default_span(&self, _tcx: TyCtxt) -> Span { + DUMMY_SP + } +} diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs new file mode 100644 index 000000000000..ef22ebef9d7d --- /dev/null +++ b/src/librustc/ty/query/mod.rs @@ -0,0 +1,865 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use dep_graph::{DepConstructor, DepNode}; +use errors::DiagnosticBuilder; +use hir::def_id::{CrateNum, DefId, DefIndex}; +use hir::def::{Def, Export}; +use hir::{self, TraitCandidate, ItemLocalId, CodegenFnAttrs}; +use rustc_data_structures::svh::Svh; +use infer::canonical::{self, Canonical}; +use lint; +use middle::borrowck::BorrowCheckResult; +use middle::cstore::{ExternCrate, LinkagePreference, NativeLibrary, ForeignModule}; +use middle::cstore::{NativeLibraryKind, DepKind, CrateSource}; +use middle::privacy::AccessLevels; +use middle::reachable::ReachableSet; +use middle::region; +use middle::resolve_lifetime::{ResolveLifetimes, Region, ObjectLifetimeDefault}; +use middle::stability::{self, DeprecationEntry}; +use middle::lib_features::LibFeatures; +use middle::lang_items::{LanguageItems, LangItem}; +use middle::exported_symbols::{SymbolExportLevel, ExportedSymbol}; +use mir::interpret::ConstEvalResult; +use mir::mono::{CodegenUnit, Stats}; +use mir; +use mir::interpret::{GlobalId, Allocation}; +use session::{CompileResult, CrateDisambiguator}; +use session::config::OutputFilenames; +use traits::{self, Vtable}; +use traits::query::{CanonicalPredicateGoal, CanonicalProjectionGoal, + CanonicalTyGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpSubtypeGoal, + CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpNormalizeGoal, NoSolution}; +use traits::query::dropck_outlives::{DtorckConstraint, DropckOutlivesResult}; +use traits::query::normalize::NormalizationResult; +use traits::query::outlives_bounds::OutlivesBound; +use traits::specialization_graph; +use traits::Clauses; +use ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; +use ty::steal::Steal; +use ty::subst::Substs; +use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet}; +use util::common::{ErrorReported}; +use util::profiling::ProfileCategory::*; + +use rustc_data_structures::indexed_set::IdxSetBuf; +use rustc_target::spec::PanicStrategy; +use rustc_data_structures::indexed_vec::IndexVec; +use 
rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::stable_hasher::StableVec; + +use std::ops::Deref; +use rustc_data_structures::sync::Lrc; +use std::sync::Arc; +use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::symbol::InternedString; +use syntax::attr; +use syntax::ast; +use syntax::feature_gate; +use syntax::symbol::Symbol; + +#[macro_use] +mod plumbing; +use self::plumbing::*; +pub use self::plumbing::{force_from_dep_node, CycleError}; + +mod job; +pub use self::job::{QueryJob, QueryInfo}; +#[cfg(parallel_queries)] +pub use self::job::handle_deadlock; + +mod keys; +use self::keys::Key; + +mod values; +use self::values::Value; + +mod config; +pub use self::config::QueryConfig; +use self::config::{QueryAccessors, QueryDescription}; + +mod on_disk_cache; +pub use self::on_disk_cache::OnDiskCache; + +// Each of these quries corresponds to a function pointer field in the +// `Providers` struct for requesting a value of that type, and a method +// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way +// which memoizes and does dep-graph tracking, wrapping around the actual +// `Providers` that the driver creates (using several `rustc_*` crates). +// +// The result type of each query must implement `Clone`, and additionally +// `ty::query::values::Value`, which produces an appropriate placeholder +// (error) value if the query resulted in a query cycle. +// Queries marked with `fatal_cycle` do not need the latter implementation, +// as they will raise an fatal error on query cycles instead. +define_queries! { <'tcx> + Other { + /// Records the type of every item. + [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, + + /// Maps from the def-id of an item (trait/struct/enum/fn) to its + /// associated generics. 
+ [] fn generics_of: GenericsOfItem(DefId) -> &'tcx ty::Generics, + + /// Maps from the def-id of an item (trait/struct/enum/fn) to the + /// predicates (where clauses) that must be proven true in order + /// to reference it. This is almost always the "predicates query" + /// that you want. + /// + /// `predicates_of` builds on `predicates_defined_on` -- in fact, + /// it is almost always the same as that query, except for the + /// case of traits. For traits, `predicates_of` contains + /// an additional `Self: Trait<...>` predicate that users don't + /// actually write. This reflects the fact that to invoke the + /// trait (e.g., via `Default::default`) you must supply types + /// that actually implement the trait. (However, this extra + /// predicate gets in the way of some checks, which are intended + /// to operate over only the actual where-clauses written by the + /// user.) + [] fn predicates_of: PredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, + + /// Maps from the def-id of an item (trait/struct/enum/fn) to the + /// predicates (where clauses) directly defined on it. This is + /// equal to the `explicit_predicates_of` predicates plus the + /// `inferred_outlives_of` predicates. + [] fn predicates_defined_on: PredicatesDefinedOnItem(DefId) -> ty::GenericPredicates<'tcx>, + + /// Returns the predicates written explicit by the user. + [] fn explicit_predicates_of: ExplicitPredicatesOfItem(DefId) + -> ty::GenericPredicates<'tcx>, + + /// Returns the inferred outlives predicates (e.g., for `struct + /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`). + [] fn inferred_outlives_of: InferredOutlivesOf(DefId) -> Lrc>>, + + /// Maps from the def-id of a trait to the list of + /// super-predicates. This is a subset of the full list of + /// predicates. 
We store these in a separate map because we must + /// evaluate them even during type conversion, often before the + /// full predicates are available (note that supertraits have + /// additional acyclicity requirements). + [] fn super_predicates_of: SuperPredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>, + + /// To avoid cycles within the predicates of a single item we compute + /// per-type-parameter predicates for resolving `T::AssocTy`. + [] fn type_param_predicates: type_param_predicates((DefId, DefId)) + -> ty::GenericPredicates<'tcx>, + + [] fn trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef, + [] fn adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef, + [] fn adt_destructor: AdtDestructor(DefId) -> Option, + [] fn adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>], + [] fn adt_dtorck_constraint: DtorckConstraint( + DefId + ) -> Result, NoSolution>, + + /// True if this is a const fn + [] fn is_const_fn: IsConstFn(DefId) -> bool, + + /// True if this is a foreign item (i.e., linked via `extern { ... }`). + [] fn is_foreign_item: IsForeignItem(DefId) -> bool, + + /// Get a map with the variance of every item; use `item_variance` + /// instead. + [] fn crate_variances: crate_variances(CrateNum) -> Lrc, + + /// Maps from def-id of a type or region parameter to its + /// (inferred) variance. + [] fn variances_of: ItemVariances(DefId) -> Lrc>, + }, + + TypeChecking { + /// Maps from def-id of a type to its (inferred) outlives. 
+ [] fn inferred_outlives_crate: InferredOutlivesCrate(CrateNum) + -> Lrc>, + }, + + Other { + /// Maps from an impl/trait def-id to a list of the def-ids of its items + [] fn associated_item_def_ids: AssociatedItemDefIds(DefId) -> Lrc>, + + /// Maps from a trait item to the trait item "descriptor" + [] fn associated_item: AssociatedItems(DefId) -> ty::AssociatedItem, + + [] fn impl_trait_ref: ImplTraitRef(DefId) -> Option>, + [] fn impl_polarity: ImplPolarity(DefId) -> hir::ImplPolarity, + }, + + TypeChecking { + /// Maps a DefId of a type to a list of its inherent impls. + /// Contains implementations of methods that are inherent to a type. + /// Methods in these implementations don't need to be exported. + [] fn inherent_impls: InherentImpls(DefId) -> Lrc>, + }, + + Codegen { + /// Set of all the def-ids in this crate that have MIR associated with + /// them. This includes all the body owners, but also things like struct + /// constructors. + [] fn mir_keys: mir_keys(CrateNum) -> Lrc, + + /// Maps DefId's that have an associated Mir to the result + /// of the MIR qualify_consts pass. The actual meaning of + /// the value isn't known except to the pass itself. + [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Lrc>), + + /// Fetch the MIR for a given def-id right after it's built - this includes + /// unreachable code. + [] fn mir_built: MirBuilt(DefId) -> &'tcx Steal>, + + /// Fetch the MIR for a given def-id up till the point where it is + /// ready for const evaluation. + /// + /// See the README for the `mir` module for details. + [] fn mir_const: MirConst(DefId) -> &'tcx Steal>, + + [] fn mir_validated: MirValidated(DefId) -> &'tcx Steal>, + + /// MIR after our optimization passes have run. This is MIR that is ready + /// for codegen. This is also the only query that can fetch non-local MIR, at present. + [] fn optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>, + }, + + TypeChecking { + /// The result of unsafety-checking this def-id. 
+ [] fn unsafety_check_result: UnsafetyCheckResult(DefId) -> mir::UnsafetyCheckResult, + + /// HACK: when evaluated, this reports a "unsafe derive on repr(packed)" error + [] fn unsafe_derive_on_repr_packed: UnsafeDeriveOnReprPacked(DefId) -> (), + + /// The signature of functions and closures. + [] fn fn_sig: FnSignature(DefId) -> ty::PolyFnSig<'tcx>, + }, + + Other { + /// Caches CoerceUnsized kinds for impls on custom types. + [] fn coerce_unsized_info: CoerceUnsizedInfo(DefId) + -> ty::adjustment::CoerceUnsizedInfo, + }, + + TypeChecking { + [] fn typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult, + + [] fn typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>, + }, + + Other { + [] fn used_trait_imports: UsedTraitImports(DefId) -> Lrc, + }, + + TypeChecking { + [] fn has_typeck_tables: HasTypeckTables(DefId) -> bool, + + [] fn coherent_trait: CoherenceCheckTrait(DefId) -> (), + }, + + BorrowChecking { + [] fn borrowck: BorrowCheck(DefId) -> Lrc, + + /// Borrow checks the function body. If this is a closure, returns + /// additional requirements that the closure's creator must verify. + [] fn mir_borrowck: MirBorrowCheck(DefId) -> mir::BorrowCheckResult<'tcx>, + }, + + TypeChecking { + /// Gets a complete map from all types to their inherent impls. + /// Not meant to be used directly outside of coherence. + /// (Defined only for LOCAL_CRATE) + [] fn crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) -> CrateInherentImpls, + + /// Checks all types in the krate for overlap in their inherent impls. Reports errors. + /// Not meant to be used directly outside of coherence. + /// (Defined only for LOCAL_CRATE) + [] fn crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) + -> (), + }, + + Other { + /// Results of evaluating const items or constants embedded in + /// other items (such as enum variant explicit discriminants). 
+ [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) + -> ConstEvalResult<'tcx>, + + /// Converts a constant value to an constant allocation + [] fn const_value_to_allocation: const_value_to_allocation( + &'tcx ty::Const<'tcx> + ) -> &'tcx Allocation, + }, + + TypeChecking { + [] fn check_match: CheckMatch(DefId) + -> Result<(), ErrorReported>, + + /// Performs the privacy check and computes "access levels". + [] fn privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Lrc, + }, + + Other { + [] fn reachable_set: reachability_dep_node(CrateNum) -> ReachableSet, + + /// Per-body `region::ScopeTree`. The `DefId` should be the owner-def-id for the body; + /// in the case of closures, this will be redirected to the enclosing function. + [] fn region_scope_tree: RegionScopeTree(DefId) -> Lrc, + + [] fn mir_shims: mir_shim_dep_node(ty::InstanceDef<'tcx>) -> &'tcx mir::Mir<'tcx>, + + [] fn def_symbol_name: SymbolName(DefId) -> ty::SymbolName, + [] fn symbol_name: symbol_name_dep_node(ty::Instance<'tcx>) -> ty::SymbolName, + + [] fn describe_def: DescribeDef(DefId) -> Option, + [] fn def_span: DefSpan(DefId) -> Span, + [] fn lookup_stability: LookupStability(DefId) -> Option<&'tcx attr::Stability>, + [] fn lookup_deprecation_entry: LookupDeprecationEntry(DefId) -> Option, + [] fn item_attrs: ItemAttrs(DefId) -> Lrc<[ast::Attribute]>, + }, + + Codegen { + [] fn codegen_fn_attrs: codegen_fn_attrs(DefId) -> CodegenFnAttrs, + }, + + Other { + [] fn fn_arg_names: FnArgNames(DefId) -> Vec, + /// Gets the rendered value of the specified constant or associated constant. + /// Used by rustdoc. 
+ [] fn rendered_const: RenderedConst(DefId) -> String, + [] fn impl_parent: ImplParent(DefId) -> Option, + }, + + TypeChecking { + [] fn trait_of_item: TraitOfItem(DefId) -> Option, + [] fn const_is_rvalue_promotable_to_static: ConstIsRvaluePromotableToStatic(DefId) -> bool, + [] fn rvalue_promotable_map: RvaluePromotableMap(DefId) -> Lrc, + }, + + Codegen { + [] fn is_mir_available: IsMirAvailable(DefId) -> bool, + }, + + Other { + [] fn vtable_methods: vtable_methods_node(ty::PolyTraitRef<'tcx>) + -> Lrc)>>>, + }, + + Codegen { + [] fn codegen_fulfill_obligation: fulfill_obligation_dep_node( + (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> Vtable<'tcx, ()>, + }, + + TypeChecking { + [] fn trait_impls_of: TraitImpls(DefId) -> Lrc, + [] fn specialization_graph_of: SpecializationGraph(DefId) + -> Lrc, + [] fn is_object_safe: ObjectSafety(DefId) -> bool, + + // Get the ParameterEnvironment for a given item; this environment + // will be in "user-facing" mode, meaning that it is suitabe for + // type-checking etc, and it does not normalize specializable + // associated types. This is almost always what you want, + // unless you are doing MIR optimizations, in which case you + // might want to use `reveal_all()` method to change modes. + [] fn param_env: ParamEnv(DefId) -> ty::ParamEnv<'tcx>, + + // Trait selection queries. These are best used by invoking `ty.moves_by_default()`, + // `ty.is_copy()`, etc, since that will prune the environment where possible. 
+ [] fn is_copy_raw: is_copy_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn is_sized_raw: is_sized_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, + [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) + -> Result<&'tcx ty::layout::LayoutDetails, + ty::layout::LayoutError<'tcx>>, + }, + + Other { + [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) + -> Lrc>, + }, + + Codegen { + [fatal_cycle] fn is_panic_runtime: IsPanicRuntime(CrateNum) -> bool, + [fatal_cycle] fn is_compiler_builtins: IsCompilerBuiltins(CrateNum) -> bool, + [fatal_cycle] fn has_global_allocator: HasGlobalAllocator(CrateNum) -> bool, + [fatal_cycle] fn is_sanitizer_runtime: IsSanitizerRuntime(CrateNum) -> bool, + [fatal_cycle] fn is_profiler_runtime: IsProfilerRuntime(CrateNum) -> bool, + [fatal_cycle] fn panic_strategy: GetPanicStrategy(CrateNum) -> PanicStrategy, + [fatal_cycle] fn is_no_builtins: IsNoBuiltins(CrateNum) -> bool, + + [] fn extern_crate: ExternCrate(DefId) -> Lrc>, + }, + + TypeChecking { + [] fn specializes: specializes_node((DefId, DefId)) -> bool, + [] fn in_scope_traits_map: InScopeTraits(DefIndex) + -> Option>>>>, + }, + + Other { + [] fn module_exports: ModuleExports(DefId) -> Option>>, + [] fn lint_levels: lint_levels_node(CrateNum) -> Lrc, + }, + + TypeChecking { + [] fn impl_defaultness: ImplDefaultness(DefId) -> hir::Defaultness, + + [] fn check_item_well_formed: CheckItemWellFormed(DefId) -> (), + [] fn check_trait_item_well_formed: CheckTraitItemWellFormed(DefId) -> (), + [] fn check_impl_item_well_formed: CheckImplItemWellFormed(DefId) -> (), + }, + + Linking { + // The DefIds of all non-generic functions and statics in the given crate + // that can be reached from outside the crate. + // + // We expect this items to be available for being linked to. 
+ // + // This query can also be called for LOCAL_CRATE. In this case it will + // compute which items will be reachable to other crates, taking into account + // the kind of crate that is currently compiled. Crates with only a + // C interface have fewer reachable things. + // + // Does not include external symbols that don't have a corresponding DefId, + // like the compiler-generated `main` function and so on. + [] fn reachable_non_generics: ReachableNonGenerics(CrateNum) + -> Lrc>, + [] fn is_reachable_non_generic: IsReachableNonGeneric(DefId) -> bool, + [] fn is_unreachable_local_definition: IsUnreachableLocalDefinition(DefId) -> bool, + }, + + Codegen { + [] fn upstream_monomorphizations: UpstreamMonomorphizations(CrateNum) + -> Lrc, CrateNum>>>>, + [] fn upstream_monomorphizations_for: UpstreamMonomorphizationsFor(DefId) + -> Option, CrateNum>>>, + }, + + Other { + [] fn native_libraries: NativeLibraries(CrateNum) -> Lrc>, + + [] fn foreign_modules: ForeignModules(CrateNum) -> Lrc>, + + [] fn plugin_registrar_fn: PluginRegistrarFn(CrateNum) -> Option, + [] fn derive_registrar_fn: DeriveRegistrarFn(CrateNum) -> Option, + [] fn crate_disambiguator: CrateDisambiguator(CrateNum) -> CrateDisambiguator, + [] fn crate_hash: CrateHash(CrateNum) -> Svh, + [] fn original_crate_name: OriginalCrateName(CrateNum) -> Symbol, + [] fn extra_filename: ExtraFileName(CrateNum) -> String, + }, + + TypeChecking { + [] fn implementations_of_trait: implementations_of_trait_node((CrateNum, DefId)) + -> Lrc>, + [] fn all_trait_implementations: AllTraitImplementations(CrateNum) + -> Lrc>, + }, + + Other { + [] fn dllimport_foreign_items: DllimportForeignItems(CrateNum) + -> Lrc>, + [] fn is_dllimport_foreign_item: IsDllimportForeignItem(DefId) -> bool, + [] fn is_statically_included_foreign_item: IsStaticallyIncludedForeignItem(DefId) -> bool, + [] fn native_library_kind: NativeLibraryKind(DefId) + -> Option, + }, + + Linking { + [] fn link_args: link_args_node(CrateNum) -> Lrc>, + 
}, + + BorrowChecking { + // Lifetime resolution. See `middle::resolve_lifetimes`. + [] fn resolve_lifetimes: ResolveLifetimes(CrateNum) -> Lrc, + [] fn named_region_map: NamedRegion(DefIndex) -> + Option>>, + [] fn is_late_bound_map: IsLateBound(DefIndex) -> + Option>>, + [] fn object_lifetime_defaults_map: ObjectLifetimeDefaults(DefIndex) + -> Option>>>>, + }, + + TypeChecking { + [] fn visibility: Visibility(DefId) -> ty::Visibility, + }, + + Other { + [] fn dep_kind: DepKind(CrateNum) -> DepKind, + [] fn crate_name: CrateName(CrateNum) -> Symbol, + [] fn item_children: ItemChildren(DefId) -> Lrc>, + [] fn extern_mod_stmt_cnum: ExternModStmtCnum(DefId) -> Option, + + [] fn get_lib_features: get_lib_features_node(CrateNum) -> Lrc, + [] fn defined_lib_features: DefinedLibFeatures(CrateNum) + -> Lrc)>>, + [] fn get_lang_items: get_lang_items_node(CrateNum) -> Lrc, + [] fn defined_lang_items: DefinedLangItems(CrateNum) -> Lrc>, + [] fn missing_lang_items: MissingLangItems(CrateNum) -> Lrc>, + [] fn visible_parent_map: visible_parent_map_node(CrateNum) + -> Lrc>, + [] fn missing_extern_crate_item: MissingExternCrateItem(CrateNum) -> bool, + [] fn used_crate_source: UsedCrateSource(CrateNum) -> Lrc, + [] fn postorder_cnums: postorder_cnums_node(CrateNum) -> Lrc>, + + [] fn freevars: Freevars(DefId) -> Option>>, + [] fn maybe_unused_trait_import: MaybeUnusedTraitImport(DefId) -> bool, + [] fn maybe_unused_extern_crates: maybe_unused_extern_crates_node(CrateNum) + -> Lrc>, + + [] fn stability_index: stability_index_node(CrateNum) -> Lrc>, + [] fn all_crate_nums: all_crate_nums_node(CrateNum) -> Lrc>, + + /// A vector of every trait accessible in the whole crate + /// (i.e. including those from subcrates). This is used only for + /// error reporting. 
+ [] fn all_traits: all_traits_node(CrateNum) -> Lrc>, + }, + + Linking { + [] fn exported_symbols: ExportedSymbols(CrateNum) + -> Arc, SymbolExportLevel)>>, + }, + + Codegen { + [] fn collect_and_partition_mono_items: + collect_and_partition_mono_items_node(CrateNum) + -> (Arc, Arc>>>), + [] fn is_codegened_item: IsCodegenedItem(DefId) -> bool, + [] fn codegen_unit: CodegenUnit(InternedString) -> Arc>, + [] fn compile_codegen_unit: CompileCodegenUnit(InternedString) -> Stats, + }, + + Other { + [] fn output_filenames: output_filenames_node(CrateNum) + -> Arc, + }, + + TypeChecking { + // Erases regions from `ty` to yield a new type. + // Normally you would just use `tcx.erase_regions(&value)`, + // however, which uses this query as a kind of cache. + [] fn erase_regions_ty: erase_regions_ty(Ty<'tcx>) -> Ty<'tcx>, + + /// Do not call this query directly: invoke `normalize` instead. + [] fn normalize_projection_ty: NormalizeProjectionTy( + CanonicalProjectionGoal<'tcx> + ) -> Result< + Lrc>>>, + NoSolution, + >, + + /// Do not call this query directly: invoke `normalize_erasing_regions` instead. + [] fn normalize_ty_after_erasing_regions: NormalizeTyAfterErasingRegions( + ParamEnvAnd<'tcx, Ty<'tcx>> + ) -> Ty<'tcx>, + + [] fn implied_outlives_bounds: ImpliedOutlivesBounds( + CanonicalTyGoal<'tcx> + ) -> Result< + Lrc>>>>, + NoSolution, + >, + + /// Do not call this query directly: invoke `infcx.at().dropck_outlives()` instead. + [] fn dropck_outlives: DropckOutlives( + CanonicalTyGoal<'tcx> + ) -> Result< + Lrc>>>, + NoSolution, + >, + + /// Do not call this query directly: invoke `infcx.predicate_may_hold()` or + /// `infcx.predicate_must_hold()` instead. 
+ [] fn evaluate_obligation: EvaluateObligation( + CanonicalPredicateGoal<'tcx> + ) -> Result, + + /// Do not call this query directly: part of the `Eq` type-op + [] fn type_op_eq: TypeOpEq( + CanonicalTypeOpEqGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + >, + + /// Do not call this query directly: part of the `Subtype` type-op + [] fn type_op_subtype: TypeOpSubtype( + CanonicalTypeOpSubtypeGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + >, + + /// Do not call this query directly: part of the `ProvePredicate` type-op + [] fn type_op_prove_predicate: TypeOpProvePredicate( + CanonicalTypeOpProvePredicateGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + >, + + /// Do not call this query directly: part of the `Normalize` type-op + [] fn type_op_normalize_ty: TypeOpNormalizeTy( + CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + >, + + /// Do not call this query directly: part of the `Normalize` type-op + [] fn type_op_normalize_predicate: TypeOpNormalizePredicate( + CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + >, + + /// Do not call this query directly: part of the `Normalize` type-op + [] fn type_op_normalize_poly_fn_sig: TypeOpNormalizePolyFnSig( + CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + >, + + /// Do not call this query directly: part of the `Normalize` type-op + [] fn type_op_normalize_fn_sig: TypeOpNormalizeFnSig( + CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + >, + + [] fn substitute_normalize_and_test_predicates: + substitute_normalize_and_test_predicates_node((DefId, &'tcx Substs<'tcx>)) -> bool, + }, + + Other { + [] fn target_features_whitelist: + target_features_whitelist_node(CrateNum) -> Lrc>>, + + // Get an estimate of the size of an InstanceDef based on its MIR for CGU partitioning. 
+ [] fn instance_def_size_estimate: instance_def_size_estimate_dep_node(ty::InstanceDef<'tcx>) + -> usize, + + [] fn features_query: features_node(CrateNum) -> Lrc, + }, + + TypeChecking { + [] fn program_clauses_for: ProgramClausesFor(DefId) -> Clauses<'tcx>, + + [] fn program_clauses_for_env: ProgramClausesForEnv( + ty::ParamEnv<'tcx> + ) -> Clauses<'tcx>, + }, + + Linking { + [] fn wasm_import_module_map: WasmImportModuleMap(CrateNum) + -> Lrc>, + }, +} + +// `try_get_query` can't be public because it uses the private query +// implementation traits, so we provide access to it selectively. +impl<'a, 'tcx, 'lcx> TyCtxt<'a, 'tcx, 'lcx> { + pub fn try_adt_sized_constraint( + self, + span: Span, + key: DefId, + ) -> Result<&'tcx [Ty<'tcx>], DiagnosticBuilder<'a>> { + self.try_get_query::(span, key) + } + pub fn try_needs_drop_raw( + self, + span: Span, + key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> Result> { + self.try_get_query::(span, key) + } + pub fn try_optimized_mir( + self, + span: Span, + key: DefId, + ) -> Result<&'tcx mir::Mir<'tcx>, DiagnosticBuilder<'a>> { + self.try_get_query::(span, key) + } +} + +////////////////////////////////////////////////////////////////////// +// These functions are little shims used to find the dep-node for a +// given query when there is not a *direct* mapping: + + +fn features_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::Features +} + +fn codegen_fn_attrs<'tcx>(id: DefId) -> DepConstructor<'tcx> { + DepConstructor::CodegenFnAttrs { 0: id } +} + +fn erase_regions_ty<'tcx>(ty: Ty<'tcx>) -> DepConstructor<'tcx> { + DepConstructor::EraseRegionsTy { ty } +} + +fn const_value_to_allocation<'tcx>( + val: &'tcx ty::Const<'tcx>, +) -> DepConstructor<'tcx> { + DepConstructor::ConstValueToAllocation { val } +} + +fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { + DepConstructor::TypeParamPredicates { + item_id, + param_id + } +} + +fn 
fulfill_obligation_dep_node<'tcx>((param_env, trait_ref): + (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> DepConstructor<'tcx> { + DepConstructor::FulfillObligation { + param_env, + trait_ref + } +} + +fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::Coherence +} + +fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CoherenceInherentImplOverlapCheck +} + +fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::Reachability +} + +fn mir_shim_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) -> DepConstructor<'tcx> { + DepConstructor::MirShim { + instance_def + } +} + +fn symbol_name_dep_node<'tcx>(instance: ty::Instance<'tcx>) -> DepConstructor<'tcx> { + DepConstructor::InstanceSymbolName { instance } +} + +fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::TypeckBodiesKrate +} + +fn const_eval_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) + -> DepConstructor<'tcx> { + DepConstructor::ConstEval { param_env } +} + +fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::MirKeys +} + +fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CrateVariances +} + +fn is_copy_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsCopy { param_env } +} + +fn is_sized_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsSized { param_env } +} + +fn is_freeze_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::IsFreeze { param_env } +} + +fn needs_drop_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + DepConstructor::NeedsDrop { param_env } +} + +fn layout_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { + 
DepConstructor::Layout { param_env } +} + +fn lint_levels_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::LintLevels +} + +fn specializes_node<'tcx>((a, b): (DefId, DefId)) -> DepConstructor<'tcx> { + DepConstructor::Specializes { impl1: a, impl2: b } +} + +fn implementations_of_trait_node<'tcx>((krate, trait_id): (CrateNum, DefId)) + -> DepConstructor<'tcx> +{ + DepConstructor::ImplementationsOfTrait { krate, trait_id } +} + +fn link_args_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::LinkArgs +} + +fn get_lib_features_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::GetLibFeatures +} + +fn get_lang_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::GetLangItems +} + +fn visible_parent_map_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::VisibleParentMap +} + +fn postorder_cnums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::PostorderCnums +} + +fn maybe_unused_extern_crates_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::MaybeUnusedExternCrates +} + +fn stability_index_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::StabilityIndex +} + +fn all_crate_nums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::AllCrateNums +} + +fn all_traits_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::AllTraits +} + +fn collect_and_partition_mono_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::CollectAndPartitionMonoItems +} + +fn output_filenames_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::OutputFilenames +} + +fn vtable_methods_node<'tcx>(trait_ref: ty::PolyTraitRef<'tcx>) -> DepConstructor<'tcx> { + DepConstructor::VtableMethods{ trait_ref } +} + +fn substitute_normalize_and_test_predicates_node<'tcx>(key: (DefId, &'tcx Substs<'tcx>)) + -> DepConstructor<'tcx> { + DepConstructor::SubstituteNormalizeAndTestPredicates { key } +} + +fn 
target_features_whitelist_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { + DepConstructor::TargetFeaturesWhitelist +} + +fn instance_def_size_estimate_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) + -> DepConstructor<'tcx> { + DepConstructor::InstanceDefSizeEstimate { + instance_def + } +} diff --git a/src/librustc/ty/maps/on_disk_cache.rs b/src/librustc/ty/query/on_disk_cache.rs similarity index 83% rename from src/librustc/ty/maps/on_disk_cache.rs rename to src/librustc/ty/query/on_disk_cache.rs index 4e2421dad216..aa42b4072bd8 100644 --- a/src/librustc/ty/maps/on_disk_cache.rs +++ b/src/librustc/ty/query/on_disk_cache.rs @@ -15,16 +15,16 @@ use hir::def_id::{CrateNum, DefIndex, DefId, LocalDefId, RESERVED_FOR_INCR_COMP_CACHE, LOCAL_CRATE}; use hir::map::definitions::DefPathHash; use ich::{CachingCodemapView, Fingerprint}; -use mir; +use mir::{self, interpret}; +use mir::interpret::{AllocDecodingSession, AllocDecodingState}; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque, SpecializedDecoder, SpecializedEncoder, UseSpecializedDecodable, UseSpecializedEncodable}; use session::{CrateDisambiguator, Session}; -use std::cell::RefCell; use std::mem; -use std::rc::Rc; use syntax::ast::NodeId; use syntax::codemap::{CodeMap, StableFilemapId}; use syntax_pos::{BytePos, Span, DUMMY_SP, FileMap}; @@ -32,6 +32,7 @@ use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo}; use ty; use ty::codec::{self as ty_codec, TyDecoder, TyEncoder}; use ty::context::TyCtxt; +use util::common::time; const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE; @@ -56,17 +57,17 @@ pub struct OnDiskCache<'sess> { // This field collects all Diagnostics emitted during the current // compilation session. 
- current_diagnostics: RefCell>>, + current_diagnostics: Lock>>, prev_cnums: Vec<(u32, String, CrateDisambiguator)>, - cnum_map: RefCell>>>, + cnum_map: Once>>, codemap: &'sess CodeMap, file_index_to_stable_id: FxHashMap, // These two fields caches that are populated lazily during decoding. - file_index_to_file: RefCell>>, - synthetic_expansion_infos: RefCell>, + file_index_to_file: Lock>>, + synthetic_expansion_infos: Lock>, // A map from dep-node to the position of the cached query result in // `serialized_data`. @@ -75,6 +76,8 @@ pub struct OnDiskCache<'sess> { // A map from dep-node to the position of any associated diagnostics in // `serialized_data`. prev_diagnostics_index: FxHashMap, + + alloc_decoding_state: AllocDecodingState, } // This type is used only for (de-)serialization. @@ -84,6 +87,8 @@ struct Footer { prev_cnums: Vec<(u32, String, CrateDisambiguator)>, query_result_index: EncodedQueryResultIndex, diagnostics_index: EncodedQueryResultIndex, + // the location of all allocations + interpret_alloc_index: Vec, } type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>; @@ -132,14 +137,15 @@ impl<'sess> OnDiskCache<'sess> { OnDiskCache { serialized_data: data, file_index_to_stable_id: footer.file_index_to_stable_id, - file_index_to_file: RefCell::new(FxHashMap()), + file_index_to_file: Lock::new(FxHashMap()), prev_cnums: footer.prev_cnums, - cnum_map: RefCell::new(None), + cnum_map: Once::new(), codemap: sess.codemap(), - current_diagnostics: RefCell::new(FxHashMap()), + current_diagnostics: Lock::new(FxHashMap()), query_result_index: footer.query_result_index.into_iter().collect(), prev_diagnostics_index: footer.diagnostics_index.into_iter().collect(), - synthetic_expansion_infos: RefCell::new(FxHashMap()), + synthetic_expansion_infos: Lock::new(FxHashMap()), + alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index), } } @@ -147,14 +153,15 @@ impl<'sess> OnDiskCache<'sess> { OnDiskCache { serialized_data: 
Vec::new(), file_index_to_stable_id: FxHashMap(), - file_index_to_file: RefCell::new(FxHashMap()), + file_index_to_file: Lock::new(FxHashMap()), prev_cnums: vec![], - cnum_map: RefCell::new(None), + cnum_map: Once::new(), codemap, - current_diagnostics: RefCell::new(FxHashMap()), + current_diagnostics: Lock::new(FxHashMap()), query_result_index: FxHashMap(), prev_diagnostics_index: FxHashMap(), - synthetic_expansion_infos: RefCell::new(FxHashMap()), + synthetic_expansion_infos: Lock::new(FxHashMap()), + alloc_decoding_state: AllocDecodingState::new(Vec::new()), } } @@ -187,6 +194,8 @@ impl<'sess> OnDiskCache<'sess> { type_shorthands: FxHashMap(), predicate_shorthands: FxHashMap(), expn_info_shorthands: FxHashMap(), + interpret_allocs: FxHashMap(), + interpret_allocs_inverse: Vec::new(), codemap: CachingCodemapView::new(tcx.sess.codemap()), file_to_file_index, }; @@ -199,13 +208,17 @@ impl<'sess> OnDiskCache<'sess> { // Encode query results let mut query_result_index = EncodedQueryResultIndex::new(); - { - use ty::maps::queries::*; + time(tcx.sess, "encode query results", || { + use ty::query::queries::*; let enc = &mut encoder; let qri = &mut query_result_index; - // Encode TypeckTables + encode_query_results::(tcx, enc, qri)?; + encode_query_results::(tcx, enc, qri)?; + encode_query_results::(tcx, enc, qri)?; + encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; + encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; @@ -213,11 +226,33 @@ impl<'sess> OnDiskCache<'sess> { encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; - encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; - encode_query_results::(tcx, enc, qri)?; encode_query_results::(tcx, enc, qri)?; - } + encode_query_results::(tcx, enc, qri)?; + encode_query_results::(tcx, enc, 
qri)?; + + // const eval is special, it only encodes successfully evaluated constants + use ty::query::QueryAccessors; + let cache = const_eval::query_cache(tcx).borrow(); + assert!(cache.active.is_empty()); + for (key, entry) in cache.results.iter() { + use ty::query::config::QueryDescription; + if const_eval::cache_on_disk(key.clone()) { + if let Ok(ref value) = entry.value { + let dep_node = SerializedDepNodeIndex::new(entry.index.index()); + + // Record position of the cache entry + qri.push((dep_node, AbsoluteBytePos::new(enc.position()))); + + // Encode the type check tables with the SerializedDepNodeIndex + // as tag. + enc.encode_tagged(dep_node, value)?; + } + } + } + + Ok(()) + })?; // Encode diagnostics let diagnostics_index = { @@ -238,6 +273,31 @@ impl<'sess> OnDiskCache<'sess> { diagnostics_index }; + let interpret_alloc_index = { + let mut interpret_alloc_index = Vec::new(); + let mut n = 0; + loop { + let new_n = encoder.interpret_allocs_inverse.len(); + // if we have found new ids, serialize those, too + if n == new_n { + // otherwise, abort + break; + } + for idx in n..new_n { + let id = encoder.interpret_allocs_inverse[idx]; + let pos = encoder.position() as u32; + interpret_alloc_index.push(pos); + interpret::specialized_encode_alloc_id( + &mut encoder, + tcx, + id, + )?; + } + n = new_n; + } + interpret_alloc_index + }; + let sorted_cnums = sorted_cnums_including_local_crate(tcx); let prev_cnums: Vec<_> = sorted_cnums.iter().map(|&cnum| { let crate_name = tcx.original_crate_name(cnum).as_str().to_string(); @@ -252,6 +312,7 @@ impl<'sess> OnDiskCache<'sess> { prev_cnums, query_result_index, diagnostics_index, + interpret_alloc_index, })?; // Encode the position of the footer as the last 8 bytes of the @@ -344,22 +405,20 @@ impl<'sess> OnDiskCache<'sess> { return None }; - let mut cnum_map = self.cnum_map.borrow_mut(); - if cnum_map.is_none() { - *cnum_map = Some(Self::compute_cnum_map(tcx, &self.prev_cnums[..])); - } - - let mut 
synthetic_expansion_infos = self.synthetic_expansion_infos.borrow_mut(); - let mut file_index_to_file = self.file_index_to_file.borrow_mut(); + // Initialize the cnum_map using the value from the thread which finishes the closure first + self.cnum_map.init_nonlocking_same(|| { + Self::compute_cnum_map(tcx, &self.prev_cnums[..]) + }); let mut decoder = CacheDecoder { tcx, opaque: opaque::Decoder::new(&self.serialized_data[..], pos.to_usize()), codemap: self.codemap, - cnum_map: cnum_map.as_ref().unwrap(), - file_index_to_file: &mut file_index_to_file, + cnum_map: self.cnum_map.get(), + file_index_to_file: &self.file_index_to_file, file_index_to_stable_id: &self.file_index_to_stable_id, - synthetic_expansion_infos: &mut synthetic_expansion_infos, + synthetic_expansion_infos: &self.synthetic_expansion_infos, + alloc_decoding_session: self.alloc_decoding_state.new_decoding_session(), }; match decode_tagged(&mut decoder, dep_node_index) { @@ -418,21 +477,22 @@ struct CacheDecoder<'a, 'tcx: 'a, 'x> { opaque: opaque::Decoder<'x>, codemap: &'x CodeMap, cnum_map: &'x IndexVec>, - synthetic_expansion_infos: &'x mut FxHashMap, - file_index_to_file: &'x mut FxHashMap>, + synthetic_expansion_infos: &'x Lock>, + file_index_to_file: &'x Lock>>, file_index_to_stable_id: &'x FxHashMap, + alloc_decoding_session: AllocDecodingSession<'x>, } impl<'a, 'tcx, 'x> CacheDecoder<'a, 'tcx, 'x> { - fn file_index_to_file(&mut self, index: FileMapIndex) -> Rc { + fn file_index_to_file(&self, index: FileMapIndex) -> Lrc { let CacheDecoder { - ref mut file_index_to_file, + ref file_index_to_file, ref file_index_to_stable_id, ref codemap, .. 
} = *self; - file_index_to_file.entry(index).or_insert_with(|| { + file_index_to_file.borrow_mut().entry(index).or_insert_with(|| { let stable_id = file_index_to_stable_id[&index]; codemap.filemap_by_stable_id(stable_id) .expect("Failed to lookup FileMap in new context.") @@ -515,7 +575,8 @@ impl<'a, 'tcx: 'a, 'x> ty_codec::TyDecoder<'a, 'tcx> for CacheDecoder<'a, 'tcx, } let ty = or_insert_with(self)?; - tcx.rcache.borrow_mut().insert(cache_key, ty); + // This may overwrite the entry, but it should overwrite with the same value + tcx.rcache.borrow_mut().insert_same(cache_key, ty); Ok(ty) } @@ -540,6 +601,12 @@ impl<'a, 'tcx: 'a, 'x> ty_codec::TyDecoder<'a, 'tcx> for CacheDecoder<'a, 'tcx, implement_ty_decoder!( CacheDecoder<'a, 'tcx, 'x> ); +impl<'a, 'tcx, 'x> SpecializedDecoder for CacheDecoder<'a, 'tcx, 'x> { + fn specialized_decode(&mut self) -> Result { + let alloc_decoding_session = self.alloc_decoding_session; + alloc_decoding_session.decode_alloc_id(self) + } +} impl<'a, 'tcx, 'x> SpecializedDecoder for CacheDecoder<'a, 'tcx, 'x> { fn specialized_decode(&mut self) -> Result { let tag: u8 = Decodable::decode(self)?; @@ -556,7 +623,7 @@ impl<'a, 'tcx, 'x> SpecializedDecoder for CacheDecoder<'a, 'tcx, 'x> { let len = BytePos::decode(self)?; let file_lo = self.file_index_to_file(file_lo_index); - let lo = file_lo.lines.borrow()[line_lo - 1] + col_lo; + let lo = file_lo.lines[line_lo - 1] + col_lo; let hi = lo + len; let expn_info_tag = u8::decode(self)?; @@ -569,19 +636,24 @@ impl<'a, 'tcx, 'x> SpecializedDecoder for CacheDecoder<'a, 'tcx, 'x> { let pos = AbsoluteBytePos::new(self.opaque.position()); let expn_info: ExpnInfo = Decodable::decode(self)?; let ctxt = SyntaxContext::allocate_directly(expn_info); - self.synthetic_expansion_infos.insert(pos, ctxt); + self.synthetic_expansion_infos.borrow_mut().insert(pos, ctxt); ctxt } TAG_EXPANSION_INFO_SHORTHAND => { let pos = AbsoluteBytePos::decode(self)?; - if let Some(ctxt) = 
self.synthetic_expansion_infos.get(&pos).cloned() { + let cached_ctxt = self.synthetic_expansion_infos + .borrow() + .get(&pos) + .cloned(); + + if let Some(ctxt) = cached_ctxt { ctxt } else { let expn_info = self.with_position(pos.to_usize(), |this| { ExpnInfo::decode(this) })?; let ctxt = SyntaxContext::allocate_directly(expn_info); - self.synthetic_expansion_infos.insert(pos, ctxt); + self.synthetic_expansion_infos.borrow_mut().insert(pos, ctxt); ctxt } } @@ -696,6 +768,8 @@ struct CacheEncoder<'enc, 'a, 'tcx, E> type_shorthands: FxHashMap, usize>, predicate_shorthands: FxHashMap, usize>, expn_info_shorthands: FxHashMap, + interpret_allocs: FxHashMap, + interpret_allocs_inverse: Vec, codemap: CachingCodemapView<'tcx>, file_to_file_index: FxHashMap<*const FileMap, FileMapIndex>, } @@ -703,7 +777,7 @@ struct CacheEncoder<'enc, 'a, 'tcx, E> impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E> where E: 'enc + ty_codec::TyEncoder { - fn filemap_index(&mut self, filemap: Rc) -> FileMapIndex { + fn filemap_index(&mut self, filemap: Lrc) -> FileMapIndex { self.file_to_file_index[&(&*filemap as *const FileMap)] } @@ -728,6 +802,25 @@ impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E> } } +impl<'enc, 'a, 'tcx, E> SpecializedEncoder for CacheEncoder<'enc, 'a, 'tcx, E> + where E: 'enc + ty_codec::TyEncoder +{ + fn specialized_encode(&mut self, alloc_id: &interpret::AllocId) -> Result<(), Self::Error> { + use std::collections::hash_map::Entry; + let index = match self.interpret_allocs.entry(*alloc_id) { + Entry::Occupied(e) => *e.get(), + Entry::Vacant(e) => { + let idx = self.interpret_allocs_inverse.len(); + self.interpret_allocs_inverse.push(*alloc_id); + e.insert(idx); + idx + }, + }; + + index.encode(self) + } +} + impl<'enc, 'a, 'tcx, E> SpecializedEncoder for CacheEncoder<'enc, 'a, 'tcx, E> where E: 'enc + ty_codec::TyEncoder { @@ -886,7 +979,7 @@ impl<'enc, 'a, 'tcx, E> SpecializedEncoder for CacheEncoder<'enc, 'a, 't } impl<'enc, 'a, 'tcx> SpecializedEncoder 
-for CacheEncoder<'enc, 'a, 'tcx, opaque::Encoder<'enc>> +for CacheEncoder<'enc, 'a, 'tcx, opaque::Encoder> { fn specialized_encode(&mut self, f: &Fingerprint) -> Result<(), Self::Error> { f.encode_opaque(&mut self.encoder) @@ -964,7 +1057,7 @@ impl IntEncodedWithFixedSize { impl UseSpecializedEncodable for IntEncodedWithFixedSize {} impl UseSpecializedDecodable for IntEncodedWithFixedSize {} -impl<'enc> SpecializedEncoder for opaque::Encoder<'enc> { +impl SpecializedEncoder for opaque::Encoder { fn specialized_encode(&mut self, x: &IntEncodedWithFixedSize) -> Result<(), Self::Error> { let start_pos = self.position(); for i in 0 .. IntEncodedWithFixedSize::ENCODED_SIZE { @@ -997,11 +1090,18 @@ fn encode_query_results<'enc, 'a, 'tcx, Q, E>(tcx: TyCtxt<'a, 'tcx, 'tcx>, encoder: &mut CacheEncoder<'enc, 'a, 'tcx, E>, query_result_index: &mut EncodedQueryResultIndex) -> Result<(), E::Error> - where Q: super::plumbing::GetCacheInternal<'tcx>, + where Q: super::config::QueryDescription<'tcx>, E: 'enc + TyEncoder, Q::Value: Encodable, { - for (key, entry) in Q::get_cache_internal(tcx).map.iter() { + let desc = &format!("encode_query_results for {}", + unsafe { ::std::intrinsics::type_name::() }); + + time(tcx.sess, desc, || { + + let map = Q::query_cache(tcx).borrow(); + assert!(map.active.is_empty()); + for (key, entry) in map.results.iter() { if Q::cache_on_disk(key.clone()) { let dep_node = SerializedDepNodeIndex::new(entry.index.index()); @@ -1015,4 +1115,5 @@ fn encode_query_results<'enc, 'a, 'tcx, Q, E>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } Ok(()) + }) } diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs new file mode 100644 index 000000000000..f59e48cb3515 --- /dev/null +++ b/src/librustc/ty/query/plumbing.rs @@ -0,0 +1,1329 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The implementation of the query system itself. Defines the macros +//! that generate the actual methods on tcx which find and execute the +//! provider, manage the caches, and so forth. + +use dep_graph::{DepNodeIndex, DepNode, DepKind, DepNodeColor}; +use errors::DiagnosticBuilder; +use errors::Level; +use errors::Diagnostic; +use errors::FatalError; +use ty::tls; +use ty::{TyCtxt}; +use ty::query::Query; +use ty::query::config::{QueryConfig, QueryDescription}; +use ty::query::job::{QueryJob, QueryResult, QueryInfo}; +use ty::item_path; + +use util::common::{profq_msg, ProfileQueriesMsg, QueryMsg}; + +use rustc_data_structures::fx::{FxHashMap}; +use rustc_data_structures::sync::{Lrc, Lock}; +use std::mem; +use std::ptr; +use std::collections::hash_map::Entry; +use syntax_pos::Span; +use syntax::codemap::DUMMY_SP; + +pub struct QueryCache<'tcx, D: QueryConfig<'tcx> + ?Sized> { + pub(super) results: FxHashMap>, + pub(super) active: FxHashMap>, +} + +pub(super) struct QueryValue { + pub(super) value: T, + pub(super) index: DepNodeIndex, +} + +impl QueryValue { + pub(super) fn new(value: T, + dep_node_index: DepNodeIndex) + -> QueryValue { + QueryValue { + value, + index: dep_node_index, + } + } +} + +impl<'tcx, M: QueryConfig<'tcx>> QueryCache<'tcx, M> { + pub(super) fn new() -> QueryCache<'tcx, M> { + QueryCache { + results: FxHashMap(), + active: FxHashMap(), + } + } +} + +// If enabled, send a message to the profile-queries thread +macro_rules! profq_msg { + ($tcx:expr, $msg:expr) => { + if cfg!(debug_assertions) { + if $tcx.sess.profile_queries() { + profq_msg($tcx.sess, $msg) + } + } + } +} + +// If enabled, format a key using its debug string, which can be +// expensive to compute (in terms of time). +macro_rules! 
profq_query_msg { + ($query:expr, $tcx:expr, $key:expr) => {{ + let msg = if cfg!(debug_assertions) { + if $tcx.sess.profile_queries_and_keys() { + Some(format!("{:?}", $key)) + } else { None } + } else { None }; + QueryMsg { + query: $query, + msg, + } + }} +} + +/// A type representing the responsibility to execute the job in the `job` field. +/// This will poison the relevant query if dropped. +pub(super) struct JobOwner<'a, 'tcx: 'a, Q: QueryDescription<'tcx> + 'a> { + cache: &'a Lock>, + key: Q::Key, + job: Lrc>, +} + +impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { + /// Either gets a JobOwner corresponding the the query, allowing us to + /// start executing the query, or it returns with the result of the query. + /// If the query is executing elsewhere, this will wait for it. + /// If the query panicked, this will silently panic. + /// + /// This function is inlined because that results in a noticeable speedup + /// for some compile-time benchmarks. + #[inline(always)] + pub(super) fn try_get( + tcx: TyCtxt<'a, 'tcx, '_>, + span: Span, + key: &Q::Key, + ) -> TryGetJob<'a, 'tcx, Q> { + let cache = Q::query_cache(tcx); + loop { + let mut lock = cache.borrow_mut(); + if let Some(value) = lock.results.get(key) { + profq_msg!(tcx, ProfileQueriesMsg::CacheHit); + tcx.sess.profiler(|p| { + p.record_query(Q::CATEGORY); + p.record_query_hit(Q::CATEGORY); + }); + + let result = Ok((value.value.clone(), value.index)); + return TryGetJob::JobCompleted(result); + } + let job = match lock.active.entry((*key).clone()) { + Entry::Occupied(entry) => { + match *entry.get() { + QueryResult::Started(ref job) => job.clone(), + QueryResult::Poisoned => FatalError.raise(), + } + } + Entry::Vacant(entry) => { + // No job entry for this query. 
Return a new one to be started later + return tls::with_related_context(tcx, |icx| { + let info = QueryInfo { + span, + query: Q::query(key.clone()), + }; + let job = Lrc::new(QueryJob::new(info, icx.query.clone())); + let owner = JobOwner { + cache, + job: job.clone(), + key: (*key).clone(), + }; + entry.insert(QueryResult::Started(job)); + TryGetJob::NotYetStarted(owner) + }) + } + }; + mem::drop(lock); + + if let Err(cycle) = job.await(tcx, span) { + return TryGetJob::JobCompleted(Err(cycle)); + } + } + } + + /// Completes the query by updating the query cache with the `result`, + /// signals the waiter and forgets the JobOwner, so it won't poison the query + pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) { + // We can move out of `self` here because we `mem::forget` it below + let key = unsafe { ptr::read(&self.key) }; + let job = unsafe { ptr::read(&self.job) }; + let cache = self.cache; + + // Forget ourself so our destructor won't poison the query + mem::forget(self); + + let value = QueryValue::new(result.clone(), dep_node_index); + { + let mut lock = cache.borrow_mut(); + lock.active.remove(&key); + lock.results.insert(key, value); + } + + job.signal_complete(); + } + + /// Executes a job by changing the ImplicitCtxt to point to the + /// new query job while it executes. It returns the diagnostics + /// captured during execution and the actual result. 
+ pub(super) fn start<'lcx, F, R>( + &self, + tcx: TyCtxt<'_, 'tcx, 'lcx>, + compute: F) + -> (R, Vec) + where + F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R + { + // The TyCtxt stored in TLS has the same global interner lifetime + // as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes + // when accessing the ImplicitCtxt + let r = tls::with_related_context(tcx, move |current_icx| { + // Update the ImplicitCtxt to point to our new query job + let new_icx = tls::ImplicitCtxt { + tcx, + query: Some(self.job.clone()), + layout_depth: current_icx.layout_depth, + task: current_icx.task, + }; + + // Use the ImplicitCtxt while we execute the query + tls::enter_context(&new_icx, |_| { + compute(tcx) + }) + }); + + // Extract the diagnostic from the job + let diagnostics = mem::replace(&mut *self.job.diagnostics.lock(), Vec::new()); + + (r, diagnostics) + } +} + +impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> { + fn drop(&mut self) { + // Poison the query so jobs waiting on it panic + self.cache.borrow_mut().active.insert(self.key.clone(), QueryResult::Poisoned); + // Also signal the completion of the job, so waiters + // will continue execution + self.job.signal_complete(); + } +} + +#[derive(Clone)] +pub struct CycleError<'tcx> { + /// The query and related span which uses the cycle + pub(super) usage: Option<(Span, Query<'tcx>)>, + pub(super) cycle: Vec>, +} + +/// The result of `try_get_lock` +pub(super) enum TryGetJob<'a, 'tcx: 'a, D: QueryDescription<'tcx> + 'a> { + /// The query is not yet started. Contains a guard to the cache eventually used to start it. + NotYetStarted(JobOwner<'a, 'tcx, D>), + + /// The query was already completed. 
+ /// Returns the result of the query and its dep node index + /// if it succeeded or a cycle error if it failed + JobCompleted(Result<(D::Value, DepNodeIndex), CycleError<'tcx>>), +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub(super) fn report_cycle(self, CycleError { usage, cycle: stack }: CycleError<'gcx>) + -> DiagnosticBuilder<'a> + { + assert!(!stack.is_empty()); + + let fix_span = |span: Span, query: &Query<'gcx>| { + self.sess.codemap().def_span(query.default_span(self, span)) + }; + + // Disable naming impls with types in this path, since that + // sometimes cycles itself, leading to extra cycle errors. + // (And cycle errors around impls tend to occur during the + // collect/coherence phases anyhow.) + item_path::with_forced_impl_filename_line(|| { + let span = fix_span(stack[1 % stack.len()].span, &stack[0].query); + let mut err = struct_span_err!(self.sess, + span, + E0391, + "cycle detected when {}", + stack[0].query.describe(self)); + + for i in 1..stack.len() { + let query = &stack[i].query; + let span = fix_span(stack[(i + 1) % stack.len()].span, query); + err.span_note(span, &format!("...which requires {}...", query.describe(self))); + } + + err.note(&format!("...which again requires {}, completing the cycle", + stack[0].query.describe(self))); + + if let Some((span, query)) = usage { + err.span_note(fix_span(span, &query), + &format!("cycle used when {}", query.describe(self))); + } + + return err + }) + } + + pub fn try_print_query_stack() { + eprintln!("query stack during panic:"); + + tls::with_context_opt(|icx| { + if let Some(icx) = icx { + let mut current_query = icx.query.clone(); + let mut i = 0; + + while let Some(query) = current_query { + let mut db = DiagnosticBuilder::new(icx.tcx.sess.diagnostic(), + Level::FailureNote, + &format!("#{} [{}] {}", + i, + query.info.query.name(), + query.info.query.describe(icx.tcx))); + db.set_span(icx.tcx.sess.codemap().def_span(query.info.span)); + 
icx.tcx.sess.diagnostic().force_print_db(db); + + current_query = query.parent.clone(); + i += 1; + } + } + }); + + eprintln!("end of query stack"); + } + + /// Try to read a node index for the node dep_node. + /// A node will have an index, when it's already been marked green, or when we can mark it + /// green. This function will mark the current task as a reader of the specified node, when + /// the a node index can be found for that node. + pub(super) fn try_mark_green_and_read(self, dep_node: &DepNode) -> Option { + match self.dep_graph.node_color(dep_node) { + Some(DepNodeColor::Green(dep_node_index)) => { + self.dep_graph.read_index(dep_node_index); + Some(dep_node_index) + } + Some(DepNodeColor::Red) => { + None + } + None => { + // try_mark_green (called below) will panic when full incremental + // compilation is disabled. If that's the case, we can't try to mark nodes + // as green anyway, so we can safely return None here. + if !self.dep_graph.is_fully_enabled() { + return None; + } + match self.dep_graph.try_mark_green(self.global_tcx(), &dep_node) { + Some(dep_node_index) => { + debug_assert!(self.dep_graph.is_green(&dep_node)); + self.dep_graph.read_index(dep_node_index); + Some(dep_node_index) + } + None => { + None + } + } + } + } + } + + fn try_get_with>( + self, + span: Span, + key: Q::Key) + -> Result> + { + debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})", + Q::NAME, + key, + span); + + profq_msg!(self, + ProfileQueriesMsg::QueryBegin( + span.data(), + profq_query_msg!(Q::NAME, self, key), + ) + ); + + self.sess.profiler(|p| p.record_query(Q::CATEGORY)); + + let job = match JobOwner::try_get(self, span, &key) { + TryGetJob::NotYetStarted(job) => job, + TryGetJob::JobCompleted(result) => { + return result.map(|(v, index)| { + self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY)); + self.dep_graph.read_index(index); + v + }) + } + }; + + // Fast path for when incr. comp. is off. `to_dep_node` is + // expensive for some DepKinds. 
+ if !self.dep_graph.is_fully_enabled() { + let null_dep_node = DepNode::new_no_params(::dep_graph::DepKind::Null); + return self.force_query_with_job::(key, job, null_dep_node).map(|(v, _)| v); + } + + let dep_node = Q::to_dep_node(self, &key); + + if dep_node.kind.is_anon() { + profq_msg!(self, ProfileQueriesMsg::ProviderBegin); + self.sess.profiler(|p| p.start_activity(Q::CATEGORY)); + + let res = job.start(self, |tcx| { + tcx.dep_graph.with_anon_task(dep_node.kind, || { + Q::compute(tcx.global_tcx(), key) + }) + }); + + self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); + profq_msg!(self, ProfileQueriesMsg::ProviderEnd); + let ((result, dep_node_index), diagnostics) = res; + + self.dep_graph.read_index(dep_node_index); + + self.queries.on_disk_cache + .store_diagnostics_for_anon_node(dep_node_index, diagnostics); + + job.complete(&result, dep_node_index); + + return Ok(result); + } + + if !dep_node.kind.is_input() { + if let Some(dep_node_index) = self.try_mark_green_and_read(&dep_node) { + profq_msg!(self, ProfileQueriesMsg::CacheHit); + self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY)); + + return self.load_from_disk_and_cache_in_memory::(key, + job, + dep_node_index, + &dep_node) + } + } + + match self.force_query_with_job::(key, job, dep_node) { + Ok((result, dep_node_index)) => { + self.dep_graph.read_index(dep_node_index); + Ok(result) + } + Err(e) => Err(e) + } + } + + fn load_from_disk_and_cache_in_memory>( + self, + key: Q::Key, + job: JobOwner<'a, 'gcx, Q>, + dep_node_index: DepNodeIndex, + dep_node: &DepNode + ) -> Result> + { + // Note this function can be called concurrently from the same query + // We must ensure that this is handled correctly + + debug_assert!(self.dep_graph.is_green(dep_node)); + + // First we try to load the result from the on-disk cache + let result = if Q::cache_on_disk(key.clone()) && + self.sess.opts.debugging_opts.incremental_queries { + let prev_dep_node_index = + self.dep_graph.prev_dep_node_index_of(dep_node); + 
let result = Q::try_load_from_disk(self.global_tcx(), + prev_dep_node_index); + + // We always expect to find a cached result for things that + // can be forced from DepNode. + debug_assert!(!dep_node.kind.can_reconstruct_query_key() || + result.is_some(), + "Missing on-disk cache entry for {:?}", + dep_node); + result + } else { + // Some things are never cached on disk. + None + }; + + let result = if let Some(result) = result { + result + } else { + // We could not load a result from the on-disk cache, so + // recompute. + + // The diagnostics for this query have already been + // promoted to the current session during + // try_mark_green(), so we can ignore them here. + let (result, _) = job.start(self, |tcx| { + // The dep-graph for this computation is already in + // place + tcx.dep_graph.with_ignore(|| { + Q::compute(tcx, key) + }) + }); + result + }; + + // If -Zincremental-verify-ich is specified, re-hash results from + // the cache and make sure that they have the expected fingerprint. 
+ if self.sess.opts.debugging_opts.incremental_verify_ich { + use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; + use ich::Fingerprint; + + assert!(Some(self.dep_graph.fingerprint_of(dep_node_index)) == + self.dep_graph.prev_fingerprint_of(dep_node), + "Fingerprint for green query instance not loaded \ + from cache: {:?}", dep_node); + + debug!("BEGIN verify_ich({:?})", dep_node); + let mut hcx = self.create_stable_hashing_context(); + let mut hasher = StableHasher::new(); + + result.hash_stable(&mut hcx, &mut hasher); + + let new_hash: Fingerprint = hasher.finish(); + debug!("END verify_ich({:?})", dep_node); + + let old_hash = self.dep_graph.fingerprint_of(dep_node_index); + + assert!(new_hash == old_hash, "Found unstable fingerprints \ + for {:?}", dep_node); + } + + if self.sess.opts.debugging_opts.query_dep_graph { + self.dep_graph.mark_loaded_from_cache(dep_node_index, true); + } + + job.complete(&result, dep_node_index); + + Ok(result) + } + + fn force_query_with_job>( + self, + key: Q::Key, + job: JobOwner<'_, 'gcx, Q>, + dep_node: DepNode) + -> Result<(Q::Value, DepNodeIndex), CycleError<'gcx>> { + // If the following assertion triggers, it can have two reasons: + // 1. Something is wrong with DepNode creation, either here or + // in DepGraph::try_mark_green() + // 2. 
Two distinct query keys get mapped to the same DepNode + // (see for example #48923) + assert!(!self.dep_graph.dep_node_exists(&dep_node), + "Forcing query with already existing DepNode.\n\ + - query-key: {:?}\n\ + - dep-node: {:?}", + key, dep_node); + + profq_msg!(self, ProfileQueriesMsg::ProviderBegin); + self.sess.profiler(|p| { + p.start_activity(Q::CATEGORY); + p.record_query(Q::CATEGORY); + }); + + let res = job.start(self, |tcx| { + if dep_node.kind.is_eval_always() { + tcx.dep_graph.with_eval_always_task(dep_node, + tcx, + key, + Q::compute) + } else { + tcx.dep_graph.with_task(dep_node, + tcx, + key, + Q::compute) + } + }); + + self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); + profq_msg!(self, ProfileQueriesMsg::ProviderEnd); + + let ((result, dep_node_index), diagnostics) = res; + + if self.sess.opts.debugging_opts.query_dep_graph { + self.dep_graph.mark_loaded_from_cache(dep_node_index, false); + } + + if dep_node.kind != ::dep_graph::DepKind::Null { + self.queries.on_disk_cache + .store_diagnostics(dep_node_index, diagnostics); + } + + job.complete(&result, dep_node_index); + + Ok((result, dep_node_index)) + } + + /// Ensure that either this query has all green inputs or been executed. + /// Executing query::ensure(D) is considered a read of the dep-node D. + /// + /// This function is particularly useful when executing passes for their + /// side-effects -- e.g., in order to report errors for erroneous programs. + /// + /// Note: The optimization is only available during incr. comp. + pub(super) fn ensure_query>(self, key: Q::Key) -> () { + let dep_node = Q::to_dep_node(self, &key); + + // Ensuring an "input" or anonymous query makes no sense + assert!(!dep_node.kind.is_anon()); + assert!(!dep_node.kind.is_input()); + if self.try_mark_green_and_read(&dep_node).is_none() { + // A None return from `try_mark_green_and_read` means that this is either + // a new dep node or that the dep node has already been marked red. 
+ // Either way, we can't call `dep_graph.read()` as we don't have the + // DepNodeIndex. We must invoke the query itself. The performance cost + // this introduces should be negligible as we'll immediately hit the + // in-memory cache, or another query down the line will. + + self.sess.profiler(|p| { + p.start_activity(Q::CATEGORY); + p.record_query(Q::CATEGORY); + }); + + let _ = self.get_query::(DUMMY_SP, key); + + self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); + } + } + + #[allow(dead_code)] + fn force_query>( + self, + key: Q::Key, + span: Span, + dep_node: DepNode + ) -> Result<(Q::Value, DepNodeIndex), CycleError<'gcx>> { + // We may be concurrently trying both execute and force a query + // Ensure that only one of them runs the query + let job = match JobOwner::try_get(self, span, &key) { + TryGetJob::NotYetStarted(job) => job, + TryGetJob::JobCompleted(result) => return result, + }; + self.force_query_with_job::(key, job, dep_node) + } + + pub(super) fn try_get_query>( + self, + span: Span, + key: Q::Key, + ) -> Result> { + match self.try_get_with::(span, key) { + Ok(e) => Ok(e), + Err(e) => Err(self.report_cycle(e)), + } + } + + pub(super) fn get_query>( + self, + span: Span, + key: Q::Key, + ) -> Q::Value { + self.try_get_query::(span, key).unwrap_or_else(|mut e| { + e.emit(); + Q::handle_cycle_error(self) + }) + } +} + +macro_rules! handle_cycle_error { + ([][$this: expr]) => {{ + Value::from_cycle_error($this.global_tcx()) + }}; + ([fatal_cycle$(, $modifiers:ident)*][$this:expr]) => {{ + $this.sess.abort_if_errors(); + unreachable!(); + }}; + ([$other:ident$(, $modifiers:ident)*][$($args:tt)*]) => { + handle_cycle_error!([$($modifiers),*][$($args)*]) + }; +} + +macro_rules! define_queries { + (<$tcx:tt> $($category:tt { + $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)* + },)*) => { + define_queries_inner! 
{ <$tcx> + $($( $(#[$attr])* category<$category> [$($modifiers)*] fn $name: $node($K) -> $V,)*)* + } + } +} + +macro_rules! define_queries_inner { + (<$tcx:tt> + $($(#[$attr:meta])* category<$category:tt> + [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { + + use std::mem; + #[cfg(parallel_queries)] + use ty::query::job::QueryResult; + use rustc_data_structures::sync::Lock; + use { + rustc_data_structures::stable_hasher::HashStable, + rustc_data_structures::stable_hasher::StableHasherResult, + rustc_data_structures::stable_hasher::StableHasher, + ich::StableHashingContext + }; + use util::profiling::ProfileCategory; + + define_queries_struct! { + tcx: $tcx, + input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) + } + + impl<$tcx> Queries<$tcx> { + pub fn new( + providers: IndexVec>, + on_disk_cache: OnDiskCache<'tcx>, + ) -> Self { + Queries { + providers, + on_disk_cache, + $($name: Lock::new(QueryCache::new())),* + } + } + + #[cfg(parallel_queries)] + pub fn collect_active_jobs(&self) -> Vec>> { + let mut jobs = Vec::new(); + + // We use try_lock here since we are only called from the + // deadlock handler, and this shouldn't be locked + $(for v in self.$name.try_lock().unwrap().active.values() { + match *v { + QueryResult::Started(ref job) => jobs.push(job.clone()), + _ => (), + } + })* + + return jobs; + } + } + + #[allow(bad_style)] + #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] + pub enum Query<$tcx> { + $($(#[$attr])* $name($K)),* + } + + impl<$tcx> Query<$tcx> { + pub fn name(&self) -> &'static str { + match *self { + $(Query::$name(_) => stringify!($name),)* + } + } + + pub fn describe(&self, tcx: TyCtxt) -> String { + let (r, name) = match *self { + $(Query::$name(key) => { + (queries::$name::describe(tcx, key), stringify!($name)) + })* + }; + if tcx.sess.verbose() { + format!("{} [{}]", r, name) + } else { + r + } + } + + // FIXME(eddyb) Get more valid Span's on queries. 
+ pub fn default_span(&self, tcx: TyCtxt<'_, $tcx, '_>, span: Span) -> Span { + if !span.is_dummy() { + return span; + } + // The def_span query is used to calculate default_span, + // so exit to avoid infinite recursion + match *self { + Query::def_span(..) => return span, + _ => () + } + match *self { + $(Query::$name(key) => key.default_span(tcx),)* + } + } + } + + impl<'a, $tcx> HashStable> for Query<$tcx> { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + mem::discriminant(self).hash_stable(hcx, hasher); + match *self { + $(Query::$name(key) => key.hash_stable(hcx, hasher),)* + } + } + } + + pub mod queries { + use std::marker::PhantomData; + + $(#[allow(bad_style)] + pub struct $name<$tcx> { + data: PhantomData<&$tcx ()> + })* + } + + // This module and the functions in it exist only to provide a + // predictable symbol name prefix for query providers. This is helpful + // for analyzing queries in profilers. + pub(super) mod __query_compute { + $(#[inline(never)] + pub fn $name R, R>(f: F) -> R { + f() + })* + } + + $(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> { + type Key = $K; + type Value = $V; + + const NAME: &'static str = stringify!($name); + const CATEGORY: ProfileCategory = $category; + } + + impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> { + fn query(key: Self::Key) -> Query<'tcx> { + Query::$name(key) + } + + fn query_cache<'a>(tcx: TyCtxt<'a, $tcx, '_>) -> &'a Lock> { + &tcx.queries.$name + } + + #[allow(unused)] + fn to_dep_node(tcx: TyCtxt<'_, $tcx, '_>, key: &Self::Key) -> DepNode { + use dep_graph::DepConstructor::*; + + DepNode::new(tcx, $node(*key)) + } + + #[inline] + fn compute(tcx: TyCtxt<'_, 'tcx, '_>, key: Self::Key) -> Self::Value { + __query_compute::$name(move || { + let provider = tcx.queries.providers[key.query_crate()].$name; + provider(tcx.global_tcx(), key) + }) + } + + fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value { + 
handle_cycle_error!([$($modifiers)*][tcx]) + } + } + + impl<'a, $tcx, 'lcx> queries::$name<$tcx> { + /// Ensure that either this query has all green inputs or been executed. + /// Executing query::ensure(D) is considered a read of the dep-node D. + /// + /// This function is particularly useful when executing passes for their + /// side-effects -- e.g., in order to report errors for erroneous programs. + /// + /// Note: The optimization is only available during incr. comp. + pub fn ensure(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> () { + tcx.ensure_query::(key); + } + })* + + #[derive(Copy, Clone)] + pub struct TyCtxtAt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + pub span: Span, + } + + impl<'a, 'gcx, 'tcx> Deref for TyCtxtAt<'a, 'gcx, 'tcx> { + type Target = TyCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.tcx + } + } + + impl<'a, $tcx, 'lcx> TyCtxt<'a, $tcx, 'lcx> { + /// Return a transparent wrapper for `TyCtxt` which uses + /// `span` as the location of queries performed through it. + pub fn at(self, span: Span) -> TyCtxtAt<'a, $tcx, 'lcx> { + TyCtxtAt { + tcx: self, + span + } + } + + $($(#[$attr])* + pub fn $name(self, key: $K) -> $V { + self.at(DUMMY_SP).$name(key) + })* + } + + impl<'a, $tcx, 'lcx> TyCtxtAt<'a, $tcx, 'lcx> { + $($(#[$attr])* + pub fn $name(self, key: $K) -> $V { + self.tcx.get_query::(self.span, key) + })* + } + + define_provider_struct! { + tcx: $tcx, + input: ($(([$($modifiers)*] [$name] [$K] [$V]))*) + } + + impl<$tcx> Copy for Providers<$tcx> {} + impl<$tcx> Clone for Providers<$tcx> { + fn clone(&self) -> Self { *self } + } + } +} + +macro_rules! define_queries_struct { + (tcx: $tcx:tt, + input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { + pub(crate) struct Queries<$tcx> { + /// This provides access to the incr. comp. on-disk cache for query results. + /// Do not access this directly. 
It is only meant to be used by + /// `DepGraph::try_mark_green()` and the query infrastructure. + pub(crate) on_disk_cache: OnDiskCache<'tcx>, + + providers: IndexVec>, + + $($(#[$attr])* $name: Lock>>,)* + } + }; +} + +macro_rules! define_provider_struct { + (tcx: $tcx:tt, + input: ($(([$($modifiers:tt)*] [$name:ident] [$K:ty] [$R:ty]))*)) => { + pub struct Providers<$tcx> { + $(pub $name: for<'a> fn(TyCtxt<'a, $tcx, $tcx>, $K) -> $R,)* + } + + impl<$tcx> Default for Providers<$tcx> { + fn default() -> Self { + $(fn $name<'a, $tcx>(_: TyCtxt<'a, $tcx, $tcx>, key: $K) -> $R { + bug!("tcx.{}({:?}) unsupported by its crate", + stringify!($name), key); + })* + Providers { $($name),* } + } + } + }; +} + + +/// The red/green evaluation system will try to mark a specific DepNode in the +/// dependency graph as green by recursively trying to mark the dependencies of +/// that DepNode as green. While doing so, it will sometimes encounter a DepNode +/// where we don't know if it is red or green and we therefore actually have +/// to recompute its value in order to find out. Since the only piece of +/// information that we have at that point is the DepNode we are trying to +/// re-evaluate, we need some way to re-run a query from just that. This is what +/// `force_from_dep_node()` implements. +/// +/// In the general case, a DepNode consists of a DepKind and an opaque +/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint +/// is usually constructed by computing a stable hash of the query-key that the +/// DepNode corresponds to. Consequently, it is not in general possible to go +/// back from hash to query-key (since hash functions are not reversible). For +/// this reason `force_from_dep_node()` is expected to fail from time to time +/// because we just cannot find out, from the DepNode alone, what the +/// corresponding query-key is and therefore cannot re-run the query. 
+/// +/// The system deals with this case letting `try_mark_green` fail which forces +/// the root query to be re-evaluated. +/// +/// Now, if force_from_dep_node() would always fail, it would be pretty useless. +/// Fortunately, we can use some contextual information that will allow us to +/// reconstruct query-keys for certain kinds of DepNodes. In particular, we +/// enforce by construction that the GUID/fingerprint of certain DepNodes is a +/// valid DefPathHash. Since we also always build a huge table that maps every +/// DefPathHash in the current codebase to the corresponding DefId, we have +/// everything we need to re-run the query. +/// +/// Take the `mir_validated` query as an example. Like many other queries, it +/// just has a single parameter: the DefId of the item it will compute the +/// validated MIR for. Now, when we call `force_from_dep_node()` on a dep-node +/// with kind `MirValidated`, we know that the GUID/fingerprint of the dep-node +/// is actually a DefPathHash, and can therefore just look up the corresponding +/// DefId in `tcx.def_path_hash_to_def_id`. +/// +/// When you implement a new query, it will likely have a corresponding new +/// DepKind, and you'll have to support it here in `force_from_dep_node()`. As +/// a rule of thumb, if your query takes a DefId or DefIndex as sole parameter, +/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just +/// add it to the "We don't have enough information to reconstruct..." group in +/// the match below. +pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, + dep_node: &DepNode) + -> bool { + use hir::def_id::LOCAL_CRATE; + + // We must avoid ever having to call force_from_dep_node() for a + // DepNode::CodegenUnit: + // Since we cannot reconstruct the query key of a DepNode::CodegenUnit, we + // would always end up having to evaluate the first caller of the + // `codegen_unit` query that *is* reconstructible. 
This might very well be + // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just + // to re-trigger calling the `codegen_unit` query with the right key. At + // that point we would already have re-done all the work we are trying to + // avoid doing in the first place. + // The solution is simple: Just explicitly call the `codegen_unit` query for + // each CGU, right after partitioning. This way `try_mark_green` will always + // hit the cache instead of having to go through `force_from_dep_node`. + // This assertion makes sure, we actually keep applying the solution above. + debug_assert!(dep_node.kind != DepKind::CodegenUnit, + "calling force_from_dep_node() on DepKind::CodegenUnit"); + + if !dep_node.kind.can_reconstruct_query_key() { + return false + } + + macro_rules! def_id { + () => { + if let Some(def_id) = dep_node.extract_def_id(tcx) { + def_id + } else { + // return from the whole function + return false + } + } + }; + + macro_rules! krate { + () => { (def_id!()).krate } + }; + + macro_rules! force { + ($query:ident, $key:expr) => { + { + use $crate::util::common::{ProfileQueriesMsg, profq_msg}; + + profq_msg!(tcx, + ProfileQueriesMsg::QueryBegin( + DUMMY_SP.data(), + profq_query_msg!(::ty::query::queries::$query::NAME, tcx, $key), + ) + ); + + match tcx.force_query::<::ty::query::queries::$query>($key, DUMMY_SP, *dep_node) { + Ok(_) => {}, + Err(e) => { + tcx.report_cycle(e).emit(); + } + } + } + } + }; + + // FIXME(#45015): We should try move this boilerplate code into a macro + // somehow. 
+ match dep_node.kind { + // These are inputs that are expected to be pre-allocated and that + // should therefore always be red or green already + DepKind::AllLocalTraitImpls | + DepKind::Krate | + DepKind::CrateMetadata | + DepKind::HirBody | + DepKind::Hir | + + // This are anonymous nodes + DepKind::TraitSelect | + + // We don't have enough information to reconstruct the query key of + // these + DepKind::IsCopy | + DepKind::IsSized | + DepKind::IsFreeze | + DepKind::NeedsDrop | + DepKind::Layout | + DepKind::ConstEval | + DepKind::InstanceSymbolName | + DepKind::MirShim | + DepKind::BorrowCheckKrate | + DepKind::Specializes | + DepKind::ImplementationsOfTrait | + DepKind::TypeParamPredicates | + DepKind::CodegenUnit | + DepKind::CompileCodegenUnit | + DepKind::FulfillObligation | + DepKind::VtableMethods | + DepKind::EraseRegionsTy | + DepKind::ConstValueToAllocation | + DepKind::NormalizeProjectionTy | + DepKind::NormalizeTyAfterErasingRegions | + DepKind::ImpliedOutlivesBounds | + DepKind::DropckOutlives | + DepKind::EvaluateObligation | + DepKind::TypeOpEq | + DepKind::TypeOpSubtype | + DepKind::TypeOpProvePredicate | + DepKind::TypeOpNormalizeTy | + DepKind::TypeOpNormalizePredicate | + DepKind::TypeOpNormalizePolyFnSig | + DepKind::TypeOpNormalizeFnSig | + DepKind::SubstituteNormalizeAndTestPredicates | + DepKind::InstanceDefSizeEstimate | + DepKind::ProgramClausesForEnv | + + // This one should never occur in this context + DepKind::Null => { + bug!("force_from_dep_node() - Encountered {:?}", dep_node) + } + + // These are not queries + DepKind::CoherenceCheckTrait | + DepKind::ItemVarianceConstraints => { + return false + } + + DepKind::RegionScopeTree => { force!(region_scope_tree, def_id!()); } + + DepKind::Coherence => { force!(crate_inherent_impls, LOCAL_CRATE); } + DepKind::CoherenceInherentImplOverlapCheck => { + force!(crate_inherent_impls_overlap_check, LOCAL_CRATE) + }, + DepKind::PrivacyAccessLevels => { force!(privacy_access_levels, 
LOCAL_CRATE); } + DepKind::MirBuilt => { force!(mir_built, def_id!()); } + DepKind::MirConstQualif => { force!(mir_const_qualif, def_id!()); } + DepKind::MirConst => { force!(mir_const, def_id!()); } + DepKind::MirValidated => { force!(mir_validated, def_id!()); } + DepKind::MirOptimized => { force!(optimized_mir, def_id!()); } + + DepKind::BorrowCheck => { force!(borrowck, def_id!()); } + DepKind::MirBorrowCheck => { force!(mir_borrowck, def_id!()); } + DepKind::UnsafetyCheckResult => { force!(unsafety_check_result, def_id!()); } + DepKind::UnsafeDeriveOnReprPacked => { force!(unsafe_derive_on_repr_packed, def_id!()); } + DepKind::Reachability => { force!(reachable_set, LOCAL_CRATE); } + DepKind::MirKeys => { force!(mir_keys, LOCAL_CRATE); } + DepKind::CrateVariances => { force!(crate_variances, LOCAL_CRATE); } + DepKind::AssociatedItems => { force!(associated_item, def_id!()); } + DepKind::TypeOfItem => { force!(type_of, def_id!()); } + DepKind::GenericsOfItem => { force!(generics_of, def_id!()); } + DepKind::PredicatesOfItem => { force!(predicates_of, def_id!()); } + DepKind::PredicatesDefinedOnItem => { force!(predicates_defined_on, def_id!()); } + DepKind::ExplicitPredicatesOfItem => { force!(explicit_predicates_of, def_id!()); } + DepKind::InferredOutlivesOf => { force!(inferred_outlives_of, def_id!()); } + DepKind::InferredOutlivesCrate => { force!(inferred_outlives_crate, LOCAL_CRATE); } + DepKind::SuperPredicatesOfItem => { force!(super_predicates_of, def_id!()); } + DepKind::TraitDefOfItem => { force!(trait_def, def_id!()); } + DepKind::AdtDefOfItem => { force!(adt_def, def_id!()); } + DepKind::ImplTraitRef => { force!(impl_trait_ref, def_id!()); } + DepKind::ImplPolarity => { force!(impl_polarity, def_id!()); } + DepKind::FnSignature => { force!(fn_sig, def_id!()); } + DepKind::CoerceUnsizedInfo => { force!(coerce_unsized_info, def_id!()); } + DepKind::ItemVariances => { force!(variances_of, def_id!()); } + DepKind::IsConstFn => { force!(is_const_fn, 
def_id!()); } + DepKind::IsForeignItem => { force!(is_foreign_item, def_id!()); } + DepKind::SizedConstraint => { force!(adt_sized_constraint, def_id!()); } + DepKind::DtorckConstraint => { force!(adt_dtorck_constraint, def_id!()); } + DepKind::AdtDestructor => { force!(adt_destructor, def_id!()); } + DepKind::AssociatedItemDefIds => { force!(associated_item_def_ids, def_id!()); } + DepKind::InherentImpls => { force!(inherent_impls, def_id!()); } + DepKind::TypeckBodiesKrate => { force!(typeck_item_bodies, LOCAL_CRATE); } + DepKind::TypeckTables => { force!(typeck_tables_of, def_id!()); } + DepKind::UsedTraitImports => { force!(used_trait_imports, def_id!()); } + DepKind::HasTypeckTables => { force!(has_typeck_tables, def_id!()); } + DepKind::SymbolName => { force!(def_symbol_name, def_id!()); } + DepKind::SpecializationGraph => { force!(specialization_graph_of, def_id!()); } + DepKind::ObjectSafety => { force!(is_object_safe, def_id!()); } + DepKind::TraitImpls => { force!(trait_impls_of, def_id!()); } + DepKind::CheckMatch => { force!(check_match, def_id!()); } + + DepKind::ParamEnv => { force!(param_env, def_id!()); } + DepKind::DescribeDef => { force!(describe_def, def_id!()); } + DepKind::DefSpan => { force!(def_span, def_id!()); } + DepKind::LookupStability => { force!(lookup_stability, def_id!()); } + DepKind::LookupDeprecationEntry => { + force!(lookup_deprecation_entry, def_id!()); + } + DepKind::ConstIsRvaluePromotableToStatic => { + force!(const_is_rvalue_promotable_to_static, def_id!()); + } + DepKind::RvaluePromotableMap => { force!(rvalue_promotable_map, def_id!()); } + DepKind::ImplParent => { force!(impl_parent, def_id!()); } + DepKind::TraitOfItem => { force!(trait_of_item, def_id!()); } + DepKind::IsReachableNonGeneric => { force!(is_reachable_non_generic, def_id!()); } + DepKind::IsUnreachableLocalDefinition => { + force!(is_unreachable_local_definition, def_id!()); + } + DepKind::IsMirAvailable => { force!(is_mir_available, def_id!()); } + 
DepKind::ItemAttrs => { force!(item_attrs, def_id!()); } + DepKind::CodegenFnAttrs => { force!(codegen_fn_attrs, def_id!()); } + DepKind::FnArgNames => { force!(fn_arg_names, def_id!()); } + DepKind::RenderedConst => { force!(rendered_const, def_id!()); } + DepKind::DylibDepFormats => { force!(dylib_dependency_formats, krate!()); } + DepKind::IsPanicRuntime => { force!(is_panic_runtime, krate!()); } + DepKind::IsCompilerBuiltins => { force!(is_compiler_builtins, krate!()); } + DepKind::HasGlobalAllocator => { force!(has_global_allocator, krate!()); } + DepKind::ExternCrate => { force!(extern_crate, def_id!()); } + DepKind::LintLevels => { force!(lint_levels, LOCAL_CRATE); } + DepKind::InScopeTraits => { force!(in_scope_traits_map, def_id!().index); } + DepKind::ModuleExports => { force!(module_exports, def_id!()); } + DepKind::IsSanitizerRuntime => { force!(is_sanitizer_runtime, krate!()); } + DepKind::IsProfilerRuntime => { force!(is_profiler_runtime, krate!()); } + DepKind::GetPanicStrategy => { force!(panic_strategy, krate!()); } + DepKind::IsNoBuiltins => { force!(is_no_builtins, krate!()); } + DepKind::ImplDefaultness => { force!(impl_defaultness, def_id!()); } + DepKind::CheckItemWellFormed => { force!(check_item_well_formed, def_id!()); } + DepKind::CheckTraitItemWellFormed => { force!(check_trait_item_well_formed, def_id!()); } + DepKind::CheckImplItemWellFormed => { force!(check_impl_item_well_formed, def_id!()); } + DepKind::ReachableNonGenerics => { force!(reachable_non_generics, krate!()); } + DepKind::NativeLibraries => { force!(native_libraries, krate!()); } + DepKind::PluginRegistrarFn => { force!(plugin_registrar_fn, krate!()); } + DepKind::DeriveRegistrarFn => { force!(derive_registrar_fn, krate!()); } + DepKind::CrateDisambiguator => { force!(crate_disambiguator, krate!()); } + DepKind::CrateHash => { force!(crate_hash, krate!()); } + DepKind::OriginalCrateName => { force!(original_crate_name, krate!()); } + DepKind::ExtraFileName => { 
force!(extra_filename, krate!()); } + + DepKind::AllTraitImplementations => { + force!(all_trait_implementations, krate!()); + } + + DepKind::DllimportForeignItems => { + force!(dllimport_foreign_items, krate!()); + } + DepKind::IsDllimportForeignItem => { + force!(is_dllimport_foreign_item, def_id!()); + } + DepKind::IsStaticallyIncludedForeignItem => { + force!(is_statically_included_foreign_item, def_id!()); + } + DepKind::NativeLibraryKind => { force!(native_library_kind, def_id!()); } + DepKind::LinkArgs => { force!(link_args, LOCAL_CRATE); } + + DepKind::ResolveLifetimes => { force!(resolve_lifetimes, krate!()); } + DepKind::NamedRegion => { force!(named_region_map, def_id!().index); } + DepKind::IsLateBound => { force!(is_late_bound_map, def_id!().index); } + DepKind::ObjectLifetimeDefaults => { + force!(object_lifetime_defaults_map, def_id!().index); + } + + DepKind::Visibility => { force!(visibility, def_id!()); } + DepKind::DepKind => { force!(dep_kind, krate!()); } + DepKind::CrateName => { force!(crate_name, krate!()); } + DepKind::ItemChildren => { force!(item_children, def_id!()); } + DepKind::ExternModStmtCnum => { force!(extern_mod_stmt_cnum, def_id!()); } + DepKind::GetLibFeatures => { force!(get_lib_features, LOCAL_CRATE); } + DepKind::DefinedLibFeatures => { force!(defined_lib_features, krate!()); } + DepKind::GetLangItems => { force!(get_lang_items, LOCAL_CRATE); } + DepKind::DefinedLangItems => { force!(defined_lang_items, krate!()); } + DepKind::MissingLangItems => { force!(missing_lang_items, krate!()); } + DepKind::VisibleParentMap => { force!(visible_parent_map, LOCAL_CRATE); } + DepKind::MissingExternCrateItem => { + force!(missing_extern_crate_item, krate!()); + } + DepKind::UsedCrateSource => { force!(used_crate_source, krate!()); } + DepKind::PostorderCnums => { force!(postorder_cnums, LOCAL_CRATE); } + + DepKind::Freevars => { force!(freevars, def_id!()); } + DepKind::MaybeUnusedTraitImport => { + force!(maybe_unused_trait_import, 
def_id!()); + } + DepKind::MaybeUnusedExternCrates => { force!(maybe_unused_extern_crates, LOCAL_CRATE); } + DepKind::StabilityIndex => { force!(stability_index, LOCAL_CRATE); } + DepKind::AllTraits => { force!(all_traits, LOCAL_CRATE); } + DepKind::AllCrateNums => { force!(all_crate_nums, LOCAL_CRATE); } + DepKind::ExportedSymbols => { force!(exported_symbols, krate!()); } + DepKind::CollectAndPartitionMonoItems => { + force!(collect_and_partition_mono_items, LOCAL_CRATE); + } + DepKind::IsCodegenedItem => { force!(is_codegened_item, def_id!()); } + DepKind::OutputFilenames => { force!(output_filenames, LOCAL_CRATE); } + + DepKind::TargetFeaturesWhitelist => { force!(target_features_whitelist, LOCAL_CRATE); } + + DepKind::Features => { force!(features_query, LOCAL_CRATE); } + + DepKind::ProgramClausesFor => { force!(program_clauses_for, def_id!()); } + DepKind::WasmImportModuleMap => { force!(wasm_import_module_map, krate!()); } + DepKind::ForeignModules => { force!(foreign_modules, krate!()); } + + DepKind::UpstreamMonomorphizations => { + force!(upstream_monomorphizations, krate!()); + } + DepKind::UpstreamMonomorphizationsFor => { + force!(upstream_monomorphizations_for, def_id!()); + } + } + + true +} + + +// FIXME(#45015): Another piece of boilerplate code that could be generated in +// a combined define_dep_nodes!()/define_queries!() macro. +macro_rules! impl_load_from_cache { + ($($dep_kind:ident => $query_name:ident,)*) => { + impl DepNode { + // Check whether the query invocation corresponding to the given + // DepNode is eligible for on-disk-caching. + pub fn cache_on_disk(&self, tcx: TyCtxt) -> bool { + use ty::query::queries; + use ty::query::QueryDescription; + + match self.kind { + $(DepKind::$dep_kind => { + let def_id = self.extract_def_id(tcx).unwrap(); + queries::$query_name::cache_on_disk(def_id) + })* + _ => false + } + } + + // This is method will execute the query corresponding to the given + // DepNode. 
It is only expected to work for DepNodes where the + // above `cache_on_disk` methods returns true. + // Also, as a sanity check, it expects that the corresponding query + // invocation has been marked as green already. + pub fn load_from_on_disk_cache(&self, tcx: TyCtxt) { + match self.kind { + $(DepKind::$dep_kind => { + debug_assert!(tcx.dep_graph + .node_color(self) + .map(|c| c.is_green()) + .unwrap_or(false)); + + let def_id = self.extract_def_id(tcx).unwrap(); + let _ = tcx.$query_name(def_id); + })* + _ => { + bug!() + } + } + } + } + } +} + +impl_load_from_cache!( + TypeckTables => typeck_tables_of, + MirOptimized => optimized_mir, + UnsafetyCheckResult => unsafety_check_result, + BorrowCheck => borrowck, + MirBorrowCheck => mir_borrowck, + MirConstQualif => mir_const_qualif, + SymbolName => def_symbol_name, + ConstIsRvaluePromotableToStatic => const_is_rvalue_promotable_to_static, + CheckMatch => check_match, + TypeOfItem => type_of, + GenericsOfItem => generics_of, + PredicatesOfItem => predicates_of, + UsedTraitImports => used_trait_imports, + CodegenFnAttrs => codegen_fn_attrs, + SpecializationGraph => specialization_graph_of, +); diff --git a/src/librustc/ty/maps/values.rs b/src/librustc/ty/query/values.rs similarity index 85% rename from src/librustc/ty/maps/values.rs rename to src/librustc/ty/query/values.rs index 165798d19f19..d3d062487924 100644 --- a/src/librustc/ty/maps/values.rs +++ b/src/librustc/ty/query/values.rs @@ -35,15 +35,9 @@ impl<'tcx> Value<'tcx> for Ty<'tcx> { } } -impl<'tcx> Value<'tcx> for ty::DtorckConstraint<'tcx> { - fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self { - Self::empty() - } -} - impl<'tcx> Value<'tcx> for ty::SymbolName { fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self { - ty::SymbolName { name: Symbol::intern("").as_str() } + ty::SymbolName { name: Symbol::intern("").as_interned_str() } } } diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs index 376cdc462e82..4e8f33d6a4a0 
100644 --- a/src/librustc/ty/relate.rs +++ b/src/librustc/ty/relate.rs @@ -14,17 +14,17 @@ //! type equality, etc. use hir::def_id::DefId; -use middle::const_val::ConstVal; -use traits::Reveal; -use ty::subst::{Kind, Substs}; +use mir::interpret::ConstValue; +use ty::subst::{Kind, UnpackedKind, Substs}; use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::error::{ExpectedFound, TypeError}; +use mir::interpret::GlobalId; use util::common::ErrorReported; +use syntax_pos::DUMMY_SP; use std::rc::Rc; use std::iter; -use syntax::abi; +use rustc_target::spec::abi; use hir as ast; -use rustc_data_structures::accumulate_vec::AccumulateVec; pub type RelateResult<'tcx, T> = Result>; @@ -141,13 +141,7 @@ pub fn relate_substs<'a, 'gcx, 'tcx, R>(relation: &mut R, let params = a_subst.iter().zip(b_subst).enumerate().map(|(i, (a, b))| { let variance = variances.map_or(ty::Invariant, |v| v[i]); - if let (Some(a_ty), Some(b_ty)) = (a.as_type(), b.as_type()) { - Ok(Kind::from(relation.relate_with_variance(variance, &a_ty, &b_ty)?)) - } else if let (Some(a_r), Some(b_r)) = (a.as_region(), b.as_region()) { - Ok(Kind::from(relation.relate_with_variance(variance, &a_r, &b_r)?)) - } else { - bug!() - } + relation.relate_with_variance(variance, a, b) }); Ok(tcx.mk_substs(params)?) 
@@ -160,6 +154,8 @@ impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> { -> RelateResult<'tcx, ty::FnSig<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { + let tcx = relation.tcx(); + if a.variadic != b.variadic { return Err(TypeError::VariadicMismatch( expected_found(relation, &a.variadic, &b.variadic))); @@ -181,9 +177,9 @@ impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> { } else { relation.relate_with_variance(ty::Contravariant, &a, &b) } - }).collect::, _>>()?; + }); Ok(ty::FnSig { - inputs_and_output: relation.tcx().intern_type_list(&inputs_and_output), + inputs_and_output: tcx.mk_type_list(inputs_and_output)?, variadic: a.variadic, unsafety, abi, @@ -319,6 +315,29 @@ impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> { } } +#[derive(Debug, Clone)] +struct GeneratorWitness<'tcx>(&'tcx ty::Slice>); + +TupleStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for GeneratorWitness<'tcx> { + a + } +} + +impl<'tcx> Relate<'tcx> for GeneratorWitness<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &GeneratorWitness<'tcx>, + b: &GeneratorWitness<'tcx>) + -> RelateResult<'tcx, GeneratorWitness<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + assert!(a.0.len() == b.0.len()); + let tcx = relation.tcx(); + let types = tcx.mk_type_list(a.0.iter().zip(b.0).map(|(a, b)| relation.relate(a, b)))?; + Ok(GeneratorWitness(types)) + } +} + impl<'tcx> Relate<'tcx> for Ty<'tcx> { fn relate<'a, 'gcx, R>(relation: &mut R, a: &Ty<'tcx>, @@ -398,16 +417,26 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound)) } - (&ty::TyGenerator(a_id, a_substs, a_interior), - &ty::TyGenerator(b_id, b_substs, b_interior)) + (&ty::TyGenerator(a_id, a_substs, movability), + &ty::TyGenerator(b_id, b_substs, _)) if a_id == b_id => { // All TyGenerator types with the same id represent // the (anonymous) type of the same generator expression. 
So // all of their regions should be equated. let substs = relation.relate(&a_substs, &b_substs)?; - let interior = relation.relate(&a_interior, &b_interior)?; - Ok(tcx.mk_generator(a_id, substs, interior)) + Ok(tcx.mk_generator(a_id, substs, movability)) + } + + (&ty::TyGeneratorWitness(a_types), &ty::TyGeneratorWitness(b_types)) => + { + // Wrap our types with a temporary GeneratorWitness struct + // inside the binder so we can related them + let a_types = a_types.map_bound(GeneratorWitness); + let b_types = b_types.map_bound(GeneratorWitness); + // Then remove the GeneratorWitness for the result + let types = relation.relate(&a_types, &b_types)?.map_bound(|witness| witness.0); + Ok(tcx.mk_generator_witness(types)) } (&ty::TyClosure(a_id, a_substs), @@ -418,7 +447,7 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, // the (anonymous) type of the same closure expression. So // all of their regions should be equated. let substs = relation.relate(&a_substs, &b_substs)?; - Ok(tcx.mk_closure_from_closure_substs(a_id, substs)) + Ok(tcx.mk_closure(a_id, substs)) } (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) => @@ -427,10 +456,12 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, Ok(tcx.mk_ptr(mt)) } - (&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) => + (&ty::TyRef(a_r, a_ty, a_mutbl), &ty::TyRef(b_r, b_ty, b_mutbl)) => { let r = relation.relate_with_variance(ty::Contravariant, &a_r, &b_r)?; - let mt = relation.relate(a_mt, b_mt)?; + let a_mt = ty::TypeAndMut { ty: a_ty, mutbl: a_mutbl }; + let b_mt = ty::TypeAndMut { ty: b_ty, mutbl: b_mutbl }; + let mt = relation.relate(&a_mt, &b_mt)?; Ok(tcx.mk_ref(r, mt)) } @@ -440,27 +471,44 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, assert_eq!(sz_a.ty, tcx.types.usize); assert_eq!(sz_b.ty, tcx.types.usize); let to_u64 = |x: &'tcx ty::Const<'tcx>| -> Result { + if let Some(s) = x.assert_usize(tcx) { + return Ok(s); + } match x.val { - ConstVal::Integral(x) => 
Ok(x.to_u64().unwrap()), - ConstVal::Unevaluated(def_id, substs) => { + ConstValue::Unevaluated(def_id, substs) => { // FIXME(eddyb) get the right param_env. - let param_env = ty::ParamEnv::empty(Reveal::UserFacing); + let param_env = ty::ParamEnv::empty(); match tcx.lift_to_global(&substs) { Some(substs) => { - match tcx.const_eval(param_env.and((def_id, substs))) { - Ok(&ty::Const { val: ConstVal::Integral(x), .. }) => { - return Ok(x.to_u64().unwrap()); + let instance = ty::Instance::resolve( + tcx.global_tcx(), + param_env, + def_id, + substs, + ); + if let Some(instance) = instance { + let cid = GlobalId { + instance, + promoted: None + }; + if let Some(s) = tcx.const_eval(param_env.and(cid)) + .ok() + .map(|c| c.unwrap_usize(tcx)) { + return Ok(s) } - _ => {} } - } + }, None => {} } tcx.sess.delay_span_bug(tcx.def_span(def_id), "array length could not be evaluated"); Err(ErrorReported) } - _ => bug!("arrays should not have {:?} as length", x) + _ => { + tcx.sess.delay_span_bug(DUMMY_SP, + &format!("arrays should not have {:?} as length", x)); + Err(ErrorReported) + } } }; match (to_u64(sz_a), to_u64(sz_b)) { @@ -485,11 +533,10 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, Ok(tcx.mk_slice(t)) } - (&ty::TyTuple(as_, a_defaulted), &ty::TyTuple(bs, b_defaulted)) => + (&ty::TyTuple(as_), &ty::TyTuple(bs)) => { if as_.len() == bs.len() { - let defaulted = a_defaulted || b_defaulted; - Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)), defaulted)?) + Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)))?) 
} else if !(as_.is_empty() || bs.is_empty()) { Err(TypeError::TupleSize( expected_found(relation, &as_.len(), &bs.len()))) @@ -564,19 +611,19 @@ impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> { where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { let substs = relate_substs(relation, None, a.substs, b.substs)?; - Ok(ty::ClosureSubsts { substs: substs }) + Ok(ty::ClosureSubsts { substs }) } } -impl<'tcx> Relate<'tcx> for ty::GeneratorInterior<'tcx> { +impl<'tcx> Relate<'tcx> for ty::GeneratorSubsts<'tcx> { fn relate<'a, 'gcx, R>(relation: &mut R, - a: &ty::GeneratorInterior<'tcx>, - b: &ty::GeneratorInterior<'tcx>) - -> RelateResult<'tcx, ty::GeneratorInterior<'tcx>> + a: &ty::GeneratorSubsts<'tcx>, + b: &ty::GeneratorSubsts<'tcx>) + -> RelateResult<'tcx, ty::GeneratorSubsts<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { - let interior = relation.relate(&a.witness, &b.witness)?; - Ok(ty::GeneratorInterior::new(interior)) + let substs = relate_substs(relation, None, a.substs, b.substs)?; + Ok(ty::GeneratorSubsts { substs }) } } @@ -639,6 +686,27 @@ impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for Box { } } +impl<'tcx> Relate<'tcx> for Kind<'tcx> { + fn relate<'a, 'gcx, R>( + relation: &mut R, + a: &Kind<'tcx>, + b: &Kind<'tcx> + ) -> RelateResult<'tcx, Kind<'tcx>> + where + R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a, + { + match (a.unpack(), b.unpack()) { + (UnpackedKind::Lifetime(a_lt), UnpackedKind::Lifetime(b_lt)) => { + Ok(relation.relate(&a_lt, &b_lt)?.into()) + } + (UnpackedKind::Type(a_ty), UnpackedKind::Type(b_ty)) => { + Ok(relation.relate(&a_ty, &b_ty)?.into()) + } + (UnpackedKind::Lifetime(_), _) | (UnpackedKind::Type(_), _) => bug!() + } + } +} + /////////////////////////////////////////////////////////////////////////// // Error handling diff --git a/src/librustc/ty/steal.rs b/src/librustc/ty/steal.rs index 0b0818888812..842c0d657343 100644 --- a/src/librustc/ty/steal.rs +++ b/src/librustc/ty/steal.rs @@ 
-8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cell::{Ref, RefCell}; +use rustc_data_structures::sync::{RwLock, ReadGuard}; use std::mem; /// The `Steal` struct is intended to used as the value for a query. @@ -32,25 +32,25 @@ use std::mem; /// /// FIXME(#41710) -- what is the best way to model linear queries? pub struct Steal { - value: RefCell> + value: RwLock> } impl Steal { pub fn new(value: T) -> Self { Steal { - value: RefCell::new(Some(value)) + value: RwLock::new(Some(value)) } } - pub fn borrow(&self) -> Ref { - Ref::map(self.value.borrow(), |opt| match *opt { + pub fn borrow(&self) -> ReadGuard { + ReadGuard::map(self.value.borrow(), |opt| match *opt { None => bug!("attempted to read from stolen value"), Some(ref v) => v }) } pub fn steal(&self) -> T { - let value_ref = &mut *self.value.borrow_mut(); + let value_ref = &mut *self.value.try_write().expect("stealing value which is locked"); let value = mem::replace(value_ref, None); value.expect("attempt to read from stolen value") } diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index 438511281ba4..ad29f808285b 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -13,12 +13,12 @@ //! hand, though we've recently added some macros (e.g., //! `BraceStructLiftImpl!`) to help with the tedium. -use infer::type_variable; -use middle::const_val::{self, ConstVal, ConstAggregate, ConstEvalErr}; +use mir::interpret::{ConstValue, ConstEvalErr}; use ty::{self, Lift, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use rustc_data_structures::accumulate_vec::AccumulateVec; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use mir::interpret; use std::rc::Rc; @@ -28,152 +28,38 @@ use std::rc::Rc; // For things that don't carry any arena-allocated data (and are // copy...), just add them to this list. -macro_rules! 
CopyImpls { - ($($ty:ty,)+) => { - $( - impl<'tcx> Lift<'tcx> for $ty { - type Lifted = Self; - fn lift_to_tcx<'a, 'gcx>(&self, _: TyCtxt<'a, 'gcx, 'tcx>) -> Option { - Some(*self) - } - } - - impl<'tcx> TypeFoldable<'tcx> for $ty { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> $ty { - *self - } - - fn super_visit_with>(&self, _: &mut F) -> bool { - false - } - } - )+ - } -} - -CopyImpls! { +CloneTypeFoldableAndLiftImpls! { (), - ::hir::Unsafety, - ::syntax::abi::Abi, + bool, + usize, + u64, + ::middle::region::Scope, + ::syntax::ast::FloatTy, + ::syntax::ast::NodeId, + ::syntax_pos::symbol::Symbol, + ::hir::def::Def, ::hir::def_id::DefId, + ::hir::InlineAsm, + ::hir::MatchSource, + ::hir::Mutability, + ::hir::Unsafety, + ::rustc_target::spec::abi::Abi, ::mir::Local, + ::mir::Promoted, ::traits::Reveal, + ::ty::adjustment::AutoBorrowMutability, + ::ty::AdtKind, + // Including `BoundRegion` is a *bit* dubious, but direct + // references to bound region appear in `ty::Error`, and aren't + // really meant to be folded. In general, we can only fold a fully + // general `Region`. + ::ty::BoundRegion, + ::ty::ClosureKind, + ::ty::IntVarValue, + ::ty::ParamTy, ::syntax_pos::Span, } -/////////////////////////////////////////////////////////////////////////// -// Macros -// -// When possible, use one of these (relatively) convenient macros to write -// the impls for you. - -#[macro_export] -macro_rules! BraceStructLiftImpl { - (impl<$($p:tt),*> Lift<$tcx:tt> for $s:path { - type Lifted = $lifted:ty; - $($field:ident),* $(,)* - } $(where $($wc:tt)*)*) => { - impl<$($p),*> $crate::ty::Lift<$tcx> for $s - $(where $($wc)*)* - { - type Lifted = $lifted; - - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<$lifted> { - $(let $field = tcx.lift(&self.$field)?;)* - Some(Self::Lifted { $($field),* }) - } - } - }; -} - -#[macro_export] -macro_rules! 
EnumLiftImpl { - (impl<$($p:tt),*> Lift<$tcx:tt> for $s:path { - type Lifted = $lifted:ty; - $( - ($variant:path) ( $( $variant_arg:ident),* ) - ),* - $(,)* - } $(where $($wc:tt)*)*) => { - impl<$($p),*> $crate::ty::Lift<$tcx> for $s - $(where $($wc)*)* - { - type Lifted = $lifted; - - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<$lifted> { - match self { - $($variant ( $($variant_arg),* ) => { - Some($variant ( $(tcx.lift($variant_arg)?),* )) - })* - } - } - } - }; -} - -#[macro_export] -macro_rules! BraceStructTypeFoldableImpl { - (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path { - $($field:ident),* $(,)* - } $(where $($wc:tt)*)*) => { - impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s - $(where $($wc)*)* - { - fn super_fold_with<'gcx: $tcx, V: $crate::ty::fold::TypeFolder<'gcx, $tcx>>( - &self, - folder: &mut V, - ) -> Self { - let $s { $($field,)* } = self; - $s { $($field: $field.fold_with(folder),)* } - } - - fn super_visit_with>( - &self, - visitor: &mut V, - ) -> bool { - let $s { $($field,)* } = self; - false $(|| $field.visit_with(visitor))* - } - } - }; -} - -#[macro_export] -macro_rules! 
EnumTypeFoldableImpl { - (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path { - $( - ($variant:path) ( $( $variant_arg:ident),* ) - ),* - $(,)* - } $(where $($wc:tt)*)*) => { - impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s - $(where $($wc)*)* - { - fn super_fold_with<'gcx: $tcx, V: $crate::ty::fold::TypeFolder<'gcx, $tcx>>( - &self, - folder: &mut V, - ) -> Self { - match self { - $($variant ( $($variant_arg),* ) => { - $variant ( $($variant_arg.fold_with(folder)),* ) - })* - } - } - - fn super_visit_with>( - &self, - visitor: &mut V, - ) -> bool { - match self { - $($variant ( $($variant_arg),* ) => { - false $(|| $variant_arg.visit_with(visitor))* - })* - } - } - } - }; -} - /////////////////////////////////////////////////////////////////////////// // Lift implementations @@ -283,14 +169,6 @@ impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> { } } -impl<'a, 'tcx> Lift<'tcx> for ty::EquatePredicate<'a> { - type Lifted = ty::EquatePredicate<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) - -> Option> { - tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::EquatePredicate(a, b)) - } -} - impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> { type Lifted = ty::SubtypePredicate<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) @@ -356,9 +234,6 @@ impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> { ty::Predicate::Trait(ref binder) => { tcx.lift(binder).map(ty::Predicate::Trait) } - ty::Predicate::Equate(ref binder) => { - tcx.lift(binder).map(ty::Predicate::Equate) - } ty::Predicate::Subtype(ref binder) => { tcx.lift(binder).map(ty::Predicate::Subtype) } @@ -395,7 +270,7 @@ impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> { impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder { type Lifted = ty::Binder; fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.0).map(|x| ty::Binder(x)) + tcx.lift(self.skip_binder()).map(ty::Binder::bind) } } @@ -429,16 +304,16 @@ impl<'a, 'tcx> Lift<'tcx> 
for ty::ClosureSubsts<'a> { type Lifted = ty::ClosureSubsts<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { tcx.lift(&self.substs).map(|substs| { - ty::ClosureSubsts { substs: substs } + ty::ClosureSubsts { substs } }) } } -impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorInterior<'a> { - type Lifted = ty::GeneratorInterior<'tcx>; +impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> { + type Lifted = ty::GeneratorSubsts<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.witness).map(|witness| { - ty::GeneratorInterior { witness } + tcx.lift(&self.substs).map(|substs| { + ty::GeneratorSubsts { substs } }) } } @@ -547,13 +422,6 @@ impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound { } } -BraceStructLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for type_variable::Default<'a> { - type Lifted = type_variable::Default<'tcx>; - ty, origin_span, def_id - } -} - impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { type Lifted = ty::error::TypeError<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { @@ -585,11 +453,8 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { ProjectionBoundsLength(x) => ProjectionBoundsLength(x), Sorts(ref x) => return tcx.lift(x).map(Sorts), - TyParamDefaultMismatch(ref x) => { - return tcx.lift(x).map(TyParamDefaultMismatch) - } - ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch), OldStyleLUB(ref x) => return tcx.lift(x).map(OldStyleLUB), + ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch) }) } } @@ -597,44 +462,133 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { impl<'a, 'tcx> Lift<'tcx> for ConstEvalErr<'a> { type Lifted = ConstEvalErr<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - tcx.lift(&self.kind).map(|kind| { + tcx.lift(&self.error).map(|error| { ConstEvalErr { span: self.span, - kind, + stacktrace: self.stacktrace.clone(), 
+ error, } }) } } -impl<'a, 'tcx> Lift<'tcx> for const_val::ErrKind<'a> { - type Lifted = const_val::ErrKind<'tcx>; +impl<'a, 'tcx> Lift<'tcx> for interpret::EvalError<'a> { + type Lifted = interpret::EvalError<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - use middle::const_val::ErrKind::*; + Some(interpret::EvalError { + kind: tcx.lift(&self.kind)?, + }) + } +} +impl<'a, 'tcx, O: Lift<'tcx>> Lift<'tcx> for interpret::EvalErrorKind<'a, O> { + type Lifted = interpret::EvalErrorKind<'tcx, >::Lifted>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + use ::mir::interpret::EvalErrorKind::*; Some(match *self { - CannotCast => CannotCast, - MissingStructField => MissingStructField, - NonConstPath => NonConstPath, - UnimplementedConstVal(s) => UnimplementedConstVal(s), - ExpectedConstTuple => ExpectedConstTuple, - ExpectedConstStruct => ExpectedConstStruct, - IndexedNonVec => IndexedNonVec, - IndexNotUsize => IndexNotUsize, - IndexOutOfBounds { len, index } => IndexOutOfBounds { len, index }, - MiscBinaryOp => MiscBinaryOp, - MiscCatchAll => MiscCatchAll, - IndexOpFeatureGated => IndexOpFeatureGated, - Math(ref e) => Math(e.clone()), - - LayoutError(ref e) => { - return tcx.lift(e).map(LayoutError) - } - ErroneousReferencedConstant(ref e) => { - return tcx.lift(e).map(ErroneousReferencedConstant) - } - + MachineError(ref err) => MachineError(err.clone()), + FunctionPointerTyMismatch(a, b) => FunctionPointerTyMismatch( + tcx.lift(&a)?, + tcx.lift(&b)?, + ), + NoMirFor(ref s) => NoMirFor(s.clone()), + UnterminatedCString(ptr) => UnterminatedCString(ptr), + DanglingPointerDeref => DanglingPointerDeref, + DoubleFree => DoubleFree, + InvalidMemoryAccess => InvalidMemoryAccess, + InvalidFunctionPointer => InvalidFunctionPointer, + InvalidBool => InvalidBool, + InvalidDiscriminant => InvalidDiscriminant, + PointerOutOfBounds { + ptr, + access, + allocation_size, + } => PointerOutOfBounds { ptr, access, 
allocation_size }, + InvalidNullPointerUsage => InvalidNullPointerUsage, + ReadPointerAsBytes => ReadPointerAsBytes, + ReadBytesAsPointer => ReadBytesAsPointer, + ReadForeignStatic => ReadForeignStatic, + InvalidPointerMath => InvalidPointerMath, + ReadUndefBytes => ReadUndefBytes, + DeadLocal => DeadLocal, + InvalidBoolOp(bop) => InvalidBoolOp(bop), + Unimplemented(ref s) => Unimplemented(s.clone()), + DerefFunctionPointer => DerefFunctionPointer, + ExecuteMemory => ExecuteMemory, + BoundsCheck { ref len, ref index } => BoundsCheck { + len: tcx.lift(len)?, + index: tcx.lift(index)?, + }, + Intrinsic(ref s) => Intrinsic(s.clone()), + InvalidChar(c) => InvalidChar(c), + StackFrameLimitReached => StackFrameLimitReached, + OutOfTls => OutOfTls, + TlsOutOfBounds => TlsOutOfBounds, + AbiViolation(ref s) => AbiViolation(s.clone()), + AlignmentCheckFailed { + required, + has, + } => AlignmentCheckFailed { required, has }, + MemoryLockViolation { + ptr, + len, + frame, + access, + ref lock, + } => MemoryLockViolation { ptr, len, frame, access, lock: lock.clone() }, + MemoryAcquireConflict { + ptr, + len, + kind, + ref lock, + } => MemoryAcquireConflict { ptr, len, kind, lock: lock.clone() }, + InvalidMemoryLockRelease { + ptr, + len, + frame, + ref lock, + } => InvalidMemoryLockRelease { ptr, len, frame, lock: lock.clone() }, + DeallocatedLockedMemory { + ptr, + ref lock, + } => DeallocatedLockedMemory { ptr, lock: lock.clone() }, + ValidationFailure(ref s) => ValidationFailure(s.clone()), + CalledClosureAsFunction => CalledClosureAsFunction, + VtableForArgumentlessMethod => VtableForArgumentlessMethod, + ModifiedConstantMemory => ModifiedConstantMemory, + AssumptionNotHeld => AssumptionNotHeld, + InlineAsm => InlineAsm, + TypeNotPrimitive(ty) => TypeNotPrimitive(tcx.lift(&ty)?), + ReallocatedWrongMemoryKind(ref a, ref b) => { + ReallocatedWrongMemoryKind(a.clone(), b.clone()) + }, + DeallocatedWrongMemoryKind(ref a, ref b) => { + DeallocatedWrongMemoryKind(a.clone(), 
b.clone()) + }, + ReallocateNonBasePtr => ReallocateNonBasePtr, + DeallocateNonBasePtr => DeallocateNonBasePtr, + IncorrectAllocationInformation(a, b, c, d) => { + IncorrectAllocationInformation(a, b, c, d) + }, + Layout(lay) => Layout(tcx.lift(&lay)?), + HeapAllocZeroBytes => HeapAllocZeroBytes, + HeapAllocNonPowerOfTwoAlignment(n) => HeapAllocNonPowerOfTwoAlignment(n), + Unreachable => Unreachable, + Panic => Panic, + ReadFromReturnPointer => ReadFromReturnPointer, + PathNotFound(ref v) => PathNotFound(v.clone()), + UnimplementedTraitSelection => UnimplementedTraitSelection, TypeckError => TypeckError, + TooGeneric => TooGeneric, CheckMatchError => CheckMatchError, + ReferencedConstant(ref err) => ReferencedConstant(tcx.lift(&**err)?.into()), + OverflowNeg => OverflowNeg, + Overflow(op) => Overflow(op), + DivisionByZero => DivisionByZero, + RemainderByZero => RemainderByZero, + GeneratorResumedAfterReturn => GeneratorResumedAfterReturn, + GeneratorResumedAfterPanic => GeneratorResumedAfterPanic, + InfiniteLoop => InfiniteLoop, }) } } @@ -653,6 +607,42 @@ impl<'a, 'tcx> Lift<'tcx> for ty::layout::LayoutError<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> { + type Lifted = ty::InstanceDef<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match *self { + ty::InstanceDef::Item(def_id) => + Some(ty::InstanceDef::Item(def_id)), + ty::InstanceDef::Intrinsic(def_id) => + Some(ty::InstanceDef::Intrinsic(def_id)), + ty::InstanceDef::FnPtrShim(def_id, ref ty) => + Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?)), + ty::InstanceDef::Virtual(def_id, n) => + Some(ty::InstanceDef::Virtual(def_id, n)), + ty::InstanceDef::ClosureOnceShim { call_once } => + Some(ty::InstanceDef::ClosureOnceShim { call_once }), + ty::InstanceDef::DropGlue(def_id, ref ty) => + Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?)), + ty::InstanceDef::CloneShim(def_id, ref ty) => + Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?)), + } + } 
+} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for ty::Instance<'a> { + type Lifted = ty::Instance<'tcx>; + def, substs + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for interpret::GlobalId<'a> { + type Lifted = interpret::GlobalId<'tcx>; + instance, promoted + } +} + /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. // @@ -664,6 +654,17 @@ impl<'a, 'tcx> Lift<'tcx> for ty::layout::LayoutError<'a> { // can easily refactor the folding into the TypeFolder trait as // needed. +/// AdtDefs are basically the same as a DefId. +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::AdtDef { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self { + *self + } + + fn super_visit_with>(&self, _visitor: &mut V) -> bool { + false + } +} + impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> (T, U) { (self.0.fold_with(folder), self.1.fold_with(folder)) @@ -674,14 +675,11 @@ impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T } } -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Option { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - self.as_ref().map(|t| t.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.iter().any(|t| t.visit_with(visitor)) - } +EnumTypeFoldableImpl! 
{ + impl<'tcx, T> TypeFoldable<'tcx> for Option { + (Some)(a), + (None), + } where T: TypeFoldable<'tcx> } impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc { @@ -717,7 +715,7 @@ impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec { impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::Binder(self.0.fold_with(folder)) + self.map_bound_ref(|ty| ty.fold_with(folder)) } fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { @@ -725,7 +723,7 @@ impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { } fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) + self.skip_binder().visit_with(visitor) } fn visit_with>(&self, visitor: &mut V) -> bool { @@ -748,22 +746,11 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice } } -impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialPredicate<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use ty::ExistentialPredicate::*; - match *self { - Trait(ref tr) => Trait(tr.fold_with(folder)), - Projection(ref p) => Projection(p.fold_with(folder)), - AutoTrait(did) => AutoTrait(did), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::ExistentialPredicate::Trait(ref tr) => tr.visit_with(visitor), - ty::ExistentialPredicate::Projection(ref p) => p.visit_with(visitor), - ty::ExistentialPredicate::AutoTrait(_) => false, - } +EnumTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialPredicate<'tcx> { + (ty::ExistentialPredicate::Trait)(a), + (ty::ExistentialPredicate::Projection)(a), + (ty::ExistentialPredicate::AutoTrait)(a), } } @@ -778,6 +765,74 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { } } +impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + use ty::InstanceDef::*; + Self { + substs: self.substs.fold_with(folder), + def: match self.def { + Item(did) => Item(did.fold_with(folder)), + Intrinsic(did) => Intrinsic(did.fold_with(folder)), + FnPtrShim(did, ty) => FnPtrShim( + did.fold_with(folder), + ty.fold_with(folder), + ), + Virtual(did, i) => Virtual( + did.fold_with(folder), + i, + ), + ClosureOnceShim { call_once } => ClosureOnceShim { + call_once: call_once.fold_with(folder), + }, + DropGlue(did, ty) => DropGlue( + did.fold_with(folder), + ty.fold_with(folder), + ), + CloneShim(did, ty) => CloneShim( + did.fold_with(folder), + ty.fold_with(folder), + ), + }, + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + use ty::InstanceDef::*; + self.substs.visit_with(visitor) || + match self.def { + Item(did) => did.visit_with(visitor), + Intrinsic(did) => did.visit_with(visitor), + FnPtrShim(did, ty) => { + did.visit_with(visitor) || + ty.visit_with(visitor) + }, + Virtual(did, _) => did.visit_with(visitor), + ClosureOnceShim { call_once } => call_once.visit_with(visitor), + DropGlue(did, ty) => { + did.visit_with(visitor) || + ty.visit_with(visitor) + }, + CloneShim(did, ty) => { + did.visit_with(visitor) || + ty.visit_with(visitor) + }, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + Self { + instance: self.instance.fold_with(folder), + promoted: self.promoted + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + 
self.instance.visit_with(visitor) + } +} + impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let sty = match self.sty { @@ -787,17 +842,21 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyAdt(tid, substs) => ty::TyAdt(tid, substs.fold_with(folder)), ty::TyDynamic(ref trait_ty, ref region) => ty::TyDynamic(trait_ty.fold_with(folder), region.fold_with(folder)), - ty::TyTuple(ts, defaulted) => ty::TyTuple(ts.fold_with(folder), defaulted), + ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)), ty::TyFnDef(def_id, substs) => { ty::TyFnDef(def_id, substs.fold_with(folder)) } ty::TyFnPtr(f) => ty::TyFnPtr(f.fold_with(folder)), - ty::TyRef(ref r, tm) => { - ty::TyRef(r.fold_with(folder), tm.fold_with(folder)) + ty::TyRef(ref r, ty, mutbl) => { + ty::TyRef(r.fold_with(folder), ty.fold_with(folder), mutbl) } - ty::TyGenerator(did, substs, interior) => { - ty::TyGenerator(did, substs.fold_with(folder), interior.fold_with(folder)) + ty::TyGenerator(did, substs, movability) => { + ty::TyGenerator( + did, + substs.fold_with(folder), + movability) } + ty::TyGeneratorWitness(types) => ty::TyGeneratorWitness(types.fold_with(folder)), ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)), ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)), ty::TyAnon(did, substs) => ty::TyAnon(did, substs.fold_with(folder)), @@ -825,13 +884,14 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { ty::TyAdt(_, substs) => substs.visit_with(visitor), ty::TyDynamic(ref trait_ty, ref reg) => trait_ty.visit_with(visitor) || reg.visit_with(visitor), - ty::TyTuple(ts, _) => ts.visit_with(visitor), + ty::TyTuple(ts) => ts.visit_with(visitor), ty::TyFnDef(_, substs) => substs.visit_with(visitor), ty::TyFnPtr(ref f) => f.visit_with(visitor), - ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor), - ty::TyGenerator(_did, ref substs, ref interior) => { - 
substs.visit_with(visitor) || interior.visit_with(visitor) + ty::TyRef(r, ty, _) => r.visit_with(visitor) || ty.visit_with(visitor), + ty::TyGenerator(_did, ref substs, _) => { + substs.visit_with(visitor) } + ty::TyGeneratorWitness(ref types) => types.visit_with(visitor), ty::TyClosure(_did, ref substs) => substs.visit_with(visitor), ty::TyProjection(ref data) => data.visit_with(visitor), ty::TyAnon(_, ref substs) => substs.visit_with(visitor), @@ -846,13 +906,9 @@ impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::TypeAndMut { ty: self.ty.fold_with(folder), mutbl: self.mutbl } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.ty.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> { + ty, mutbl } } @@ -862,20 +918,9 @@ BraceStructTypeFoldableImpl! { } } -impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - let inputs_and_output = self.inputs_and_output.fold_with(folder); - ty::FnSig { - inputs_and_output: folder.tcx().intern_type_list(&inputs_and_output), - variadic: self.variadic, - unsafety: self.unsafety, - abi: self.abi, - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.inputs().iter().any(|i| i.visit_with(visitor)) || - self.output().visit_with(visitor) +BraceStructTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> { + inputs_and_output, variadic, unsafety, abi } } @@ -914,25 +959,15 @@ impl<'tcx> TypeFoldable<'tcx> for ty::Region<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::ClosureSubsts { - substs: self.substs.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> { + substs, } } -impl<'tcx> TypeFoldable<'tcx> for ty::GeneratorInterior<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::GeneratorInterior::new(self.witness.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.witness.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::GeneratorSubsts<'tcx> { + substs, } } @@ -943,74 +978,32 @@ BraceStructTypeFoldableImpl! 
{ } } -impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::Adjust<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - ty::adjustment::Adjust::NeverToAny | - ty::adjustment::Adjust::ReifyFnPointer | - ty::adjustment::Adjust::UnsafeFnPointer | - ty::adjustment::Adjust::ClosureFnPointer | - ty::adjustment::Adjust::MutToConstPointer | - ty::adjustment::Adjust::Unsize => self.clone(), - ty::adjustment::Adjust::Deref(ref overloaded) => { - ty::adjustment::Adjust::Deref(overloaded.fold_with(folder)) - } - ty::adjustment::Adjust::Borrow(ref autoref) => { - ty::adjustment::Adjust::Borrow(autoref.fold_with(folder)) - } - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::adjustment::Adjust::NeverToAny | - ty::adjustment::Adjust::ReifyFnPointer | - ty::adjustment::Adjust::UnsafeFnPointer | - ty::adjustment::Adjust::ClosureFnPointer | - ty::adjustment::Adjust::MutToConstPointer | - ty::adjustment::Adjust::Unsize => false, - ty::adjustment::Adjust::Deref(ref overloaded) => { - overloaded.visit_with(visitor) - } - ty::adjustment::Adjust::Borrow(ref autoref) => { - autoref.visit_with(visitor) - } - } +EnumTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::Adjust<'tcx> { + (ty::adjustment::Adjust::NeverToAny), + (ty::adjustment::Adjust::ReifyFnPointer), + (ty::adjustment::Adjust::UnsafeFnPointer), + (ty::adjustment::Adjust::ClosureFnPointer), + (ty::adjustment::Adjust::MutToConstPointer), + (ty::adjustment::Adjust::Unsize), + (ty::adjustment::Adjust::Deref)(a), + (ty::adjustment::Adjust::Borrow)(a), } } -impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::OverloadedDeref<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::adjustment::OverloadedDeref { - region: self.region.fold_with(folder), - mutbl: self.mutbl, - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.region.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::OverloadedDeref<'tcx> { + region, mutbl, } } -impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoBorrow<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - ty::adjustment::AutoBorrow::Ref(ref r, m) => { - ty::adjustment::AutoBorrow::Ref(r.fold_with(folder), m) - } - ty::adjustment::AutoBorrow::RawPtr(m) => ty::adjustment::AutoBorrow::RawPtr(m) - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::adjustment::AutoBorrow::Ref(r, _m) => r.visit_with(visitor), - ty::adjustment::AutoBorrow::RawPtr(_m) => false, - } +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoBorrow<'tcx> { + (ty::adjustment::AutoBorrow::Ref)(a, b), + (ty::adjustment::AutoBorrow::RawPtr)(m), } } - BraceStructTypeFoldableImpl! 
{ impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> { parent, predicates @@ -1028,46 +1021,17 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - ty::Predicate::Trait(ref a) => - ty::Predicate::Trait(a.fold_with(folder)), - ty::Predicate::Equate(ref binder) => - ty::Predicate::Equate(binder.fold_with(folder)), - ty::Predicate::Subtype(ref binder) => - ty::Predicate::Subtype(binder.fold_with(folder)), - ty::Predicate::RegionOutlives(ref binder) => - ty::Predicate::RegionOutlives(binder.fold_with(folder)), - ty::Predicate::TypeOutlives(ref binder) => - ty::Predicate::TypeOutlives(binder.fold_with(folder)), - ty::Predicate::Projection(ref binder) => - ty::Predicate::Projection(binder.fold_with(folder)), - ty::Predicate::WellFormed(data) => - ty::Predicate::WellFormed(data.fold_with(folder)), - ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => - ty::Predicate::ClosureKind(closure_def_id, closure_substs.fold_with(folder), kind), - ty::Predicate::ObjectSafe(trait_def_id) => - ty::Predicate::ObjectSafe(trait_def_id), - ty::Predicate::ConstEvaluatable(def_id, substs) => - ty::Predicate::ConstEvaluatable(def_id, substs.fold_with(folder)), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::Predicate::Trait(ref a) => a.visit_with(visitor), - ty::Predicate::Equate(ref binder) => binder.visit_with(visitor), - ty::Predicate::Subtype(ref binder) => binder.visit_with(visitor), - ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor), - ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor), - ty::Predicate::Projection(ref binder) => binder.visit_with(visitor), - ty::Predicate::WellFormed(data) => data.visit_with(visitor), - ty::Predicate::ClosureKind(_closure_def_id, closure_substs, _kind) => - 
closure_substs.visit_with(visitor), - ty::Predicate::ObjectSafe(_trait_def_id) => false, - ty::Predicate::ConstEvaluatable(_def_id, substs) => substs.visit_with(visitor), - } +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { + (ty::Predicate::Trait)(a), + (ty::Predicate::Subtype)(a), + (ty::Predicate::RegionOutlives)(a), + (ty::Predicate::TypeOutlives)(a), + (ty::Predicate::Projection)(a), + (ty::Predicate::WellFormed)(a), + (ty::Predicate::ClosureKind)(a, b, c), + (ty::Predicate::ObjectSafe)(a), + (ty::Predicate::ConstEvaluatable)(a, b), } } @@ -1095,95 +1059,40 @@ BraceStructTypeFoldableImpl! { } } -impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::EquatePredicate(self.0.fold_with(folder), self.1.fold_with(folder)) - } +BraceStructTypeFoldableImpl! { + impl<'tcx, T> TypeFoldable<'tcx> for ty::ParamEnvAnd<'tcx, T> { + param_env, value + } where T: TypeFoldable<'tcx> +} - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) || self.1.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::SubtypePredicate<'tcx> { + a_is_expected, a, b } } -impl<'tcx> TypeFoldable<'tcx> for ty::SubtypePredicate<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::SubtypePredicate { - a_is_expected: self.a_is_expected, - a: self.a.fold_with(folder), - b: self.b.fold_with(folder) - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.a.visit_with(visitor) || self.b.visit_with(visitor) +BraceStructTypeFoldableImpl! 
{ + impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> { + trait_ref } } -impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::TraitPredicate { - trait_ref: self.trait_ref.fold_with(folder) - } - } +TupleStructTypeFoldableImpl! { + impl<'tcx,T,U> TypeFoldable<'tcx> for ty::OutlivesPredicate { + a, b + } where T : TypeFoldable<'tcx>, U : TypeFoldable<'tcx>, +} - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.trait_ref.visit_with(visitor) +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> { + def, span, ty } } -impl<'tcx,T,U> TypeFoldable<'tcx> for ty::OutlivesPredicate - where T : TypeFoldable<'tcx>, - U : TypeFoldable<'tcx>, -{ - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::OutlivesPredicate(self.0.fold_with(folder), - self.1.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) || self.1.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::ClosureUpvar { - def: self.def, - span: self.span, - ty: self.ty.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.ty.visit_with(visitor) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::error::ExpectedFound { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - ty::error::ExpectedFound { - expected: self.expected.fold_with(folder), - found: self.found.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.expected.visit_with(visitor) || self.found.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for type_variable::Default<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: 
TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - type_variable::Default { - ty: self.ty.fold_with(folder), - origin_span: self.origin_span, - def_id: self.def_id - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.ty.visit_with(visitor) - } +BraceStructTypeFoldableImpl! { + impl<'tcx, T> TypeFoldable<'tcx> for ty::error::ExpectedFound { + expected, found + } where T: TypeFoldable<'tcx> } impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec { @@ -1196,151 +1105,49 @@ impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec } } -impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use ty::error::TypeError::*; - - match *self { - Mismatch => Mismatch, - UnsafetyMismatch(x) => UnsafetyMismatch(x.fold_with(folder)), - AbiMismatch(x) => AbiMismatch(x.fold_with(folder)), - Mutability => Mutability, - TupleSize(x) => TupleSize(x), - FixedArraySize(x) => FixedArraySize(x), - ArgCount => ArgCount, - RegionsDoesNotOutlive(a, b) => { - RegionsDoesNotOutlive(a.fold_with(folder), b.fold_with(folder)) - }, - RegionsInsufficientlyPolymorphic(a, b) => { - RegionsInsufficientlyPolymorphic(a, b.fold_with(folder)) - }, - RegionsOverlyPolymorphic(a, b) => { - RegionsOverlyPolymorphic(a, b.fold_with(folder)) - }, - IntMismatch(x) => IntMismatch(x), - FloatMismatch(x) => FloatMismatch(x), - Traits(x) => Traits(x), - VariadicMismatch(x) => VariadicMismatch(x), - CyclicTy(t) => CyclicTy(t.fold_with(folder)), - ProjectionMismatched(x) => ProjectionMismatched(x), - ProjectionBoundsLength(x) => ProjectionBoundsLength(x), - Sorts(x) => Sorts(x.fold_with(folder)), - TyParamDefaultMismatch(ref x) => TyParamDefaultMismatch(x.fold_with(folder)), - ExistentialMismatch(x) => ExistentialMismatch(x.fold_with(folder)), - OldStyleLUB(ref x) => OldStyleLUB(x.fold_with(folder)), - } - } - - fn super_visit_with>(&self, visitor: &mut 
V) -> bool { - use ty::error::TypeError::*; - - match *self { - UnsafetyMismatch(x) => x.visit_with(visitor), - AbiMismatch(x) => x.visit_with(visitor), - RegionsDoesNotOutlive(a, b) => { - a.visit_with(visitor) || b.visit_with(visitor) - }, - RegionsInsufficientlyPolymorphic(_, b) | - RegionsOverlyPolymorphic(_, b) => { - b.visit_with(visitor) - }, - Sorts(x) => x.visit_with(visitor), - OldStyleLUB(ref x) => x.visit_with(visitor), - TyParamDefaultMismatch(ref x) => x.visit_with(visitor), - ExistentialMismatch(x) => x.visit_with(visitor), - CyclicTy(t) => t.visit_with(visitor), - Mismatch | - Mutability | - TupleSize(_) | - FixedArraySize(_) | - ArgCount | - IntMismatch(_) | - FloatMismatch(_) | - Traits(_) | - VariadicMismatch(_) | - ProjectionMismatched(_) | - ProjectionBoundsLength(_) => false, - } +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for ty::error::TypeError<'tcx> { + (ty::error::TypeError::Mismatch), + (ty::error::TypeError::UnsafetyMismatch)(x), + (ty::error::TypeError::AbiMismatch)(x), + (ty::error::TypeError::Mutability), + (ty::error::TypeError::TupleSize)(x), + (ty::error::TypeError::FixedArraySize)(x), + (ty::error::TypeError::ArgCount), + (ty::error::TypeError::RegionsDoesNotOutlive)(a, b), + (ty::error::TypeError::RegionsInsufficientlyPolymorphic)(a, b), + (ty::error::TypeError::RegionsOverlyPolymorphic)(a, b), + (ty::error::TypeError::IntMismatch)(x), + (ty::error::TypeError::FloatMismatch)(x), + (ty::error::TypeError::Traits)(x), + (ty::error::TypeError::VariadicMismatch)(x), + (ty::error::TypeError::CyclicTy)(t), + (ty::error::TypeError::ProjectionMismatched)(x), + (ty::error::TypeError::ProjectionBoundsLength)(x), + (ty::error::TypeError::Sorts)(x), + (ty::error::TypeError::ExistentialMismatch)(x), + (ty::error::TypeError::OldStyleLUB)(x), } } -impl<'tcx> TypeFoldable<'tcx> for ConstVal<'tcx> { +impl<'tcx> TypeFoldable<'tcx> for ConstValue<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) 
-> Self { match *self { - ConstVal::Integral(i) => ConstVal::Integral(i), - ConstVal::Float(f) => ConstVal::Float(f), - ConstVal::Str(s) => ConstVal::Str(s), - ConstVal::ByteStr(b) => ConstVal::ByteStr(b), - ConstVal::Bool(b) => ConstVal::Bool(b), - ConstVal::Char(c) => ConstVal::Char(c), - ConstVal::Variant(def_id) => ConstVal::Variant(def_id), - ConstVal::Function(def_id, substs) => { - ConstVal::Function(def_id, substs.fold_with(folder)) - } - ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { - let new_fields: Vec<_> = fields.iter().map(|&(name, v)| { - (name, v.fold_with(folder)) - }).collect(); - let fields = if new_fields == fields { - fields - } else { - folder.tcx().alloc_name_const_slice(&new_fields) - }; - ConstVal::Aggregate(ConstAggregate::Struct(fields)) - } - ConstVal::Aggregate(ConstAggregate::Tuple(fields)) => { - let new_fields: Vec<_> = fields.iter().map(|v| { - v.fold_with(folder) - }).collect(); - let fields = if new_fields == fields { - fields - } else { - folder.tcx().alloc_const_slice(&new_fields) - }; - ConstVal::Aggregate(ConstAggregate::Tuple(fields)) - } - ConstVal::Aggregate(ConstAggregate::Array(fields)) => { - let new_fields: Vec<_> = fields.iter().map(|v| { - v.fold_with(folder) - }).collect(); - let fields = if new_fields == fields { - fields - } else { - folder.tcx().alloc_const_slice(&new_fields) - }; - ConstVal::Aggregate(ConstAggregate::Array(fields)) - } - ConstVal::Aggregate(ConstAggregate::Repeat(v, count)) => { - let v = v.fold_with(folder); - ConstVal::Aggregate(ConstAggregate::Repeat(v, count)) - } - ConstVal::Unevaluated(def_id, substs) => { - ConstVal::Unevaluated(def_id, substs.fold_with(folder)) + ConstValue::Scalar(v) => ConstValue::Scalar(v), + ConstValue::ScalarPair(a, b) => ConstValue::ScalarPair(a, b), + ConstValue::ByRef(alloc, offset) => ConstValue::ByRef(alloc, offset), + ConstValue::Unevaluated(def_id, substs) => { + ConstValue::Unevaluated(def_id, substs.fold_with(folder)) } } } fn 
super_visit_with>(&self, visitor: &mut V) -> bool { match *self { - ConstVal::Integral(_) | - ConstVal::Float(_) | - ConstVal::Str(_) | - ConstVal::ByteStr(_) | - ConstVal::Bool(_) | - ConstVal::Char(_) | - ConstVal::Variant(_) => false, - ConstVal::Function(_, substs) => substs.visit_with(visitor), - ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { - fields.iter().any(|&(_, v)| v.visit_with(visitor)) - } - ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | - ConstVal::Aggregate(ConstAggregate::Array(fields)) => { - fields.iter().any(|v| v.visit_with(visitor)) - } - ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { - v.visit_with(visitor) - } - ConstVal::Unevaluated(_, substs) => substs.visit_with(visitor), + ConstValue::Scalar(_) | + ConstValue::ScalarPair(_, _) | + ConstValue::ByRef(_, _) => false, + ConstValue::Unevaluated(_, substs) => substs.visit_with(visitor), } } } diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 0889efdc142b..65e31f21792d 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -12,19 +12,21 @@ use hir::def_id::DefId; -use middle::const_val::ConstVal; +use mir::interpret::ConstValue; use middle::region; +use polonius_engine::Atom; use rustc_data_structures::indexed_vec::Idx; -use ty::subst::{Substs, Subst}; +use ty::subst::{Substs, Subst, Kind, UnpackedKind}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; -use ty::{Slice, TyS}; -use ty::subst::Kind; +use ty::{Slice, TyS, ParamEnvAnd, ParamEnv}; +use util::captures::Captures; +use mir::interpret::{Scalar, Pointer, Value}; use std::iter; use std::cmp::Ordering; -use syntax::abi; -use syntax::ast::{self, Name}; -use syntax::symbol::keywords; +use rustc_target::spec::abi; +use syntax::ast::{self, Ident}; +use syntax::symbol::{keywords, InternedString}; use serialize; @@ -33,7 +35,7 @@ use hir; use self::InferTy::*; use self::TypeVariants::*; -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] 
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct TypeAndMut<'tcx> { pub ty: Ty<'tcx>, pub mutbl: hir::Mutability, @@ -58,7 +60,7 @@ pub enum BoundRegion { /// /// The def-id is needed to distinguish free regions in /// the event of shadowing. - BrNamed(DefId, Name), + BrNamed(DefId, InternedString), /// Fresh bound identifiers created during GLB computations. BrFresh(u32), @@ -79,7 +81,7 @@ impl BoundRegion { /// NB: If you change this, you'll probably want to change the corresponding /// AST structure in libsyntax/ast.rs as well. -#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum TypeVariants<'tcx> { /// The primitive boolean type. Written as `bool`. TyBool, @@ -121,7 +123,7 @@ pub enum TypeVariants<'tcx> { /// A reference; a pointer with an associated lifetime. Written as /// `&'a mut T` or `&'a T`. - TyRef(Region<'tcx>, TypeAndMut<'tcx>), + TyRef(Region<'tcx>, Ty<'tcx>, hir::Mutability), /// The anonymous type of a function declaration/definition. Each /// function has a unique type. @@ -139,25 +141,27 @@ pub enum TypeVariants<'tcx> { /// The anonymous type of a generator. Used to represent the type of /// `|a| yield a`. - TyGenerator(DefId, ClosureSubsts<'tcx>, GeneratorInterior<'tcx>), + TyGenerator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability), + + /// A type representin the types stored inside a generator. + /// This should only appear in GeneratorInteriors. + TyGeneratorWitness(Binder<&'tcx Slice>>), /// The never type `!` TyNever, /// A tuple type. For example, `(i32, bool)`. - /// The bool indicates whether this is a unit tuple and was created by - /// defaulting a diverging type variable with feature(never_type) disabled. - /// It's only purpose is for raising future-compatibility warnings for when - /// diverging type variables start defaulting to ! 
instead of (). - TyTuple(&'tcx Slice>, bool), + TyTuple(&'tcx Slice>), /// The projection of an associated type. For example, /// `>::N`. TyProjection(ProjectionTy<'tcx>), /// Anonymized (`impl Trait`) type found in a return type. - /// The DefId comes from the `impl Trait` ast::Ty node, and the - /// substitutions are for the generics of the function in question. + /// The DefId comes either from + /// * the `impl Trait` ast::Ty node, + /// * or the `existential type` declaration + /// The substitutions are for the generics of the function in question. /// After typeck, the concrete type can be found in the `types` map. TyAnon(DefId, &'tcx Substs<'tcx>), @@ -242,7 +246,7 @@ pub enum TypeVariants<'tcx> { /// out later. /// /// All right, you say, but why include the type parameters from the -/// original function then? The answer is that trans may need them +/// original function then? The answer is that codegen may need them /// when monomorphizing, and they may not appear in the upvars. A /// closure could capture no variables but still make use of some /// in-scope type parameter with a bound (e.g., if our example above @@ -267,12 +271,12 @@ pub enum TypeVariants<'tcx> { /// /// It'd be nice to split this struct into ClosureSubsts and /// GeneratorSubsts, I believe. -nmatsakis -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ClosureSubsts<'tcx> { /// Lifetime and type parameters from the enclosing function, /// concatenated with the types of the upvars. /// - /// These are separated out because trans wants to pass them around + /// These are separated out because codegen wants to pass them around /// when monomorphizing. pub substs: &'tcx Substs<'tcx>, } @@ -291,10 +295,10 @@ impl<'tcx> ClosureSubsts<'tcx> { /// ordering. 
fn split(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> SplitClosureSubsts<'tcx> { let generics = tcx.generics_of(def_id); - let parent_len = generics.parent_count(); + let parent_len = generics.parent_count; SplitClosureSubsts { - closure_kind_ty: self.substs[parent_len].as_type().expect("CK should be a type"), - closure_sig_ty: self.substs[parent_len + 1].as_type().expect("CS should be a type"), + closure_kind_ty: self.substs.type_at(parent_len), + closure_sig_ty: self.substs.type_at(parent_len + 1), upvar_kinds: &self.substs[parent_len + 2..], } } @@ -304,7 +308,13 @@ impl<'tcx> ClosureSubsts<'tcx> { impl Iterator> + 'tcx { let SplitClosureSubsts { upvar_kinds, .. } = self.split(def_id, tcx); - upvar_kinds.iter().map(|t| t.as_type().expect("upvar should be type")) + upvar_kinds.iter().map(|t| { + if let UnpackedKind::Type(ty) = t.unpack() { + ty + } else { + bug!("upvar should be type") + } + }) } /// Returns the closure kind for this closure; may return a type @@ -322,37 +332,6 @@ impl<'tcx> ClosureSubsts<'tcx> { self.split(def_id, tcx).closure_sig_ty } - /// Returns the type representing the yield type of the generator. - pub fn generator_yield_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { - self.closure_kind_ty(def_id, tcx) - } - - /// Returns the type representing the return type of the generator. - pub fn generator_return_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { - self.closure_sig_ty(def_id, tcx) - } - - /// Return the "generator signature", which consists of its yield - /// and return types. - /// - /// NB. Some bits of the code prefers to see this wrapped in a - /// binder, but it never contains bound regions. Probably this - /// function should be removed. - pub fn generator_poly_sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> PolyGenSig<'tcx> { - ty::Binder(self.generator_sig(def_id, tcx)) - } - - /// Return the "generator signature", which consists of its yield - /// and return types. 
- pub fn generator_sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> GenSig<'tcx> { - ty::GenSig { - yield_ty: self.generator_yield_ty(def_id, tcx), - return_ty: self.generator_return_ty(def_id, tcx), - } - } -} - -impl<'tcx> ClosureSubsts<'tcx> { /// Returns the closure kind for this closure; only usable outside /// of an inference context, because in that context we know that /// there are no type variables. @@ -375,52 +354,140 @@ impl<'tcx> ClosureSubsts<'tcx> { } } -impl<'a, 'gcx, 'tcx> ClosureSubsts<'tcx> { +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct GeneratorSubsts<'tcx> { + pub substs: &'tcx Substs<'tcx>, +} + +struct SplitGeneratorSubsts<'tcx> { + yield_ty: Ty<'tcx>, + return_ty: Ty<'tcx>, + witness: Ty<'tcx>, + upvar_kinds: &'tcx [Kind<'tcx>], +} + +impl<'tcx> GeneratorSubsts<'tcx> { + fn split(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> SplitGeneratorSubsts<'tcx> { + let generics = tcx.generics_of(def_id); + let parent_len = generics.parent_count; + SplitGeneratorSubsts { + yield_ty: self.substs.type_at(parent_len), + return_ty: self.substs.type_at(parent_len + 1), + witness: self.substs.type_at(parent_len + 2), + upvar_kinds: &self.substs[parent_len + 3..], + } + } + + /// This describes the types that can be contained in a generator. + /// It will be a type variable initially and unified in the last stages of typeck of a body. + /// It contains a tuple of all the types that could end up on a generator frame. + /// The state transformation MIR pass may only produce layouts which mention types + /// in this tuple. Upvars are not counted here. + pub fn witness(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { + self.split(def_id, tcx).witness + } + + #[inline] + pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> + impl Iterator> + 'tcx + { + let SplitGeneratorSubsts { upvar_kinds, .. 
} = self.split(def_id, tcx); + upvar_kinds.iter().map(|t| { + if let UnpackedKind::Type(ty) = t.unpack() { + ty + } else { + bug!("upvar should be type") + } + }) + } + + /// Returns the type representing the yield type of the generator. + pub fn yield_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { + self.split(def_id, tcx).yield_ty + } + + /// Returns the type representing the return type of the generator. + pub fn return_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { + self.split(def_id, tcx).return_ty + } + + /// Return the "generator signature", which consists of its yield + /// and return types. + /// + /// NB. Some bits of the code prefers to see this wrapped in a + /// binder, but it never contains bound regions. Probably this + /// function should be removed. + pub fn poly_sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> PolyGenSig<'tcx> { + ty::Binder::dummy(self.sig(def_id, tcx)) + } + + /// Return the "generator signature", which consists of its yield + /// and return types. + pub fn sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> GenSig<'tcx> { + ty::GenSig { + yield_ty: self.yield_ty(def_id, tcx), + return_ty: self.return_ty(def_id, tcx), + } + } +} + +impl<'a, 'gcx, 'tcx> GeneratorSubsts<'tcx> { /// This returns the types of the MIR locals which had to be stored across suspension points. /// It is calculated in rustc_mir::transform::generator::StateTransform. /// All the types here must be in the tuple in GeneratorInterior. - pub fn state_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> - impl Iterator> + 'a - { + pub fn state_tys( + self, + def_id: DefId, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ) -> impl Iterator> + Captures<'gcx> + 'a { let state = tcx.generator_layout(def_id).fields.iter(); state.map(move |d| d.ty.subst(tcx, self.substs)) } + /// This is the types of the fields of a generate which + /// is available before the generator transformation. 
+ /// It includes the upvars and the state discriminant which is u32. + pub fn pre_transforms_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> + impl Iterator> + 'a + { + self.upvar_tys(def_id, tcx).chain(iter::once(tcx.types.u32)) + } + /// This is the types of all the fields stored in a generator. /// It includes the upvars, state types and the state discriminant which is u32. pub fn field_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> - impl Iterator> + 'a + impl Iterator> + Captures<'gcx> + 'a { - let upvars = self.upvar_tys(def_id, tcx); - let state = self.state_tys(def_id, tcx); - upvars.chain(iter::once(tcx.types.u32)).chain(state) + self.pre_transforms_tys(def_id, tcx).chain(self.state_tys(def_id, tcx)) } } -/// This describes the types that can be contained in a generator. -/// It will be a type variable initially and unified in the last stages of typeck of a body. -/// It contains a tuple of all the types that could end up on a generator frame. -/// The state transformation MIR pass may only produce layouts which mention types in this tuple. -/// Upvars are not counted here. 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct GeneratorInterior<'tcx> { - pub witness: Ty<'tcx>, +#[derive(Debug, Copy, Clone)] +pub enum UpvarSubsts<'tcx> { + Closure(ClosureSubsts<'tcx>), + Generator(GeneratorSubsts<'tcx>), } -impl<'tcx> GeneratorInterior<'tcx> { - pub fn new(witness: Ty<'tcx>) -> GeneratorInterior<'tcx> { - GeneratorInterior { witness } - } - - pub fn as_slice(&self) -> &'tcx Slice> { - match self.witness.sty { - ty::TyTuple(s, _) => s, - _ => bug!(), - } +impl<'tcx> UpvarSubsts<'tcx> { + #[inline] + pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> + impl Iterator> + 'tcx + { + let upvar_kinds = match self { + UpvarSubsts::Closure(substs) => substs.split(def_id, tcx).upvar_kinds, + UpvarSubsts::Generator(substs) => substs.split(def_id, tcx).upvar_kinds, + }; + upvar_kinds.iter().map(|t| { + if let UnpackedKind::Type(ty) = t.unpack() { + ty + } else { + bug!("upvar should be type") + } + }) } } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum ExistentialPredicate<'tcx> { /// e.g. Iterator Trait(ExistentialTraitRef<'tcx>), @@ -431,7 +498,9 @@ pub enum ExistentialPredicate<'tcx> { } impl<'a, 'gcx, 'tcx> ExistentialPredicate<'tcx> { - pub fn cmp(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, other: &Self) -> Ordering { + /// Compares via an ordering that will not change if modules are reordered or other changes are + /// made to the tree. In particular, this ordering is preserved across incremental compilations. 
+ pub fn stable_cmp(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, other: &Self) -> Ordering { use self::ExistentialPredicate::*; match (*self, *other) { (Trait(_), Trait(_)) => Ordering::Equal, @@ -501,13 +570,13 @@ impl<'tcx> Slice> { impl<'tcx> Binder<&'tcx Slice>> { pub fn principal(&self) -> Option> { - self.skip_binder().principal().map(Binder) + self.skip_binder().principal().map(Binder::bind) } #[inline] pub fn projection_bounds<'a>(&'a self) -> impl Iterator> + 'a { - self.skip_binder().projection_bounds().map(Binder) + self.skip_binder().projection_bounds().map(Binder::bind) } #[inline] @@ -517,7 +586,7 @@ impl<'tcx> Binder<&'tcx Slice>> { pub fn iter<'a>(&'a self) -> impl DoubleEndedIterator>> + 'tcx { - self.skip_binder().iter().cloned().map(Binder) + self.skip_binder().iter().cloned().map(Binder::bind) } } @@ -547,6 +616,15 @@ impl<'tcx> TraitRef<'tcx> { TraitRef { def_id: def_id, substs: substs } } + /// Returns a TraitRef of the form `P0: Foo` where `Pi` + /// are the parameters defined on trait. + pub fn identity<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> TraitRef<'tcx> { + TraitRef { + def_id, + substs: Substs::identity_for_item(tcx, def_id), + } + } + pub fn self_ty(&self) -> Ty<'tcx> { self.substs.type_at(0) } @@ -558,32 +636,34 @@ impl<'tcx> TraitRef<'tcx> { // associated types. 
self.substs.types() } + + pub fn from_method(tcx: TyCtxt<'_, '_, 'tcx>, + trait_id: DefId, + substs: &Substs<'tcx>) + -> ty::TraitRef<'tcx> { + let defs = tcx.generics_of(trait_id); + + ty::TraitRef { + def_id: trait_id, + substs: tcx.intern_substs(&substs[..defs.params.len()]) + } + } } pub type PolyTraitRef<'tcx> = Binder>; impl<'tcx> PolyTraitRef<'tcx> { pub fn self_ty(&self) -> Ty<'tcx> { - self.0.self_ty() + self.skip_binder().self_ty() } pub fn def_id(&self) -> DefId { - self.0.def_id - } - - pub fn substs(&self) -> &'tcx Substs<'tcx> { - // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> - self.0.substs - } - - pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { - // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> - self.0.input_types() + self.skip_binder().def_id } pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> { // Note that we preserve binding levels - Binder(ty::TraitPredicate { trait_ref: self.0.clone() }) + Binder(ty::TraitPredicate { trait_ref: self.skip_binder().clone() }) } } @@ -594,7 +674,7 @@ impl<'tcx> PolyTraitRef<'tcx> { /// /// The substitutions don't include the erased `Self`, only trait /// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above). -#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct ExistentialTraitRef<'tcx> { pub def_id: DefId, pub substs: &'tcx Substs<'tcx>, @@ -609,6 +689,18 @@ impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { self.substs.types() } + pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: ty::TraitRef<'tcx>) + -> ty::ExistentialTraitRef<'tcx> { + // Assert there is a Self. 
+ trait_ref.substs.type_at(0); + + ty::ExistentialTraitRef { + def_id: trait_ref.def_id, + substs: tcx.intern_substs(&trait_ref.substs[1..]) + } + } + /// Object types don't have a self-type specified. Therefore, when /// we convert the principal trait-ref into a normal trait-ref, /// you must give *some* self-type. A common choice is `mk_err()` @@ -616,12 +708,11 @@ impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> { // otherwise the escaping regions would be captured by the binder - assert!(!self_ty.has_escaping_regions()); + debug_assert!(!self_ty.has_escaping_regions()); ty::TraitRef { def_id: self.def_id, - substs: tcx.mk_substs( - iter::once(Kind::from(self_ty)).chain(self.substs.iter().cloned())) + substs: tcx.mk_substs_trait(self_ty, self.substs) } } } @@ -630,12 +721,17 @@ pub type PolyExistentialTraitRef<'tcx> = Binder>; impl<'tcx> PolyExistentialTraitRef<'tcx> { pub fn def_id(&self) -> DefId { - self.0.def_id + self.skip_binder().def_id } - pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { - // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> - self.0.input_types() + /// Object types don't have a self-type specified. Therefore, when + /// we convert the principal trait-ref into a normal trait-ref, + /// you must give *some* self-type. A common choice is `mk_err()` + /// or some skolemized type. + pub fn with_self_ty(&self, tcx: TyCtxt<'_, '_, 'tcx>, + self_ty: Ty<'tcx>) + -> ty::PolyTraitRef<'tcx> { + self.map_bound(|trait_ref| trait_ref.with_self_ty(tcx, self_ty)) } } @@ -646,8 +742,8 @@ impl<'tcx> PolyExistentialTraitRef<'tcx> { /// erase, or otherwise "discharge" these bound regions, we change the /// type from `Binder` to just `T` (see /// e.g. `liberate_late_bound_regions`). 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct Binder(pub T); +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct Binder(T); impl Binder { /// Wraps `value` in a binder, asserting that `value` does not @@ -657,7 +753,13 @@ impl Binder { pub fn dummy<'tcx>(value: T) -> Binder where T: TypeFoldable<'tcx> { - assert!(!value.has_escaping_regions()); + debug_assert!(!value.has_escaping_regions()); + Binder(value) + } + + /// Wraps `value` in a binder, binding late-bound regions (if any). + pub fn bind<'tcx>(value: T) -> Binder + { Binder(value) } @@ -682,7 +784,7 @@ impl Binder { } pub fn as_ref(&self) -> Binder<&T> { - ty::Binder(&self.0) + Binder(&self.0) } pub fn map_bound_ref(&self, f: F) -> Binder @@ -694,7 +796,7 @@ impl Binder { pub fn map_bound(self, f: F) -> Binder where F: FnOnce(T) -> U { - ty::Binder(f(self.0)) + Binder(f(self.0)) } /// Unwraps and returns the value within, but only if it contains @@ -727,7 +829,7 @@ impl Binder { pub fn fuse(self, u: Binder, f: F) -> Binder where F: FnOnce(T, U) -> R { - ty::Binder(f(self.0, u.0)) + Binder(f(self.0, u.0)) } /// Split the contents into two things that share the same binder @@ -740,13 +842,13 @@ impl Binder { where F: FnOnce(T) -> (U, V) { let (u, v) = f(self.0); - (ty::Binder(u), ty::Binder(v)) + (Binder(u), Binder(v)) } } /// Represents the projection of an associated type. In explicit UFCS /// form this would be written `>::N`. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ProjectionTy<'tcx> { /// The parameters of the associated item. pub substs: &'tcx Substs<'tcx>, @@ -762,11 +864,11 @@ impl<'a, 'tcx> ProjectionTy<'tcx> { /// Construct a ProjectionTy by searching the trait from trait_ref for the /// associated item named item_name. 
pub fn from_ref_and_name( - tcx: TyCtxt, trait_ref: ty::TraitRef<'tcx>, item_name: Name + tcx: TyCtxt, trait_ref: ty::TraitRef<'tcx>, item_name: Ident ) -> ProjectionTy<'tcx> { let item_def_id = tcx.associated_items(trait_ref.def_id).find(|item| { item.kind == ty::AssociatedKind::Type && - tcx.hygienic_eq(item_name, item.name, trait_ref.def_id) + tcx.hygienic_eq(item_name, item.ident, trait_ref.def_id) }).unwrap().def_id; ProjectionTy { @@ -814,7 +916,7 @@ impl<'tcx> PolyGenSig<'tcx> { /// - `inputs` is the list of arguments and their modes. /// - `output` is the return type. /// - `variadic` indicates whether this is a variadic function. (only true for foreign fns) -#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct FnSig<'tcx> { pub inputs_and_output: &'tcx Slice>, pub variadic: bool, @@ -836,7 +938,7 @@ pub type PolyFnSig<'tcx> = Binder>; impl<'tcx> PolyFnSig<'tcx> { pub fn inputs(&self) -> Binder<&'tcx [Ty<'tcx>]> { - Binder(self.skip_binder().inputs()) + self.map_bound_ref(|fn_sig| fn_sig.inputs()) } pub fn input(&self, index: usize) -> ty::Binder> { self.map_bound_ref(|fn_sig| fn_sig.inputs()[index]) @@ -858,32 +960,34 @@ impl<'tcx> PolyFnSig<'tcx> { } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct ParamTy { pub idx: u32, - pub name: Name, + pub name: InternedString, } impl<'a, 'gcx, 'tcx> ParamTy { - pub fn new(index: u32, name: Name) -> ParamTy { + pub fn new(index: u32, name: InternedString) -> ParamTy { ParamTy { idx: index, name: name } } pub fn for_self() -> ParamTy { - ParamTy::new(0, keywords::SelfType.name()) + ParamTy::new(0, keywords::SelfType.name().as_interned_str()) } - pub fn for_def(def: &ty::TypeParameterDef) -> ParamTy { + pub fn for_def(def: &ty::GenericParamDef) -> ParamTy { 
ParamTy::new(def.index, def.name) } pub fn to_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { - tcx.mk_param(self.idx, self.name) + tcx.mk_ty_param(self.idx, self.name) } pub fn is_self(&self) -> bool { - if self.name == keywords::SelfType.name() { - assert_eq!(self.idx, 0); + // FIXME(#50125): Ignoring `Self` with `idx != 0` might lead to weird behavior elsewhere, + // but this should only be possible when using `-Z continue-parse-after-error` like + // `compile-fail/issue-36638.rs`. + if self.name == keywords::SelfType.name().as_str() && self.idx == 0 { true } else { false @@ -898,11 +1002,11 @@ impl<'a, 'gcx, 'tcx> ParamTy { /// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char) /// ^ ^ | | | /// | | | | | -/// | +------------+ 1 | | +/// | +------------+ 0 | | /// | | | -/// +--------------------------------+ 2 | +/// +--------------------------------+ 1 | /// | | -/// +------------------------------------------+ 1 +/// +------------------------------------------+ 0 /// /// In this type, there are two binders (the outer fn and the inner /// fn). We need to be able to determine, for any given region, which @@ -914,9 +1018,9 @@ impl<'a, 'gcx, 'tcx> ParamTy { /// /// Let's start with the reference type `&'b isize` that is the first /// argument to the inner function. This region `'b` is assigned a De -/// Bruijn index of 1, meaning "the innermost binder" (in this case, a +/// Bruijn index of 0, meaning "the innermost binder" (in this case, a /// fn). The region `'a` that appears in the second argument type (`&'a -/// isize`) would then be assigned a De Bruijn index of 2, meaning "the +/// isize`) would then be assigned a De Bruijn index of 1, meaning "the /// second-innermost binder". (These indices are written on the arrays /// in the diagram). /// @@ -926,16 +1030,15 @@ impl<'a, 'gcx, 'tcx> ParamTy { /// the outermost fn. 
But this time, this reference is not nested within /// any other binders (i.e., it is not an argument to the inner fn, but /// rather the outer one). Therefore, in this case, it is assigned a -/// De Bruijn index of 1, because the innermost binder in that location +/// De Bruijn index of 0, because the innermost binder in that location /// is the outer fn. /// /// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index -#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, Copy, PartialOrd, Ord)] -pub struct DebruijnIndex { - /// We maintain the invariant that this is never 0. So 1 indicates - /// the innermost binder. To ensure this, create with `DebruijnIndex::new`. - pub depth: u32, -} +newtype_index!(DebruijnIndex + { + DEBUG_FORMAT = "DebruijnIndex({})", + const INNERMOST = 0, + }); pub type Region<'tcx> = &'tcx RegionKind; @@ -991,10 +1094,11 @@ pub type Region<'tcx> = &'tcx RegionKind; /// the inference variable is supposed to satisfy the relation /// *for every value of the skolemized region*. To ensure that doesn't /// happen, you can use `leak_check`. This is more clearly explained -/// by infer/higher_ranked/README.md. +/// by the [rustc guide]. /// /// [1]: http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ /// [2]: http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ +/// [rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/traits/hrtb.html #[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable, PartialOrd, Ord)] pub enum RegionKind { // Region bound in a type or fn declaration which will be @@ -1024,7 +1128,7 @@ pub enum RegionKind { /// A skolemized region - basically the higher-ranked version of ReFree. /// Should not exist after typeck. - ReSkolemized(SkolemizedRegionVid, BoundRegion), + ReSkolemized(ty::UniverseIndex, BoundRegion), /// Empty lifetime is for data that is never accessed. /// Bottom in the region lattice. 
We treat ReEmpty somewhat @@ -1035,7 +1139,7 @@ pub enum RegionKind { /// variable with no constraints. ReEmpty, - /// Erased region, used by trait selection, in MIR and during trans. + /// Erased region, used by trait selection, in MIR and during codegen. ReErased, /// These are regions bound in the "defining type" for a @@ -1043,6 +1147,9 @@ pub enum RegionKind { /// `ClosureRegionRequirements` that are produced by MIR borrowck. /// See `ClosureRegionRequirements` for more details. ReClosureBound(RegionVid), + + /// Canonicalized region, used only when preparing a trait query. + ReCanonical(CanonicalVar), } impl<'tcx> serialize::UseSpecializedDecodable for Region<'tcx> {} @@ -1051,20 +1158,20 @@ impl<'tcx> serialize::UseSpecializedDecodable for Region<'tcx> {} pub struct EarlyBoundRegion { pub def_id: DefId, pub index: u32, - pub name: Name, + pub name: InternedString, } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct TyVid { pub index: u32, } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct IntVid { pub index: u32, } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct FloatVid { pub index: u32, } @@ -1075,12 +1182,25 @@ newtype_index!(RegionVid DEBUG_FORMAT = custom, }); -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, PartialOrd, Ord)] -pub struct SkolemizedRegionVid { - pub index: u32, +impl Atom for RegionVid { + fn index(self) -> usize { + Idx::index(self) + } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +impl From for RegionVid { + fn from(i: usize) -> RegionVid { + RegionVid::new(i) + } +} + +impl From for 
usize { + fn from(vid: RegionVid) -> usize { + Idx::index(vid) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub enum InferTy { TyVar(TyVid), IntVar(IntVid), @@ -1092,10 +1212,15 @@ pub enum InferTy { FreshTy(u32), FreshIntTy(u32), FreshFloatTy(u32), + + /// Canonicalized type variable, used only when preparing a trait query. + CanonicalTy(CanonicalVar), } +newtype_index!(CanonicalVar); + /// A `ProjectionPredicate` for an `ExistentialTraitRef`. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ExistentialProjection<'tcx> { pub item_def_id: DefId, pub substs: &'tcx Substs<'tcx>, @@ -1122,13 +1247,12 @@ impl<'a, 'tcx, 'gcx> ExistentialProjection<'tcx> { -> ty::ProjectionPredicate<'tcx> { // otherwise the escaping regions would be captured by the binders - assert!(!self_ty.has_escaping_regions()); + debug_assert!(!self_ty.has_escaping_regions()); ty::ProjectionPredicate { projection_ty: ty::ProjectionTy { item_def_id: self.item_def_id, - substs: tcx.mk_substs( - iter::once(Kind::from(self_ty)).chain(self.substs.iter().cloned())), + substs: tcx.mk_substs_trait(self_ty, self.substs), }, ty: self.ty, } @@ -1140,19 +1264,73 @@ impl<'a, 'tcx, 'gcx> PolyExistentialProjection<'tcx> { -> ty::PolyProjectionPredicate<'tcx> { self.map_bound(|p| p.with_self_ty(tcx, self_ty)) } + + pub fn item_def_id(&self) -> DefId { + return self.skip_binder().item_def_id; + } } impl DebruijnIndex { - pub fn new(depth: u32) -> DebruijnIndex { - assert!(depth > 0); - DebruijnIndex { depth: depth } + /// Returns the resulting index when this value is moved into + /// `amount` number of new binders. So e.g. 
if you had + /// + /// for<'a> fn(&'a x) + /// + /// and you wanted to change to + /// + /// for<'a> fn(for<'b> fn(&'a x)) + /// + /// you would need to shift the index for `'a` into 1 new binder. + #[must_use] + pub const fn shifted_in(self, amount: u32) -> DebruijnIndex { + DebruijnIndex(self.0 + amount) } - pub fn shifted(&self, amount: u32) -> DebruijnIndex { - DebruijnIndex { depth: self.depth + amount } + /// Update this index in place by shifting it "in" through + /// `amount` number of binders. + pub fn shift_in(&mut self, amount: u32) { + *self = self.shifted_in(amount); + } + + /// Returns the resulting index when this value is moved out from + /// `amount` number of new binders. + #[must_use] + pub const fn shifted_out(self, amount: u32) -> DebruijnIndex { + DebruijnIndex(self.0 - amount) + } + + /// Update in place by shifting out from `amount` binders. + pub fn shift_out(&mut self, amount: u32) { + *self = self.shifted_out(amount); + } + + /// Adjusts any Debruijn Indices so as to make `to_binder` the + /// innermost binder. That is, if we have something bound at `to_binder`, + /// it will now be bound at INNERMOST. This is an appropriate thing to do + /// when moving a region out from inside binders: + /// + /// ``` + /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _) + /// // Binder: D3 D2 D1 ^^ + /// ``` + /// + /// Here, the region `'a` would have the debruijn index D3, + /// because it is the bound 3 binders out. However, if we wanted + /// to refer to that region `'a` in the second argument (the `_`), + /// those two binders would not be in scope. In that case, we + /// might invoke `shift_out_to_binder(D3)`. This would adjust the + /// debruijn index of `'a` to D1 (the innermost binder). + /// + /// If we invoke `shift_out_to_binder` and the region is in fact + /// bound by one of the binders we are shifting out of, that is an + /// error (and should fail an assertion failure). 
+ pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self { + self.shifted_out((to_binder.0 - INNERMOST.0) as u32) } } +impl_stable_hash_for!(tuple_struct DebruijnIndex { index }); + /// Region utilities impl RegionKind { pub fn is_late_bound(&self) -> bool { @@ -1162,46 +1340,70 @@ impl RegionKind { } } - pub fn needs_infer(&self) -> bool { + pub fn bound_at_or_above_binder(&self, index: DebruijnIndex) -> bool { match *self { - ty::ReVar(..) | ty::ReSkolemized(..) => true, - _ => false - } - } - - pub fn escapes_depth(&self, depth: u32) -> bool { - match *self { - ty::ReLateBound(debruijn, _) => debruijn.depth > depth, + ty::ReLateBound(debruijn, _) => debruijn >= index, _ => false, } } - /// Returns the depth of `self` from the (1-based) binding level `depth` - pub fn from_depth(&self, depth: u32) -> RegionKind { + /// Adjusts any Debruijn Indices so as to make `to_binder` the + /// innermost binder. That is, if we have something bound at `to_binder`, + /// it will now be bound at INNERMOST. This is an appropriate thing to do + /// when moving a region out from inside binders: + /// + /// ``` + /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _) + /// // Binder: D3 D2 D1 ^^ + /// ``` + /// + /// Here, the region `'a` would have the debruijn index D3, + /// because it is the bound 3 binders out. However, if we wanted + /// to refer to that region `'a` in the second argument (the `_`), + /// those two binders would not be in scope. In that case, we + /// might invoke `shift_out_to_binder(D3)`. This would adjust the + /// debruijn index of `'a` to D1 (the innermost binder). + /// + /// If we invoke `shift_out_to_binder` and the region is in fact + /// bound by one of the binders we are shifting out of, that is an + /// error (and should fail an assertion failure). 
+ pub fn shifted_out_to_binder(&self, to_binder: ty::DebruijnIndex) -> RegionKind { match *self { - ty::ReLateBound(debruijn, r) => ty::ReLateBound(DebruijnIndex { - depth: debruijn.depth - (depth - 1) - }, r), + ty::ReLateBound(debruijn, r) => ty::ReLateBound( + debruijn.shifted_out_to_binder(to_binder), + r, + ), r => r } } + pub fn keep_in_local_tcx(&self) -> bool { + if let ty::ReVar(..) = self { + true + } else { + false + } + } + pub fn type_flags(&self) -> TypeFlags { let mut flags = TypeFlags::empty(); + if self.keep_in_local_tcx() { + flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; + } + match *self { ty::ReVar(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_INFER; - flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; } ty::ReSkolemized(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; - flags = flags | TypeFlags::HAS_RE_INFER; flags = flags | TypeFlags::HAS_RE_SKOL; - flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; } - ty::ReLateBound(..) => { } + ty::ReLateBound(..) => { + flags = flags | TypeFlags::HAS_RE_LATE_BOUND; + } ty::ReEarlyBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_EARLY_BOUND; @@ -1214,14 +1416,18 @@ impl RegionKind { } ty::ReErased => { } + ty::ReCanonical(..) => { + flags = flags | TypeFlags::HAS_FREE_REGIONS; + flags = flags | TypeFlags::HAS_CANONICAL_VARS; + } ty::ReClosureBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } } match *self { - ty::ReStatic | ty::ReEmpty | ty::ReErased => (), - _ => flags = flags | TypeFlags::HAS_LOCAL_NAMES, + ty::ReStatic | ty::ReEmpty | ty::ReErased | ty::ReLateBound(..) 
=> (), + _ => flags = flags | TypeFlags::HAS_FREE_LOCAL_NAMES, } debug!("type_flags({:?}) = {:?}", self, flags); @@ -1263,7 +1469,7 @@ impl RegionKind { impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_nil(&self) -> bool { match self.sty { - TyTuple(ref tys, _) => tys.is_empty(), + TyTuple(ref tys) => tys.is_empty(), _ => false, } } @@ -1275,15 +1481,6 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } - /// Test whether this is a `()` which was produced by defaulting a - /// diverging type variable with feature(never_type) disabled. - pub fn is_defaulted_unit(&self) -> bool { - match self.sty { - TyTuple(_, true) => true, - _ => false, - } - } - pub fn is_primitive(&self) -> bool { match self.sty { TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true, @@ -1298,6 +1495,13 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + pub fn is_ty_infer(&self) -> bool { + match self.sty { + TyInfer(_) => true, + _ => false, + } + } + pub fn is_phantom_data(&self) -> bool { if let TyAdt(def, _) = self.sty { def.is_phantom_data() @@ -1324,7 +1528,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_slice(&self) -> bool { match self.sty { - TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty { + TyRawPtr(TypeAndMut { ty, .. }) | TyRef(_, ty, _) => match ty.sty { TySlice(_) | TyStr => true, _ => false, }, @@ -1373,11 +1577,8 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_mutable_pointer(&self) -> bool { match self.sty { - TyRawPtr(tnm) | TyRef(_, tnm) => if let hir::Mutability::MutMutable = tnm.mutbl { - true - } else { - false - }, + TyRawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) | + TyRef(_, _, hir::Mutability::MutMutable) => true, _ => false } } @@ -1522,27 +1723,21 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { /// /// The parameter `explicit` indicates if this is an *explicit* dereference. /// Some types---notably unsafe ptrs---can only be dereferenced explicitly. 
- pub fn builtin_deref(&self, explicit: bool, pref: ty::LvaluePreference) - -> Option> - { + pub fn builtin_deref(&self, explicit: bool) -> Option> { match self.sty { TyAdt(def, _) if def.is_box() => { Some(TypeAndMut { ty: self.boxed_ty(), - mutbl: if pref == ty::PreferMutLvalue { - hir::MutMutable - } else { - hir::MutImmutable - }, + mutbl: hir::MutImmutable, }) }, - TyRef(_, mt) => Some(mt), + TyRef(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }), TyRawPtr(mt) if explicit => Some(mt), _ => None, } } - /// Returns the type of ty[i] + /// Returns the type of `ty[i]`. pub fn builtin_index(&self) -> Option> { match self.sty { TyArray(ty, _) | TySlice(ty) => Some(ty), @@ -1567,14 +1762,10 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } - pub fn ty_to_def_id(&self) -> Option { + pub fn is_impl_trait(&self) -> bool { match self.sty { - TyDynamic(ref tt, ..) => tt.principal().map(|p| p.def_id()), - TyAdt(def, _) => Some(def.did), - TyForeign(did) => Some(did), - TyClosure(id, _) => Some(id), - TyFnDef(id, _) => Some(id), - _ => None, + TyAnon(..) => true, + _ => false, } } @@ -1590,7 +1781,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { /// ignores late-bound regions binders. pub fn regions(&self) -> Vec> { match self.sty { - TyRef(region, _) => { + TyRef(region, _, _) => { vec![region] } TyDynamic(ref obj, region) => { @@ -1603,14 +1794,16 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { TyAdt(_, substs) | TyAnon(_, substs) => { substs.regions().collect() } - TyClosure(_, ref substs) | TyGenerator(_, ref substs, _) => { - substs.substs.regions().collect() + TyClosure(_, ClosureSubsts { ref substs }) | + TyGenerator(_, GeneratorSubsts { ref substs }, _) => { + substs.regions().collect() } TyProjection(ref data) => { data.substs.regions().collect() } TyFnDef(..) | TyFnPtr(_) | + TyGeneratorWitness(..) | TyBool | TyChar | TyInt(_) | @@ -1662,12 +1855,147 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } /// Typed constant value. 
-#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq, Ord, PartialOrd)] pub struct Const<'tcx> { pub ty: Ty<'tcx>, - // FIXME(eddyb) Replace this with a miri value. - pub val: ConstVal<'tcx>, + pub val: ConstValue<'tcx>, +} + +impl<'tcx> Const<'tcx> { + pub fn unevaluated( + tcx: TyCtxt<'_, '_, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, + ty: Ty<'tcx>, + ) -> &'tcx Self { + tcx.mk_const(Const { + val: ConstValue::Unevaluated(def_id, substs), + ty, + }) + } + + #[inline] + pub fn from_const_value( + tcx: TyCtxt<'_, '_, 'tcx>, + val: ConstValue<'tcx>, + ty: Ty<'tcx>, + ) -> &'tcx Self { + tcx.mk_const(Const { + val, + ty, + }) + } + + #[inline] + pub fn from_scalar( + tcx: TyCtxt<'_, '_, 'tcx>, + val: Scalar, + ty: Ty<'tcx>, + ) -> &'tcx Self { + Self::from_const_value(tcx, ConstValue::Scalar(val), ty) + } + + #[inline] + pub fn from_bits( + tcx: TyCtxt<'_, '_, 'tcx>, + bits: u128, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> &'tcx Self { + let ty = tcx.lift_to_global(&ty).unwrap(); + let size = tcx.layout_of(ty).unwrap_or_else(|e| { + panic!("could not compute layout for {:?}: {:?}", ty, e) + }).size; + let shift = 128 - size.bits(); + let truncated = (bits << shift) >> shift; + assert_eq!(truncated, bits, "from_bits called with untruncated value"); + Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value) + } + + #[inline] + pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self { + Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty) + } + + #[inline] + pub fn from_bool(tcx: TyCtxt<'_, '_, 'tcx>, v: bool) -> &'tcx Self { + Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool)) + } + + #[inline] + pub fn from_usize(tcx: TyCtxt<'_, '_, 'tcx>, n: u64) -> &'tcx Self { + Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize)) + } + + #[inline] + pub fn to_bits( + 
&self, + tcx: TyCtxt<'_, '_, 'tcx>, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> Option { + if self.ty != ty.value { + return None; + } + let ty = tcx.lift_to_global(&ty).unwrap(); + let size = tcx.layout_of(ty).ok()?.size; + self.val.to_bits(size) + } + + #[inline] + pub fn to_ptr(&self) -> Option { + self.val.to_ptr() + } + + #[inline] + pub fn to_byval_value(&self) -> Option { + self.val.to_byval_value() + } + + #[inline] + pub fn assert_bits( + &self, + tcx: TyCtxt<'_, '_, '_>, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> Option { + assert_eq!(self.ty, ty.value); + let ty = tcx.lift_to_global(&ty).unwrap(); + let size = tcx.layout_of(ty).ok()?.size; + self.val.to_bits(size) + } + + #[inline] + pub fn assert_bool(&self, tcx: TyCtxt<'_, '_, '_>) -> Option { + self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.bool)).and_then(|v| match v { + 0 => Some(false), + 1 => Some(true), + _ => None, + }) + } + + #[inline] + pub fn assert_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> Option { + self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.usize)).map(|v| v as u64) + } + + #[inline] + pub fn unwrap_bits( + &self, + tcx: TyCtxt<'_, '_, '_>, + ty: ParamEnvAnd<'tcx, Ty<'tcx>>, + ) -> u128 { + match self.assert_bits(tcx, ty) { + Some(val) => val, + None => bug!("expected bits of {}, got {:#?}", ty.value, self), + } + } + + #[inline] + pub fn unwrap_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> u64 { + match self.assert_usize(tcx) { + Some(val) => val, + None => bug!("expected constant usize, got {:#?}", self), + } + } } impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Const<'tcx> {} diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs index 80b113dfdf5a..2e3c6df9754d 100644 --- a/src/librustc/ty/subst.rs +++ b/src/librustc/ty/subst.rs @@ -11,18 +11,20 @@ // Type substitutions. 
use hir::def_id::DefId; -use ty::{self, Slice, Region, Ty, TyCtxt}; +use ty::{self, Lift, Slice, Ty, TyCtxt}; use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; use serialize::{self, Encodable, Encoder, Decodable, Decoder}; use syntax_pos::{Span, DUMMY_SP}; use rustc_data_structures::accumulate_vec::AccumulateVec; +use rustc_data_structures::array_vec::ArrayVec; -use core::nonzero::NonZero; +use core::intrinsics; +use std::cmp::Ordering; use std::fmt; -use std::iter; use std::marker::PhantomData; use std::mem; +use std::num::NonZeroUsize; /// An entity in the Rust typesystem, which can be one of /// several kinds (only types and lifetimes for now). @@ -31,7 +33,7 @@ use std::mem; /// indicate the type (`Ty` or `Region`) it points to. #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct Kind<'tcx> { - ptr: NonZero, + ptr: NonZeroUsize, marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>)> } @@ -39,227 +41,216 @@ const TAG_MASK: usize = 0b11; const TYPE_TAG: usize = 0b00; const REGION_TAG: usize = 0b01; -impl<'tcx> From> for Kind<'tcx> { - fn from(ty: Ty<'tcx>) -> Kind<'tcx> { - // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0); +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub enum UnpackedKind<'tcx> { + Lifetime(ty::Region<'tcx>), + Type(Ty<'tcx>), +} + +impl<'tcx> UnpackedKind<'tcx> { + fn pack(self) -> Kind<'tcx> { + let (tag, ptr) = match self { + UnpackedKind::Lifetime(lt) => { + // Ensure we can use the tag bits. + assert_eq!(mem::align_of_val(lt) & TAG_MASK, 0); + (REGION_TAG, lt as *const _ as usize) + } + UnpackedKind::Type(ty) => { + // Ensure we can use the tag bits. 
+ assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0); + (TYPE_TAG, ty as *const _ as usize) + } + }; - let ptr = ty as *const _ as usize; Kind { ptr: unsafe { - NonZero::new_unchecked(ptr | TYPE_TAG) + NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData } } } +impl<'tcx> Ord for Kind<'tcx> { + fn cmp(&self, other: &Kind) -> Ordering { + match (self.unpack(), other.unpack()) { + (UnpackedKind::Type(_), UnpackedKind::Lifetime(_)) => Ordering::Greater, + + (UnpackedKind::Type(ty1), UnpackedKind::Type(ty2)) => { + ty1.sty.cmp(&ty2.sty) + } + + (UnpackedKind::Lifetime(reg1), UnpackedKind::Lifetime(reg2)) => reg1.cmp(reg2), + + (UnpackedKind::Lifetime(_), UnpackedKind::Type(_)) => Ordering::Less, + } + } +} + +impl<'tcx> PartialOrd for Kind<'tcx> { + fn partial_cmp(&self, other: &Kind) -> Option { + Some(self.cmp(&other)) + } +} + impl<'tcx> From> for Kind<'tcx> { fn from(r: ty::Region<'tcx>) -> Kind<'tcx> { - // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(r) & TAG_MASK, 0); + UnpackedKind::Lifetime(r).pack() + } +} - let ptr = r as *const _ as usize; - Kind { - ptr: unsafe { - NonZero::new_unchecked(ptr | REGION_TAG) - }, - marker: PhantomData - } +impl<'tcx> From> for Kind<'tcx> { + fn from(ty: Ty<'tcx>) -> Kind<'tcx> { + UnpackedKind::Type(ty).pack() } } impl<'tcx> Kind<'tcx> { #[inline] - unsafe fn downcast(self, tag: usize) -> Option<&'tcx T> { + pub fn unpack(self) -> UnpackedKind<'tcx> { let ptr = self.ptr.get(); - if ptr & TAG_MASK == tag { - Some(&*((ptr & !TAG_MASK) as *const _)) - } else { - None - } - } - - #[inline] - pub fn as_type(self) -> Option> { unsafe { - self.downcast(TYPE_TAG) - } - } - - #[inline] - pub fn as_region(self) -> Option> { - unsafe { - self.downcast(REGION_TAG) + match ptr & TAG_MASK { + REGION_TAG => UnpackedKind::Lifetime(&*((ptr & !TAG_MASK) as *const _)), + TYPE_TAG => UnpackedKind::Type(&*((ptr & !TAG_MASK) as *const _)), + _ => intrinsics::unreachable() + } } } } impl<'tcx> fmt::Debug for Kind<'tcx> { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(ty) = self.as_type() { - write!(f, "{:?}", ty) - } else if let Some(r) = self.as_region() { - write!(f, "{:?}", r) - } else { - write!(f, "", self.ptr.get() as *const ()) + match self.unpack() { + UnpackedKind::Lifetime(lt) => write!(f, "{:?}", lt), + UnpackedKind::Type(ty) => write!(f, "{:?}", ty), } } } impl<'tcx> fmt::Display for Kind<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if let Some(ty) = self.as_type() { - write!(f, "{}", ty) - } else if let Some(r) = self.as_region() { - write!(f, "{}", r) - } else { - // FIXME(RFC 2000): extend this if/else chain when we support const generic. - unimplemented!(); + match self.unpack() { + UnpackedKind::Lifetime(lt) => write!(f, "{}", lt), + UnpackedKind::Type(ty) => write!(f, "{}", ty), + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for Kind<'a> { + type Lifted = Kind<'tcx>; + + fn lift_to_tcx<'cx, 'gcx>(&self, tcx: TyCtxt<'cx, 'gcx, 'tcx>) -> Option { + match self.unpack() { + UnpackedKind::Lifetime(a) => a.lift_to_tcx(tcx).map(|a| a.into()), + UnpackedKind::Type(a) => a.lift_to_tcx(tcx).map(|a| a.into()), } } } impl<'tcx> TypeFoldable<'tcx> for Kind<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - if let Some(ty) = self.as_type() { - Kind::from(ty.fold_with(folder)) - } else if let Some(r) = self.as_region() { - Kind::from(r.fold_with(folder)) - } else { - bug!() + match self.unpack() { + UnpackedKind::Lifetime(lt) => lt.fold_with(folder).into(), + UnpackedKind::Type(ty) => ty.fold_with(folder).into(), } } fn super_visit_with>(&self, visitor: &mut V) -> bool { - if let Some(ty) = self.as_type() { - ty.visit_with(visitor) - } else if let Some(r) = self.as_region() { - r.visit_with(visitor) - } else { - bug!() + match self.unpack() { + UnpackedKind::Lifetime(lt) => lt.visit_with(visitor), + UnpackedKind::Type(ty) => ty.visit_with(visitor), } } } impl<'tcx> Encodable for Kind<'tcx> { fn 
encode(&self, e: &mut E) -> Result<(), E::Error> { - e.emit_enum("Kind", |e| { - if let Some(ty) = self.as_type() { - e.emit_enum_variant("Ty", TYPE_TAG, 1, |e| { - e.emit_enum_variant_arg(0, |e| ty.encode(e)) - }) - } else if let Some(r) = self.as_region() { - e.emit_enum_variant("Region", REGION_TAG, 1, |e| { - e.emit_enum_variant_arg(0, |e| r.encode(e)) - }) - } else { - bug!() - } - }) + self.unpack().encode(e) } } impl<'tcx> Decodable for Kind<'tcx> { fn decode(d: &mut D) -> Result, D::Error> { - d.read_enum("Kind", |d| { - d.read_enum_variant(&["Ty", "Region"], |d, tag| { - match tag { - TYPE_TAG => Ty::decode(d).map(Kind::from), - REGION_TAG => Region::decode(d).map(Kind::from), - _ => Err(d.error("invalid Kind tag")) - } - }) - }) + Ok(UnpackedKind::decode(d)?.pack()) } } -/// A substitution mapping type/region parameters to new values. +/// A substitution mapping generic parameters to new values. pub type Substs<'tcx> = Slice>; impl<'a, 'gcx, 'tcx> Substs<'tcx> { /// Creates a Substs that maps each generic parameter to itself. pub fn identity_for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> &'tcx Substs<'tcx> { - Substs::for_item(tcx, def_id, |def, _| { - tcx.mk_region(ty::ReEarlyBound(def.to_early_bound_region_data())) - }, |def, _| tcx.mk_param_from_def(def)) + Substs::for_item(tcx, def_id, |param, _| { + tcx.mk_param_from_def(param) + }) } /// Creates a Substs for generic parameter definitions, - /// by calling closures to obtain each region and type. + /// by calling closures to obtain each kind. /// The closures get to observe the Substs as they're /// being built, which can be used to correctly - /// substitute defaults of type parameters. 
- pub fn for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, - def_id: DefId, - mut mk_region: FR, - mut mk_type: FT) - -> &'tcx Substs<'tcx> - where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> ty::Region<'tcx>, - FT: FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx> { + /// substitute defaults of generic parameters. + pub fn for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_id: DefId, + mut mk_kind: F) + -> &'tcx Substs<'tcx> + where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx> + { let defs = tcx.generics_of(def_id); - let mut substs = Vec::with_capacity(defs.count()); - Substs::fill_item(&mut substs, tcx, defs, &mut mk_region, &mut mk_type); + let count = defs.count(); + let mut substs = if count <= 8 { + AccumulateVec::Array(ArrayVec::new()) + } else { + AccumulateVec::Heap(Vec::with_capacity(count)) + }; + Substs::fill_item(&mut substs, tcx, defs, &mut mk_kind); tcx.intern_substs(&substs) } - pub fn extend_to(&self, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - def_id: DefId, - mut mk_region: FR, - mut mk_type: FT) - -> &'tcx Substs<'tcx> - where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> ty::Region<'tcx>, - FT: FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx> + pub fn extend_to(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_id: DefId, + mut mk_kind: F) + -> &'tcx Substs<'tcx> + where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx> { - let defs = tcx.generics_of(def_id); - let mut result = Vec::with_capacity(defs.count()); - result.extend(self[..].iter().cloned()); - Substs::fill_single(&mut result, defs, &mut mk_region, &mut mk_type); - tcx.intern_substs(&result) + Substs::for_item(tcx, def_id, |param, substs| { + match self.get(param.index as usize) { + Some(&kind) => kind, + None => mk_kind(param, substs), + } + }) } - pub fn fill_item(substs: &mut Vec>, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - defs: &ty::Generics, - mk_region: &mut FR, - mk_type: &mut FT) - where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> ty::Region<'tcx>, - FT: 
FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx> { + fn fill_item(substs: &mut AccumulateVec<[Kind<'tcx>; 8]>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + defs: &ty::Generics, + mk_kind: &mut F) + where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx> + { if let Some(def_id) = defs.parent { let parent_defs = tcx.generics_of(def_id); - Substs::fill_item(substs, tcx, parent_defs, mk_region, mk_type); + Substs::fill_item(substs, tcx, parent_defs, mk_kind); } - Substs::fill_single(substs, defs, mk_region, mk_type) + Substs::fill_single(substs, defs, mk_kind) } - fn fill_single(substs: &mut Vec>, - defs: &ty::Generics, - mk_region: &mut FR, - mk_type: &mut FT) - where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> ty::Region<'tcx>, - FT: FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx> { - // Handle Self first, before all regions. - let mut types = defs.types.iter(); - if defs.parent.is_none() && defs.has_self { - let def = types.next().unwrap(); - let ty = mk_type(def, substs); - assert_eq!(def.index as usize, substs.len()); - substs.push(Kind::from(ty)); - } - - for def in &defs.regions { - let region = mk_region(def, substs); - assert_eq!(def.index as usize, substs.len()); - substs.push(Kind::from(region)); - } - - for def in types { - let ty = mk_type(def, substs); - assert_eq!(def.index as usize, substs.len()); - substs.push(Kind::from(ty)); + fn fill_single(substs: &mut AccumulateVec<[Kind<'tcx>; 8]>, + defs: &ty::Generics, + mk_kind: &mut F) + where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx> + { + for param in &defs.params { + let kind = mk_kind(param, substs); + assert_eq!(param.index as usize, substs.len()); + match *substs { + AccumulateVec::Array(ref mut arr) => arr.push(kind), + AccumulateVec::Heap(ref mut vec) => vec.push(kind), + } } } @@ -269,36 +260,47 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { #[inline] pub fn types(&'a self) -> impl DoubleEndedIterator> + 'a { - self.iter().filter_map(|k| k.as_type()) + 
self.iter().filter_map(|k| { + if let UnpackedKind::Type(ty) = k.unpack() { + Some(ty) + } else { + None + } + }) } #[inline] pub fn regions(&'a self) -> impl DoubleEndedIterator> + 'a { - self.iter().filter_map(|k| k.as_region()) + self.iter().filter_map(|k| { + if let UnpackedKind::Lifetime(lt) = k.unpack() { + Some(lt) + } else { + None + } + }) } #[inline] pub fn type_at(&self, i: usize) -> Ty<'tcx> { - self[i].as_type().unwrap_or_else(|| { + if let UnpackedKind::Type(ty) = self[i].unpack() { + ty + } else { bug!("expected type for param #{} in {:?}", i, self); - }) + } } #[inline] pub fn region_at(&self, i: usize) -> ty::Region<'tcx> { - self[i].as_region().unwrap_or_else(|| { + if let UnpackedKind::Lifetime(lt) = self[i].unpack() { + lt + } else { bug!("expected region for param #{} in {:?}", i, self); - }) + } } #[inline] - pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> { - self.type_at(ty_param_def.index as usize) - } - - #[inline] - pub fn region_for_def(&self, def: &ty::RegionParameterDef) -> ty::Region<'tcx> { - self.region_at(def.index as usize) + pub fn type_for_def(&self, def: &ty::GenericParamDef) -> Kind<'tcx> { + self.type_at(def.index as usize).into() } /// Transform from substitutions for a child of `source_ancestor` @@ -311,7 +313,7 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { target_substs: &Substs<'tcx>) -> &'tcx Substs<'tcx> { let defs = tcx.generics_of(source_ancestor); - tcx.mk_substs(target_substs.iter().chain(&self[defs.own_count()..]).cloned()) + tcx.mk_substs(target_substs.iter().chain(&self[defs.params.len()..]).cloned()) } pub fn truncate_to(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, generics: &ty::Generics) @@ -413,13 +415,12 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { // the specialized routine `ty::replace_late_regions()`. 
match *r { ty::ReEarlyBound(data) => { - let r = self.substs.get(data.index as usize) - .and_then(|k| k.as_region()); + let r = self.substs.get(data.index as usize).map(|k| k.unpack()); match r { - Some(r) => { - self.shift_region_through_binders(r) + Some(UnpackedKind::Lifetime(lt)) => { + self.shift_region_through_binders(lt) } - None => { + _ => { let span = self.span.unwrap_or(DUMMY_SP); span_bug!( span, @@ -470,11 +471,10 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> { // Look up the type in the substitutions. It really should be in there. - let opt_ty = self.substs.get(p.idx as usize) - .and_then(|k| k.as_type()); + let opt_ty = self.substs.get(p.idx as usize).map(|k| k.unpack()); let ty = match opt_ty { - Some(t) => t, - None => { + Some(UnpackedKind::Type(ty)) => ty, + _ => { let span = self.span.unwrap_or(DUMMY_SP); span_bug!( span, @@ -554,54 +554,3 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed)) } } - -// Helper methods that modify substitutions. - -impl<'a, 'gcx, 'tcx> ty::TraitRef<'tcx> { - pub fn from_method(tcx: TyCtxt<'a, 'gcx, 'tcx>, - trait_id: DefId, - substs: &Substs<'tcx>) - -> ty::TraitRef<'tcx> { - let defs = tcx.generics_of(trait_id); - - ty::TraitRef { - def_id: trait_id, - substs: tcx.intern_substs(&substs[..defs.own_count()]) - } - } -} - -impl<'a, 'gcx, 'tcx> ty::ExistentialTraitRef<'tcx> { - pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>, - trait_ref: ty::TraitRef<'tcx>) - -> ty::ExistentialTraitRef<'tcx> { - // Assert there is a Self. 
- trait_ref.substs.type_at(0); - - ty::ExistentialTraitRef { - def_id: trait_ref.def_id, - substs: tcx.intern_substs(&trait_ref.substs[1..]) - } - } -} - -impl<'a, 'gcx, 'tcx> ty::PolyExistentialTraitRef<'tcx> { - /// Object types don't have a self-type specified. Therefore, when - /// we convert the principal trait-ref into a normal trait-ref, - /// you must give *some* self-type. A common choice is `mk_err()` - /// or some skolemized type. - pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - self_ty: Ty<'tcx>) - -> ty::PolyTraitRef<'tcx> { - // otherwise the escaping regions would be captured by the binder - assert!(!self_ty.has_escaping_regions()); - - self.map_bound(|trait_ref| { - ty::TraitRef { - def_id: trait_ref.def_id, - substs: tcx.mk_substs( - iter::once(Kind::from(self_ty)).chain(trait_ref.substs.iter().cloned())) - } - }) - } -} diff --git a/src/librustc/ty/trait_def.rs b/src/librustc/ty/trait_def.rs index 0fbf9f1bd587..6332080a1836 100644 --- a/src/librustc/ty/trait_def.rs +++ b/src/librustc/ty/trait_def.rs @@ -20,7 +20,7 @@ use ty::{Ty, TyCtxt}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; -use std::rc::Rc; +use rustc_data_structures::sync::Lrc; /// A trait's definition with type information. pub struct TraitDef { @@ -41,6 +41,7 @@ pub struct TraitDef { pub def_path_hash: DefPathHash, } +#[derive(Default)] pub struct TraitImpls { blanket_impls: Vec, /// Impls indexed by their simplified self-type, for fast lookup. @@ -142,53 +143,49 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // Query provider for `trait_impls_of`. pub(super) fn trait_impls_of_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_id: DefId) - -> Rc { - let mut remote_impls = Vec::new(); + -> Lrc { + let mut impls = TraitImpls::default(); - // Traits defined in the current crate can't have impls in upstream - // crates, so we don't bother querying the cstore. 
- if !trait_id.is_local() { - for &cnum in tcx.crates().iter() { - let impls = tcx.implementations_of_trait((cnum, trait_id)); - remote_impls.extend(impls.iter().cloned()); + { + let mut add_impl = |impl_def_id| { + let impl_self_ty = tcx.type_of(impl_def_id); + if impl_def_id.is_local() && impl_self_ty.references_error() { + return; + } + + if let Some(simplified_self_ty) = + fast_reject::simplify_type(tcx, impl_self_ty, false) + { + impls.non_blanket_impls + .entry(simplified_self_ty) + .or_default() + .push(impl_def_id); + } else { + impls.blanket_impls.push(impl_def_id); + } + }; + + // Traits defined in the current crate can't have impls in upstream + // crates, so we don't bother querying the cstore. + if !trait_id.is_local() { + for &cnum in tcx.crates().iter() { + for &def_id in tcx.implementations_of_trait((cnum, trait_id)).iter() { + add_impl(def_id); + } + } + } + + for &node_id in tcx.hir.trait_impls(trait_id) { + add_impl(tcx.hir.local_def_id(node_id)); } } - let mut blanket_impls = Vec::new(); - let mut non_blanket_impls = FxHashMap(); - - let local_impls = tcx.hir - .trait_impls(trait_id) - .into_iter() - .map(|&node_id| tcx.hir.local_def_id(node_id)); - - for impl_def_id in local_impls.chain(remote_impls.into_iter()) { - let impl_self_ty = tcx.type_of(impl_def_id); - if impl_def_id.is_local() && impl_self_ty.references_error() { - continue - } - - if let Some(simplified_self_ty) = - fast_reject::simplify_type(tcx, impl_self_ty, false) - { - non_blanket_impls - .entry(simplified_self_ty) - .or_insert(vec![]) - .push(impl_def_id); - } else { - blanket_impls.push(impl_def_id); - } - } - - Rc::new(TraitImpls { - blanket_impls: blanket_impls, - non_blanket_impls: non_blanket_impls, - }) + Lrc::new(impls) } -impl<'gcx> HashStable> for TraitImpls { +impl<'a> HashStable> for TraitImpls { fn hash_stable(&self, - hcx: &mut StableHashingContext<'gcx>, + hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { let TraitImpls { ref blanket_impls, diff 
--git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index de96e9dc8ff2..95caa0c185be 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -15,65 +15,105 @@ use hir::def_id::DefId; use hir::map::{DefPathData, Node}; use hir; use ich::NodeIdHashingMode; -use middle::const_val::ConstVal; -use traits::{self, Reveal}; -use ty::{self, Ty, TyCtxt, TypeFoldable}; -use ty::fold::TypeVisitor; -use ty::subst::{Subst, Kind}; +use traits::{self, ObligationCause}; +use ty::{self, Ty, TyCtxt, GenericParamDefKind, TypeFoldable}; +use ty::subst::{Substs, UnpackedKind}; +use ty::query::TyCtxtAt; use ty::TypeVariants::*; +use ty::layout::{Integer, IntegerExt}; use util::common::ErrorReported; use middle::lang_items; -use rustc_const_math::{ConstInt, ConstIsize, ConstUsize}; -use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, - HashStable}; +use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; use rustc_data_structures::fx::FxHashMap; -use std::cmp; -use std::iter; -use std::hash::Hash; -use std::intrinsics; -use syntax::ast::{self, Name}; +use std::{cmp, fmt}; +use syntax::ast; use syntax::attr::{self, SignedInt, UnsignedInt}; use syntax_pos::{Span, DUMMY_SP}; -type Disr = ConstInt; +#[derive(Copy, Clone, Debug)] +pub struct Discr<'tcx> { + /// bit representation of the discriminant, so `-128i8` is `0xFF_u128` + pub val: u128, + pub ty: Ty<'tcx> +} + +impl<'tcx> fmt::Display for Discr<'tcx> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self.ty.sty { + ty::TyInt(ity) => { + let bits = ty::tls::with(|tcx| { + Integer::from_attr(tcx, SignedInt(ity)).size().bits() + }); + let x = self.val as i128; + // sign extend the raw representation to be an i128 + let x = (x << (128 - bits)) >> (128 - bits); + write!(fmt, "{}", x) + }, + _ => write!(fmt, "{}", self.val), + } + } +} + +impl<'tcx> Discr<'tcx> { + /// Adds 1 to the value and wraps around if the maximum for the type is reached + pub fn wrap_incr<'a, 
'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self { + self.checked_add(tcx, 1).0 + } + pub fn checked_add<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, n: u128) -> (Self, bool) { + let (int, signed) = match self.ty.sty { + TyInt(ity) => (Integer::from_attr(tcx, SignedInt(ity)), true), + TyUint(uty) => (Integer::from_attr(tcx, UnsignedInt(uty)), false), + _ => bug!("non integer discriminant"), + }; + + let bit_size = int.size().bits(); + let shift = 128 - bit_size; + if signed { + let sext = |u| { + let i = u as i128; + (i << shift) >> shift + }; + let min = sext(1_u128 << (bit_size - 1)); + let max = i128::max_value() >> shift; + let val = sext(self.val); + assert!(n < (i128::max_value() as u128)); + let n = n as i128; + let oflo = val > max - n; + let val = if oflo { + min + (n - (max - val) - 1) + } else { + val + n + }; + // zero the upper bits + let val = val as u128; + let val = (val << shift) >> shift; + (Self { + val: val as u128, + ty: self.ty, + }, oflo) + } else { + let max = u128::max_value() >> shift; + let val = self.val; + let oflo = val > max - n; + let val = if oflo { + n - (max - val) - 1 + } else { + val + n + }; + (Self { + val: val, + ty: self.ty, + }, oflo) + } + } +} pub trait IntTypeExt { fn to_ty<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>; - fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option) - -> Option; - fn assert_ty_matches(&self, val: Disr); - fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr; -} - - -macro_rules! 
typed_literal { - ($tcx:expr, $ty:expr, $lit:expr) => { - match $ty { - SignedInt(ast::IntTy::I8) => ConstInt::I8($lit), - SignedInt(ast::IntTy::I16) => ConstInt::I16($lit), - SignedInt(ast::IntTy::I32) => ConstInt::I32($lit), - SignedInt(ast::IntTy::I64) => ConstInt::I64($lit), - SignedInt(ast::IntTy::I128) => ConstInt::I128($lit), - SignedInt(ast::IntTy::Isize) => match $tcx.sess.target.isize_ty { - ast::IntTy::I16 => ConstInt::Isize(ConstIsize::Is16($lit)), - ast::IntTy::I32 => ConstInt::Isize(ConstIsize::Is32($lit)), - ast::IntTy::I64 => ConstInt::Isize(ConstIsize::Is64($lit)), - _ => bug!(), - }, - UnsignedInt(ast::UintTy::U8) => ConstInt::U8($lit), - UnsignedInt(ast::UintTy::U16) => ConstInt::U16($lit), - UnsignedInt(ast::UintTy::U32) => ConstInt::U32($lit), - UnsignedInt(ast::UintTy::U64) => ConstInt::U64($lit), - UnsignedInt(ast::UintTy::U128) => ConstInt::U128($lit), - UnsignedInt(ast::UintTy::Usize) => match $tcx.sess.target.usize_ty { - ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16($lit)), - ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32($lit)), - ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64($lit)), - _ => bug!(), - }, - } - } + fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option>) + -> Option>; + fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Discr<'tcx>; } impl IntTypeExt for attr::IntType { @@ -94,33 +134,26 @@ impl IntTypeExt for attr::IntType { } } - fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr { - typed_literal!(tcx, *self, 0) - } - - fn assert_ty_matches(&self, val: Disr) { - match (*self, val) { - (SignedInt(ast::IntTy::I8), ConstInt::I8(_)) => {}, - (SignedInt(ast::IntTy::I16), ConstInt::I16(_)) => {}, - (SignedInt(ast::IntTy::I32), ConstInt::I32(_)) => {}, - (SignedInt(ast::IntTy::I64), ConstInt::I64(_)) => {}, - (SignedInt(ast::IntTy::I128), ConstInt::I128(_)) => {}, - (SignedInt(ast::IntTy::Isize), ConstInt::Isize(_)) => {}, - 
(UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) => {}, - (UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) => {}, - (UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) => {}, - (UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) => {}, - (UnsignedInt(ast::UintTy::U128), ConstInt::U128(_)) => {}, - (UnsignedInt(ast::UintTy::Usize), ConstInt::Usize(_)) => {}, - _ => bug!("disr type mismatch: {:?} vs {:?}", self, val), + fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Discr<'tcx> { + Discr { + val: 0, + ty: self.to_ty(tcx) } } - fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option) - -> Option { + fn disr_incr<'a, 'tcx>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + val: Option>, + ) -> Option> { if let Some(val) = val { - self.assert_ty_matches(val); - (val + typed_literal!(tcx, *self, 1)).ok() + assert_eq!(self.to_ty(tcx), val.ty); + let (new, oflo) = val.checked_add(tcx, 1); + if oflo { + None + } else { + Some(new) + } } else { Some(self.initial_discriminant(tcx)) } @@ -128,9 +161,9 @@ impl IntTypeExt for attr::IntType { } -#[derive(Copy, Clone)] +#[derive(Clone)] pub enum CopyImplementationError<'tcx> { - InfrigingField(&'tcx ty::FieldDef), + InfrigingFields(Vec<&'tcx ty::FieldDef>), NotAnAdt, HasDestructor, } @@ -151,56 +184,47 @@ pub enum Representability { } impl<'tcx> ty::ParamEnv<'tcx> { - /// Construct a trait environment suitable for contexts where - /// there are no where clauses in scope. - pub fn empty(reveal: Reveal) -> Self { - Self::new(ty::Slice::empty(), reveal) - } - - /// Construct a trait environment with the given set of predicates. - pub fn new(caller_bounds: &'tcx ty::Slice>, - reveal: Reveal) - -> Self { - ty::ParamEnv { caller_bounds, reveal } - } - - /// Returns a new parameter environment with the same clauses, but - /// which "reveals" the true results of projections in all cases - /// (even for associated types that are specializable). 
This is - /// the desired behavior during trans and certain other special - /// contexts; normally though we want to use `Reveal::UserFacing`, - /// which is the default. - pub fn reveal_all(self) -> Self { - ty::ParamEnv { reveal: Reveal::All, ..self } - } - pub fn can_type_implement_copy<'a>(self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - self_type: Ty<'tcx>, span: Span) + self_type: Ty<'tcx>) -> Result<(), CopyImplementationError<'tcx>> { // FIXME: (@jroesch) float this code up tcx.infer_ctxt().enter(|infcx| { let (adt, substs) = match self_type.sty { + // These types used to have a builtin impl. + // Now libcore provides that impl. + ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | + ty::TyChar | ty::TyRawPtr(..) | ty::TyNever | + ty::TyRef(_, _, hir::MutImmutable) => return Ok(()), + ty::TyAdt(adt, substs) => (adt, substs), + _ => return Err(CopyImplementationError::NotAnAdt), }; - let field_implements_copy = |field: &ty::FieldDef| { - let cause = traits::ObligationCause::dummy(); - match traits::fully_normalize(&infcx, cause, self, &field.ty(tcx, substs)) { - Ok(ty) => !infcx.type_moves_by_default(self, ty, span), - Err(..) 
=> false, - } - }; - + let mut infringing = Vec::new(); for variant in &adt.variants { for field in &variant.fields { - if !field_implements_copy(field) { - return Err(CopyImplementationError::InfrigingField(field)); + let span = tcx.def_span(field.did); + let ty = field.ty(tcx, substs); + if ty.references_error() { + continue; } + let cause = ObligationCause { span, ..ObligationCause::dummy() }; + let ctx = traits::FulfillmentContext::new(); + match traits::fully_normalize(&infcx, ctx, cause, self, &ty) { + Ok(ty) => if infcx.type_moves_by_default(self, ty, span) { + infringing.push(field); + } + Err(errors) => { + infcx.report_fulfillment_errors(&errors, None, false); + } + }; } } - + if !infringing.is_empty() { + return Err(CopyImplementationError::InfrigingFields(infringing)); + } if adt.has_dtor(tcx) { return Err(CopyImplementationError::HasDestructor); } @@ -247,42 +271,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { false } - /// Returns the type of element at index `i` in tuple or tuple-like type `t`. - /// For an enum `t`, `variant` is None only if `t` is a univariant enum. - pub fn positional_element_ty(self, - ty: Ty<'tcx>, - i: usize, - variant: Option) -> Option> { - match (&ty.sty, variant) { - (&TyAdt(adt, substs), Some(vid)) => { - adt.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs)) - } - (&TyAdt(adt, substs), None) => { - // Don't use `non_enum_variant`, this may be a univariant enum. - adt.variants[0].fields.get(i).map(|f| f.ty(self, substs)) - } - (&TyTuple(ref v, _), None) => v.get(i).cloned(), - _ => None, - } - } - - /// Returns the type of element at field `n` in struct or struct-like type `t`. - /// For an enum `t`, `variant` must be some def id. 
- pub fn named_element_ty(self, - ty: Ty<'tcx>, - n: Name, - variant: Option) -> Option> { - match (&ty.sty, variant) { - (&TyAdt(adt, substs), Some(vid)) => { - adt.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs)) - } - (&TyAdt(adt, substs), None) => { - adt.non_enum_variant().find_field_named(n).map(|f| f.ty(self, substs)) - } - _ => return None - } - } - /// Returns the deeply last field of nested structures, or the same type, /// if not a structure at all. Corresponds to the only possible unsized /// field, and its type can be used to determine unsizing strategy. @@ -299,7 +287,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - ty::TyTuple(tys, _) => { + ty::TyTuple(tys) => { if let Some((&last_ty, _)) = tys.split_last() { ty = last_ty; } else { @@ -336,7 +324,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { break; } }, - (&TyTuple(a_tys, _), &TyTuple(b_tys, _)) + (&TyTuple(a_tys), &TyTuple(b_tys)) if a_tys.len() == b_tys.len() => { if let Some(a_last) = a_tys.last() { a = a_last; @@ -385,7 +373,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { match predicate { ty::Predicate::Projection(..) | ty::Predicate::Trait(..) | - ty::Predicate::Equate(..) | ty::Predicate::Subtype(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | @@ -394,7 +381,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::Predicate::ConstEvaluatable(..) => { None } - ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => { + ty::Predicate::TypeOutlives(predicate) => { // Search for a bound of the form `erased_self_ty // : 'a`, but be wary of something like `for<'a> // erased_self_ty : 'a` (we interpret a @@ -404,8 +391,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // it's kind of a moot point since you could never // construct such an object, but this seems // correct even if that code changes). 
- if t == erased_self_ty && !r.has_escaping_regions() { - Some(r) + let ty::OutlivesPredicate(ref t, ref r) = predicate.skip_binder(); + if t == &erased_self_ty && !r.has_escaping_regions() { + Some(*r) } else { None } @@ -419,7 +407,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn calculate_dtor( self, adt_did: DefId, - validate: &mut FnMut(Self, DefId) -> Result<(), ErrorReported> + validate: &mut dyn FnMut(Self, DefId) -> Result<(), ErrorReported> ) -> Option { let drop_trait = if let Some(def_id) = self.lang_items().drop_trait() { def_id @@ -427,7 +415,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return None; }; - ty::maps::queries::coherent_trait::ensure(self, drop_trait); + ty::query::queries::coherent_trait::ensure(self, drop_trait); let mut dtor_did = None; let ty = self.type_of(adt_did); @@ -510,115 +498,51 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let result = item_substs.iter().zip(impl_substs.iter()) .filter(|&(_, &k)| { - if let Some(&ty::RegionKind::ReEarlyBound(ref ebr)) = k.as_region() { - !impl_generics.region_param(ebr, self).pure_wrt_drop - } else if let Some(&ty::TyS { - sty: ty::TypeVariants::TyParam(ref pt), .. - }) = k.as_type() { - !impl_generics.type_param(pt, self).pure_wrt_drop - } else { - // not a type or region param - this should be reported - // as an error. - false + match k.unpack() { + UnpackedKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => { + !impl_generics.region_param(ebr, self).pure_wrt_drop + } + UnpackedKind::Type(&ty::TyS { + sty: ty::TypeVariants::TyParam(ref pt), .. + }) => { + !impl_generics.type_param(pt, self).pure_wrt_drop + } + UnpackedKind::Lifetime(_) | UnpackedKind::Type(_) => { + // not a type or region param - this should be reported + // as an error. 
+ false + } } }).map(|(&item_param, _)| item_param).collect(); debug!("destructor_constraint({:?}) = {:?}", def.did, result); result } - /// Return a set of constraints that needs to be satisfied in - /// order for `ty` to be valid for destruction. - pub fn dtorck_constraint_for_ty(self, - span: Span, - for_ty: Ty<'tcx>, - depth: usize, - ty: Ty<'tcx>) - -> Result, ErrorReported> - { - debug!("dtorck_constraint_for_ty({:?}, {:?}, {:?}, {:?})", - span, for_ty, depth, ty); - - if depth >= self.sess.recursion_limit.get() { - let mut err = struct_span_err!( - self.sess, span, E0320, - "overflow while adding drop-check rules for {}", for_ty); - err.note(&format!("overflowed on {}", ty)); - err.emit(); - return Err(ErrorReported); - } - - let result = match ty.sty { - ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | - ty::TyFloat(_) | ty::TyStr | ty::TyNever | ty::TyForeign(..) | - ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyFnDef(..) | ty::TyFnPtr(_) => { - // these types never have a destructor - Ok(ty::DtorckConstraint::empty()) - } - - ty::TyArray(ety, _) | ty::TySlice(ety) => { - // single-element containers, behave like their element - self.dtorck_constraint_for_ty(span, for_ty, depth+1, ety) - } - - ty::TyTuple(tys, _) => { - tys.iter().map(|ty| { - self.dtorck_constraint_for_ty(span, for_ty, depth+1, ty) - }).collect() - } - - ty::TyClosure(def_id, substs) => { - substs.upvar_tys(def_id, self).map(|ty| { - self.dtorck_constraint_for_ty(span, for_ty, depth+1, ty) - }).collect() - } - - ty::TyGenerator(def_id, substs, interior) => { - substs.upvar_tys(def_id, self).chain(iter::once(interior.witness)).map(|ty| { - self.dtorck_constraint_for_ty(span, for_ty, depth+1, ty) - }).collect() - } - - ty::TyAdt(def, substs) => { - let ty::DtorckConstraint { - dtorck_types, outlives - } = self.at(span).adt_dtorck_constraint(def.did); - Ok(ty::DtorckConstraint { - // FIXME: we can try to recursively `dtorck_constraint_on_ty` - // there, but that needs some way to handle 
cycles. - dtorck_types: dtorck_types.subst(self, substs), - outlives: outlives.subst(self, substs) - }) - } - - // Objects must be alive in order for their destructor - // to be called. - ty::TyDynamic(..) => Ok(ty::DtorckConstraint { - outlives: vec![Kind::from(ty)], - dtorck_types: vec![], - }), - - // Types that can't be resolved. Pass them forward. - ty::TyProjection(..) | ty::TyAnon(..) | ty::TyParam(..) => { - Ok(ty::DtorckConstraint { - outlives: vec![], - dtorck_types: vec![ty], - }) - } - - ty::TyInfer(..) | ty::TyError => { - self.sess.delay_span_bug(span, "unresolved type in dtorck"); - Err(ErrorReported) - } - }; - - debug!("dtorck_constraint_for_ty({:?}) = {:?}", ty, result); - result - } - + /// True if `def_id` refers to a closure (e.g., `|x| x * 2`). Note + /// that closures have a def-id, but the closure *expression* also + /// has a `HirId` that is located within the context where the + /// closure appears (and, sadly, a corresponding `NodeId`, since + /// those are not yet phased out). The parent of the closure's + /// def-id will also be the context where it appears. pub fn is_closure(self, def_id: DefId) -> bool { self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr } + /// True if `def_id` refers to a trait (e.g., `trait Foo { ... }`). + pub fn is_trait(self, def_id: DefId) -> bool { + if let DefPathData::Trait(_) = self.def_key(def_id).disambiguated_data.data { + true + } else { + false + } + } + + /// True if this def-id refers to the implicit constructor for + /// a tuple struct like `struct Foo(u32)`. + pub fn is_struct_constructor(self, def_id: DefId) -> bool { + self.def_key(def_id).disambiguated_data.data == DefPathData::StructCtor + } + /// Given the `DefId` of a fn or closure, returns the `DefId` of /// the innermost fn item that the closure is contained within. 
/// This is a significant def-id because, when we do @@ -652,7 +576,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { -> Option>> { let closure_ty = self.mk_closure(closure_def_id, closure_substs); - let env_region = ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrEnv); + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); let closure_kind_ty = closure_substs.closure_kind_ty(closure_def_id, self); let closure_kind = closure_kind_ty.to_opt_closure_kind()?; let env_ty = match closure_kind { @@ -660,189 +584,53 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty), ty::ClosureKind::FnOnce => closure_ty, }; - Some(ty::Binder(env_ty)) + Some(ty::Binder::bind(env_ty)) } /// Given the def-id of some item that has no type parameters, make /// a suitable "empty substs" for it. - pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> &'tcx ty::Substs<'tcx> { - ty::Substs::for_item(self, item_def_id, - |_, _| self.types.re_erased, - |_, _| { - bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id) + pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> &'tcx Substs<'tcx> { + Substs::for_item(self, item_def_id, |param, _| { + match param.kind { + GenericParamDefKind::Lifetime => self.types.re_erased.into(), + GenericParamDefKind::Type {..} => { + bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id) + } + } }) } - pub fn const_usize(&self, val: u16) -> ConstInt { - match self.sess.target.usize_ty { - ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16(val as u16)), - ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(val as u32)), - ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64(val as u64)), - _ => bug!(), - } - } - - /// Check if the node pointed to by def_id is a mutable static item - pub fn is_static_mut(&self, def_id: DefId) -> bool { + /// Return whether the node pointed to by def_id is a static item, and its mutability + pub fn 
is_static(&self, def_id: DefId) -> Option { if let Some(node) = self.hir.get_if_local(def_id) { match node { Node::NodeItem(&hir::Item { - node: hir::ItemStatic(_, hir::MutMutable, _), .. - }) => true, + node: hir::ItemKind::Static(_, mutbl, _), .. + }) => Some(mutbl), Node::NodeForeignItem(&hir::ForeignItem { - node: hir::ForeignItemStatic(_, mutbl), .. - }) => mutbl, - _ => false + node: hir::ForeignItemKind::Static(_, is_mutbl), .. + }) => + Some(if is_mutbl { + hir::Mutability::MutMutable + } else { + hir::Mutability::MutImmutable + }), + _ => None } } else { match self.describe_def(def_id) { - Some(Def::Static(_, mutbl)) => mutbl, - _ => false + Some(Def::Static(_, is_mutbl)) => + Some(if is_mutbl { + hir::Mutability::MutMutable + } else { + hir::Mutability::MutImmutable + }), + _ => None } } } } -pub struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a, W> { - tcx: TyCtxt<'a, 'gcx, 'tcx>, - state: StableHasher, -} - -impl<'a, 'gcx, 'tcx, W> TypeIdHasher<'a, 'gcx, 'tcx, W> - where W: StableHasherResult -{ - pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self { - TypeIdHasher { tcx: tcx, state: StableHasher::new() } - } - - pub fn finish(self) -> W { - self.state.finish() - } - - pub fn hash(&mut self, x: T) { - x.hash(&mut self.state); - } - - fn hash_discriminant_u8(&mut self, x: &T) { - let v = unsafe { - intrinsics::discriminant_value(x) - }; - let b = v as u8; - assert_eq!(v, b as u64); - self.hash(b) - } - - fn def_id(&mut self, did: DefId) { - // Hash the DefPath corresponding to the DefId, which is independent - // of compiler internal state. We already have a stable hash value of - // all DefPaths available via tcx.def_path_hash(), so we just feed that - // into the hasher. - let hash = self.tcx.def_path_hash(did); - self.hash(hash); - } -} - -impl<'a, 'gcx, 'tcx, W> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx, W> - where W: StableHasherResult -{ - fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { - // Distinguish between the Ty variants uniformly. 
- self.hash_discriminant_u8(&ty.sty); - - match ty.sty { - TyInt(i) => self.hash(i), - TyUint(u) => self.hash(u), - TyFloat(f) => self.hash(f), - TyArray(_, n) => { - self.hash_discriminant_u8(&n.val); - match n.val { - ConstVal::Integral(x) => self.hash(x.to_u64().unwrap()), - ConstVal::Unevaluated(def_id, _) => self.def_id(def_id), - _ => bug!("arrays should not have {:?} as length", n) - } - } - TyRawPtr(m) | - TyRef(_, m) => self.hash(m.mutbl), - TyClosure(def_id, _) | - TyGenerator(def_id, _, _) | - TyAnon(def_id, _) | - TyFnDef(def_id, _) => self.def_id(def_id), - TyAdt(d, _) => self.def_id(d.did), - TyForeign(def_id) => self.def_id(def_id), - TyFnPtr(f) => { - self.hash(f.unsafety()); - self.hash(f.abi()); - self.hash(f.variadic()); - self.hash(f.inputs().skip_binder().len()); - } - TyDynamic(ref data, ..) => { - if let Some(p) = data.principal() { - self.def_id(p.def_id()); - } - for d in data.auto_traits() { - self.def_id(d); - } - } - TyTuple(tys, defaulted) => { - self.hash(tys.len()); - self.hash(defaulted); - } - TyParam(p) => { - self.hash(p.idx); - self.hash(p.name.as_str()); - } - TyProjection(ref data) => { - self.def_id(data.item_def_id); - } - TyNever | - TyBool | - TyChar | - TyStr | - TySlice(_) => {} - - TyError | - TyInfer(_) => bug!("TypeIdHasher: unexpected type {}", ty) - } - - ty.super_visit_with(self) - } - - fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { - self.hash_discriminant_u8(r); - match *r { - ty::ReErased | - ty::ReStatic | - ty::ReEmpty => { - // No variant fields to hash for these ... - } - ty::ReLateBound(db, ty::BrAnon(i)) => { - self.hash(db.depth); - self.hash(i); - } - ty::ReEarlyBound(ty::EarlyBoundRegion { def_id, .. }) => { - self.def_id(def_id); - } - - ty::ReClosureBound(..) | - ty::ReLateBound(..) | - ty::ReFree(..) | - ty::ReScope(..) | - ty::ReVar(..) | - ty::ReSkolemized(..) 
=> { - bug!("TypeIdHasher: unexpected region {:?}", r) - } - } - false - } - - fn visit_binder>(&mut self, x: &ty::Binder) -> bool { - // Anonymize late-bound regions so that, for example: - // `for<'a, b> fn(&'a &'b T)` and `for<'a, b> fn(&'b &'a T)` - // result in the same TypeId (the two types are equivalent). - self.tcx.anonymize_late_bound_regions(x).super_visit_with(self) - } -} - impl<'a, 'tcx> ty::TyS<'tcx> { pub fn moves_by_default(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -853,11 +641,10 @@ impl<'a, 'tcx> ty::TyS<'tcx> { } pub fn is_sized(&'tcx self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - span: Span)-> bool + tcx_at: TyCtxtAt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>)-> bool { - tcx.at(span).is_sized_raw(param_env.and(self)) + tcx_at.is_sized_raw(param_env.and(self)) } pub fn is_freeze(&'tcx self, @@ -910,7 +697,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { -> Representability { match ty.sty { - TyTuple(ref ts, _) => { + TyTuple(ref ts) => { // Find non representable fold_repr(ts.iter().map(|ty| { is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty) @@ -1117,7 +904,7 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let (param_env, ty) = query.into_parts(); let needs_drop = |ty: Ty<'tcx>| -> bool { - match ty::queries::needs_drop_raw::try_get(tcx, DUMMY_SP, param_env.and(ty)) { + match tcx.try_needs_drop_raw(DUMMY_SP, param_env.and(ty)) { Ok(v) => v, Err(mut bug) => { // Cycles should be reported as an error by `check_representable`. @@ -1139,12 +926,15 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Fast-path for primitive types ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) | ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever | - ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | + ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | ty::TyGeneratorWitness(..) | ty::TyRawPtr(_) | ty::TyRef(..) 
| ty::TyStr => false, // Foreign types can never have destructors ty::TyForeign(..) => false, + // `ManuallyDrop` doesn't have a destructor regardless of field types. + ty::TyAdt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false, + // Issue #22536: We first query type_moves_by_default. It sees a // normalized version of the type, and therefore will definitely // know whether the type implements Copy (and thus needs no @@ -1178,9 +968,10 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // state transformation pass ty::TyGenerator(..) => true, - ty::TyTuple(ref tys, _) => tys.iter().cloned().any(needs_drop), + ty::TyTuple(ref tys) => tys.iter().cloned().any(needs_drop), - // unions don't have destructors regardless of the child types + // unions don't have destructors because of the child types, + // only if they manually implement `Drop` (handled above). ty::TyAdt(def, _) if def.is_union() => false, ty::TyAdt(def, substs) => @@ -1234,7 +1025,7 @@ impl<'tcx> ExplicitSelf<'tcx> { match self_arg_ty.sty { _ if is_self_ty(self_arg_ty) => ByValue, - ty::TyRef(region, ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => { + ty::TyRef(region, ty, mutbl) if is_self_ty(ty) => { ByReference(region, mutbl) } ty::TyRawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => { @@ -1248,8 +1039,8 @@ impl<'tcx> ExplicitSelf<'tcx> { } } -pub fn provide(providers: &mut ty::maps::Providers) { - *providers = ty::maps::Providers { +pub fn provide(providers: &mut ty::query::Providers) { + *providers = ty::query::Providers { is_copy_raw, is_sized_raw, is_freeze_raw, diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs index 448ad4cf675c..d12f73144269 100644 --- a/src/librustc/ty/walk.rs +++ b/src/librustc/ty/walk.rs @@ -11,7 +11,7 @@ //! An iterator over the type substructure. //! WARNING: this does not keep track of the region depth. 
-use middle::const_val::{ConstVal, ConstAggregate}; +use mir::interpret::ConstValue; use ty::{self, Ty}; use rustc_data_structures::small_vec::SmallVec; use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter; @@ -92,9 +92,12 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { ty::TySlice(ty) => { stack.push(ty); } - ty::TyRawPtr(ref mt) | ty::TyRef(_, ref mt) => { + ty::TyRawPtr(ref mt) => { stack.push(mt.ty); } + ty::TyRef(_, ty, _) => { + stack.push(ty); + } ty::TyProjection(ref data) => { stack.extend(data.substs.types().rev()); } @@ -118,11 +121,13 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { ty::TyClosure(_, ref substs) => { stack.extend(substs.substs.types().rev()); } - ty::TyGenerator(_, ref substs, ref interior) => { + ty::TyGenerator(_, ref substs, _) => { stack.extend(substs.substs.types().rev()); - stack.push(interior.witness); } - ty::TyTuple(ts, _) => { + ty::TyGeneratorWitness(ts) => { + stack.extend(ts.skip_binder().iter().cloned().rev()); + } + ty::TyTuple(ts) => { stack.extend(ts.iter().cloned().rev()); } ty::TyFnDef(_, substs) => { @@ -136,34 +141,8 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { } fn push_const<'tcx>(stack: &mut TypeWalkerStack<'tcx>, constant: &'tcx ty::Const<'tcx>) { - match constant.val { - ConstVal::Integral(_) | - ConstVal::Float(_) | - ConstVal::Str(_) | - ConstVal::ByteStr(_) | - ConstVal::Bool(_) | - ConstVal::Char(_) | - ConstVal::Variant(_) => {} - ConstVal::Function(_, substs) => { - stack.extend(substs.types().rev()); - } - ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { - for &(_, v) in fields.iter().rev() { - push_const(stack, v); - } - } - ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | - ConstVal::Aggregate(ConstAggregate::Array(fields)) => { - for v in fields.iter().rev() { - push_const(stack, v); - } - } - ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { - push_const(stack, 
v); - } - ConstVal::Unevaluated(_, substs) => { - stack.extend(substs.types().rev()); - } + if let ConstValue::Unevaluated(_, substs) = constant.val { + stack.extend(substs.types().rev()); } stack.push(constant.ty); } diff --git a/src/librustc/ty/wf.rs b/src/librustc/ty/wf.rs index a851ccc34bfd..b99cdd59773a 100644 --- a/src/librustc/ty/wf.rs +++ b/src/librustc/ty/wf.rs @@ -9,7 +9,7 @@ // except according to those terms. use hir::def_id::DefId; -use middle::const_val::{ConstVal, ConstAggregate}; +use mir::interpret::ConstValue; use infer::InferCtxt; use ty::subst::Substs; use traits; @@ -77,10 +77,6 @@ pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, ty::Predicate::Trait(ref t) => { wf.compute_trait_ref(&t.skip_binder().trait_ref, Elaborate::None); // (*) } - ty::Predicate::Equate(ref t) => { - wf.compute(t.skip_binder().0); - wf.compute(t.skip_binder().1); - } ty::Predicate::RegionOutlives(..) => { } ty::Predicate::TypeOutlives(ref t) => { @@ -220,39 +216,15 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { /// into `self.out`. fn compute_const(&mut self, constant: &'tcx ty::Const<'tcx>) { self.require_sized(constant.ty, traits::ConstSized); - match constant.val { - ConstVal::Integral(_) | - ConstVal::Float(_) | - ConstVal::Str(_) | - ConstVal::ByteStr(_) | - ConstVal::Bool(_) | - ConstVal::Char(_) | - ConstVal::Variant(_) | - ConstVal::Function(..) 
=> {} - ConstVal::Aggregate(ConstAggregate::Struct(fields)) => { - for &(_, v) in fields { - self.compute_const(v); - } - } - ConstVal::Aggregate(ConstAggregate::Tuple(fields)) | - ConstVal::Aggregate(ConstAggregate::Array(fields)) => { - for v in fields { - self.compute_const(v); - } - } - ConstVal::Aggregate(ConstAggregate::Repeat(v, _)) => { - self.compute_const(v); - } - ConstVal::Unevaluated(def_id, substs) => { - let obligations = self.nominal_obligations(def_id, substs); - self.out.extend(obligations); + if let ConstValue::Unevaluated(def_id, substs) = constant.val { + let obligations = self.nominal_obligations(def_id, substs); + self.out.extend(obligations); - let predicate = ty::Predicate::ConstEvaluatable(def_id, substs); - let cause = self.cause(traits::MiscObligation); - self.out.push(traits::Obligation::new(cause, - self.param_env, - predicate)); - } + let predicate = ty::Predicate::ConstEvaluatable(def_id, substs); + let cause = self.cause(traits::MiscObligation); + self.out.push(traits::Obligation::new(cause, + self.param_env, + predicate)); } } @@ -283,6 +255,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { ty::TyFloat(..) | ty::TyError | ty::TyStr | + ty::TyGeneratorWitness(..) | ty::TyNever | ty::TyParam(_) | ty::TyForeign(..) 
=> { @@ -299,7 +272,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { self.compute_const(len); } - ty::TyTuple(ref tys, _) => { + ty::TyTuple(ref tys) => { if let Some((_last, rest)) = tys.split_last() { for elem in rest { self.require_sized(elem, traits::TupleElem); @@ -322,17 +295,17 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { self.out.extend(obligations); } - ty::TyRef(r, mt) => { + ty::TyRef(r, rty, _) => { // WfReference - if !r.has_escaping_regions() && !mt.ty.has_escaping_regions() { + if !r.has_escaping_regions() && !rty.has_escaping_regions() { let cause = self.cause(traits::ReferenceOutlivesReferent(ty)); self.out.push( traits::Obligation::new( cause, param_env, ty::Predicate::TypeOutlives( - ty::Binder( - ty::OutlivesPredicate(mt.ty, r))))); + ty::Binder::dummy( + ty::OutlivesPredicate(rty, r))))); } } @@ -387,10 +360,16 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { // types appearing in the fn signature } - ty::TyAnon(..) => { + ty::TyAnon(did, substs) => { // all of the requirements on type parameters // should've been checked by the instantiation // of whatever returned this exact `impl Trait`. 
+ + // for named existential types we still need to check them + if super::is_impl_trait_defn(self.infcx.tcx, did).is_none() { + let obligations = self.nominal_obligations(did, substs); + self.out.extend(obligations); + } } ty::TyDynamic(data, r) => { @@ -516,7 +495,8 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { for implicit_bound in implicit_bounds { let cause = self.cause(traits::ObjectTypeBound(ty, explicit_bound)); - let outlives = ty::Binder(ty::OutlivesPredicate(explicit_bound, implicit_bound)); + let outlives = ty::Binder::dummy( + ty::OutlivesPredicate(explicit_bound, implicit_bound)); self.out.push(traits::Obligation::new(cause, self.param_env, outlives.to_predicate())); diff --git a/src/librustc/util/bug.rs b/src/librustc/util/bug.rs new file mode 100644 index 000000000000..f2593e4d4b5e --- /dev/null +++ b/src/librustc/util/bug.rs @@ -0,0 +1,51 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// These functions are used by macro expansion for bug! and span_bug! + +use ty::tls; +use std::fmt; +use syntax_pos::{Span, MultiSpan}; + +#[cold] +#[inline(never)] +pub fn bug_fmt(file: &'static str, line: u32, args: fmt::Arguments) -> ! { + // this wrapper mostly exists so I don't have to write a fully + // qualified path of None:: inside the bug!() macro definition + opt_span_bug_fmt(file, line, None::, args); +} + +#[cold] +#[inline(never)] +pub fn span_bug_fmt>( + file: &'static str, + line: u32, + span: S, + args: fmt::Arguments, +) -> ! { + opt_span_bug_fmt(file, line, Some(span), args); +} + +fn opt_span_bug_fmt>( + file: &'static str, + line: u32, + span: Option, + args: fmt::Arguments, +) -> ! 
{ + tls::with_opt(move |tcx| { + let msg = format!("{}:{}: {}", file, line, args); + match (tcx, span) { + (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg), + (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg), + (None, _) => panic!(msg), + } + }); + unreachable!(); +} diff --git a/src/librustc/util/captures.rs b/src/librustc/util/captures.rs new file mode 100644 index 000000000000..b68cfd278fa9 --- /dev/null +++ b/src/librustc/util/captures.rs @@ -0,0 +1,18 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// "Signaling" trait used in impl trait to tag lifetimes that you may +/// need to capture but don't really need for other reasons. +/// Basically a workaround; see [this comment] for details. 
+/// +/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999 +pub trait Captures<'a> { } + +impl<'a, T: ?Sized> Captures<'a> for T { } diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index 2971f3e853a9..1ec025f78c9a 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -10,19 +10,23 @@ #![allow(non_camel_case_types)] +use rustc_data_structures::sync::Lock; + use std::cell::{RefCell, Cell}; use std::collections::HashMap; -use std::ffi::CString; use std::fmt::Debug; use std::hash::{Hash, BuildHasher}; -use std::iter::repeat; -use std::path::Path; +use std::panic; +use std::env; use std::time::{Duration, Instant}; use std::sync::mpsc::{Sender}; use syntax_pos::{SpanData}; -use ty::maps::{QueryMsg}; +use ty::TyCtxt; use dep_graph::{DepNode}; +use proc_macro; +use lazy_static; +use session::Session; // The name of the associated type for `Fn` return types pub const FN_OUTPUT_NAME: &'static str = "Output"; @@ -34,8 +38,40 @@ pub struct ErrorReported; thread_local!(static TIME_DEPTH: Cell = Cell::new(0)); -/// Initialized for -Z profile-queries -thread_local!(static PROFQ_CHAN: RefCell>> = RefCell::new(None)); +lazy_static! { + static ref DEFAULT_HOOK: Box = { + let hook = panic::take_hook(); + panic::set_hook(Box::new(panic_hook)); + hook + }; +} + +fn panic_hook(info: &panic::PanicInfo) { + if !proc_macro::__internal::in_sess() { + (*DEFAULT_HOOK)(info); + + let backtrace = env::var_os("RUST_BACKTRACE").map(|x| &x != "0").unwrap_or(false); + + if backtrace { + TyCtxt::try_print_query_stack(); + } + + #[cfg(windows)] + unsafe { + if env::var("RUSTC_BREAK_ON_ICE").is_ok() { + extern "system" { + fn DebugBreak(); + } + // Trigger a debugger if we crashed during bootstrap + DebugBreak(); + } + } + } +} + +pub fn install_panic_hook() { + lazy_static::initialize(&DEFAULT_HOOK); +} /// Parameters to the `Dump` variant of type `ProfileQueriesMsg`. 
#[derive(Clone,Debug)] @@ -48,6 +84,13 @@ pub struct ProfQDumpParams { pub dump_profq_msg_log:bool, } +#[allow(bad_style)] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct QueryMsg { + pub query: &'static str, + pub msg: Option, +} + /// A sequence of these messages induce a trace of query-based incremental compilation. /// FIXME(matthewhammer): Determine whether we should include cycle detection here or not. #[derive(Clone,Debug)] @@ -76,29 +119,23 @@ pub enum ProfileQueriesMsg { } /// If enabled, send a message to the profile-queries thread -pub fn profq_msg(msg: ProfileQueriesMsg) { - PROFQ_CHAN.with(|sender|{ - if let Some(s) = sender.borrow().as_ref() { - s.send(msg).unwrap() - } else { - // Do nothing. - // - // FIXME(matthewhammer): Multi-threaded translation phase triggers the panic below. - // From backtrace: rustc_trans::back::write::spawn_work::{{closure}}. - // - // panic!("no channel on which to send profq_msg: {:?}", msg) - } - }) +pub fn profq_msg(sess: &Session, msg: ProfileQueriesMsg) { + if let Some(s) = sess.profile_channel.borrow().as_ref() { + s.send(msg).unwrap() + } else { + // Do nothing + } } /// Set channel for profile queries channel -pub fn profq_set_chan(s: Sender) -> bool { - PROFQ_CHAN.with(|chan|{ - if chan.borrow().is_none() { - *chan.borrow_mut() = Some(s); - true - } else { false } - }) +pub fn profq_set_chan(sess: &Session, s: Sender) -> bool { + let mut channel = sess.profile_channel.borrow_mut(); + if channel.is_none() { + *channel = Some(s); + true + } else { + false + } } /// Read the current depth of `time()` calls. 
This is used to @@ -114,7 +151,13 @@ pub fn set_time_depth(depth: usize) { TIME_DEPTH.with(|slot| slot.set(depth)); } -pub fn time(do_it: bool, what: &str, f: F) -> T where +pub fn time(sess: &Session, what: &str, f: F) -> T where + F: FnOnce() -> T, +{ + time_ext(sess.time_passes(), Some(sess), what, f) +} + +pub fn time_ext(do_it: bool, sess: Option<&Session>, what: &str, f: F) -> T where F: FnOnce() -> T, { if !do_it { return f(); } @@ -125,15 +168,19 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where r }); - if cfg!(debug_assertions) { - profq_msg(ProfileQueriesMsg::TimeBegin(what.to_string())) - }; + if let Some(sess) = sess { + if cfg!(debug_assertions) { + profq_msg(sess, ProfileQueriesMsg::TimeBegin(what.to_string())) + } + } let start = Instant::now(); let rv = f(); let dur = start.elapsed(); - if cfg!(debug_assertions) { - profq_msg(ProfileQueriesMsg::TimeEnd) - }; + if let Some(sess) = sess { + if cfg!(debug_assertions) { + profq_msg(sess, ProfileQueriesMsg::TimeEnd) + } + } print_time_passes_entry_internal(what, dur); @@ -169,7 +216,7 @@ fn print_time_passes_entry_internal(what: &str, dur: Duration) { None => "".to_owned(), }; println!("{}time: {}{}\t{}", - repeat(" ").take(indentation).collect::(), + " ".repeat(indentation), duration_to_secs_str(dur), mem_string, what); @@ -193,7 +240,7 @@ pub fn to_readable_str(mut val: usize) -> String { val /= 1000; if val == 0 { - groups.push(format!("{}", group)); + groups.push(group.to_string()); break; } else { groups.push(format!("{:03}", group)); @@ -205,13 +252,14 @@ pub fn to_readable_str(mut val: usize) -> String { groups.join("_") } -pub fn record_time(accu: &Cell, f: F) -> T where +pub fn record_time(accu: &Lock, f: F) -> T where F: FnOnce() -> T, { let start = Instant::now(); let rv = f(); let duration = start.elapsed(); - accu.set(duration + accu.get()); + let mut accu = accu.lock(); + *accu = *accu + duration; rv } @@ -221,7 +269,8 @@ fn get_resident() -> Option { use std::fs; let field = 1; - 
let contents = fs::read_string("/proc/self/statm").ok()?; + let contents = fs::read("/proc/self/statm").ok()?; + let contents = String::from_utf8(contents).ok()?; let s = contents.split_whitespace().nth(field)?; let npages = s.parse::().ok()?; Some(npages * 4096) @@ -325,19 +374,6 @@ impl MemoizationMap for RefCell> } } -#[cfg(unix)] -pub fn path2cstr(p: &Path) -> CString { - use std::os::unix::prelude::*; - use std::ffi::OsStr; - let p: &OsStr = p.as_ref(); - CString::new(p.as_bytes()).unwrap() -} -#[cfg(windows)] -pub fn path2cstr(p: &Path) -> CString { - CString::new(p.to_str().unwrap()).unwrap() -} - - #[test] fn test_to_readable_str() { assert_eq!("0", to_readable_str(0)); diff --git a/src/librustc/util/fs.rs b/src/librustc/util/fs.rs deleted file mode 100644 index 090753b18c0b..000000000000 --- a/src/librustc/util/fs.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::path::{self, Path, PathBuf}; -use std::ffi::OsString; -use std::fs; -use std::io; - -// Unfortunately, on windows, it looks like msvcrt.dll is silently translating -// verbatim paths under the hood to non-verbatim paths! This manifests itself as -// gcc looking like it cannot accept paths of the form `\\?\C:\...`, but the -// real bug seems to lie in msvcrt.dll. -// -// Verbatim paths are generally pretty rare, but the implementation of -// `fs::canonicalize` currently generates paths of this form, meaning that we're -// going to be passing quite a few of these down to gcc, so we need to deal with -// this case. -// -// For now we just strip the "verbatim prefix" of `\\?\` from the path. 
This -// will probably lose information in some cases, but there's not a whole lot -// more we can do with a buggy msvcrt... -// -// For some more information, see this comment: -// https://github.com/rust-lang/rust/issues/25505#issuecomment-102876737 -pub fn fix_windows_verbatim_for_gcc(p: &Path) -> PathBuf { - if !cfg!(windows) { - return p.to_path_buf(); - } - let mut components = p.components(); - let prefix = match components.next() { - Some(path::Component::Prefix(p)) => p, - _ => return p.to_path_buf(), - }; - match prefix.kind() { - path::Prefix::VerbatimDisk(disk) => { - let mut base = OsString::from(format!("{}:", disk as char)); - base.push(components.as_path()); - PathBuf::from(base) - } - path::Prefix::VerbatimUNC(server, share) => { - let mut base = OsString::from(r"\\"); - base.push(server); - base.push(r"\"); - base.push(share); - base.push(components.as_path()); - PathBuf::from(base) - } - _ => p.to_path_buf(), - } -} - -pub enum LinkOrCopy { - Link, - Copy, -} - -/// Copy `p` into `q`, preferring to use hard-linking if possible. If -/// `q` already exists, it is removed first. -/// The result indicates which of the two operations has been performed. -pub fn link_or_copy, Q: AsRef>(p: P, q: Q) -> io::Result { - let p = p.as_ref(); - let q = q.as_ref(); - if q.exists() { - fs::remove_file(&q)?; - } - - match fs::hard_link(p, q) { - Ok(()) => Ok(LinkOrCopy::Link), - Err(_) => { - match fs::copy(p, q) { - Ok(_) => Ok(LinkOrCopy::Copy), - Err(e) => Err(e), - } - } - } -} - -#[derive(Debug)] -pub enum RenameOrCopyRemove { - Rename, - CopyRemove, -} - -/// Rename `p` into `q`, preferring to use `rename` if possible. 
-/// If `rename` fails (rename may fail for reasons such as crossing -/// filesystem), fallback to copy & remove -pub fn rename_or_copy_remove, Q: AsRef>(p: P, - q: Q) - -> io::Result { - let p = p.as_ref(); - let q = q.as_ref(); - match fs::rename(p, q) { - Ok(()) => Ok(RenameOrCopyRemove::Rename), - Err(_) => { - match fs::copy(p, q) { - Ok(_) => { - fs::remove_file(p)?; - Ok(RenameOrCopyRemove::CopyRemove) - } - Err(e) => Err(e), - } - } - } -} diff --git a/src/librustc/util/nodemap.rs b/src/librustc/util/nodemap.rs index 674f67d5cd2f..0dc71af9db69 100644 --- a/src/librustc/util/nodemap.rs +++ b/src/librustc/util/nodemap.rs @@ -13,24 +13,22 @@ #![allow(non_snake_case)] use hir::def_id::DefId; -use hir::ItemLocalId; +use hir::{HirId, ItemLocalId}; use syntax::ast; pub use rustc_data_structures::fx::FxHashMap; pub use rustc_data_structures::fx::FxHashSet; -pub type NodeMap = FxHashMap; -pub type DefIdMap = FxHashMap; -pub type ItemLocalMap = FxHashMap; - -pub type NodeSet = FxHashSet; -pub type DefIdSet = FxHashSet; -pub type ItemLocalSet = FxHashSet; - -pub fn NodeMap() -> NodeMap { FxHashMap() } -pub fn DefIdMap() -> DefIdMap { FxHashMap() } -pub fn ItemLocalMap() -> ItemLocalMap { FxHashMap() } -pub fn NodeSet() -> NodeSet { FxHashSet() } -pub fn DefIdSet() -> DefIdSet { FxHashSet() } -pub fn ItemLocalSet() -> ItemLocalSet { FxHashSet() } +macro_rules! 
define_id_collections { + ($map_name:ident, $set_name:ident, $key:ty) => { + pub type $map_name = FxHashMap<$key, T>; + pub fn $map_name() -> $map_name { FxHashMap() } + pub type $set_name = FxHashSet<$key>; + pub fn $set_name() -> $set_name { FxHashSet() } + } +} +define_id_collections!(NodeMap, NodeSet, ast::NodeId); +define_id_collections!(DefIdMap, DefIdSet, DefId); +define_id_collections!(HirIdMap, HirIdSet, HirId); +define_id_collections!(ItemLocalMap, ItemLocalSet, ItemLocalId); diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 51841836698e..bb54e1836049 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -10,29 +10,34 @@ use hir::def_id::DefId; use hir::map::definitions::DefPathData; -use middle::const_val::ConstVal; +use mir::interpret::ConstValue; use middle::region::{self, BlockRemainder}; use ty::subst::{self, Subst}; use ty::{BrAnon, BrEnv, BrFresh, BrNamed}; use ty::{TyBool, TyChar, TyAdt}; use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr}; use ty::{TyParam, TyRawPtr, TyRef, TyNever, TyTuple}; -use ty::{TyClosure, TyGenerator, TyForeign, TyProjection, TyAnon}; +use ty::{TyClosure, TyGenerator, TyGeneratorWitness, TyForeign, TyProjection, TyAnon}; use ty::{TyDynamic, TyInt, TyUint, TyInfer}; -use ty::{self, Ty, TyCtxt, TypeFoldable}; +use ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, GenericParamCount, GenericParamDefKind}; use util::nodemap::FxHashSet; use std::cell::Cell; use std::fmt; use std::usize; -use rustc_const_math::ConstInt; use rustc_data_structures::indexed_vec::Idx; -use syntax::abi::Abi; +use rustc_target::spec::abi::Abi; use syntax::ast::CRATE_NODE_ID; -use syntax::symbol::Symbol; +use syntax::symbol::{Symbol, InternedString}; use hir; +thread_local! { + /// Mechanism for highlighting of specific regions for display in NLL region inference errors. + /// Contains region to highlight and counter for number to use when highlighting. 
+ static HIGHLIGHT_REGION: Cell> = Cell::new(None) +} + macro_rules! gen_display_debug_body { ( $with:path ) => { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -130,7 +135,7 @@ macro_rules! print { } -struct LateBoundRegionNameCollector(FxHashSet); +struct LateBoundRegionNameCollector(FxHashSet); impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector { fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { match *r { @@ -148,7 +153,7 @@ pub struct PrintContext { is_debug: bool, is_verbose: bool, identify_regions: bool, - used_region_names: Option>, + used_region_names: Option>, region_index: usize, binder_depth: usize, } @@ -257,8 +262,10 @@ impl PrintContext { let verbose = self.is_verbose; let mut num_supplied_defaults = 0; let mut has_self = false; - let mut num_regions = 0; - let mut num_types = 0; + let mut own_counts = GenericParamCount { + lifetimes: 0, + types: 0, + }; let mut is_value_path = false; let fn_trait_kind = ty::tls::with(|tcx| { // Unfortunately, some kinds of items (e.g., closures) don't have @@ -268,14 +275,31 @@ impl PrintContext { loop { let key = tcx.def_key(item_def_id); match key.disambiguated_data.data { + DefPathData::AssocTypeInTrait(_) | + DefPathData::AssocTypeInImpl(_) | + DefPathData::AssocExistentialInImpl(_) | + DefPathData::Trait(_) | DefPathData::TypeNs(_) => { break; } - DefPathData::ValueNs(_) | DefPathData::EnumVariant(_) => { + DefPathData::ValueNs(_) | + DefPathData::EnumVariant(_) => { is_value_path = true; break; } - _ => { + DefPathData::CrateRoot | + DefPathData::Misc | + DefPathData::Impl | + DefPathData::Module(_) | + DefPathData::MacroDef(_) | + DefPathData::ClosureExpr | + DefPathData::TypeParam(_) | + DefPathData::LifetimeParam(_) | + DefPathData::Field(_) | + DefPathData::StructCtor | + DefPathData::AnonConst | + DefPathData::ImplTrait | + DefPathData::GlobalMetaData(_) => { // if we're making a symbol for something, there ought // to be a value or type-def or something in there // 
*somewhere* @@ -287,6 +311,7 @@ impl PrintContext { } } let mut generics = tcx.generics_of(item_def_id); + let child_own_counts = generics.own_counts(); let mut path_def_id = did; has_self = generics.has_self; @@ -294,10 +319,9 @@ impl PrintContext { if let Some(def_id) = generics.parent { // Methods. assert!(is_value_path); - child_types = generics.types.len(); + child_types = child_own_counts.types; generics = tcx.generics_of(def_id); - num_regions = generics.regions.len(); - num_types = generics.types.len(); + own_counts = generics.own_counts(); if has_self { print!(f, self, write("<"), print_display(substs.type_at(0)), write(" as "))?; @@ -312,20 +336,30 @@ impl PrintContext { assert_eq!(has_self, false); } else { // Types and traits. - num_regions = generics.regions.len(); - num_types = generics.types.len(); + own_counts = child_own_counts; } } if !verbose { - if generics.types.last().map_or(false, |def| def.has_default) { + let mut type_params = + generics.params.iter().rev().filter_map(|param| match param.kind { + GenericParamDefKind::Lifetime => None, + GenericParamDefKind::Type { has_default, .. 
} => { + Some((param.def_id, has_default)) + } + }).peekable(); + let has_default = { + let has_default = type_params.peek().map(|(_, has_default)| has_default); + *has_default.unwrap_or(&false) + }; + if has_default { if let Some(substs) = tcx.lift(&substs) { - let tps = substs.types().rev().skip(child_types); - for (def, actual) in generics.types.iter().rev().zip(tps) { - if !def.has_default { + let types = substs.types().rev().skip(child_types); + for ((def_id, has_default), actual) in type_params.zip(types) { + if !has_default { break; } - if tcx.type_of(def.def_id).subst(tcx, substs) != actual { + if tcx.type_of(def_id).subst(tcx, substs) != actual { break; } num_supplied_defaults += 1; @@ -340,7 +374,7 @@ impl PrintContext { if !verbose && fn_trait_kind.is_some() && projections.len() == 1 { let projection_ty = projections[0].ty; - if let TyTuple(ref args, _) = substs.type_at(1).sty { + if let TyTuple(ref args) = substs.type_at(1).sty { return self.fn_sig(f, args, false, projection_ty); } } @@ -385,10 +419,11 @@ impl PrintContext { Ok(()) }; - print_regions(f, "<", 0, num_regions)?; + print_regions(f, "<", 0, own_counts.lifetimes)?; - let tps = substs.types().take(num_types - num_supplied_defaults) - .skip(has_self as usize); + let tps = substs.types() + .take(own_counts.types - num_supplied_defaults) + .skip(has_self as usize); for ty in tps { start_or_continue(f, "<", ", ")?; @@ -400,7 +435,7 @@ impl PrintContext { ty::tls::with(|tcx| print!(f, self, write("{}=", - tcx.associated_item(projection.projection_ty.item_def_id).name), + tcx.associated_item(projection.projection_ty.item_def_id).ident), print_display(projection.ty)) )?; } @@ -419,10 +454,10 @@ impl PrintContext { write!(f, "::{}", item_name)?; } - print_regions(f, "::<", num_regions, usize::MAX)?; + print_regions(f, "::<", own_counts.lifetimes, usize::MAX)?; // FIXME: consider being smart with defaults here too - for ty in substs.types().skip(num_types) { + for ty in 
substs.types().skip(own_counts.types) { start_or_continue(f, "::<", ", ")?; ty.print_display(f, self)?; } @@ -440,12 +475,12 @@ impl PrintContext { lifted: Option>) -> fmt::Result where T: Print, U: Print + TypeFoldable<'tcx>, F: fmt::Write { - fn name_by_region_index(index: usize) -> Symbol { + fn name_by_region_index(index: usize) -> InternedString { match index { 0 => Symbol::intern("'r"), 1 => Symbol::intern("'s"), i => Symbol::intern(&format!("'t{}", i-2)), - } + }.as_interned_str() } // Replace any anonymous late-bound regions with named @@ -456,7 +491,7 @@ impl PrintContext { let value = if let Some(v) = lifted { v } else { - return original.0.print_display(f, self); + return original.skip_binder().print_display(f, self); }; if self.binder_depth == 0 { @@ -493,11 +528,10 @@ impl PrintContext { } }; let _ = write!(f, "{}", name); - ty::BrNamed(tcx.hir.local_def_id(CRATE_NODE_ID), - name) + ty::BrNamed(tcx.hir.local_def_id(CRATE_NODE_ID), name) } }; - tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), br)) + tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)) }).0; start_or_continue(f, "", "> ")?; @@ -510,7 +544,7 @@ impl PrintContext { result } - fn is_name_used(&self, name: &Symbol) -> bool { + fn is_name_used(&self, name: &InternedString) -> bool { match self.used_region_names { Some(ref names) => names.contains(name), None => false, @@ -534,6 +568,19 @@ pub fn parameterized(f: &mut F, PrintContext::new().parameterized(f, substs, did, projections) } +fn get_highlight_region() -> Option<(RegionVid, usize)> { + HIGHLIGHT_REGION.with(|hr| hr.get()) +} + +pub fn with_highlight_region(r: RegionVid, counter: usize, op: impl FnOnce() -> R) -> R { + HIGHLIGHT_REGION.with(|hr| { + assert_eq!(hr.get(), None); + hr.set(Some((r, counter))); + let r = op(); + hr.set(None); + r + }) +} impl<'a, T: Print> Print for &'a T { fn print(&self, f: &mut F, cx: &mut PrintContext) -> fmt::Result { @@ -573,18 +620,14 @@ define_print! 
{ } } -impl fmt::Debug for ty::TypeParameterDef { +impl fmt::Debug for ty::GenericParamDef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TypeParameterDef({}, {:?}, {})", - self.name, - self.def_id, - self.index) - } -} - -impl fmt::Debug for ty::RegionParameterDef { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RegionParameterDef({}, {:?}, {})", + let type_name = match self.kind { + ty::GenericParamDefKind::Lifetime => "Lifetime", + ty::GenericParamDefKind::Type {..} => "Type", + }; + write!(f, "{}({}, {:?}, {})", + type_name, self.name, self.def_id, self.index) @@ -631,6 +674,22 @@ impl<'tcx> fmt::Debug for ty::UpvarBorrow<'tcx> { } } +define_print! { + ('tcx) &'tcx ty::Slice>, (self, f, cx) { + display { + write!(f, "{{")?; + let mut tys = self.iter(); + if let Some(&ty) = tys.next() { + print!(f, cx, print(ty))?; + for &ty in tys { + print!(f, cx, write(", "), print(ty))?; + } + } + write!(f, "}}") + } + } +} + define_print! { ('tcx) ty::TypeAndMut<'tcx>, (self, f, cx) { display { @@ -647,9 +706,9 @@ define_print! { ty::tls::with(|tcx| { let dummy_self = tcx.mk_infer(ty::FreshTy(0)); - let trait_ref = tcx.lift(&ty::Binder(*self)) + let trait_ref = *tcx.lift(&ty::Binder::bind(*self)) .expect("could not lift TraitRef for printing") - .with_self_ty(tcx, dummy_self).0; + .with_self_ty(tcx, dummy_self).skip_binder(); cx.parameterized(f, trait_ref.substs, trait_ref.def_id, &[]) }) } @@ -681,7 +740,7 @@ define_print! { BrAnon(n) => write!(f, "BrAnon({:?})", n), BrFresh(n) => write!(f, "BrFresh({:?})", n), BrNamed(did, name) => { - write!(f, "BrNamed({:?}:{:?}, {:?})", + write!(f, "BrNamed({:?}:{:?}, {})", did.krate, did.index, name) } BrEnv => write!(f, "BrEnv"), @@ -693,7 +752,7 @@ define_print! { define_print! { () ty::RegionKind, (self, f, cx) { display { - if cx.is_verbose { + if cx.is_verbose || get_highlight_region().is_some() { return self.print_debug(f, cx); } @@ -705,6 +764,9 @@ define_print! 
{ ty::ReEarlyBound(ref data) => { write!(f, "{}", data.name) } + ty::ReCanonical(_) => { + write!(f, "'_") + } ty::ReLateBound(_, br) | ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | ty::ReSkolemized(_, br) => { @@ -769,8 +831,12 @@ define_print! { write!(f, "{:?}", vid) } - ty::ReSkolemized(id, ref bound_region) => { - write!(f, "ReSkolemized({}, {:?})", id.index, bound_region) + ty::ReCanonical(c) => { + write!(f, "'?{}", c.index()) + } + + ty::ReSkolemized(universe, ref bound_region) => { + write!(f, "ReSkolemized({:?}, {:?})", universe, bound_region) } ty::ReEmpty => write!(f, "ReEmpty"), @@ -858,6 +924,15 @@ impl fmt::Debug for ty::FloatVid { impl fmt::Debug for ty::RegionVid { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some((region, counter)) = get_highlight_region() { + debug!("RegionVid.fmt: region={:?} self={:?} counter={:?}", region, self, counter); + return if *self == region { + write!(f, "'{:?}", counter) + } else { + write!(f, "'_") + } + } + write!(f, "'_#{}r", self.index()) } } @@ -872,6 +947,7 @@ define_print! { ty::TyVar(_) => write!(f, "_"), ty::IntVar(_) => write!(f, "{}", "{integer}"), ty::FloatVar(_) => write!(f, "{}", "{float}"), + ty::CanonicalTy(_) => write!(f, "_"), ty::FreshTy(v) => write!(f, "FreshTy({})", v), ty::FreshIntTy(v) => write!(f, "FreshIntTy({})", v), ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v) @@ -883,6 +959,7 @@ define_print! 
{ ty::TyVar(ref v) => write!(f, "{:?}", v), ty::IntVar(ref v) => write!(f, "{:?}", v), ty::FloatVar(ref v) => write!(f, "{:?}", v), + ty::CanonicalTy(v) => write!(f, "?{:?}", v.index()), ty::FreshTy(v) => write!(f, "FreshTy({:?})", v), ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v), ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v) @@ -900,6 +977,12 @@ impl fmt::Debug for ty::IntVarValue { } } +impl fmt::Debug for ty::FloatVarValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + // The generic impl doesn't work yet because projections are not // normalized under HRTB. /*impl fmt::Display for ty::Binder @@ -917,7 +1000,6 @@ define_print_multi! { ('tcx) ty::Binder>, ('tcx) ty::Binder>, ('tcx) ty::Binder>, - ('tcx) ty::Binder>, ('tcx) ty::Binder>, ('tcx) ty::Binder>, ('tcx) ty::Binder, ty::Region<'tcx>>>, @@ -949,14 +1031,6 @@ define_print! { } } -define_print! { - ('tcx) ty::GeneratorInterior<'tcx>, (self, f, cx) { - display { - self.witness.print(f, cx) - } - } -} - define_print! { ('tcx) ty::TypeVariants<'tcx>, (self, f, cx) { display { @@ -973,17 +1047,19 @@ define_print! { })?; tm.ty.print(f, cx) } - TyRef(r, ref tm) => { + TyRef(r, ty, mutbl) => { write!(f, "&")?; let s = r.print_to_string(cx); - write!(f, "{}", s)?; - if !s.is_empty() { - write!(f, " ")?; + if s != "'_" { + write!(f, "{}", s)?; + if !s.is_empty() { + write!(f, " ")?; + } } - tm.print(f, cx) + ty::TypeAndMut { ty, mutbl }.print(f, cx) } TyNever => write!(f, "!"), - TyTuple(ref tys, _) => { + TyTuple(ref tys) => { write!(f, "(")?; let mut tys = tys.iter(); if let Some(&ty) = tys.next() { @@ -1016,10 +1092,14 @@ define_print! 
{ TyParam(ref param_ty) => write!(f, "{}", param_ty), TyAdt(def, substs) => cx.parameterized(f, substs, def.did, &[]), TyDynamic(data, r) => { - data.print(f, cx)?; let r = r.print_to_string(cx); if !r.is_empty() { - write!(f, " + {}", r) + write!(f, "(")?; + } + write!(f, "dyn ")?; + data.print(f, cx)?; + if !r.is_empty() { + write!(f, " + {})", r) } else { Ok(()) } @@ -1032,6 +1112,20 @@ define_print! { } ty::tls::with(|tcx| { + let def_key = tcx.def_key(def_id); + if let Some(name) = def_key.disambiguated_data.data.get_opt_name() { + write!(f, "{}", name)?; + let mut substs = substs.iter(); + if let Some(first) = substs.next() { + write!(f, "::<")?; + write!(f, "{}", first)?; + for subst in substs { + write!(f, ", {}", subst)?; + } + write!(f, ">")?; + } + return Ok(()); + } // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, // by looking up the projections associated with the def_id. let predicates_of = tcx.predicates_of(def_id); @@ -1064,9 +1158,14 @@ define_print! { }) } TyStr => write!(f, "str"), - TyGenerator(did, substs, interior) => ty::tls::with(|tcx| { + TyGenerator(did, substs, movability) => ty::tls::with(|tcx| { let upvar_tys = substs.upvar_tys(did, tcx); - write!(f, "[generator")?; + let witness = substs.witness(did, tcx); + if movability == hir::GeneratorMovability::Movable { + write!(f, "[generator")?; + } else { + write!(f, "[static generator")?; + } if let Some(node_id) = tcx.hir.as_local_node_id(did) { write!(f, "@{:?}", tcx.hir.span(node_id))?; @@ -1084,7 +1183,7 @@ define_print! { })? } else { // cross-crate closure types should only be - // visible in trans bug reports, I imagine. + // visible in codegen bug reports, I imagine. write!(f, "@{:?}", did)?; let mut sep = " "; for (index, upvar_ty) in upvar_tys.enumerate() { @@ -1095,8 +1194,11 @@ define_print! 
{ } } - print!(f, cx, write(" "), print(interior), write("]")) + print!(f, cx, write(" "), print(witness), write("]")) }), + TyGeneratorWitness(types) => { + ty::tls::with(|tcx| cx.in_binder(f, tcx, &types, tcx.lift(&types))) + } TyClosure(did, substs) => ty::tls::with(|tcx| { let upvar_tys = substs.upvar_tys(did, tcx); write!(f, "[closure")?; @@ -1121,7 +1223,7 @@ define_print! { })? } else { // cross-crate closure types should only be - // visible in trans bug reports, I imagine. + // visible in codegen bug reports, I imagine. write!(f, "@{:?}", did)?; let mut sep = " "; for (index, upvar_ty) in upvar_tys.enumerate() { @@ -1137,15 +1239,12 @@ define_print! { TyArray(ty, sz) => { print!(f, cx, write("["), print(ty), write("; "))?; match sz.val { - ConstVal::Integral(ConstInt::Usize(sz)) => { - write!(f, "{}", sz)?; - } - ConstVal::Unevaluated(_def_id, substs) => { - write!(f, "", &substs[..])?; - } - _ => { - write!(f, "{:?}", sz)?; + ConstValue::Unevaluated(_def_id, _substs) => { + write!(f, "_")?; } + _ => ty::tls::with(|tcx| { + write!(f, "{}", sz.unwrap_usize(tcx)) + })?, } write!(f, "]") } @@ -1188,14 +1287,6 @@ define_print! { } } -define_print! { - ('tcx) ty::EquatePredicate<'tcx>, (self, f, cx) { - display { - print!(f, cx, print(self.0), write(" == "), print(self.1)) - } - } -} - define_print! { ('tcx) ty::SubtypePredicate<'tcx>, (self, f, cx) { display { @@ -1239,7 +1330,7 @@ define_print! { // parameterized(f, self.substs, self.item_def_id, &[]) // (which currently ICEs). let (trait_ref, item_name) = ty::tls::with(|tcx| - (self.trait_ref(tcx), tcx.associated_item(self.item_def_id).name) + (self.trait_ref(tcx), tcx.associated_item(self.item_def_id).ident) ); print!(f, cx, print_debug(trait_ref), write("::{}", item_name)) } @@ -1263,7 +1354,6 @@ define_print! 
{ display { match *self { ty::Predicate::Trait(ref data) => data.print(f, cx), - ty::Predicate::Equate(ref predicate) => predicate.print(f, cx), ty::Predicate::Subtype(ref predicate) => predicate.print(f, cx), ty::Predicate::RegionOutlives(ref predicate) => predicate.print(f, cx), ty::Predicate::TypeOutlives(ref predicate) => predicate.print(f, cx), @@ -1288,7 +1378,6 @@ define_print! { debug { match *self { ty::Predicate::Trait(ref a) => a.print(f, cx), - ty::Predicate::Equate(ref pair) => pair.print(f, cx), ty::Predicate::Subtype(ref pair) => pair.print(f, cx), ty::Predicate::RegionOutlives(ref pair) => pair.print(f, cx), ty::Predicate::TypeOutlives(ref pair) => pair.print(f, cx), diff --git a/src/librustc/util/profiling.rs b/src/librustc/util/profiling.rs new file mode 100644 index 000000000000..447b75e547f0 --- /dev/null +++ b/src/librustc/util/profiling.rs @@ -0,0 +1,248 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use session::config::Options; + +use std::fs; +use std::io::{self, StdoutLock, Write}; +use std::time::Instant; + +macro_rules! 
define_categories { + ($($name:ident,)*) => { + #[derive(Clone, Copy, Debug, PartialEq, Eq)] + pub enum ProfileCategory { + $($name),* + } + + #[allow(bad_style)] + struct Categories { + $($name: T),* + } + + impl Categories { + fn new() -> Categories { + Categories { + $($name: T::default()),* + } + } + } + + impl Categories { + fn get(&self, category: ProfileCategory) -> &T { + match category { + $(ProfileCategory::$name => &self.$name),* + } + } + + fn set(&mut self, category: ProfileCategory, value: T) { + match category { + $(ProfileCategory::$name => self.$name = value),* + } + } + } + + struct CategoryData { + times: Categories, + query_counts: Categories<(u64, u64)>, + } + + impl CategoryData { + fn new() -> CategoryData { + CategoryData { + times: Categories::new(), + query_counts: Categories::new(), + } + } + + fn print(&self, lock: &mut StdoutLock) { + writeln!(lock, "| Phase | Time (ms) | Queries | Hits (%) |") + .unwrap(); + writeln!(lock, "| ---------------- | -------------- | -------------- | -------- |") + .unwrap(); + + $( + let (hits, total) = self.query_counts.$name; + let (hits, total) = if total > 0 { + (format!("{:.2}", + (((hits as f32) / (total as f32)) * 100.0)), total.to_string()) + } else { + ("".into(), "".into()) + }; + + writeln!( + lock, + "| {0: <16} | {1: <14} | {2: <14} | {3: <8} |", + stringify!($name), + self.times.$name / 1_000_000, + total, + hits + ).unwrap(); + )* + } + + fn json(&self) -> String { + let mut json = String::from("["); + + $( + let (hits, total) = self.query_counts.$name; + + json.push_str(&format!( + "{{ \"category\": {}, \"time_ms\": {}, + \"query_count\": {}, \"query_hits\": {} }}", + stringify!($name), + self.times.$name / 1_000_000, + total, + format!("{:.2}", (((hits as f32) / (total as f32)) * 100.0)) + )); + )* + + json.push(']'); + + json + } + } + } +} + +define_categories! 
{ + Parsing, + Expansion, + TypeChecking, + BorrowChecking, + Codegen, + Linking, + Other, +} + +pub struct SelfProfiler { + timer_stack: Vec, + data: CategoryData, + current_timer: Instant, +} + +impl SelfProfiler { + pub fn new() -> SelfProfiler { + let mut profiler = SelfProfiler { + timer_stack: Vec::new(), + data: CategoryData::new(), + current_timer: Instant::now(), + }; + + profiler.start_activity(ProfileCategory::Other); + + profiler + } + + pub fn start_activity(&mut self, category: ProfileCategory) { + match self.timer_stack.last().cloned() { + None => { + self.current_timer = Instant::now(); + }, + Some(current_category) if current_category == category => { + //since the current category is the same as the new activity's category, + //we don't need to do anything with the timer, we just need to push it on the stack + } + Some(current_category) => { + let elapsed = self.stop_timer(); + + //record the current category's time + let new_time = self.data.times.get(current_category) + elapsed; + self.data.times.set(current_category, new_time); + } + } + + //push the new category + self.timer_stack.push(category); + } + + pub fn record_query(&mut self, category: ProfileCategory) { + let (hits, total) = *self.data.query_counts.get(category); + self.data.query_counts.set(category, (hits, total + 1)); + } + + pub fn record_query_hit(&mut self, category: ProfileCategory) { + let (hits, total) = *self.data.query_counts.get(category); + self.data.query_counts.set(category, (hits + 1, total)); + } + + pub fn end_activity(&mut self, category: ProfileCategory) { + match self.timer_stack.pop() { + None => bug!("end_activity() was called but there was no running activity"), + Some(c) => + assert!( + c == category, + "end_activity() was called but a different activity was running"), + } + + //check if the new running timer is in the same category as this one + //if it is, we don't need to do anything + if let Some(c) = self.timer_stack.last() { + if *c == category { + 
return; + } + } + + //the new timer is different than the previous, + //so record the elapsed time and start a new timer + let elapsed = self.stop_timer(); + let new_time = self.data.times.get(category) + elapsed; + self.data.times.set(category, new_time); + } + + fn stop_timer(&mut self) -> u64 { + let elapsed = self.current_timer.elapsed(); + + self.current_timer = Instant::now(); + + (elapsed.as_secs() * 1_000_000_000) + (elapsed.subsec_nanos() as u64) + } + + pub fn print_results(&mut self, opts: &Options) { + self.end_activity(ProfileCategory::Other); + + assert!( + self.timer_stack.is_empty(), + "there were timers running when print_results() was called"); + + let out = io::stdout(); + let mut lock = out.lock(); + + let crate_name = + opts.crate_name + .as_ref() + .map(|n| format!(" for {}", n)) + .unwrap_or_default(); + + writeln!(lock, "Self profiling results{}:", crate_name).unwrap(); + writeln!(lock).unwrap(); + + self.data.print(&mut lock); + + writeln!(lock).unwrap(); + writeln!(lock, "Optimization level: {:?}", opts.optimize).unwrap(); + + let incremental = if opts.incremental.is_some() { "on" } else { "off" }; + writeln!(lock, "Incremental: {}", incremental).unwrap(); + } + + pub fn save_results(&self, opts: &Options) { + let category_data = self.data.json(); + let compilation_options = + format!("{{ \"optimization_level\": \"{:?}\", \"incremental\": {} }}", + opts.optimize, + if opts.incremental.is_some() { "true" } else { "false" }); + + let json = format!("{{ \"category_data\": {}, \"compilation_options\": {} }}", + category_data, + compilation_options); + + fs::write("self_profiler_results.json", json).unwrap(); + } +} diff --git a/src/librustc_trans/time_graph.rs b/src/librustc/util/time_graph.rs similarity index 100% rename from src/librustc_trans/time_graph.rs rename to src/librustc/util/time_graph.rs diff --git a/src/librustc_allocator/Cargo.toml b/src/librustc_allocator/Cargo.toml index e3d1d8e32c4b..83a918f2af83 100644 --- 
a/src/librustc_allocator/Cargo.toml +++ b/src/librustc_allocator/Cargo.toml @@ -10,6 +10,9 @@ test = false [dependencies] rustc = { path = "../librustc" } +rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } +rustc_target = { path = "../librustc_target" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } +log = "0.4" diff --git a/src/librustc_allocator/expand.rs b/src/librustc_allocator/expand.rs index 352184c1efa7..676dbeeeeb00 100644 --- a/src/librustc_allocator/expand.rs +++ b/src/librustc_allocator/expand.rs @@ -9,37 +9,46 @@ // except according to those terms. use rustc::middle::allocator::AllocatorKind; +use rustc_data_structures::small_vec::OneVector; use rustc_errors; -use syntax::abi::Abi; -use syntax::ast::{Crate, Attribute, LitKind, StrStyle, ExprKind}; -use syntax::ast::{Unsafety, Constness, Generics, Mutability, Ty, Mac, Arg}; -use syntax::ast::{self, Ident, Item, ItemKind, TyKind, Visibility, Expr}; -use syntax::attr; -use syntax::codemap::dummy_spanned; -use syntax::codemap::{ExpnInfo, NameAndSpan, MacroAttribute}; -use syntax::ext::base::ExtCtxt; -use syntax::ext::base::Resolver; -use syntax::ext::build::AstBuilder; -use syntax::ext::expand::ExpansionConfig; -use syntax::ext::hygiene::{Mark, SyntaxContext}; -use syntax::fold::{self, Folder}; -use syntax::parse::ParseSess; -use syntax::ptr::P; -use syntax::symbol::Symbol; -use syntax::util::small_vector::SmallVector; -use syntax_pos::{Span, DUMMY_SP}; +use syntax::{ + ast::{ + self, Arg, Attribute, Crate, Expr, FnHeader, Generics, Ident, Item, ItemKind, + Mac, Mod, Mutability, Ty, TyKind, Unsafety, VisibilityKind, + }, + attr, + codemap::{ + respan, ExpnInfo, MacroAttribute, + }, + ext::{ + base::{ExtCtxt, Resolver}, + build::AstBuilder, + expand::ExpansionConfig, + hygiene::{self, Mark, SyntaxContext}, + }, + fold::{self, Folder}, + parse::ParseSess, + ptr::P, + symbol::Symbol +}; +use syntax_pos::Span; use 
{AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS}; -pub fn modify(sess: &ParseSess, - resolver: &mut Resolver, - krate: Crate, - handler: &rustc_errors::Handler) -> ast::Crate { +pub fn modify( + sess: &ParseSess, + resolver: &mut dyn Resolver, + krate: Crate, + crate_name: String, + handler: &rustc_errors::Handler, +) -> ast::Crate { ExpandAllocatorDirectives { handler, sess, resolver, found: false, + crate_name: Some(crate_name), + in_submod: -1, // -1 to account for the "root" module }.fold_crate(krate) } @@ -47,72 +56,120 @@ struct ExpandAllocatorDirectives<'a> { found: bool, handler: &'a rustc_errors::Handler, sess: &'a ParseSess, - resolver: &'a mut Resolver, + resolver: &'a mut dyn Resolver, + crate_name: Option, + + // For now, we disallow `global_allocator` in submodules because hygiene is hard. Keep track of + // whether we are in a submodule or not. If `in_submod > 0` we are in a submodule. + in_submod: isize, } impl<'a> Folder for ExpandAllocatorDirectives<'a> { - fn fold_item(&mut self, item: P) -> SmallVector> { + fn fold_item(&mut self, item: P) -> OneVector> { + debug!("in submodule {}", self.in_submod); + let name = if attr::contains_name(&item.attrs, "global_allocator") { "global_allocator" } else { - return fold::noop_fold_item(item, self) + return fold::noop_fold_item(item, self); }; match item.node { ItemKind::Static(..) 
=> {} _ => { - self.handler.span_err(item.span, "allocators must be statics"); - return SmallVector::one(item) + self.handler + .span_err(item.span, "allocators must be statics"); + return OneVector::one(item); } } + if self.in_submod > 0 { + self.handler + .span_err(item.span, "`global_allocator` cannot be used in submodules"); + return OneVector::one(item); + } + if self.found { - self.handler.span_err(item.span, "cannot define more than one \ - #[global_allocator]"); - return SmallVector::one(item) + self.handler + .span_err(item.span, "cannot define more than one #[global_allocator]"); + return OneVector::one(item); } self.found = true; + // Create a fresh Mark for the new macro expansion we are about to do let mark = Mark::fresh(Mark::root()); mark.set_expn_info(ExpnInfo { - call_site: DUMMY_SP, - callee: NameAndSpan { - format: MacroAttribute(Symbol::intern(name)), - span: None, - allow_internal_unstable: true, - allow_internal_unsafe: false, - } + call_site: item.span, // use the call site of the static + def_site: None, + format: MacroAttribute(Symbol::intern(name)), + allow_internal_unstable: true, + allow_internal_unsafe: false, + local_inner_macros: false, + edition: hygiene::default_edition(), }); + + // Tie the span to the macro expansion info we just created let span = item.span.with_ctxt(SyntaxContext::empty().apply_mark(mark)); - let ecfg = ExpansionConfig::default(name.to_string()); + + // Create an expansion config + let ecfg = ExpansionConfig::default(self.crate_name.take().unwrap()); + + // Generate a bunch of new items using the AllocFnFactory let mut f = AllocFnFactory { span, kind: AllocatorKind::Global, global: item.ident, - alloc: Ident::from_str("alloc"), + core: Ident::from_str("core"), cx: ExtCtxt::new(self.sess, ecfg, self.resolver), }; - let super_path = f.cx.path(f.span, vec![ - Ident::from_str("super"), - f.global, - ]); + + // We will generate a new submodule. 
To `use` the static from that module, we need to get + // the `super::...` path. + let super_path = f.cx.path(f.span, vec![Ident::from_str("super"), f.global]); + + // Generate the items in the submodule let mut items = vec![ - f.cx.item_extern_crate(f.span, f.alloc), - f.cx.item_use_simple(f.span, Visibility::Inherited, super_path), + // import `core` to use allocators + f.cx.item_extern_crate(f.span, f.core), + // `use` the `global_allocator` in `super` + f.cx.item_use_simple( + f.span, + respan(f.span.shrink_to_lo(), VisibilityKind::Inherited), + super_path, + ), ]; - for method in ALLOCATOR_METHODS { - items.push(f.allocator_fn(method)); - } + + // Add the allocator methods to the submodule + items.extend( + ALLOCATOR_METHODS + .iter() + .map(|method| f.allocator_fn(method)), + ); + + // Generate the submodule itself let name = f.kind.fn_name("allocator_abi"); let allocator_abi = Ident::with_empty_ctxt(Symbol::gensym(&name)); let module = f.cx.item_mod(span, span, allocator_abi, Vec::new(), items); let module = f.cx.monotonic_expander().fold_item(module).pop().unwrap(); - let mut ret = SmallVector::new(); + // Return the item and new submodule + let mut ret = OneVector::with_capacity(2); ret.push(item); ret.push(module); - return ret + + return ret; } + // If we enter a submodule, take note. + fn fold_mod(&mut self, m: Mod) -> Mod { + debug!("enter submodule"); + self.in_submod += 1; + let ret = fold::noop_fold_mod(m, self); + self.in_submod -= 1; + debug!("exit submodule"); + ret + } + + // `fold_mac` is disabled by default. Enable it here. 
fn fold_mac(&mut self, mac: Mac) -> Mac { fold::noop_fold_mac(mac, self) } @@ -122,7 +179,7 @@ struct AllocFnFactory<'a> { span: Span, kind: AllocatorKind, global: Ident, - alloc: Ident, + core: Ident, cx: ExtCtxt<'a>, } @@ -135,62 +192,67 @@ impl<'a> AllocFnFactory<'a> { i += 1; name }; - let args = method.inputs.iter().map(|ty| { - self.arg_ty(ty, &mut abi_args, mk) - }).collect(); + let args = method + .inputs + .iter() + .map(|ty| self.arg_ty(ty, &mut abi_args, mk)) + .collect(); let result = self.call_allocator(method.name, args); - let (output_ty, output_expr) = - self.ret_ty(&method.output, &mut abi_args, mk, result); - let kind = ItemKind::Fn(self.cx.fn_decl(abi_args, output_ty), - Unsafety::Unsafe, - dummy_spanned(Constness::NotConst), - Abi::Rust, - Generics::default(), - self.cx.block_expr(output_expr)); - self.cx.item(self.span, - Ident::from_str(&self.kind.fn_name(method.name)), - self.attrs(), - kind) + let (output_ty, output_expr) = self.ret_ty(&method.output, result); + let kind = ItemKind::Fn( + self.cx.fn_decl(abi_args, ast::FunctionRetTy::Ty(output_ty)), + FnHeader { + unsafety: Unsafety::Unsafe, + ..FnHeader::default() + }, + Generics::default(), + self.cx.block_expr(output_expr), + ); + self.cx.item( + self.span, + Ident::from_str(&self.kind.fn_name(method.name)), + self.attrs(), + kind, + ) } fn call_allocator(&self, method: &str, mut args: Vec>) -> P { - let method = self.cx.path(self.span, vec![ - self.alloc, - Ident::from_str("heap"), - Ident::from_str("Alloc"), - Ident::from_str(method), - ]); + let method = self.cx.path( + self.span, + vec![ + self.core, + Ident::from_str("alloc"), + Ident::from_str("GlobalAlloc"), + Ident::from_str(method), + ], + ); let method = self.cx.expr_path(method); let allocator = self.cx.path_ident(self.span, self.global); let allocator = self.cx.expr_path(allocator); let allocator = self.cx.expr_addr_of(self.span, allocator); - let allocator = self.cx.expr_mut_addr_of(self.span, allocator); args.insert(0, 
allocator); self.cx.expr_call(self.span, method, args) } fn attrs(&self) -> Vec { - let key = Symbol::intern("linkage"); - let value = LitKind::Str(Symbol::intern("external"), StrStyle::Cooked); - let linkage = self.cx.meta_name_value(self.span, key, value); - let no_mangle = Symbol::intern("no_mangle"); let no_mangle = self.cx.meta_word(self.span, no_mangle); let special = Symbol::intern("rustc_std_internal_symbol"); let special = self.cx.meta_word(self.span, special); vec![ - self.cx.attribute(self.span, linkage), self.cx.attribute(self.span, no_mangle), self.cx.attribute(self.span, special), ] } - fn arg_ty(&self, - ty: &AllocatorTy, - args: &mut Vec, - ident: &mut FnMut() -> Ident) -> P { + fn arg_ty( + &self, + ty: &AllocatorTy, + args: &mut Vec, + ident: &mut dyn FnMut() -> Ident, + ) -> P { match *ty { AllocatorTy::Layout => { let usize = self.cx.path_ident(self.span, Ident::from_str("usize")); @@ -200,301 +262,68 @@ impl<'a> AllocFnFactory<'a> { args.push(self.cx.arg(self.span, size, ty_usize.clone())); args.push(self.cx.arg(self.span, align, ty_usize)); - let layout_new = self.cx.path(self.span, vec![ - self.alloc, - Ident::from_str("heap"), - Ident::from_str("Layout"), - Ident::from_str("from_size_align_unchecked"), - ]); + let layout_new = self.cx.path( + self.span, + vec![ + self.core, + Ident::from_str("alloc"), + Ident::from_str("Layout"), + Ident::from_str("from_size_align_unchecked"), + ], + ); let layout_new = self.cx.expr_path(layout_new); let size = self.cx.expr_ident(self.span, size); let align = self.cx.expr_ident(self.span, align); - let layout = self.cx.expr_call(self.span, - layout_new, - vec![size, align]); + let layout = self.cx.expr_call(self.span, layout_new, vec![size, align]); layout } - AllocatorTy::LayoutRef => { - let ident = ident(); - args.push(self.cx.arg(self.span, ident, self.ptr_u8())); - - // Convert our `arg: *const u8` via: - // - // &*(arg as *const Layout) - let expr = self.cx.expr_ident(self.span, ident); - let expr = 
self.cx.expr_cast(self.span, expr, self.layout_ptr()); - let expr = self.cx.expr_deref(self.span, expr); - self.cx.expr_addr_of(self.span, expr) - } - - AllocatorTy::AllocErr => { - // We're creating: - // - // (*(arg as *const AllocErr)).clone() - let ident = ident(); - args.push(self.cx.arg(self.span, ident, self.ptr_u8())); - let expr = self.cx.expr_ident(self.span, ident); - let expr = self.cx.expr_cast(self.span, expr, self.alloc_err_ptr()); - let expr = self.cx.expr_deref(self.span, expr); - self.cx.expr_method_call( - self.span, - expr, - Ident::from_str("clone"), - Vec::new() - ) - } - AllocatorTy::Ptr => { let ident = ident(); args.push(self.cx.arg(self.span, ident, self.ptr_u8())); + let arg = self.cx.expr_ident(self.span, ident); + self.cx.expr_cast(self.span, arg, self.ptr_u8()) + } + + AllocatorTy::Usize => { + let ident = ident(); + args.push(self.cx.arg(self.span, ident, self.usize())); self.cx.expr_ident(self.span, ident) } - AllocatorTy::ResultPtr | - AllocatorTy::ResultExcess | - AllocatorTy::ResultUnit | - AllocatorTy::Bang | - AllocatorTy::UsizePair | - AllocatorTy::Unit => { + AllocatorTy::ResultPtr | AllocatorTy::Unit => { panic!("can't convert AllocatorTy to an argument") } } } - fn ret_ty(&self, - ty: &AllocatorTy, - args: &mut Vec, - ident: &mut FnMut() -> Ident, - expr: P) -> (P, P) - { + fn ret_ty(&self, ty: &AllocatorTy, expr: P) -> (P, P) { match *ty { - AllocatorTy::UsizePair => { - // We're creating: - // - // let arg = #expr; - // *min = arg.0; - // *max = arg.1; - - let min = ident(); - let max = ident(); - - args.push(self.cx.arg(self.span, min, self.ptr_usize())); - args.push(self.cx.arg(self.span, max, self.ptr_usize())); - - let ident = ident(); - let stmt = self.cx.stmt_let(self.span, false, ident, expr); - let min = self.cx.expr_ident(self.span, min); - let max = self.cx.expr_ident(self.span, max); - let layout = self.cx.expr_ident(self.span, ident); - let assign_min = self.cx.expr(self.span, ExprKind::Assign( - 
self.cx.expr_deref(self.span, min), - self.cx.expr_tup_field_access(self.span, layout.clone(), 0), - )); - let assign_min = self.cx.stmt_semi(assign_min); - let assign_max = self.cx.expr(self.span, ExprKind::Assign( - self.cx.expr_deref(self.span, max), - self.cx.expr_tup_field_access(self.span, layout.clone(), 1), - )); - let assign_max = self.cx.stmt_semi(assign_max); - - let stmts = vec![stmt, assign_min, assign_max]; - let block = self.cx.block(self.span, stmts); - let ty_unit = self.cx.ty(self.span, TyKind::Tup(Vec::new())); - (ty_unit, self.cx.expr_block(block)) - } - - AllocatorTy::ResultExcess => { - // We're creating: - // - // match #expr { - // Ok(ptr) => { - // *excess = ptr.1; - // ptr.0 - // } - // Err(e) => { - // ptr::write(err_ptr, e); - // 0 as *mut u8 - // } - // } - - let excess_ptr = ident(); - args.push(self.cx.arg(self.span, excess_ptr, self.ptr_usize())); - let excess_ptr = self.cx.expr_ident(self.span, excess_ptr); - - let err_ptr = ident(); - args.push(self.cx.arg(self.span, err_ptr, self.ptr_u8())); - let err_ptr = self.cx.expr_ident(self.span, err_ptr); - let err_ptr = self.cx.expr_cast(self.span, - err_ptr, - self.alloc_err_ptr()); - - let name = ident(); - let ok_expr = { - let ptr = self.cx.expr_ident(self.span, name); - let write = self.cx.expr(self.span, ExprKind::Assign( - self.cx.expr_deref(self.span, excess_ptr), - self.cx.expr_tup_field_access(self.span, ptr.clone(), 1), - )); - let write = self.cx.stmt_semi(write); - let ret = self.cx.expr_tup_field_access(self.span, - ptr.clone(), - 0); - let ret = self.cx.stmt_expr(ret); - let block = self.cx.block(self.span, vec![write, ret]); - self.cx.expr_block(block) - }; - let pat = self.cx.pat_ident(self.span, name); - let ok = self.cx.path_ident(self.span, Ident::from_str("Ok")); - let ok = self.cx.pat_tuple_struct(self.span, ok, vec![pat]); - let ok = self.cx.arm(self.span, vec![ok], ok_expr); - - let name = ident(); - let err_expr = { - let err = self.cx.expr_ident(self.span, name); 
- let write = self.cx.path(self.span, vec![ - self.alloc, - Ident::from_str("heap"), - Ident::from_str("__core"), - Ident::from_str("ptr"), - Ident::from_str("write"), - ]); - let write = self.cx.expr_path(write); - let write = self.cx.expr_call(self.span, write, - vec![err_ptr, err]); - let write = self.cx.stmt_semi(write); - let null = self.cx.expr_usize(self.span, 0); - let null = self.cx.expr_cast(self.span, null, self.ptr_u8()); - let null = self.cx.stmt_expr(null); - let block = self.cx.block(self.span, vec![write, null]); - self.cx.expr_block(block) - }; - let pat = self.cx.pat_ident(self.span, name); - let err = self.cx.path_ident(self.span, Ident::from_str("Err")); - let err = self.cx.pat_tuple_struct(self.span, err, vec![pat]); - let err = self.cx.arm(self.span, vec![err], err_expr); - - let expr = self.cx.expr_match(self.span, expr, vec![ok, err]); - (self.ptr_u8(), expr) - } - AllocatorTy::ResultPtr => { // We're creating: // - // match #expr { - // Ok(ptr) => ptr, - // Err(e) => { - // ptr::write(err_ptr, e); - // 0 as *mut u8 - // } - // } + // #expr as *mut u8 - let err_ptr = ident(); - args.push(self.cx.arg(self.span, err_ptr, self.ptr_u8())); - let err_ptr = self.cx.expr_ident(self.span, err_ptr); - let err_ptr = self.cx.expr_cast(self.span, - err_ptr, - self.alloc_err_ptr()); - - let name = ident(); - let ok_expr = self.cx.expr_ident(self.span, name); - let pat = self.cx.pat_ident(self.span, name); - let ok = self.cx.path_ident(self.span, Ident::from_str("Ok")); - let ok = self.cx.pat_tuple_struct(self.span, ok, vec![pat]); - let ok = self.cx.arm(self.span, vec![ok], ok_expr); - - let name = ident(); - let err_expr = { - let err = self.cx.expr_ident(self.span, name); - let write = self.cx.path(self.span, vec![ - self.alloc, - Ident::from_str("heap"), - Ident::from_str("__core"), - Ident::from_str("ptr"), - Ident::from_str("write"), - ]); - let write = self.cx.expr_path(write); - let write = self.cx.expr_call(self.span, write, - vec![err_ptr, 
err]); - let write = self.cx.stmt_semi(write); - let null = self.cx.expr_usize(self.span, 0); - let null = self.cx.expr_cast(self.span, null, self.ptr_u8()); - let null = self.cx.stmt_expr(null); - let block = self.cx.block(self.span, vec![write, null]); - self.cx.expr_block(block) - }; - let pat = self.cx.pat_ident(self.span, name); - let err = self.cx.path_ident(self.span, Ident::from_str("Err")); - let err = self.cx.pat_tuple_struct(self.span, err, vec![pat]); - let err = self.cx.arm(self.span, vec![err], err_expr); - - let expr = self.cx.expr_match(self.span, expr, vec![ok, err]); + let expr = self.cx.expr_cast(self.span, expr, self.ptr_u8()); (self.ptr_u8(), expr) } - AllocatorTy::ResultUnit => { - // We're creating: - // - // #expr.is_ok() as u8 + AllocatorTy::Unit => (self.cx.ty(self.span, TyKind::Tup(Vec::new())), expr), - let cast = self.cx.expr_method_call( - self.span, - expr, - Ident::from_str("is_ok"), - Vec::new() - ); - let u8 = self.cx.path_ident(self.span, Ident::from_str("u8")); - let u8 = self.cx.ty_path(u8); - let cast = self.cx.expr_cast(self.span, cast, u8.clone()); - (u8, cast) - } - - AllocatorTy::Bang => { - (self.cx.ty(self.span, TyKind::Never), expr) - } - - AllocatorTy::Unit => { - (self.cx.ty(self.span, TyKind::Tup(Vec::new())), expr) - } - - AllocatorTy::AllocErr | - AllocatorTy::Layout | - AllocatorTy::LayoutRef | - AllocatorTy::Ptr => { + AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => { panic!("can't convert AllocatorTy to an output") } } } + fn usize(&self) -> P { + let usize = self.cx.path_ident(self.span, Ident::from_str("usize")); + self.cx.ty_path(usize) + } + fn ptr_u8(&self) -> P { let u8 = self.cx.path_ident(self.span, Ident::from_str("u8")); let ty_u8 = self.cx.ty_path(u8); self.cx.ty_ptr(self.span, ty_u8, Mutability::Mutable) } - - fn ptr_usize(&self) -> P { - let usize = self.cx.path_ident(self.span, Ident::from_str("usize")); - let ty_usize = self.cx.ty_path(usize); - self.cx.ty_ptr(self.span, ty_usize, 
Mutability::Mutable) - } - - fn layout_ptr(&self) -> P { - let layout = self.cx.path(self.span, vec![ - self.alloc, - Ident::from_str("heap"), - Ident::from_str("Layout"), - ]); - let layout = self.cx.ty_path(layout); - self.cx.ty_ptr(self.span, layout, Mutability::Mutable) - } - - fn alloc_err_ptr(&self) -> P { - let err = self.cx.path(self.span, vec![ - self.alloc, - Ident::from_str("heap"), - Ident::from_str("AllocErr"), - ]); - let err = self.cx.ty_path(err); - self.cx.ty_ptr(self.span, err, Mutability::Mutable) - } } diff --git a/src/librustc_allocator/lib.rs b/src/librustc_allocator/lib.rs index e17fce5a2ec0..d020fe96335e 100644 --- a/src/librustc_allocator/lib.rs +++ b/src/librustc_allocator/lib.rs @@ -8,12 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![deny(warnings)] - +#![cfg_attr(not(stage0), feature(nll))] #![feature(rustc_private)] +#[macro_use] extern crate log; extern crate rustc; +extern crate rustc_data_structures; extern crate rustc_errors; +extern crate rustc_target; extern crate syntax; extern crate syntax_pos; @@ -25,24 +27,14 @@ pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[ inputs: &[AllocatorTy::Layout], output: AllocatorTy::ResultPtr, }, - AllocatorMethod { - name: "oom", - inputs: &[AllocatorTy::AllocErr], - output: AllocatorTy::Bang, - }, AllocatorMethod { name: "dealloc", inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout], output: AllocatorTy::Unit, }, - AllocatorMethod { - name: "usable_size", - inputs: &[AllocatorTy::LayoutRef], - output: AllocatorTy::UsizePair, - }, AllocatorMethod { name: "realloc", - inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout], + inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Usize], output: AllocatorTy::ResultPtr, }, AllocatorMethod { @@ -50,26 +42,6 @@ pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[ inputs: &[AllocatorTy::Layout], output: AllocatorTy::ResultPtr, }, - AllocatorMethod { - name: 
"alloc_excess", - inputs: &[AllocatorTy::Layout], - output: AllocatorTy::ResultExcess, - }, - AllocatorMethod { - name: "realloc_excess", - inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout], - output: AllocatorTy::ResultExcess, - }, - AllocatorMethod { - name: "grow_in_place", - inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout], - output: AllocatorTy::ResultUnit, - }, - AllocatorMethod { - name: "shrink_in_place", - inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Layout], - output: AllocatorTy::ResultUnit, - }, ]; pub struct AllocatorMethod { @@ -79,14 +51,9 @@ pub struct AllocatorMethod { } pub enum AllocatorTy { - AllocErr, - Bang, Layout, - LayoutRef, Ptr, - ResultExcess, ResultPtr, - ResultUnit, Unit, - UsizePair, + Usize, } diff --git a/src/librustc_apfloat/ieee.rs b/src/librustc_apfloat/ieee.rs index 3e76b60b84a1..87d59d2e763c 100644 --- a/src/librustc_apfloat/ieee.rs +++ b/src/librustc_apfloat/ieee.rs @@ -536,23 +536,21 @@ impl fmt::Display for IeeeFloat { // Check whether we should use scientific notation. let scientific = if width == 0 { true + } else if exp >= 0 { + // 765e3 --> 765000 + // ^^^ + // But we shouldn't make the number look more precise than it is. + exp as usize > width || digits + exp as usize > precision } else { - if exp >= 0 { - // 765e3 --> 765000 - // ^^^ - // But we shouldn't make the number look more precise than it is. - exp as usize > width || digits + exp as usize > precision + // Power of the most significant digit. + let msd = exp + (digits - 1) as ExpInt; + if msd >= 0 { + // 765e-2 == 7.65 + false } else { - // Power of the most significant digit. 
- let msd = exp + (digits - 1) as ExpInt; - if msd >= 0 { - // 765e-2 == 7.65 - false - } else { - // 765e-5 == 0.00765 - // ^ ^^ - -msd as usize > width - } + // 765e-5 == 0.00765 + // ^ ^^ + -msd as usize > width } }; @@ -702,7 +700,7 @@ impl Float for IeeeFloat { // exponent = 1..10 // significand = 1..1 IeeeFloat { - sig: [!0 & ((1 << S::PRECISION) - 1)], + sig: [(1 << S::PRECISION) - 1], exp: S::MAX_EXP, category: Category::Normal, sign: false, @@ -1434,7 +1432,7 @@ impl Float for IeeeFloat { let max_change = S::MAX_EXP as i32 - (S::MIN_EXP as i32 - sig_bits) + 1; // Clamp to one past the range ends to let normalize handle overflow. - let exp_change = cmp::min(cmp::max(exp as i32, (-max_change - 1)), max_change); + let exp_change = cmp::min(cmp::max(exp as i32, -max_change - 1), max_change); self.exp = self.exp.saturating_add(exp_change as ExpInt); self = self.normalize(round, Loss::ExactlyZero).value; if self.is_nan() { @@ -1507,10 +1505,11 @@ impl FloatConvert> for IeeeFloat { } // If this is a truncation, perform the shift. - let mut loss = Loss::ExactlyZero; - if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { - loss = sig::shift_right(&mut r.sig, &mut 0, -shift as usize); - } + let loss = if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { + sig::shift_right(&mut r.sig, &mut 0, -shift as usize) + } else { + Loss::ExactlyZero + }; // If this is an extension, perform the shift. if shift > 0 && (r.is_finite_non_zero() || r.category == Category::NaN) { @@ -1738,27 +1737,25 @@ impl IeeeFloat { bit_pos -= 4; if bit_pos >= 0 { r.sig[0] |= (hex_value as Limb) << bit_pos; - } else { - // If zero or one-half (the hexadecimal digit 8) are followed - // by non-zero, they're a little more than zero or one-half. 
- if let Some(ref mut loss) = loss { - if hex_value != 0 { - if *loss == Loss::ExactlyZero { - *loss = Loss::LessThanHalf; - } - if *loss == Loss::ExactlyHalf { - *loss = Loss::MoreThanHalf; - } + // If zero or one-half (the hexadecimal digit 8) are followed + // by non-zero, they're a little more than zero or one-half. + } else if let Some(ref mut loss) = loss { + if hex_value != 0 { + if *loss == Loss::ExactlyZero { + *loss = Loss::LessThanHalf; + } + if *loss == Loss::ExactlyHalf { + *loss = Loss::MoreThanHalf; } - } else { - loss = Some(match hex_value { - 0 => Loss::ExactlyZero, - 1...7 => Loss::LessThanHalf, - 8 => Loss::ExactlyHalf, - 9...15 => Loss::MoreThanHalf, - _ => unreachable!(), - }); } + } else { + loss = Some(match hex_value { + 0 => Loss::ExactlyZero, + 1..=7 => Loss::LessThanHalf, + 8 => Loss::ExactlyHalf, + 9..=15 => Loss::MoreThanHalf, + _ => unreachable!(), + }); } } else if c == 'p' || c == 'P' { if !any_digits { @@ -2309,24 +2306,14 @@ mod sig { /// One, not zero, based LSB. That is, returns 0 for a zeroed significand. pub(super) fn olsb(limbs: &[Limb]) -> usize { - for i in 0..limbs.len() { - if limbs[i] != 0 { - return i * LIMB_BITS + limbs[i].trailing_zeros() as usize + 1; - } - } - - 0 + limbs.iter().enumerate().find(|(_, &limb)| limb != 0).map_or(0, + |(i, limb)| i * LIMB_BITS + limb.trailing_zeros() as usize + 1) } /// One, not zero, based MSB. That is, returns 0 for a zeroed significand. pub(super) fn omsb(limbs: &[Limb]) -> usize { - for i in (0..limbs.len()).rev() { - if limbs[i] != 0 { - return (i + 1) * LIMB_BITS - limbs[i].leading_zeros() as usize; - } - } - - 0 + limbs.iter().enumerate().rfind(|(_, &limb)| limb != 0).map_or(0, + |(i, limb)| (i + 1) * LIMB_BITS - limb.leading_zeros() as usize) } /// Comparison (unsigned) of two significands. 
@@ -2378,7 +2365,7 @@ mod sig { limb = dst[i - jump]; if shift > 0 { limb <<= shift; - if i >= jump + 1 { + if i > jump { limb |= dst[i - jump - 1] >> (LIMB_BITS - shift); } } @@ -2448,7 +2435,7 @@ mod sig { let n = dst_limbs * LIMB_BITS - shift; if n < src_bits { let mask = (1 << (src_bits - n)) - 1; - dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << n % LIMB_BITS; + dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << (n % LIMB_BITS); } else if n > src_bits && src_bits % LIMB_BITS > 0 { dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1; } diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs index 3afc2f684009..d6e821d427d0 100644 --- a/src/librustc_apfloat/lib.rs +++ b/src/librustc_apfloat/lib.rs @@ -43,13 +43,10 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] #![forbid(unsafe_code)] -#![feature(i128_type)] -#![feature(slice_patterns)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(try_from)] - // See librustc_cratesio_shim/Cargo.toml for a comment explaining this. #[allow(unused_extern_crates)] extern crate rustc_cratesio_shim; diff --git a/src/librustc_apfloat/ppc.rs b/src/librustc_apfloat/ppc.rs index dec88eb62cc6..e662088e82fb 100644 --- a/src/librustc_apfloat/ppc.rs +++ b/src/librustc_apfloat/ppc.rs @@ -20,7 +20,7 @@ use std::ops::Neg; pub struct DoubleFloat(F, F); pub type DoubleDouble = DoubleFloat; -// These are legacy semantics for the Fallback, inaccrurate implementation of +// These are legacy semantics for the Fallback, inaccurate implementation of // IBM double-double, if the accurate DoubleDouble doesn't handle the // operation. It's equivalent to having an IEEE number with consecutive 106 // bits of mantissa and 11 bits of exponent. 
diff --git a/src/librustc_apfloat/tests/ieee.rs b/src/librustc_apfloat/tests/ieee.rs index aff2076e0383..6e06ea858efa 100644 --- a/src/librustc_apfloat/tests/ieee.rs +++ b/src/librustc_apfloat/tests/ieee.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(i128_type)] - #[macro_use] extern crate rustc_apfloat; @@ -2201,12 +2199,12 @@ fn is_finite_non_zero() { assert!(!Single::ZERO.is_finite_non_zero()); assert!(!(-Single::ZERO).is_finite_non_zero()); - // Test +/- qNaN. +/- dont mean anything with qNaN but paranoia can't hurt in + // Test +/- qNaN. +/- don't mean anything with qNaN but paranoia can't hurt in // this instance. assert!(!Single::NAN.is_finite_non_zero()); assert!(!(-Single::NAN).is_finite_non_zero()); - // Test +/- sNaN. +/- dont mean anything with sNaN but paranoia can't hurt in + // Test +/- sNaN. +/- don't mean anything with sNaN but paranoia can't hurt in // this instance. assert!(!Single::snan(None).is_finite_non_zero()); assert!(!(-Single::snan(None)).is_finite_non_zero()); diff --git a/src/librustc_asan/Cargo.toml b/src/librustc_asan/Cargo.toml index 8f8ef1cc4a01..34d8b75a5bfb 100644 --- a/src/librustc_asan/Cargo.toml +++ b/src/librustc_asan/Cargo.toml @@ -17,3 +17,4 @@ cmake = "0.1.18" alloc = { path = "../liballoc" } alloc_system = { path = "../liballoc_system" } core = { path = "../libcore" } +compiler_builtins = { path = "../rustc/compiler_builtins_shim" } diff --git a/src/librustc_asan/build.rs b/src/librustc_asan/build.rs index cb7721affe76..b8614c520e7c 100644 --- a/src/librustc_asan/build.rs +++ b/src/librustc_asan/build.rs @@ -18,7 +18,7 @@ use cmake::Config; fn main() { if let Some(llvm_config) = env::var_os("LLVM_CONFIG") { - let native = match sanitizer_lib_boilerplate("asan") { + let (native, target) = match sanitizer_lib_boilerplate("asan") { Ok(native) => native, _ => return, }; @@ -29,7 +29,7 @@ fn main() { .define("COMPILER_RT_BUILD_XRAY", "OFF") 
.define("LLVM_CONFIG_PATH", llvm_config) .out_dir(&native.out_dir) - .build_target("asan") + .build_target(&target) .build(); } println!("cargo:rerun-if-env-changed=LLVM_CONFIG"); diff --git a/src/librustc_asan/lib.rs b/src/librustc_asan/lib.rs index 3429e3bda0f6..b3ba86ad8a4b 100644 --- a/src/librustc_asan/lib.rs +++ b/src/librustc_asan/lib.rs @@ -10,8 +10,7 @@ #![sanitizer_runtime] #![feature(alloc_system)] -#![feature(allocator_api)] -#![feature(global_allocator)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(sanitizer_runtime)] #![feature(staged_api)] #![no_std] diff --git a/src/librustc_back/Cargo.toml b/src/librustc_back/Cargo.toml deleted file mode 100644 index d864c5bc6105..000000000000 --- a/src/librustc_back/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "rustc_back" -version = "0.0.0" - -[lib] -name = "rustc_back" -path = "lib.rs" -crate-type = ["dylib"] - -[dependencies] -syntax = { path = "../libsyntax" } -serialize = { path = "../libserialize" } -log = "0.4" -rand = "0.3" - -[features] -jemalloc = [] diff --git a/src/librustc_back/README.md b/src/librustc_back/README.md deleted file mode 100644 index bd99c687bb6a..000000000000 --- a/src/librustc_back/README.md +++ /dev/null @@ -1,6 +0,0 @@ -NB: This crate is part of the Rust compiler. For an overview of the -compiler as a whole, see -[the README.md file found in `librustc`](../librustc/README.md). - -`librustc_back` contains some very low-level details that are -specific to different LLVM targets and so forth. diff --git a/src/librustc_back/lib.rs b/src/librustc_back/lib.rs deleted file mode 100644 index 8bf60b091a7a..000000000000 --- a/src/librustc_back/lib.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Some stuff used by rustc that doesn't have many dependencies -//! -//! Originally extracted from rustc::back, which was nominally the -//! compiler 'backend', though LLVM is rustc's backend, so rustc_back -//! is really just odds-and-ends relating to code gen and linking. -//! This crate mostly exists to make rustc smaller, so we might put -//! more 'stuff' here in the future. It does not have a dependency on -//! rustc_llvm. -//! -//! FIXME: Split this into two crates: one that has deps on syntax, and -//! one that doesn't; the one that doesn't might get decent parallel -//! build speedups. - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] - -#![feature(box_syntax)] -#![feature(const_fn)] -#![feature(fs_read_write)] - -extern crate syntax; -extern crate rand; -extern crate serialize; -#[macro_use] extern crate log; - -extern crate serialize as rustc_serialize; // used by deriving - -pub mod target; - -use std::str::FromStr; - -use serialize::json::{Json, ToJson}; - -macro_rules! 
linker_flavor { - ($(($variant:ident, $string:expr),)+) => { - #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd, Hash, - RustcEncodable, RustcDecodable)] - pub enum LinkerFlavor { - $($variant,)+ - } - - impl LinkerFlavor { - pub const fn one_of() -> &'static str { - concat!("one of: ", $($string, " ",)+) - } - - pub fn from_str(s: &str) -> Option { - Some(match s { - $($string => LinkerFlavor::$variant,)+ - _ => return None, - }) - } - - pub fn desc(&self) -> &str { - match *self { - $(LinkerFlavor::$variant => $string,)+ - } - } - } - - impl ToJson for LinkerFlavor { - fn to_json(&self) -> Json { - self.desc().to_json() - } - } - } -} - -linker_flavor! { - (Em, "em"), - (Binaryen, "binaryen"), - (Gcc, "gcc"), - (Ld, "ld"), - (Msvc, "msvc"), -} - -#[derive(Clone, Copy, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)] -pub enum PanicStrategy { - Unwind, - Abort, -} - -impl PanicStrategy { - pub fn desc(&self) -> &str { - match *self { - PanicStrategy::Unwind => "unwind", - PanicStrategy::Abort => "abort", - } - } -} - -impl ToJson for PanicStrategy { - fn to_json(&self) -> Json { - match *self { - PanicStrategy::Abort => "abort".to_json(), - PanicStrategy::Unwind => "unwind".to_json(), - } - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)] -pub enum RelroLevel { - Full, - Partial, - Off, -} - -impl RelroLevel { - pub fn desc(&self) -> &str { - match *self { - RelroLevel::Full => "full", - RelroLevel::Partial => "partial", - RelroLevel::Off => "off", - } - } -} - -impl FromStr for RelroLevel { - type Err = (); - - fn from_str(s: &str) -> Result { - match s { - "full" => Ok(RelroLevel::Full), - "partial" => Ok(RelroLevel::Partial), - "off" => Ok(RelroLevel::Off), - _ => Err(()), - } - } -} - -impl ToJson for RelroLevel { - fn to_json(&self) -> Json { - match *self { - RelroLevel::Full => "full".to_json(), - RelroLevel::Partial => "partial".to_json(), - RelroLevel::Off => "off".to_json(), - } - } -} diff --git 
a/src/librustc_back/target/aarch64_unknown_fuchsia.rs b/src/librustc_back/target/aarch64_unknown_fuchsia.rs deleted file mode 100644 index 73cd9c927015..000000000000 --- a/src/librustc_back/target/aarch64_unknown_fuchsia.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use LinkerFlavor; -use target::{Target, TargetOptions, TargetResult}; - -pub fn target() -> TargetResult { - let mut base = super::fuchsia_base::opts(); - base.max_atomic_width = Some(128); - - Ok(Target { - llvm_target: "aarch64-unknown-fuchsia".to_string(), - target_endian: "little".to_string(), - target_pointer_width: "64".to_string(), - target_c_int_width: "32".to_string(), - data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), - arch: "aarch64".to_string(), - target_os: "fuchsia".to_string(), - target_env: "".to_string(), - target_vendor: "unknown".to_string(), - linker_flavor: LinkerFlavor::Gcc, - options: TargetOptions { - abi_blacklist: super::arm_base::abi_blacklist(), - .. base - }, - }) -} diff --git a/src/librustc_back/target/emscripten_base.rs b/src/librustc_back/target/emscripten_base.rs deleted file mode 100644 index bacada3f5ab0..000000000000 --- a/src/librustc_back/target/emscripten_base.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub fn cmd(name: &str) -> String { - if cfg!(windows) { - format!("{}.bat", name) - } else { - name.to_string() - } -} diff --git a/src/librustc_back/target/l4re_base.rs b/src/librustc_back/target/l4re_base.rs deleted file mode 100644 index 31d428d26683..000000000000 --- a/src/librustc_back/target/l4re_base.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use PanicStrategy; -use LinkerFlavor; -use target::{LinkArgs, TargetOptions}; -use std::default::Default; -use std::env; -use std::process::Command; - -// Use GCC to locate code for crt* libraries from the host, not from L4Re. Note -// that a few files also come from L4Re, for these, the function shouldn't be -// used. This uses GCC for the location of the file, but GCC is required for L4Re anyway. 
-fn get_path_or(filename: &str) -> String { - let child = Command::new("gcc") - .arg(format!("-print-file-name={}", filename)).output() - .expect("Failed to execute GCC"); - String::from_utf8(child.stdout) - .expect("Couldn't read path from GCC").trim().into() -} - -pub fn opts() -> Result { - let l4re_lib_path = env::var_os("L4RE_LIBDIR").ok_or("Unable to find L4Re \ - library directory: L4RE_LIBDIR not set.")?.into_string().unwrap(); - let mut pre_link_args = LinkArgs::new(); - pre_link_args.insert(LinkerFlavor::Ld, vec![ - format!("-T{}/main_stat.ld", l4re_lib_path), - "--defsym=__executable_start=0x01000000".to_string(), - "--defsym=__L4_KIP_ADDR__=0x6ffff000".to_string(), - format!("{}/crt1.o", l4re_lib_path), - format!("{}/crti.o", l4re_lib_path), - get_path_or("crtbeginT.o"), - ]); - let mut post_link_args = LinkArgs::new(); - post_link_args.insert(LinkerFlavor::Ld, vec![ - format!("{}/l4f/libpthread.a", l4re_lib_path), - format!("{}/l4f/libc_be_sig.a", l4re_lib_path), - format!("{}/l4f/libc_be_sig_noop.a", l4re_lib_path), - format!("{}/l4f/libc_be_socket_noop.a", l4re_lib_path), - format!("{}/l4f/libc_be_fs_noop.a", l4re_lib_path), - format!("{}/l4f/libc_be_sem_noop.a", l4re_lib_path), - format!("{}/l4f/libl4re-vfs.o.a", l4re_lib_path), - format!("{}/l4f/lib4re.a", l4re_lib_path), - format!("{}/l4f/lib4re-util.a", l4re_lib_path), - format!("{}/l4f/libc_support_misc.a", l4re_lib_path), - format!("{}/l4f/libsupc++.a", l4re_lib_path), - format!("{}/l4f/lib4shmc.a", l4re_lib_path), - format!("{}/l4f/lib4re-c.a", l4re_lib_path), - format!("{}/l4f/lib4re-c-util.a", l4re_lib_path), - get_path_or("libgcc_eh.a"), - format!("{}/l4f/libdl.a", l4re_lib_path), - "--start-group".to_string(), - format!("{}/l4f/libl4util.a", l4re_lib_path), - format!("{}/l4f/libc_be_l4re.a", l4re_lib_path), - format!("{}/l4f/libuc_c.a", l4re_lib_path), - format!("{}/l4f/libc_be_l4refile.a", l4re_lib_path), - "--end-group".to_string(), - format!("{}/l4f/libl4sys.a", l4re_lib_path), - 
"-gc-sections".to_string(), - get_path_or("crtend.o"), - format!("{}/crtn.o", l4re_lib_path), - ]); - - Ok(TargetOptions { - executables: true, - has_elf_tls: false, - exe_allocation_crate: None, - panic_strategy: PanicStrategy::Abort, - linker: "ld".to_string(), - pre_link_args, - post_link_args, - target_family: Some("unix".to_string()), - .. Default::default() - }) -} diff --git a/src/librustc_back/target/mips_unknown_linux_musl.rs b/src/librustc_back/target/mips_unknown_linux_musl.rs deleted file mode 100644 index 3f6b984272ed..000000000000 --- a/src/librustc_back/target/mips_unknown_linux_musl.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use LinkerFlavor; -use target::{Target, TargetOptions, TargetResult}; - -pub fn target() -> TargetResult { - Ok(Target { - llvm_target: "mips-unknown-linux-musl".to_string(), - target_endian: "big".to_string(), - target_pointer_width: "32".to_string(), - target_c_int_width: "32".to_string(), - data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), - arch: "mips".to_string(), - target_os: "linux".to_string(), - target_env: "musl".to_string(), - target_vendor: "unknown".to_string(), - linker_flavor: LinkerFlavor::Gcc, - options: TargetOptions { - cpu: "mips32r2".to_string(), - features: "+mips32r2,+soft-float".to_string(), - max_atomic_width: Some(32), - - // see #36994 - exe_allocation_crate: None, - - ..super::linux_base::opts() - } - }) -} diff --git a/src/librustc_back/target/mipsel_unknown_linux_musl.rs b/src/librustc_back/target/mipsel_unknown_linux_musl.rs deleted file mode 100644 index 464f0bfe4805..000000000000 --- 
a/src/librustc_back/target/mipsel_unknown_linux_musl.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use LinkerFlavor; -use target::{Target, TargetOptions, TargetResult}; - -pub fn target() -> TargetResult { - Ok(Target { - llvm_target: "mipsel-unknown-linux-musl".to_string(), - target_endian: "little".to_string(), - target_pointer_width: "32".to_string(), - target_c_int_width: "32".to_string(), - data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), - arch: "mips".to_string(), - target_os: "linux".to_string(), - target_env: "musl".to_string(), - target_vendor: "unknown".to_string(), - linker_flavor: LinkerFlavor::Gcc, - options: TargetOptions { - cpu: "mips32".to_string(), - features: "+mips32,+soft-float".to_string(), - max_atomic_width: Some(32), - - // see #36994 - exe_allocation_crate: None, - - ..super::linux_base::opts() - } - }) -} diff --git a/src/librustc_back/target/mod.rs b/src/librustc_back/target/mod.rs deleted file mode 100644 index b65b18d0caa8..000000000000 --- a/src/librustc_back/target/mod.rs +++ /dev/null @@ -1,996 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! [Flexible target specification.](https://github.com/rust-lang/rfcs/pull/131) -//! -//! Rust targets a wide variety of usecases, and in the interest of flexibility, -//! 
allows new target triples to be defined in configuration files. Most users -//! will not need to care about these, but this is invaluable when porting Rust -//! to a new platform, and allows for an unprecedented level of control over how -//! the compiler works. -//! -//! # Using custom targets -//! -//! A target triple, as passed via `rustc --target=TRIPLE`, will first be -//! compared against the list of built-in targets. This is to ease distributing -//! rustc (no need for configuration files) and also to hold these built-in -//! targets as immutable and sacred. If `TRIPLE` is not one of the built-in -//! targets, rustc will check if a file named `TRIPLE` exists. If it does, it -//! will be loaded as the target configuration. If the file does not exist, -//! rustc will search each directory in the environment variable -//! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will -//! be loaded. If no file is found in any of those directories, a fatal error -//! will be given. -//! -//! Projects defining their own targets should use -//! `--target=path/to/my-awesome-platform.json` instead of adding to -//! `RUST_TARGET_PATH`. -//! -//! # Defining a new target -//! -//! Targets are defined using [JSON](http://json.org/). The `Target` struct in -//! this module defines the format the JSON file should take, though each -//! underscore in the field names should be replaced with a hyphen (`-`) in the -//! JSON file. Some fields are required in every target specification, such as -//! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`, -//! `arch`, and `os`. In general, options passed to rustc with `-C` override -//! the target's settings, though `target-feature` and `link-args` will *add* -//! to the list specified by the target, rather than replace. 
- -use serialize::json::{Json, ToJson}; -use std::collections::BTreeMap; -use std::default::Default; -use syntax::abi::{Abi, lookup as lookup_abi}; - -use {LinkerFlavor, PanicStrategy, RelroLevel}; - -mod android_base; -mod apple_base; -mod apple_ios_base; -mod arm_base; -mod bitrig_base; -mod cloudabi_base; -mod dragonfly_base; -mod emscripten_base; -mod freebsd_base; -mod haiku_base; -mod linux_base; -mod linux_musl_base; -mod openbsd_base; -mod netbsd_base; -mod solaris_base; -mod windows_base; -mod windows_msvc_base; -mod thumb_base; -mod l4re_base; -mod fuchsia_base; -mod redox_base; - -pub type LinkArgs = BTreeMap>; -pub type TargetResult = Result; - -macro_rules! supported_targets { - ( $(($triple:expr, $module:ident),)+ ) => ( - $(mod $module;)* - - /// List of supported targets - const TARGETS: &'static [&'static str] = &[$($triple),*]; - - fn load_specific(target: &str) -> TargetResult { - match target { - $( - $triple => { - let mut t = $module::target()?; - t.options.is_builtin = true; - - // round-trip through the JSON parser to ensure at - // run-time that the parser works correctly - t = Target::from_json(t.to_json())?; - debug!("Got builtin target: {:?}", t); - Ok(t) - }, - )+ - _ => Err(format!("Unable to find target: {}", target)) - } - } - - pub fn get_targets() -> Box> { - Box::new(TARGETS.iter().filter_map(|t| -> Option { - load_specific(t) - .and(Ok(t.to_string())) - .ok() - })) - } - - #[cfg(test)] - mod test_json_encode_decode { - use serialize::json::ToJson; - use super::Target; - $(use super::$module;)* - - $( - #[test] - fn $module() { - // Grab the TargetResult struct. If we successfully retrieved - // a Target, then the test JSON encoding/decoding can run for this - // Target on this testing platform (i.e., checking the iOS targets - // only on a Mac test platform). 
- let _ = $module::target().map(|original| { - let as_json = original.to_json(); - let parsed = Target::from_json(as_json).unwrap(); - assert_eq!(original, parsed); - }); - } - )* - } - ) -} - -supported_targets! { - ("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu), - ("x86_64-unknown-linux-gnux32", x86_64_unknown_linux_gnux32), - ("i686-unknown-linux-gnu", i686_unknown_linux_gnu), - ("i586-unknown-linux-gnu", i586_unknown_linux_gnu), - ("mips-unknown-linux-gnu", mips_unknown_linux_gnu), - ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64), - ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64), - ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu), - ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu), - ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu), - ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu), - ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu), - ("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu), - ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi), - ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf), - ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi), - ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf), - ("armv4t-unknown-linux-gnueabi", armv4t_unknown_linux_gnueabi), - ("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi), - ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf), - ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf), - ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu), - - ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl), - ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl), - ("i686-unknown-linux-musl", i686_unknown_linux_musl), - ("i586-unknown-linux-musl", i586_unknown_linux_musl), - ("mips-unknown-linux-musl", mips_unknown_linux_musl), - ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl), - - ("mips-unknown-linux-uclibc", 
mips_unknown_linux_uclibc), - ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc), - - ("i686-linux-android", i686_linux_android), - ("x86_64-linux-android", x86_64_linux_android), - ("arm-linux-androideabi", arm_linux_androideabi), - ("armv7-linux-androideabi", armv7_linux_androideabi), - ("aarch64-linux-android", aarch64_linux_android), - - ("aarch64-unknown-freebsd", aarch64_unknown_freebsd), - ("i686-unknown-freebsd", i686_unknown_freebsd), - ("x86_64-unknown-freebsd", x86_64_unknown_freebsd), - - ("i686-unknown-dragonfly", i686_unknown_dragonfly), - ("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly), - - ("x86_64-unknown-bitrig", x86_64_unknown_bitrig), - - ("i686-unknown-openbsd", i686_unknown_openbsd), - ("x86_64-unknown-openbsd", x86_64_unknown_openbsd), - - ("i686-unknown-netbsd", i686_unknown_netbsd), - ("sparc64-unknown-netbsd", sparc64_unknown_netbsd), - ("x86_64-unknown-netbsd", x86_64_unknown_netbsd), - ("x86_64-rumprun-netbsd", x86_64_rumprun_netbsd), - - ("i686-unknown-haiku", i686_unknown_haiku), - ("x86_64-unknown-haiku", x86_64_unknown_haiku), - - ("x86_64-apple-darwin", x86_64_apple_darwin), - ("i686-apple-darwin", i686_apple_darwin), - - ("aarch64-unknown-fuchsia", aarch64_unknown_fuchsia), - ("x86_64-unknown-fuchsia", x86_64_unknown_fuchsia), - - ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc), - - ("x86_64-unknown-redox", x86_64_unknown_redox), - - ("i386-apple-ios", i386_apple_ios), - ("x86_64-apple-ios", x86_64_apple_ios), - ("aarch64-apple-ios", aarch64_apple_ios), - ("armv7-apple-ios", armv7_apple_ios), - ("armv7s-apple-ios", armv7s_apple_ios), - - ("x86_64-sun-solaris", x86_64_sun_solaris), - ("sparcv9-sun-solaris", sparcv9_sun_solaris), - - ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu), - ("i686-pc-windows-gnu", i686_pc_windows_gnu), - - ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc), - ("i686-pc-windows-msvc", i686_pc_windows_msvc), - ("i586-pc-windows-msvc", i586_pc_windows_msvc), - - 
("asmjs-unknown-emscripten", asmjs_unknown_emscripten), - ("wasm32-unknown-emscripten", wasm32_unknown_emscripten), - ("wasm32-unknown-unknown", wasm32_unknown_unknown), - ("wasm32-experimental-emscripten", wasm32_experimental_emscripten), - - ("thumbv6m-none-eabi", thumbv6m_none_eabi), - ("thumbv7m-none-eabi", thumbv7m_none_eabi), - ("thumbv7em-none-eabi", thumbv7em_none_eabi), - ("thumbv7em-none-eabihf", thumbv7em_none_eabihf), - - ("msp430-none-elf", msp430_none_elf), - - ("aarch64-unknown-cloudabi", aarch64_unknown_cloudabi), - ("armv7-unknown-cloudabi-eabihf", armv7_unknown_cloudabi_eabihf), - ("i686-unknown-cloudabi", i686_unknown_cloudabi), - ("x86_64-unknown-cloudabi", x86_64_unknown_cloudabi), -} - -/// Everything `rustc` knows about how to compile for a specific target. -/// -/// Every field here must be specified, and has no default value. -#[derive(PartialEq, Clone, Debug)] -pub struct Target { - /// Target triple to pass to LLVM. - pub llvm_target: String, - /// String to use as the `target_endian` `cfg` variable. - pub target_endian: String, - /// String to use as the `target_pointer_width` `cfg` variable. - pub target_pointer_width: String, - /// Width of c_int type - pub target_c_int_width: String, - /// OS name to use for conditional compilation. - pub target_os: String, - /// Environment name to use for conditional compilation. - pub target_env: String, - /// Vendor name to use for conditional compilation. - pub target_vendor: String, - /// Architecture to use for ABI considerations. Valid options: "x86", - /// "x86_64", "arm", "aarch64", "mips", "powerpc", and "powerpc64". - pub arch: String, - /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM. - pub data_layout: String, - /// Linker flavor - pub linker_flavor: LinkerFlavor, - /// Optional settings with defaults. - pub options: TargetOptions, -} - -/// Optional aspects of a target specification. 
-/// -/// This has an implementation of `Default`, see each field for what the default is. In general, -/// these try to take "minimal defaults" that don't assume anything about the runtime they run in. -#[derive(PartialEq, Clone, Debug)] -pub struct TargetOptions { - /// Whether the target is built-in or loaded from a custom target specification. - pub is_builtin: bool, - - /// Linker to invoke. Defaults to "cc". - pub linker: String, - - /// Linker arguments that are unconditionally passed *before* any - /// user-defined libraries. - pub pre_link_args: LinkArgs, - /// Objects to link before all others, always found within the - /// sysroot folder. - pub pre_link_objects_exe: Vec, // ... when linking an executable - pub pre_link_objects_dll: Vec, // ... when linking a dylib - /// Linker arguments that are unconditionally passed after any - /// user-defined but before post_link_objects. Standard platform - /// libraries that should be always be linked to, usually go here. - pub late_link_args: LinkArgs, - /// Objects to link after all others, always found within the - /// sysroot folder. - pub post_link_objects: Vec, - /// Linker arguments that are unconditionally passed *after* any - /// user-defined libraries. - pub post_link_args: LinkArgs, - - /// Environment variables to be set before invoking the linker. - pub link_env: Vec<(String, String)>, - - /// Extra arguments to pass to the external assembler (when used) - pub asm_args: Vec, - - /// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults - /// to "generic". - pub cpu: String, - /// Default target features to pass to LLVM. These features will *always* be - /// passed, and cannot be disabled even via `-C`. Corresponds to `llc - /// -mattr=$features`. - pub features: String, - /// Whether dynamic linking is available on this target. Defaults to false. - pub dynamic_linking: bool, - /// If dynamic linking is available, whether only cdylibs are supported. 
- pub only_cdylib: bool, - /// Whether executables are available on this target. iOS, for example, only allows static - /// libraries. Defaults to false. - pub executables: bool, - /// Relocation model to use in object file. Corresponds to `llc - /// -relocation-model=$relocation_model`. Defaults to "pic". - pub relocation_model: String, - /// Code model to use. Corresponds to `llc -code-model=$code_model`. Defaults to "default". - pub code_model: String, - /// TLS model to use. Options are "global-dynamic" (default), "local-dynamic", "initial-exec" - /// and "local-exec". This is similar to the -ftls-model option in GCC/Clang. - pub tls_model: String, - /// Do not emit code that uses the "red zone", if the ABI has one. Defaults to false. - pub disable_redzone: bool, - /// Eliminate frame pointers from stack frames if possible. Defaults to true. - pub eliminate_frame_pointer: bool, - /// Emit each function in its own section. Defaults to true. - pub function_sections: bool, - /// String to prepend to the name of every dynamic library. Defaults to "lib". - pub dll_prefix: String, - /// String to append to the name of every dynamic library. Defaults to ".so". - pub dll_suffix: String, - /// String to append to the name of every executable. - pub exe_suffix: String, - /// String to prepend to the name of every static library. Defaults to "lib". - pub staticlib_prefix: String, - /// String to append to the name of every static library. Defaults to ".a". - pub staticlib_suffix: String, - /// OS family to use for conditional compilation. Valid options: "unix", "windows". - pub target_family: Option, - /// Whether the target toolchain is like OpenBSD's. - /// Only useful for compiling against OpenBSD, for configuring abi when returning a struct. - pub is_like_openbsd: bool, - /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS, - /// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false. 
- pub is_like_osx: bool, - /// Whether the target toolchain is like Solaris's. - /// Only useful for compiling against Illumos/Solaris, - /// as they have a different set of linker flags. Defaults to false. - pub is_like_solaris: bool, - /// Whether the target toolchain is like Windows'. Only useful for compiling against Windows, - /// only really used for figuring out how to find libraries, since Windows uses its own - /// library naming convention. Defaults to false. - pub is_like_windows: bool, - pub is_like_msvc: bool, - /// Whether the target toolchain is like Android's. Only useful for compiling against Android. - /// Defaults to false. - pub is_like_android: bool, - /// Whether the target toolchain is like Emscripten's. Only useful for compiling with - /// Emscripten toolchain. - /// Defaults to false. - pub is_like_emscripten: bool, - /// Whether the linker support GNU-like arguments such as -O. Defaults to false. - pub linker_is_gnu: bool, - /// The MinGW toolchain has a known issue that prevents it from correctly - /// handling COFF object files with more than 215 sections. Since each weak - /// symbol needs its own COMDAT section, weak linkage implies a large - /// number sections that easily exceeds the given limit for larger - /// codebases. Consequently we want a way to disallow weak linkage on some - /// platforms. - pub allows_weak_linkage: bool, - /// Whether the linker support rpaths or not. Defaults to false. - pub has_rpath: bool, - /// Whether to disable linking to the default libraries, typically corresponds - /// to `-nodefaultlibs`. Defaults to true. - pub no_default_libraries: bool, - /// Dynamically linked executables can be compiled as position independent - /// if the default relocation model of position independent code is not - /// changed. This is a requirement to take advantage of ASLR, as otherwise - /// the functions in the executable are not randomized and can be used - /// during an exploit of a vulnerability in any code. 
- pub position_independent_executables: bool, - /// Either partial, full, or off. Full RELRO makes the dynamic linker - /// resolve all symbols at startup and marks the GOT read-only before - /// starting the program, preventing overwriting the GOT. - pub relro_level: RelroLevel, - /// Format that archives should be emitted in. This affects whether we use - /// LLVM to assemble an archive or fall back to the system linker, and - /// currently only "gnu" is used to fall into LLVM. Unknown strings cause - /// the system linker to be used. - pub archive_format: String, - /// Is asm!() allowed? Defaults to true. - pub allow_asm: bool, - /// Whether the target uses a custom unwind resumption routine. - /// By default LLVM lowers `resume` instructions into calls to `_Unwind_Resume` - /// defined in libgcc. If this option is enabled, the target must provide - /// `eh_unwind_resume` lang item. - pub custom_unwind_resume: bool, - - /// If necessary, a different crate to link exe allocators by default - pub exe_allocation_crate: Option, - - /// Flag indicating whether ELF TLS (e.g. #[thread_local]) is available for - /// this target. - pub has_elf_tls: bool, - // This is mainly for easy compatibility with emscripten. - // If we give emcc .o files that are actually .bc files it - // will 'just work'. - pub obj_is_bitcode: bool, - - // LLVM can't produce object files for this target. Instead, we'll make LLVM - // emit assembly and then use `gcc` to turn that assembly into an object - // file - pub no_integrated_as: bool, - - /// Don't use this field; instead use the `.min_atomic_width()` method. - pub min_atomic_width: Option, - - /// Don't use this field; instead use the `.max_atomic_width()` method. - pub max_atomic_width: Option, - - /// Panic strategy: "unwind" or "abort" - pub panic_strategy: PanicStrategy, - - /// A blacklist of ABIs unsupported by the current target. Note that generic - /// ABIs are considered to be supported on all platforms and cannot be blacklisted. 
- pub abi_blacklist: Vec, - - /// Whether or not linking dylibs to a static CRT is allowed. - pub crt_static_allows_dylibs: bool, - /// Whether or not the CRT is statically linked by default. - pub crt_static_default: bool, - /// Whether or not crt-static is respected by the compiler (or is a no-op). - pub crt_static_respected: bool, - - /// Whether or not stack probes (__rust_probestack) are enabled - pub stack_probes: bool, - - /// The minimum alignment for global symbols. - pub min_global_align: Option, - - /// Default number of codegen units to use in debug mode - pub default_codegen_units: Option, - - /// Whether to generate trap instructions in places where optimization would - /// otherwise produce control flow that falls through into unrelated memory. - pub trap_unreachable: bool, - - /// This target requires everything to be compiled with LTO to emit a final - /// executable, aka there is no native linker for this target. - pub requires_lto: bool, - - /// This target has no support for threads. - pub singlethread: bool, - - /// Whether library functions call lowering/optimization is disabled in LLVM - /// for this target unconditionally. - pub no_builtins: bool, - - /// Whether to lower 128-bit operations to compiler_builtins calls. Use if - /// your backend only supports 64-bit and smaller math. - pub i128_lowering: bool, -} - -impl Default for TargetOptions { - /// Create a set of "sane defaults" for any target. This is still - /// incomplete, and if used for compilation, will certainly not work. 
- fn default() -> TargetOptions { - TargetOptions { - is_builtin: false, - linker: option_env!("CFG_DEFAULT_LINKER").unwrap_or("cc").to_string(), - pre_link_args: LinkArgs::new(), - post_link_args: LinkArgs::new(), - asm_args: Vec::new(), - cpu: "generic".to_string(), - features: "".to_string(), - dynamic_linking: false, - only_cdylib: false, - executables: false, - relocation_model: "pic".to_string(), - code_model: "default".to_string(), - tls_model: "global-dynamic".to_string(), - disable_redzone: false, - eliminate_frame_pointer: true, - function_sections: true, - dll_prefix: "lib".to_string(), - dll_suffix: ".so".to_string(), - exe_suffix: "".to_string(), - staticlib_prefix: "lib".to_string(), - staticlib_suffix: ".a".to_string(), - target_family: None, - is_like_openbsd: false, - is_like_osx: false, - is_like_solaris: false, - is_like_windows: false, - is_like_android: false, - is_like_emscripten: false, - is_like_msvc: false, - linker_is_gnu: false, - allows_weak_linkage: true, - has_rpath: false, - no_default_libraries: true, - position_independent_executables: false, - relro_level: RelroLevel::Off, - pre_link_objects_exe: Vec::new(), - pre_link_objects_dll: Vec::new(), - post_link_objects: Vec::new(), - late_link_args: LinkArgs::new(), - link_env: Vec::new(), - archive_format: "gnu".to_string(), - custom_unwind_resume: false, - exe_allocation_crate: None, - allow_asm: true, - has_elf_tls: false, - obj_is_bitcode: false, - no_integrated_as: false, - min_atomic_width: None, - max_atomic_width: None, - panic_strategy: PanicStrategy::Unwind, - abi_blacklist: vec![], - crt_static_allows_dylibs: false, - crt_static_default: false, - crt_static_respected: false, - stack_probes: false, - min_global_align: None, - default_codegen_units: None, - trap_unreachable: true, - requires_lto: false, - singlethread: false, - no_builtins: false, - i128_lowering: false, - } - } -} - -impl Target { - /// Given a function ABI, turn "System" into the correct ABI for this target. 
- pub fn adjust_abi(&self, abi: Abi) -> Abi { - match abi { - Abi::System => { - if self.options.is_like_windows && self.arch == "x86" { - Abi::Stdcall - } else { - Abi::C - } - }, - abi => abi - } - } - - /// Minimum integer size in bits that this target can perform atomic - /// operations on. - pub fn min_atomic_width(&self) -> u64 { - self.options.min_atomic_width.unwrap_or(8) - } - - /// Maximum integer size in bits that this target can perform atomic - /// operations on. - pub fn max_atomic_width(&self) -> u64 { - self.options.max_atomic_width.unwrap_or(self.target_pointer_width.parse().unwrap()) - } - - pub fn is_abi_supported(&self, abi: Abi) -> bool { - abi.generic() || !self.options.abi_blacklist.contains(&abi) - } - - /// Load a target descriptor from a JSON object. - pub fn from_json(obj: Json) -> TargetResult { - // While ugly, this code must remain this way to retain - // compatibility with existing JSON fields and the internal - // expected naming of the Target and TargetOptions structs. - // To ensure compatibility is retained, the built-in targets - // are round-tripped through this code to catch cases where - // the JSON parser is not updated to match the structs. 
- - let get_req_field = |name: &str| { - match obj.find(name) - .map(|s| s.as_string()) - .and_then(|os| os.map(|s| s.to_string())) { - Some(val) => Ok(val), - None => { - return Err(format!("Field {} in target specification is required", name)) - } - } - }; - - let get_opt_field = |name: &str, default: &str| { - obj.find(name).and_then(|s| s.as_string()) - .map(|s| s.to_string()) - .unwrap_or(default.to_string()) - }; - - let mut base = Target { - llvm_target: get_req_field("llvm-target")?, - target_endian: get_req_field("target-endian")?, - target_pointer_width: get_req_field("target-pointer-width")?, - target_c_int_width: get_req_field("target-c-int-width")?, - data_layout: get_req_field("data-layout")?, - arch: get_req_field("arch")?, - target_os: get_req_field("os")?, - target_env: get_opt_field("env", ""), - target_vendor: get_opt_field("vendor", "unknown"), - linker_flavor: LinkerFlavor::from_str(&*get_req_field("linker-flavor")?) - .ok_or_else(|| { - format!("linker flavor must be {}", LinkerFlavor::one_of()) - })?, - options: Default::default(), - }; - - macro_rules! 
key { - ($key_name:ident) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]).map(|o| o.as_string() - .map(|s| base.options.$key_name = s.to_string())); - } ); - ($key_name:ident, bool) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]) - .map(|o| o.as_boolean() - .map(|s| base.options.$key_name = s)); - } ); - ($key_name:ident, Option) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]) - .map(|o| o.as_u64() - .map(|s| base.options.$key_name = Some(s))); - } ); - ($key_name:ident, PanicStrategy) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { - match s { - "unwind" => base.options.$key_name = PanicStrategy::Unwind, - "abort" => base.options.$key_name = PanicStrategy::Abort, - _ => return Some(Err(format!("'{}' is not a valid value for \ - panic-strategy. Use 'unwind' or 'abort'.", - s))), - } - Some(Ok(())) - })).unwrap_or(Ok(())) - } ); - ($key_name:ident, RelroLevel) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { - match s.parse::() { - Ok(level) => base.options.$key_name = level, - _ => return Some(Err(format!("'{}' is not a valid value for \ - relro-level. 
Use 'full', 'partial, or 'off'.", - s))), - } - Some(Ok(())) - })).unwrap_or(Ok(())) - } ); - ($key_name:ident, list) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]).map(|o| o.as_array() - .map(|v| base.options.$key_name = v.iter() - .map(|a| a.as_string().unwrap().to_string()).collect() - ) - ); - } ); - ($key_name:ident, optional) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - if let Some(o) = obj.find(&name[..]) { - base.options.$key_name = o - .as_string() - .map(|s| s.to_string() ); - } - } ); - ($key_name:ident, LinkerFlavor) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - obj.find(&name[..]).and_then(|o| o.as_string().map(|s| { - LinkerFlavor::from_str(&s).ok_or_else(|| { - Err(format!("'{}' is not a valid value for linker-flavor. \ - Use 'em', 'gcc', 'ld' or 'msvc.", s)) - }) - })).unwrap_or(Ok(())) - } ); - ($key_name:ident, link_args) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - if let Some(obj) = obj.find(&name[..]).and_then(|o| o.as_object()) { - let mut args = LinkArgs::new(); - for (k, v) in obj { - let k = LinkerFlavor::from_str(&k).ok_or_else(|| { - format!("{}: '{}' is not a valid value for linker-flavor. 
\ - Use 'em', 'gcc', 'ld' or 'msvc'", name, k) - })?; - - let v = v.as_array().map(|a| { - a - .iter() - .filter_map(|o| o.as_string()) - .map(|s| s.to_owned()) - .collect::>() - }).unwrap_or(vec![]); - - args.insert(k, v); - } - base.options.$key_name = args; - } - } ); - ($key_name:ident, env) => ( { - let name = (stringify!($key_name)).replace("_", "-"); - if let Some(a) = obj.find(&name[..]).and_then(|o| o.as_array()) { - for o in a { - if let Some(s) = o.as_string() { - let p = s.split('=').collect::>(); - if p.len() == 2 { - let k = p[0].to_string(); - let v = p[1].to_string(); - base.options.$key_name.push((k, v)); - } - } - } - } - } ); - } - - key!(is_builtin, bool); - key!(linker); - key!(pre_link_args, link_args); - key!(pre_link_objects_exe, list); - key!(pre_link_objects_dll, list); - key!(late_link_args, link_args); - key!(post_link_objects, list); - key!(post_link_args, link_args); - key!(link_env, env); - key!(asm_args, list); - key!(cpu); - key!(features); - key!(dynamic_linking, bool); - key!(only_cdylib, bool); - key!(executables, bool); - key!(relocation_model); - key!(code_model); - key!(tls_model); - key!(disable_redzone, bool); - key!(eliminate_frame_pointer, bool); - key!(function_sections, bool); - key!(dll_prefix); - key!(dll_suffix); - key!(exe_suffix); - key!(staticlib_prefix); - key!(staticlib_suffix); - key!(target_family, optional); - key!(is_like_openbsd, bool); - key!(is_like_osx, bool); - key!(is_like_solaris, bool); - key!(is_like_windows, bool); - key!(is_like_msvc, bool); - key!(is_like_emscripten, bool); - key!(is_like_android, bool); - key!(linker_is_gnu, bool); - key!(allows_weak_linkage, bool); - key!(has_rpath, bool); - key!(no_default_libraries, bool); - key!(position_independent_executables, bool); - try!(key!(relro_level, RelroLevel)); - key!(archive_format); - key!(allow_asm, bool); - key!(custom_unwind_resume, bool); - key!(exe_allocation_crate, optional); - key!(has_elf_tls, bool); - key!(obj_is_bitcode, bool); - 
key!(no_integrated_as, bool); - key!(max_atomic_width, Option); - key!(min_atomic_width, Option); - try!(key!(panic_strategy, PanicStrategy)); - key!(crt_static_allows_dylibs, bool); - key!(crt_static_default, bool); - key!(crt_static_respected, bool); - key!(stack_probes, bool); - key!(min_global_align, Option); - key!(default_codegen_units, Option); - key!(trap_unreachable, bool); - key!(requires_lto, bool); - key!(singlethread, bool); - key!(no_builtins, bool); - - if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) { - for name in array.iter().filter_map(|abi| abi.as_string()) { - match lookup_abi(name) { - Some(abi) => { - if abi.generic() { - return Err(format!("The ABI \"{}\" is considered to be supported on \ - all targets and cannot be blacklisted", abi)) - } - - base.options.abi_blacklist.push(abi) - } - None => return Err(format!("Unknown ABI \"{}\" in target specification", name)) - } - } - } - - Ok(base) - } - - /// Search RUST_TARGET_PATH for a JSON file specifying the given target - /// triple. Note that it could also just be a bare filename already, so also - /// check for that. If one of the hardcoded targets we know about, just - /// return it directly. - /// - /// The error string could come from any of the APIs called, including - /// filesystem access and JSON decoding. 
- pub fn search(target: &str) -> Result { - use std::env; - use std::ffi::OsString; - use std::fs; - use std::path::{Path, PathBuf}; - use serialize::json; - - fn load_file(path: &Path) -> Result { - let contents = fs::read(path).map_err(|e| e.to_string())?; - let obj = json::from_reader(&mut &contents[..]) - .map_err(|e| e.to_string())?; - Target::from_json(obj) - } - - if let Ok(t) = load_specific(target) { - return Ok(t) - } - - let path = Path::new(target); - - if path.is_file() { - return load_file(&path); - } - - let path = { - let mut target = target.to_string(); - target.push_str(".json"); - PathBuf::from(target) - }; - - let target_path = env::var_os("RUST_TARGET_PATH") - .unwrap_or(OsString::new()); - - // FIXME 16351: add a sane default search path? - - for dir in env::split_paths(&target_path) { - let p = dir.join(&path); - if p.is_file() { - return load_file(&p); - } - } - - Err(format!("Could not find specification for target {:?}", target)) - } -} - -impl ToJson for Target { - fn to_json(&self) -> Json { - let mut d = BTreeMap::new(); - let default: TargetOptions = Default::default(); - - macro_rules! target_val { - ($attr:ident) => ( { - let name = (stringify!($attr)).replace("_", "-"); - d.insert(name.to_string(), self.$attr.to_json()); - } ); - ($attr:ident, $key_name:expr) => ( { - let name = $key_name; - d.insert(name.to_string(), self.$attr.to_json()); - } ); - } - - macro_rules! 
target_option_val { - ($attr:ident) => ( { - let name = (stringify!($attr)).replace("_", "-"); - if default.$attr != self.options.$attr { - d.insert(name.to_string(), self.options.$attr.to_json()); - } - } ); - ($attr:ident, $key_name:expr) => ( { - let name = $key_name; - if default.$attr != self.options.$attr { - d.insert(name.to_string(), self.options.$attr.to_json()); - } - } ); - (link_args - $attr:ident) => ( { - let name = (stringify!($attr)).replace("_", "-"); - if default.$attr != self.options.$attr { - let obj = self.options.$attr - .iter() - .map(|(k, v)| (k.desc().to_owned(), v.clone())) - .collect::>(); - d.insert(name.to_string(), obj.to_json()); - } - } ); - (env - $attr:ident) => ( { - let name = (stringify!($attr)).replace("_", "-"); - if default.$attr != self.options.$attr { - let obj = self.options.$attr - .iter() - .map(|&(ref k, ref v)| k.clone() + "=" + &v) - .collect::>(); - d.insert(name.to_string(), obj.to_json()); - } - } ); - - } - - target_val!(llvm_target); - target_val!(target_endian); - target_val!(target_pointer_width); - target_val!(target_c_int_width); - target_val!(arch); - target_val!(target_os, "os"); - target_val!(target_env, "env"); - target_val!(target_vendor, "vendor"); - target_val!(data_layout); - target_val!(linker_flavor); - - target_option_val!(is_builtin); - target_option_val!(linker); - target_option_val!(link_args - pre_link_args); - target_option_val!(pre_link_objects_exe); - target_option_val!(pre_link_objects_dll); - target_option_val!(link_args - late_link_args); - target_option_val!(post_link_objects); - target_option_val!(link_args - post_link_args); - target_option_val!(env - link_env); - target_option_val!(asm_args); - target_option_val!(cpu); - target_option_val!(features); - target_option_val!(dynamic_linking); - target_option_val!(only_cdylib); - target_option_val!(executables); - target_option_val!(relocation_model); - target_option_val!(code_model); - target_option_val!(tls_model); - 
target_option_val!(disable_redzone); - target_option_val!(eliminate_frame_pointer); - target_option_val!(function_sections); - target_option_val!(dll_prefix); - target_option_val!(dll_suffix); - target_option_val!(exe_suffix); - target_option_val!(staticlib_prefix); - target_option_val!(staticlib_suffix); - target_option_val!(target_family); - target_option_val!(is_like_openbsd); - target_option_val!(is_like_osx); - target_option_val!(is_like_solaris); - target_option_val!(is_like_windows); - target_option_val!(is_like_msvc); - target_option_val!(is_like_emscripten); - target_option_val!(is_like_android); - target_option_val!(linker_is_gnu); - target_option_val!(allows_weak_linkage); - target_option_val!(has_rpath); - target_option_val!(no_default_libraries); - target_option_val!(position_independent_executables); - target_option_val!(relro_level); - target_option_val!(archive_format); - target_option_val!(allow_asm); - target_option_val!(custom_unwind_resume); - target_option_val!(exe_allocation_crate); - target_option_val!(has_elf_tls); - target_option_val!(obj_is_bitcode); - target_option_val!(no_integrated_as); - target_option_val!(min_atomic_width); - target_option_val!(max_atomic_width); - target_option_val!(panic_strategy); - target_option_val!(crt_static_allows_dylibs); - target_option_val!(crt_static_default); - target_option_val!(crt_static_respected); - target_option_val!(stack_probes); - target_option_val!(min_global_align); - target_option_val!(default_codegen_units); - target_option_val!(trap_unreachable); - target_option_val!(requires_lto); - target_option_val!(singlethread); - target_option_val!(no_builtins); - - if default.abi_blacklist != self.options.abi_blacklist { - d.insert("abi-blacklist".to_string(), self.options.abi_blacklist.iter() - .map(Abi::name).map(|name| name.to_json()) - .collect::>().to_json()); - } - - Json::Object(d) - } -} - -fn maybe_jemalloc() -> Option { - if cfg!(feature = "jemalloc") { - Some("alloc_jemalloc".to_string()) - 
} else { - None - } -} diff --git a/src/librustc_back/target/wasm32_unknown_unknown.rs b/src/librustc_back/target/wasm32_unknown_unknown.rs deleted file mode 100644 index 7e1011ab8af9..000000000000 --- a/src/librustc_back/target/wasm32_unknown_unknown.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// The wasm32-unknown-unknown target is currently a highly experimental version -// of a wasm-based target which does *not* use the Emscripten toolchain. Instead -// this is a pretty flavorful (aka hacked up) target right now. The definition -// and semantics of this target are likely to change and so this shouldn't be -// relied on just yet. -// -// In general everyone is currently waiting on a linker for wasm code. In the -// meantime we have no means of actually making use of the traditional separate -// compilation model. At a high level this means that assembling Rust programs -// into a WebAssembly program looks like: -// -// 1. All intermediate artifacts are LLVM bytecode. We'll be using LLVM as -// a linker later on. -// 2. For the final artifact we emit one giant assembly file (WebAssembly -// doesn't have an object file format). To do this we force LTO to be turned -// on (`requires_lto` below) to ensure all Rust code is in one module. Any -// "linked" C library is basically just ignored. -// 3. Using LLVM we emit a `foo.s` file (assembly) with some... what I can only -// describe as arcane syntax. From there we need to actually change this -// into a wasm module. For this step we use the `binaryen` project. 
This -// project is mostly intended as a WebAssembly code generator, but for now -// we're just using its LLVM-assembly-to-wasm-module conversion utilities. -// -// And voila, out comes a web assembly module! There's some various tweaks here -// and there, but that's the high level at least. Note that this will be -// rethought from the ground up once a linker (lld) is available, so this is all -// temporary and should improve in the future. - -use LinkerFlavor; -use super::{Target, TargetOptions, PanicStrategy}; - -pub fn target() -> Result { - let opts = TargetOptions { - linker: "not-used".to_string(), - - // we allow dynamic linking, but only cdylibs. Basically we allow a - // final library artifact that exports some symbols (a wasm module) but - // we don't allow intermediate `dylib` crate types - dynamic_linking: true, - only_cdylib: true, - - // This means we'll just embed a `start` function in the wasm module - executables: true, - - // relatively self-explanatory! - exe_suffix: ".wasm".to_string(), - dll_prefix: "".to_string(), - dll_suffix: ".wasm".to_string(), - linker_is_gnu: false, - - // We're storing bitcode for now in all the rlibs - obj_is_bitcode: true, - - // A bit of a lie, but "eh" - max_atomic_width: Some(32), - - // Unwinding doesn't work right now, so the whole target unconditionally - // defaults to panic=abort. Note that this is guaranteed to change in - // the future once unwinding is implemented. Don't rely on this. - panic_strategy: PanicStrategy::Abort, - - // There's no linker yet so we're forced to use LLVM as a linker. This - // means that we must always enable LTO for final artifacts. - requires_lto: true, - - // Wasm doesn't have atomics yet, so tell LLVM that we're in a single - // threaded model which will legalize atomics to normal operations. 
- singlethread: true, - - // Because we're always enabling LTO we can't enable builtin lowering as - // otherwise we'll lower the definition of the `memcpy` function to - // memcpy itself. Note that this is specifically because we're - // performing LTO with compiler-builtins. - no_builtins: true, - - .. Default::default() - }; - Ok(Target { - llvm_target: "wasm32-unknown-unknown".to_string(), - target_endian: "little".to_string(), - target_pointer_width: "32".to_string(), - target_c_int_width: "32".to_string(), - // This is basically guaranteed to change in the future, don't rely on - // this. Use `not(target_os = "emscripten")` for now. - target_os: "unknown".to_string(), - target_env: "".to_string(), - target_vendor: "unknown".to_string(), - data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(), - arch: "wasm32".to_string(), - // A bit of a lie, but it gets the job done - linker_flavor: LinkerFlavor::Binaryen, - options: opts, - }) -} diff --git a/src/librustc_back/target/x86_64_unknown_fuchsia.rs b/src/librustc_back/target/x86_64_unknown_fuchsia.rs deleted file mode 100644 index 6e97d53cfad6..000000000000 --- a/src/librustc_back/target/x86_64_unknown_fuchsia.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use LinkerFlavor; -use target::{Target, TargetResult}; - -pub fn target() -> TargetResult { - let mut base = super::fuchsia_base::opts(); - base.cpu = "x86-64".to_string(); - base.max_atomic_width = Some(64); - base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string()); - base.stack_probes = true; - - Ok(Target { - llvm_target: "x86_64-unknown-fuchsia".to_string(), - target_endian: "little".to_string(), - target_pointer_width: "64".to_string(), - target_c_int_width: "32".to_string(), - data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), - arch: "x86_64".to_string(), - target_os: "fuchsia".to_string(), - target_env: "".to_string(), - target_vendor: "unknown".to_string(), - linker_flavor: LinkerFlavor::Gcc, - options: base, - }) -} diff --git a/src/librustc_binaryen/BinaryenWrapper.cpp b/src/librustc_binaryen/BinaryenWrapper.cpp deleted file mode 100644 index d1095a7819d4..000000000000 --- a/src/librustc_binaryen/BinaryenWrapper.cpp +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This is a small C API inserted on top of the Binaryen C++ API which we use -// from Rust. Once we have a real linker for we'll be able to remove all this, -// and otherwise this is just all on a "as we need it" basis for now. 
- -#include -#include -#include - -#include "s2wasm.h" -#include "wasm-binary.h" -#include "wasm-linker.h" - -using namespace wasm; - -struct BinaryenRustModule { - BufferWithRandomAccess buffer; -}; - -struct BinaryenRustModuleOptions { - uint64_t globalBase; - bool debug; - uint64_t stackAllocation; - uint64_t initialMem; - uint64_t maxMem; - bool importMemory; - bool ignoreUnknownSymbols; - bool debugInfo; - std::string startFunction; - - BinaryenRustModuleOptions() : - globalBase(0), - debug(false), - stackAllocation(0), - initialMem(0), - maxMem(0), - importMemory(false), - ignoreUnknownSymbols(false), - debugInfo(false), - startFunction("") - {} - -}; - -extern "C" BinaryenRustModuleOptions* -BinaryenRustModuleOptionsCreate() { - return new BinaryenRustModuleOptions; -} - -extern "C" void -BinaryenRustModuleOptionsFree(BinaryenRustModuleOptions *options) { - delete options; -} - -extern "C" void -BinaryenRustModuleOptionsSetDebugInfo(BinaryenRustModuleOptions *options, - bool debugInfo) { - options->debugInfo = debugInfo; -} - -extern "C" void -BinaryenRustModuleOptionsSetStart(BinaryenRustModuleOptions *options, - char *start) { - options->startFunction = start; -} - -extern "C" void -BinaryenRustModuleOptionsSetStackAllocation(BinaryenRustModuleOptions *options, - uint64_t stack) { - options->stackAllocation = stack; -} - -extern "C" void -BinaryenRustModuleOptionsSetImportMemory(BinaryenRustModuleOptions *options, - bool import) { - options->importMemory = import; -} - -extern "C" BinaryenRustModule* -BinaryenRustModuleCreate(const BinaryenRustModuleOptions *options, - const char *assembly) { - Linker linker( - options->globalBase, - options->stackAllocation, - options->initialMem, - options->maxMem, - options->importMemory, - options->ignoreUnknownSymbols, - options->startFunction, - options->debug); - - S2WasmBuilder mainbuilder(assembly, options->debug); - linker.linkObject(mainbuilder); - linker.layout(); - - auto ret = make_unique(); - { - 
WasmBinaryWriter writer(&linker.getOutput().wasm, ret->buffer, options->debug); - writer.setNamesSection(options->debugInfo); - // FIXME: support source maps? - // writer.setSourceMap(sourceMapStream.get(), sourceMapUrl); - - // FIXME: support symbol maps? - // writer.setSymbolMap(symbolMap); - writer.write(); - } - return ret.release(); -} - -extern "C" const uint8_t* -BinaryenRustModulePtr(const BinaryenRustModule *M) { - return M->buffer.data(); -} - -extern "C" size_t -BinaryenRustModuleLen(const BinaryenRustModule *M) { - return M->buffer.size(); -} - -extern "C" void -BinaryenRustModuleFree(BinaryenRustModule *M) { - delete M; -} diff --git a/src/librustc_binaryen/Cargo.toml b/src/librustc_binaryen/Cargo.toml deleted file mode 100644 index 9573c8947140..000000000000 --- a/src/librustc_binaryen/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -# Wondering what this crate is? Take a look at the `lib.rs`! - -[package] -name = "rustc_binaryen" -version = "0.0.0" -authors = ["The Rust Project Developers"] - -[lib] -path = "lib.rs" - -[dependencies] -libc = "0.2" - -[build-dependencies] -cmake = "0.1" -cc = "1.0" diff --git a/src/librustc_binaryen/build.rs b/src/librustc_binaryen/build.rs deleted file mode 100644 index f23ff3cee555..000000000000 --- a/src/librustc_binaryen/build.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -extern crate cc; -extern crate cmake; - -use std::env; - -use cmake::Config; - -fn main() { - let target = env::var("TARGET").unwrap(); - - // Bring in `__emutls_get_address` which is apparently needed for now - if target.contains("pc-windows-gnu") { - println!("cargo:rustc-link-lib=gcc_eh"); - println!("cargo:rustc-link-lib=pthread"); - } - - Config::new("../binaryen") - .define("BUILD_STATIC_LIB", "ON") - .build_target("binaryen") - .build(); - - // I couldn't figure out how to link just one of these, so link everything. - println!("cargo:rustc-link-lib=static=asmjs"); - println!("cargo:rustc-link-lib=static=binaryen"); - println!("cargo:rustc-link-lib=static=cfg"); - println!("cargo:rustc-link-lib=static=emscripten-optimizer"); - println!("cargo:rustc-link-lib=static=ir"); - println!("cargo:rustc-link-lib=static=passes"); - println!("cargo:rustc-link-lib=static=support"); - println!("cargo:rustc-link-lib=static=wasm"); - - let out_dir = env::var("OUT_DIR").unwrap(); - println!("cargo:rustc-link-search=native={}/build/lib", out_dir); - - // Add in our own little shim along with some extra files that weren't - // included in the main build. - let mut cfg = cc::Build::new(); - cfg.file("BinaryenWrapper.cpp") - .file("../binaryen/src/wasm-linker.cpp") - .file("../binaryen/src/wasm-emscripten.cpp") - .include("../binaryen/src") - .cpp_link_stdlib(None) - .warnings(false) - .cpp(true); - - if !target.contains("msvc") { - cfg.flag("-std=c++11"); - } - cfg.compile("binaryen_wrapper"); -} diff --git a/src/librustc_binaryen/lib.rs b/src/librustc_binaryen/lib.rs deleted file mode 100644 index 6c7feb6a7a9d..000000000000 --- a/src/librustc_binaryen/lib.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! Rustc bindings to the binaryen project. -//! -//! This crate is a small shim around the binaryen project which provides us the -//! ability to take LLVM's output and generate a wasm module. Specifically this -//! only supports one operation, creating a module from LLVM's assembly format -//! and then serializing that module to a wasm module. - -extern crate libc; - -use std::slice; -use std::ffi::{CString, CStr}; - -/// In-memory representation of a serialized wasm module. -pub struct Module { - ptr: *mut BinaryenRustModule, -} - -impl Module { - /// Creates a new wasm module from the LLVM-assembly provided (in a C string - /// format). - /// - /// The actual module creation can be tweaked through the various options in - /// `ModuleOptions` as well. Any errors are just returned as a bland string. - pub fn new(assembly: &CStr, opts: &ModuleOptions) -> Result { - unsafe { - let ptr = BinaryenRustModuleCreate(opts.ptr, assembly.as_ptr()); - if ptr.is_null() { - Err(format!("failed to create binaryen module")) - } else { - Ok(Module { ptr }) - } - } - } - - /// Returns the data of the serialized wasm module. This is a `foo.wasm` - /// file contents. - pub fn data(&self) -> &[u8] { - unsafe { - let ptr = BinaryenRustModulePtr(self.ptr); - let len = BinaryenRustModuleLen(self.ptr); - slice::from_raw_parts(ptr, len) - } - } -} - -impl Drop for Module { - fn drop(&mut self) { - unsafe { - BinaryenRustModuleFree(self.ptr); - } - } -} - -pub struct ModuleOptions { - ptr: *mut BinaryenRustModuleOptions, -} - -impl ModuleOptions { - pub fn new() -> ModuleOptions { - unsafe { - let ptr = BinaryenRustModuleOptionsCreate(); - ModuleOptions { ptr } - } - } - - /// Turns on or off debug info. - /// - /// From what I can tell this just creates a "names" section of the wasm - /// module which contains a table of the original function names. 
- pub fn debuginfo(&mut self, debug: bool) -> &mut Self { - unsafe { - BinaryenRustModuleOptionsSetDebugInfo(self.ptr, debug); - } - self - } - - /// Configures a `start` function for the module, to be executed when it's - /// loaded. - pub fn start(&mut self, func: &str) -> &mut Self { - let func = CString::new(func).unwrap(); - unsafe { - BinaryenRustModuleOptionsSetStart(self.ptr, func.as_ptr()); - } - self - } - - /// Configures how much stack is initially allocated for the module. 1MB is - /// probably good enough for now. - pub fn stack(&mut self, amt: u64) -> &mut Self { - unsafe { - BinaryenRustModuleOptionsSetStackAllocation(self.ptr, amt); - } - self - } - - /// Flags whether the initial memory should be imported or exported. So far - /// we export it by default. - pub fn import_memory(&mut self, import: bool) -> &mut Self { - unsafe { - BinaryenRustModuleOptionsSetImportMemory(self.ptr, import); - } - self - } -} - -impl Drop for ModuleOptions { - fn drop(&mut self) { - unsafe { - BinaryenRustModuleOptionsFree(self.ptr); - } - } -} - -enum BinaryenRustModule {} -enum BinaryenRustModuleOptions {} - -extern { - fn BinaryenRustModuleCreate(opts: *const BinaryenRustModuleOptions, - assembly: *const libc::c_char) - -> *mut BinaryenRustModule; - fn BinaryenRustModulePtr(module: *const BinaryenRustModule) -> *const u8; - fn BinaryenRustModuleLen(module: *const BinaryenRustModule) -> usize; - fn BinaryenRustModuleFree(module: *mut BinaryenRustModule); - - fn BinaryenRustModuleOptionsCreate() - -> *mut BinaryenRustModuleOptions; - fn BinaryenRustModuleOptionsSetDebugInfo(module: *mut BinaryenRustModuleOptions, - debuginfo: bool); - fn BinaryenRustModuleOptionsSetStart(module: *mut BinaryenRustModuleOptions, - start: *const libc::c_char); - fn BinaryenRustModuleOptionsSetStackAllocation( - module: *mut BinaryenRustModuleOptions, - stack: u64, - ); - fn BinaryenRustModuleOptionsSetImportMemory( - module: *mut BinaryenRustModuleOptions, - import: bool, - ); - fn 
BinaryenRustModuleOptionsFree(module: *mut BinaryenRustModuleOptions); -} diff --git a/src/librustc_borrowck/Cargo.toml b/src/librustc_borrowck/Cargo.toml index 8522fe11fe1c..3368bbf3855a 100644 --- a/src/librustc_borrowck/Cargo.toml +++ b/src/librustc_borrowck/Cargo.toml @@ -17,3 +17,4 @@ graphviz = { path = "../libgraphviz" } rustc = { path = "../librustc" } rustc_mir = { path = "../librustc_mir" } rustc_errors = { path = "../librustc_errors" } +rustc_data_structures = { path = "../librustc_data_structures" } \ No newline at end of file diff --git a/src/librustc_borrowck/borrowck/README.md b/src/librustc_borrowck/borrowck/README.md index b877c5a9cbcb..8bc0b4969b8f 100644 --- a/src/librustc_borrowck/borrowck/README.md +++ b/src/librustc_borrowck/borrowck/README.md @@ -1,5 +1,10 @@ % The Borrow Checker +> WARNING: This README is more or less obsolete, and will be removed +> soon! The new system is described in the [rustc guide]. + +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/mir/borrowck.html + This pass has the job of enforcing memory safety. This is a subtle topic. This docs aim to explain both the practice and the theory behind the borrow checker. They start with a high-level overview of @@ -43,14 +48,14 @@ it is safe with respect to the in-scope loans. # Formal model Throughout the docs we'll consider a simple subset of Rust in which -you can only borrow from lvalues, defined like so: +you can only borrow from places, defined like so: ```text -LV = x | LV.f | *LV +P = x | P.f | *P ``` -Here `x` represents some variable, `LV.f` is a field reference, -and `*LV` is a pointer dereference. There is no auto-deref or other +Here `x` represents some variable, `P.f` is a field reference, +and `*P` is a pointer dereference. There is no auto-deref or other niceties. This means that if you have a type like: ```rust @@ -58,7 +63,7 @@ struct S { f: i32 } ``` and a variable `a: Box`, then the rust expression `a.f` would correspond -to an `LV` of `(*a).f`. 
+to an `P` of `(*a).f`. Here is the formal grammar for the types we'll consider: @@ -99,7 +104,7 @@ this sort of thing. #### Loans and restrictions The way the borrow checker works is that it analyzes each borrow -expression (in our simple model, that's stuff like `&LV`, though in +expression (in our simple model, that's stuff like `&P`, though in real life there are a few other cases to consider). For each borrow expression, it computes a `Loan`, which is a data structure that records (1) the value being borrowed, (2) the mutability and scope of @@ -108,29 +113,29 @@ struct defined in `middle::borrowck`. Formally, we define `LOAN` as follows: ```text -LOAN = (LV, LT, MQ, RESTRICTION*) -RESTRICTION = (LV, ACTION*) +LOAN = (P, LT, MQ, RESTRICTION*) +RESTRICTION = (P, ACTION*) ACTION = MUTATE | CLAIM | FREEZE ``` -Here the `LOAN` tuple defines the lvalue `LV` being borrowed; the +Here the `LOAN` tuple defines the place `P` being borrowed; the lifetime `LT` of that borrow; the mutability `MQ` of the borrow; and a list of restrictions. The restrictions indicate actions which, if taken, could invalidate the loan and lead to type safety violations. -Each `RESTRICTION` is a pair of a restrictive lvalue `LV` (which will +Each `RESTRICTION` is a pair of a restrictive place `P` (which will either be the path that was borrowed or some prefix of the path that was borrowed) and a set of restricted actions. There are three kinds -of actions that may be restricted for the path `LV`: +of actions that may be restricted for the path `P`: -- `MUTATE` means that `LV` cannot be assigned to; -- `CLAIM` means that the `LV` cannot be borrowed mutably; -- `FREEZE` means that the `LV` cannot be borrowed immutably; +- `MUTATE` means that `P` cannot be assigned to; +- `CLAIM` means that the `P` cannot be borrowed mutably; +- `FREEZE` means that the `P` cannot be borrowed immutably; -Finally, it is never possible to move from an lvalue that appears in a -restriction. 
This implies that the "empty restriction" `(LV, [])`, +Finally, it is never possible to move from a place that appears in a +restriction. This implies that the "empty restriction" `(P, [])`, which contains an empty set of actions, still has a purpose---it -prevents moves from `LV`. I chose not to make `MOVE` a fourth kind of +prevents moves from `P`. I chose not to make `MOVE` a fourth kind of action because that would imply that sometimes moves are permitted from restricted values, which is not the case. @@ -239,22 +244,22 @@ live. (This is done via restrictions, read on.) We start with the `gather_loans` pass, which walks the AST looking for borrows. For each borrow, there are three bits of information: the -lvalue `LV` being borrowed and the mutability `MQ` and lifetime `LT` +place `P` being borrowed and the mutability `MQ` and lifetime `LT` of the resulting pointer. Given those, `gather_loans` applies four validity tests: -1. `MUTABILITY(LV, MQ)`: The mutability of the reference is -compatible with the mutability of `LV` (i.e., not borrowing immutable +1. `MUTABILITY(P, MQ)`: The mutability of the reference is +compatible with the mutability of `P` (i.e., not borrowing immutable data as mutable). -2. `ALIASABLE(LV, MQ)`: The aliasability of the reference is -compatible with the aliasability of `LV`. The goal is to prevent +2. `ALIASABLE(P, MQ)`: The aliasability of the reference is +compatible with the aliasability of `P`. The goal is to prevent `&mut` borrows of aliasability data. -3. `LIFETIME(LV, LT, MQ)`: The lifetime of the borrow does not exceed +3. `LIFETIME(P, LT, MQ)`: The lifetime of the borrow does not exceed the lifetime of the value being borrowed. -4. `RESTRICTIONS(LV, LT, ACTIONS) = RS`: This pass checks and computes the +4. `RESTRICTIONS(P, LT, ACTIONS) = RS`: This pass checks and computes the restrictions to maintain memory safety. These are the restrictions that will go into the final loan. We'll discuss in more detail below. 
@@ -263,7 +268,7 @@ that will go into the final loan. We'll discuss in more detail below. Checking mutability is fairly straightforward. We just want to prevent immutable data from being borrowed as mutable. Note that it is ok to borrow mutable data as immutable, since that is simply a freeze. The judgement -`MUTABILITY(LV, MQ)` means the mutability of `LV` is compatible with a borrow +`MUTABILITY(P, MQ)` means the mutability of `P` is compatible with a borrow of mutability `MQ`. The Rust code corresponding to this predicate is the function `check_mutability` in `middle::borrowck::gather_loans`. @@ -288,15 +293,15 @@ MUTABILITY(X, imm) // M-Var-Imm Fields and boxes inherit their mutability from their base expressions, so both of their rules basically -delegate the check to the base expression `LV`: +delegate the check to the base expression `P`: ```text -MUTABILITY(LV.f, MQ) // M-Field - MUTABILITY(LV, MQ) +MUTABILITY(P.f, MQ) // M-Field + MUTABILITY(P, MQ) -MUTABILITY(*LV, MQ) // M-Deref-Unique - TYPE(LV) = Box - MUTABILITY(LV, MQ) +MUTABILITY(*P, MQ) // M-Deref-Unique + TYPE(P) = Box + MUTABILITY(P, MQ) ``` ### Checking mutability of immutable pointer types @@ -305,8 +310,8 @@ Immutable pointer types like `&T` can only be borrowed if MQ is immutable: ```text -MUTABILITY(*LV, imm) // M-Deref-Borrowed-Imm - TYPE(LV) = &Ty +MUTABILITY(*P, imm) // M-Deref-Borrowed-Imm + TYPE(P) = &Ty ``` ### Checking mutability of mutable pointer types @@ -314,15 +319,15 @@ MUTABILITY(*LV, imm) // M-Deref-Borrowed-Imm `&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut: ```text -MUTABILITY(*LV, MQ) // M-Deref-Borrowed-Mut - TYPE(LV) = &mut Ty +MUTABILITY(*P, MQ) // M-Deref-Borrowed-Mut + TYPE(P) = &mut Ty ``` ## Checking aliasability The goal of the aliasability check is to ensure that we never permit `&mut` -borrows of aliasable data. The judgement `ALIASABLE(LV, MQ)` means the -aliasability of `LV` is compatible with a borrow of mutability `MQ`. 
The Rust +borrows of aliasable data. The judgement `ALIASABLE(P, MQ)` means the +aliasability of `P` is compatible with a borrow of mutability `MQ`. The Rust code corresponding to this predicate is the function `check_aliasability()` in `middle::borrowck::gather_loans`. @@ -340,11 +345,11 @@ the stack frame. Owned content is aliasable if it is found in an aliasable location: ```text -ALIASABLE(LV.f, MQ) // M-Field - ALIASABLE(LV, MQ) +ALIASABLE(P.f, MQ) // M-Field + ALIASABLE(P, MQ) -ALIASABLE(*LV, MQ) // M-Deref-Unique - ALIASABLE(LV, MQ) +ALIASABLE(*P, MQ) // M-Deref-Unique + ALIASABLE(P, MQ) ``` ### Checking aliasability of immutable pointer types @@ -353,8 +358,8 @@ Immutable pointer types like `&T` are aliasable, and hence can only be borrowed immutably: ```text -ALIASABLE(*LV, imm) // M-Deref-Borrowed-Imm - TYPE(LV) = &Ty +ALIASABLE(*P, imm) // M-Deref-Borrowed-Imm + TYPE(P) = &Ty ``` ### Checking aliasability of mutable pointer types @@ -362,16 +367,16 @@ ALIASABLE(*LV, imm) // M-Deref-Borrowed-Imm `&mut T` can be frozen, so it is acceptable to borrow it as either imm or mut: ```text -ALIASABLE(*LV, MQ) // M-Deref-Borrowed-Mut - TYPE(LV) = &mut Ty +ALIASABLE(*P, MQ) // M-Deref-Borrowed-Mut + TYPE(P) = &mut Ty ``` ## Checking lifetime These rules aim to ensure that no data is borrowed for a scope that exceeds its lifetime. These two computations wind up being intimately related. -Formally, we define a predicate `LIFETIME(LV, LT, MQ)`, which states that -"the lvalue `LV` can be safely borrowed for the lifetime `LT` with mutability +Formally, we define a predicate `LIFETIME(P, LT, MQ)`, which states that +"the place `P` can be safely borrowed for the lifetime `LT` with mutability `MQ`". The Rust code corresponding to this predicate is the module `middle::borrowck::gather_loans::lifetime`. 
@@ -391,12 +396,12 @@ The lifetime of a field or box is the same as the lifetime of its owner: ```text -LIFETIME(LV.f, LT, MQ) // L-Field - LIFETIME(LV, LT, MQ) +LIFETIME(P.f, LT, MQ) // L-Field + LIFETIME(P, LT, MQ) -LIFETIME(*LV, LT, MQ) // L-Deref-Send - TYPE(LV) = Box - LIFETIME(LV, LT, MQ) +LIFETIME(*P, LT, MQ) // L-Deref-Send + TYPE(P) = Box + LIFETIME(P, LT, MQ) ``` ### Checking lifetime for derefs of references @@ -408,8 +413,8 @@ of the borrow is shorter than the lifetime `LT'` of the pointer itself: ```text -LIFETIME(*LV, LT, MQ) // L-Deref-Borrowed - TYPE(LV) = <' Ty OR <' mut Ty +LIFETIME(*P, LT, MQ) // L-Deref-Borrowed + TYPE(P) = <' Ty OR <' mut Ty LT <= LT' ``` @@ -417,17 +422,17 @@ LIFETIME(*LV, LT, MQ) // L-Deref-Borrowed The final rules govern the computation of *restrictions*, meaning that we compute the set of actions that will be illegal for the life of the -loan. The predicate is written `RESTRICTIONS(LV, LT, ACTIONS) = +loan. The predicate is written `RESTRICTIONS(P, LT, ACTIONS) = RESTRICTION*`, which can be read "in order to prevent `ACTIONS` from -occurring on `LV`, the restrictions `RESTRICTION*` must be respected +occurring on `P`, the restrictions `RESTRICTION*` must be respected for the lifetime of the loan". Note that there is an initial set of restrictions: these restrictions are computed based on the kind of borrow: ```text -&mut LV => RESTRICTIONS(LV, LT, MUTATE|CLAIM|FREEZE) -&LV => RESTRICTIONS(LV, LT, MUTATE|CLAIM) +&mut P => RESTRICTIONS(P, LT, MUTATE|CLAIM|FREEZE) +&P => RESTRICTIONS(P, LT, MUTATE|CLAIM) ``` The reasoning here is that a mutable borrow must be the only writer, @@ -451,8 +456,8 @@ Restricting a field is the same as restricting the owner of that field: ```text -RESTRICTIONS(LV.f, LT, ACTIONS) = RS, (LV.f, ACTIONS) // R-Field - RESTRICTIONS(LV, LT, ACTIONS) = RS +RESTRICTIONS(P.f, LT, ACTIONS) = RS, (P.f, ACTIONS) // R-Field + RESTRICTIONS(P, LT, ACTIONS) = RS ``` The reasoning here is as follows. 
If the field must not be mutated, @@ -467,16 +472,16 @@ origin of inherited mutability. Because the mutability of owned referents is inherited, restricting an owned referent is similar to restricting a field, in that it implies restrictions on the pointer. However, boxes have an important -twist: if the owner `LV` is mutated, that causes the owned referent -`*LV` to be freed! So whenever an owned referent `*LV` is borrowed, we -must prevent the box `LV` from being mutated, which means +twist: if the owner `P` is mutated, that causes the owned referent +`*P` to be freed! So whenever an owned referent `*P` is borrowed, we +must prevent the box `P` from being mutated, which means that we always add `MUTATE` and `CLAIM` to the restriction set imposed -on `LV`: +on `P`: ```text -RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS) // R-Deref-Send-Pointer - TYPE(LV) = Box - RESTRICTIONS(LV, LT, ACTIONS|MUTATE|CLAIM) = RS +RESTRICTIONS(*P, LT, ACTIONS) = RS, (*P, ACTIONS) // R-Deref-Send-Pointer + TYPE(P) = Box + RESTRICTIONS(P, LT, ACTIONS|MUTATE|CLAIM) = RS ``` ### Restrictions for loans of immutable borrowed referents @@ -484,15 +489,15 @@ RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS) // R-Deref-Send-Pointer Immutable borrowed referents are freely aliasable, meaning that the compiler does not prevent you from copying the pointer. This implies that issuing restrictions is useless. We might prevent the -user from acting on `*LV` itself, but there could be another path -`*LV1` that refers to the exact same memory, and we would not be +user from acting on `*P` itself, but there could be another path +`*P1` that refers to the exact same memory, and we would not be restricting that path. 
Therefore, the rule for `&Ty` pointers always returns an empty set of restrictions, and it only permits restricting `MUTATE` and `CLAIM` actions: ```text -RESTRICTIONS(*LV, LT, ACTIONS) = [] // R-Deref-Imm-Borrowed - TYPE(LV) = <' Ty +RESTRICTIONS(*P, LT, ACTIONS) = [] // R-Deref-Imm-Borrowed + TYPE(P) = <' Ty LT <= LT' // (1) ACTIONS subset of [MUTATE, CLAIM] ``` @@ -546,7 +551,7 @@ This function is legal. The reason for this is that the inner pointer (`*point : &'b Point`) is enough to guarantee the memory is immutable and valid for the lifetime `'b`. This is reflected in `RESTRICTIONS()` by the fact that we do not recurse (i.e., we impose -no restrictions on `LV`, which in this particular case is the pointer +no restrictions on `P`, which in this particular case is the pointer `point : &'a &'b Point`). #### Why both `LIFETIME()` and `RESTRICTIONS()`? @@ -612,10 +617,10 @@ while the new claimant is live. The rule for mutable borrowed pointers is as follows: ```text -RESTRICTIONS(*LV, LT, ACTIONS) = RS, (*LV, ACTIONS) // R-Deref-Mut-Borrowed - TYPE(LV) = <' mut Ty +RESTRICTIONS(*P, LT, ACTIONS) = RS, (*P, ACTIONS) // R-Deref-Mut-Borrowed + TYPE(P) = <' mut Ty LT <= LT' // (1) - RESTRICTIONS(LV, LT, ACTIONS) = RS // (2) + RESTRICTIONS(P, LT, ACTIONS) = RS // (2) ``` Let's examine the two numbered clauses: @@ -670,7 +675,7 @@ fn foo(t0: &mut i32) { Remember that `&mut` pointers are linear, and hence `let t1 = t0` is a move of `t0` -- or would be, if it were legal. Instead, we get an -error, because clause (2) imposes restrictions on `LV` (`t0`, here), +error, because clause (2) imposes restrictions on `P` (`t0`, here), and any restrictions on a path make it impossible to move from that path. @@ -906,7 +911,7 @@ results of a dataflow computation. The `MovePath` tree tracks every path that is moved or assigned to. 
These paths have the same form as the `LoanPath` data structure, which -in turn is the "real world version of the lvalues `LV` that we +in turn is the "real world version of the places `P` that we introduced earlier. The difference between a `MovePath` and a `LoanPath` is that move paths are: @@ -1121,7 +1126,7 @@ fn foo(a: [D; 10], b: [D; 10], i: i32, t: bool) -> D { } ``` -There are a number of ways that the trans backend could choose to +There are a number of ways that the codegen backend could choose to compile this (e.g. a `[bool; 10]` array for each such moved array; or an `Option` for each moved array). From the viewpoint of the borrow-checker, the important thing is to record what kind of fragment @@ -1132,7 +1137,7 @@ is implied by the relevant moves. While writing up these docs, I encountered some rules I believe to be stricter than necessary: -- I think restricting the `&mut` LV against moves and `ALIAS` is sufficient, +- I think restricting the `&mut` P against moves and `ALIAS` is sufficient, `MUTATE` and `CLAIM` are overkill. `MUTATE` was necessary when swap was a built-in operator, but as it is not, it is implied by `CLAIM`, and `CLAIM` is implied by `ALIAS`. 
The only net effect of this is an diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index 908737669c5c..709590f649b9 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -25,7 +25,7 @@ use rustc::middle::expr_use_visitor::MutateMode; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; -use rustc::ty::{self, TyCtxt}; +use rustc::ty::{self, TyCtxt, RegionKind}; use syntax::ast; use syntax_pos::Span; use rustc::hir; @@ -91,14 +91,14 @@ struct CheckLoanCtxt<'a, 'tcx: 'a> { dfcx_loans: &'a LoanDataFlow<'a, 'tcx>, move_data: &'a move_data::FlowedMoveData<'a, 'tcx>, all_loans: &'a [Loan<'tcx>], - param_env: ty::ParamEnv<'tcx>, + movable_generator: bool, } impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { fn consume(&mut self, consume_id: ast::NodeId, consume_span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { debug!("consume(consume_id={}, cmt={:?}, mode={:?})", consume_id, cmt, mode); @@ -109,12 +109,12 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { fn matched_pat(&mut self, _matched_pat: &hir::Pat, - _cmt: mc::cmt, + _cmt: &mc::cmt_, _mode: euv::MatchMode) { } fn consume_pat(&mut self, consume_pat: &hir::Pat, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { debug!("consume_pat(consume_pat={:?}, cmt={:?}, mode={:?})", consume_pat, @@ -127,7 +127,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { fn borrow(&mut self, borrow_id: ast::NodeId, borrow_span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, loan_region: ty::Region<'tcx>, bk: ty::BorrowKind, loan_cause: euv::LoanCause) @@ -138,7 +138,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { bk, loan_cause); let hir_id = self.tcx().hir.node_to_hir_id(borrow_id); - if let Some(lp) = opt_loan_path(&cmt) 
{ + if let Some(lp) = opt_loan_path(cmt) { let moved_value_use_kind = match loan_cause { euv::ClosureCapture(_) => MovedInCapture, _ => MovedInUse, @@ -147,26 +147,27 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { } self.check_for_conflicting_loans(hir_id.local_id); + + self.check_for_loans_across_yields(cmt, loan_region, borrow_span); } fn mutate(&mut self, assignment_id: ast::NodeId, assignment_span: Span, - assignee_cmt: mc::cmt<'tcx>, + assignee_cmt: &mc::cmt_<'tcx>, mode: euv::MutateMode) { debug!("mutate(assignment_id={}, assignee_cmt={:?})", assignment_id, assignee_cmt); - if let Some(lp) = opt_loan_path(&assignee_cmt) { + if let Some(lp) = opt_loan_path(assignee_cmt) { match mode { MutateMode::Init | MutateMode::JustWrite => { // In a case like `path = 1`, then path does not // have to be *FULLY* initialized, but we still // must be careful lest it contains derefs of // pointers. - let hir_id = self.tcx().hir.node_to_hir_id(assignee_cmt.id); - self.check_if_assigned_path_is_moved(hir_id.local_id, + self.check_if_assigned_path_is_moved(assignee_cmt.hir_id.local_id, assignment_span, MovedInUse, &lp); @@ -175,8 +176,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { // In a case like `path += 1`, then path must be // fully initialized, since we will read it before // we write it. - let hir_id = self.tcx().hir.node_to_hir_id(assignee_cmt.id); - self.check_if_path_is_moved(hir_id.local_id, + self.check_if_path_is_moved(assignee_cmt.hir_id.local_id, assignment_span, MovedInUse, &lp); @@ -198,13 +198,23 @@ pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, debug!("check_loans(body id={})", body.value.id); let def_id = bccx.tcx.hir.body_owner_def_id(body.id()); + + let node_id = bccx.tcx.hir.as_local_node_id(def_id).unwrap(); + let movable_generator = !match bccx.tcx.hir.get(node_id) { + hir::map::Node::NodeExpr(&hir::Expr { + node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)), + .. 
+ }) => true, + _ => false, + }; + let param_env = bccx.tcx.param_env(def_id); let mut clcx = CheckLoanCtxt { bccx, dfcx_loans, move_data, all_loans, - param_env, + movable_generator, }; let rvalue_promotable_map = bccx.tcx.rvalue_promotable_map(def_id); euv::ExprUseVisitor::new(&mut clcx, @@ -348,6 +358,102 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { return result; } + pub fn check_for_loans_across_yields(&self, + cmt: &mc::cmt_<'tcx>, + loan_region: ty::Region<'tcx>, + borrow_span: Span) { + pub fn borrow_of_local_data<'tcx>(cmt: &mc::cmt_<'tcx>) -> bool { + match cmt.cat { + // Borrows of static items is allowed + Categorization::StaticItem => false, + // Reborrow of already borrowed data is ignored + // Any errors will be caught on the initial borrow + Categorization::Deref(..) => false, + + // By-ref upvars has Derefs so they will get ignored. + // Generators counts as FnOnce so this leaves only + // by-move upvars, which is local data for generators + Categorization::Upvar(..) => true, + + Categorization::Rvalue(region) => { + // Rvalues promoted to 'static are no longer local + if let RegionKind::ReStatic = *region { + false + } else { + true + } + } + + // Borrow of local data must be checked + Categorization::Local(..) => true, + + // For interior references and downcasts, find out if the base is local + Categorization::Downcast(ref cmt_base, _) | + Categorization::Interior(ref cmt_base, _) => borrow_of_local_data(&cmt_base), + } + } + + if !self.movable_generator { + return; + } + + if !borrow_of_local_data(cmt) { + return; + } + + let scope = match *loan_region { + // A concrete region in which we will look for a yield expression + RegionKind::ReScope(scope) => scope, + + // There cannot be yields inside an empty region + RegionKind::ReEmpty => return, + + // Local data cannot have these lifetimes + RegionKind::ReEarlyBound(..) | + RegionKind::ReLateBound(..) | + RegionKind::ReFree(..) 
| + RegionKind::ReStatic => { + self.bccx + .tcx + .sess.delay_span_bug(borrow_span, + &format!("unexpected region for local data {:?}", + loan_region)); + return + } + + // These cannot exist in borrowck + RegionKind::ReVar(..) | + RegionKind::ReCanonical(..) | + RegionKind::ReSkolemized(..) | + RegionKind::ReClosureBound(..) | + RegionKind::ReErased => span_bug!(borrow_span, + "unexpected region in borrowck {:?}", + loan_region), + }; + + let body_id = self.bccx.body.value.hir_id.local_id; + + if self.bccx.region_scope_tree.containing_body(scope) != Some(body_id) { + // We are borrowing local data longer than its storage. + // This should result in other borrowck errors. + self.bccx.tcx.sess.delay_span_bug(borrow_span, + "borrowing local data longer than its storage"); + return; + } + + if let Some(yield_span) = self.bccx + .region_scope_tree + .yield_in_scope_for_expr(scope, + cmt.hir_id, + self.bccx.body) + { + self.bccx.cannot_borrow_across_generator_yield(borrow_span, + yield_span, + Origin::Ast).emit(); + self.bccx.signal_error(); + } + } + pub fn check_for_conflicting_loans(&self, node: hir::ItemLocalId) { //! Checks to see whether any of the loans that are issued //! on entrance to `node` conflict with loans that have already been @@ -401,9 +507,13 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { new_loan, old_loan, old_loan, new_loan).err(); match (err_old_new, err_new_old) { - (Some(mut err), None) | (None, Some(mut err)) => err.emit(), + (Some(mut err), None) | (None, Some(mut err)) => { + err.emit(); + self.bccx.signal_error(); + } (Some(mut err_old), Some(mut err_new)) => { err_old.emit(); + self.bccx.signal_error(); err_new.cancel(); } (None, None) => return true, @@ -484,8 +594,8 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // 3. Where does old loan expire. 
let previous_end_span = - Some(old_loan.kill_scope.span(self.tcx(), &self.bccx.region_scope_tree) - .end_point()); + Some(self.tcx().sess.codemap().end_point( + old_loan.kill_scope.span(self.tcx(), &self.bccx.region_scope_tree))); let mut err = match (new_loan.kind, old_loan.kind) { (ty::MutBorrow, ty::MutBorrow) => @@ -541,9 +651,9 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { fn consume_common(&self, id: hir::ItemLocalId, span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { - if let Some(lp) = opt_loan_path(&cmt) { + if let Some(lp) = opt_loan_path(cmt) { let moved_value_use_kind = match mode { euv::Copy => { self.check_for_copy_of_frozen_path(id, span, &lp); @@ -589,6 +699,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { loan_span, &self.bccx.loan_path_to_string(&loan_path), Origin::Ast) .emit(); + self.bccx.signal_error(); } } } @@ -639,6 +750,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { }; err.emit(); + self.bccx.signal_error(); } } } @@ -687,8 +799,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { use_kind, &lp, the_move, - moved_lp, - self.param_env); + moved_lp); false }); } @@ -768,11 +879,11 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { fn check_assignment(&self, assignment_id: hir::ItemLocalId, assignment_span: Span, - assignee_cmt: mc::cmt<'tcx>) { + assignee_cmt: &mc::cmt_<'tcx>) { debug!("check_assignment(assignee_cmt={:?})", assignee_cmt); // Check that we don't invalidate any outstanding loans - if let Some(loan_path) = opt_loan_path(&assignee_cmt) { + if let Some(loan_path) = opt_loan_path(assignee_cmt) { let scope = region::Scope::Node(assignment_id); self.each_in_scope_loan_affecting_path(scope, &loan_path, |loan| { self.report_illegal_mutation(assignment_span, &loan_path, loan); @@ -784,7 +895,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // needs to be done here instead of in check_loans because we // depend on move data. 
if let Categorization::Local(local_id) = assignee_cmt.cat { - let lp = opt_loan_path(&assignee_cmt).unwrap(); + let lp = opt_loan_path(assignee_cmt).unwrap(); self.move_data.each_assignment_of(assignment_id, &lp, |assign| { if assignee_cmt.mutbl.is_mutable() { let hir_id = self.bccx.tcx.hir.node_to_hir_id(local_id); @@ -808,5 +919,6 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { self.bccx.cannot_assign_to_borrowed( span, loan.span, &self.bccx.loan_path_to_string(loan_path), Origin::Ast) .emit(); + self.bccx.signal_error(); } } diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index 465457f5ab39..b76931c30174 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -26,10 +26,10 @@ use syntax_pos::Span; use rustc::hir::*; use rustc::hir::map::Node::*; -struct GatherMoveInfo<'tcx> { +struct GatherMoveInfo<'c, 'tcx: 'c> { id: hir::ItemLocalId, kind: MoveKind, - cmt: mc::cmt<'tcx>, + cmt: &'c mc::cmt_<'tcx>, span_path_opt: Option> } @@ -63,7 +63,7 @@ fn get_pattern_source<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pat: &Pat) -> Patte NodeExpr(ref e) => { // the enclosing expression must be a `match` or something else assert!(match e.node { - ExprMatch(..) => true, + ExprKind::Match(..) 
=> true, _ => return PatternSource::Other, }); PatternSource::MatchExpr(e) @@ -87,7 +87,7 @@ pub fn gather_move_from_expr<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, move_error_collector: &mut MoveErrorCollector<'tcx>, move_expr_id: hir::ItemLocalId, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, move_reason: euv::MoveReason) { let kind = match move_reason { euv::DirectRefMove | euv::PatBindingMove => MoveExpr, @@ -102,17 +102,17 @@ pub fn gather_move_from_expr<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, gather_move(bccx, move_data, move_error_collector, move_info); } -pub fn gather_move_from_pat<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, - move_data: &MoveData<'tcx>, - move_error_collector: &mut MoveErrorCollector<'tcx>, - move_pat: &hir::Pat, - cmt: mc::cmt<'tcx>) { +pub fn gather_move_from_pat<'a, 'c, 'tcx: 'c>(bccx: &BorrowckCtxt<'a, 'tcx>, + move_data: &MoveData<'tcx>, + move_error_collector: &mut MoveErrorCollector<'tcx>, + move_pat: &hir::Pat, + cmt: &'c mc::cmt_<'tcx>) { let source = get_pattern_source(bccx.tcx,move_pat); let pat_span_path_opt = match move_pat.node { - PatKind::Binding(_, _, ref path1, _) => { + PatKind::Binding(_, _, ident, _) => { Some(MovePlace { span: move_pat.span, - name: path1.node, + name: ident.name, pat_source: source, }) } @@ -132,18 +132,17 @@ pub fn gather_move_from_pat<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, gather_move(bccx, move_data, move_error_collector, move_info); } -fn gather_move<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, +fn gather_move<'a, 'c, 'tcx: 'c>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, move_error_collector: &mut MoveErrorCollector<'tcx>, - move_info: GatherMoveInfo<'tcx>) { + move_info: GatherMoveInfo<'c, 'tcx>) { debug!("gather_move(move_id={:?}, cmt={:?})", move_info.id, move_info.cmt); - let potentially_illegal_move = - check_and_get_illegal_move_origin(bccx, &move_info.cmt); + let potentially_illegal_move = check_and_get_illegal_move_origin(bccx, move_info.cmt); if let 
Some(illegal_move_origin) = potentially_illegal_move { debug!("illegal_move_origin={:?}", illegal_move_origin); - let error = MoveError::with_move_info(illegal_move_origin, + let error = MoveError::with_move_info(Rc::new(illegal_move_origin), move_info.span_path_opt); move_error_collector.add_error(error); return; @@ -164,24 +163,19 @@ pub fn gather_assignment<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, assignment_id: hir::ItemLocalId, assignment_span: Span, - assignee_loan_path: Rc>, - assignee_id: hir::ItemLocalId, - mode: euv::MutateMode) { + assignee_loan_path: Rc>) { move_data.add_assignment(bccx.tcx, assignee_loan_path, assignment_id, - assignment_span, - assignee_id, - mode); + assignment_span); } // (keep in sync with move_error::report_cannot_move_out_of ) fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, - cmt: &mc::cmt<'tcx>) - -> Option> { + cmt: &mc::cmt_<'tcx>) + -> Option> { match cmt.cat { Categorization::Deref(_, mc::BorrowedPtr(..)) | - Categorization::Deref(_, mc::Implicit(..)) | Categorization::Deref(_, mc::UnsafePtr(..)) | Categorization::StaticItem => { Some(cmt.clone()) diff --git a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs index 66aaafb77f75..c9dcc0d9fa26 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs @@ -27,9 +27,8 @@ pub fn guarantee_lifetime<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, item_scope: region::Scope, span: Span, cause: euv::LoanCause, - cmt: mc::cmt<'tcx>, - loan_region: ty::Region<'tcx>, - _: ty::BorrowKind) + cmt: &'a mc::cmt_<'tcx>, + loan_region: ty::Region<'tcx>) -> Result<(),()> { //! Reports error if `loan_region` is larger than S //! 
where S is `item_scope` if `cmt` is an upvar, @@ -41,8 +40,8 @@ pub fn guarantee_lifetime<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, span, cause, loan_region, - cmt_original: cmt.clone()}; - ctxt.check(&cmt, None) + cmt_original: cmt}; + ctxt.check(cmt, None) } /////////////////////////////////////////////////////////////////////////// @@ -57,12 +56,11 @@ struct GuaranteeLifetimeContext<'a, 'tcx: 'a> { span: Span, cause: euv::LoanCause, loan_region: ty::Region<'tcx>, - cmt_original: mc::cmt<'tcx> + cmt_original: &'a mc::cmt_<'tcx> } impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { - - fn check(&self, cmt: &mc::cmt<'tcx>, discr_scope: Option) -> R { + fn check(&self, cmt: &mc::cmt_<'tcx>, discr_scope: Option) -> R { //! Main routine. Walks down `cmt` until we find the //! "guarantor". Reports an error if `self.loan_region` is //! larger than scope of `cmt`. @@ -75,7 +73,6 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { Categorization::Local(..) | // L-Local Categorization::Upvar(..) | Categorization::Deref(_, mc::BorrowedPtr(..)) | // L-Deref-Borrowed - Categorization::Deref(_, mc::Implicit(..)) | Categorization::Deref(_, mc::UnsafePtr(..)) => { self.check_scope(self.scope(cmt)) } @@ -102,9 +99,9 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { } } - fn scope(&self, cmt: &mc::cmt<'tcx>) -> ty::Region<'tcx> { + fn scope(&self, cmt: &mc::cmt_<'tcx>) -> ty::Region<'tcx> { //! Returns the maximal region scope for the which the - //! lvalue `cmt` is guaranteed to be valid without any + //! place `cmt` is guaranteed to be valid without any //! rooting etc, and presuming `cmt` is not mutated. 
match cmt.cat { @@ -123,8 +120,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { Categorization::Deref(_, mc::UnsafePtr(..)) => { self.bccx.tcx.types.re_static } - Categorization::Deref(_, mc::BorrowedPtr(_, r)) | - Categorization::Deref(_, mc::Implicit(_, r)) => { + Categorization::Deref(_, mc::BorrowedPtr(_, r)) => { r } Categorization::Downcast(ref cmt, _) | @@ -136,7 +132,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { } fn report_error(&self, code: bckerr_code<'tcx>) { - self.bccx.report(BckError { cmt: self.cmt_original.clone(), + self.bccx.report(BckError { cmt: self.cmt_original, span: self.span, cause: BorrowViolation(self.cause), code: code }); diff --git a/src/librustc_borrowck/borrowck/gather_loans/mod.rs b/src/librustc_borrowck/borrowck/gather_loans/mod.rs index 5cbe2822e5c0..6c83e2dd1c20 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/mod.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/mod.rs @@ -76,7 +76,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn consume(&mut self, consume_id: ast::NodeId, _consume_span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { debug!("consume(consume_id={}, cmt={:?}, mode={:?})", consume_id, cmt, mode); @@ -93,7 +93,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn matched_pat(&mut self, matched_pat: &hir::Pat, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: euv::MatchMode) { debug!("matched_pat(matched_pat={:?}, cmt={:?}, mode={:?})", matched_pat, @@ -103,7 +103,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn consume_pat(&mut self, consume_pat: &hir::Pat, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { debug!("consume_pat(consume_pat={:?}, cmt={:?}, mode={:?})", consume_pat, @@ -123,7 +123,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn borrow(&mut self, borrow_id: ast::NodeId, borrow_span: Span, - cmt: mc::cmt<'tcx>, + cmt: 
&mc::cmt_<'tcx>, loan_region: ty::Region<'tcx>, bk: ty::BorrowKind, loan_cause: euv::LoanCause) @@ -144,13 +144,12 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn mutate(&mut self, assignment_id: ast::NodeId, assignment_span: Span, - assignee_cmt: mc::cmt<'tcx>, - mode: euv::MutateMode) + assignee_cmt: &mc::cmt_<'tcx>, + _: euv::MutateMode) { self.guarantee_assignment_valid(assignment_id, assignment_span, - assignee_cmt, - mode); + assignee_cmt); } fn decl_without_init(&mut self, id: ast::NodeId, _span: Span) { @@ -165,7 +164,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, borrow_span: Span, loan_cause: AliasableViolationKind, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, req_kind: ty::BorrowKind) -> Result<(),()> { @@ -206,7 +205,7 @@ fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, fn check_mutability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, borrow_span: Span, cause: AliasableViolationKind, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, req_kind: ty::BorrowKind) -> Result<(),()> { debug!("check_mutability(cause={:?} cmt={:?} req_kind={:?}", @@ -246,10 +245,9 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { fn guarantee_assignment_valid(&mut self, assignment_id: ast::NodeId, assignment_span: Span, - cmt: mc::cmt<'tcx>, - mode: euv::MutateMode) { + cmt: &mc::cmt_<'tcx>) { - let opt_lp = opt_loan_path(&cmt); + let opt_lp = opt_loan_path(cmt); debug!("guarantee_assignment_valid(assignment_id={}, cmt={:?}) opt_lp={:?}", assignment_id, cmt, opt_lp); @@ -259,14 +257,14 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } else { // Check that we don't allow assignments to non-mutable data. if check_mutability(self.bccx, assignment_span, MutabilityViolation, - cmt.clone(), ty::MutBorrow).is_err() { + cmt, ty::MutBorrow).is_err() { return; // reported an error, no sense in reporting more. 
} } // Check that we don't allow assignments to aliasable data if check_aliasability(self.bccx, assignment_span, MutabilityViolation, - cmt.clone(), ty::MutBorrow).is_err() { + cmt, ty::MutBorrow).is_err() { return; // reported an error, no sense in reporting more. } @@ -282,9 +280,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { self.bccx.tcx.hir.node_to_hir_id(assignment_id) .local_id, assignment_span, - lp, - self.bccx.tcx.hir.node_to_hir_id(cmt.id).local_id, - mode); + lp); } None => { // This can occur with e.g. `*foo() = 5`. In such @@ -300,7 +296,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { fn guarantee_valid(&mut self, borrow_id: hir::ItemLocalId, borrow_span: Span, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, req_kind: ty::BorrowKind, loan_region: ty::Region<'tcx>, cause: euv::LoanCause) { @@ -320,28 +316,26 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { // Check that the lifetime of the borrow does not exceed // the lifetime of the data being borrowed. if lifetime::guarantee_lifetime(self.bccx, self.item_ub, - borrow_span, cause, cmt.clone(), loan_region, - req_kind).is_err() { + borrow_span, cause, cmt, loan_region).is_err() { return; // reported an error, no sense in reporting more. } // Check that we don't allow mutable borrows of non-mutable data. if check_mutability(self.bccx, borrow_span, BorrowViolation(cause), - cmt.clone(), req_kind).is_err() { + cmt, req_kind).is_err() { return; // reported an error, no sense in reporting more. } // Check that we don't allow mutable borrows of aliasable data. if check_aliasability(self.bccx, borrow_span, BorrowViolation(cause), - cmt.clone(), req_kind).is_err() { + cmt, req_kind).is_err() { return; // reported an error, no sense in reporting more. } // Compute the restrictions that are required to enforce the // loan is safe. 
let restr = restrictions::compute_restrictions( - self.bccx, borrow_span, cause, - cmt.clone(), loan_region); + self.bccx, borrow_span, cause, &cmt, loan_region); debug!("guarantee_valid(): restrictions={:?}", restr); @@ -366,6 +360,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { ty::ReStatic => self.item_ub, + ty::ReCanonical(_) | ty::ReEmpty | ty::ReClosureBound(..) | ty::ReLateBound(..) | diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index 1f2b917bdb99..b217e6a85647 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -68,7 +68,7 @@ pub struct GroupedMoveErrors<'tcx> { move_to_places: Vec> } -fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &Vec>) { +fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &[MoveError<'tcx>]) { let grouped_errors = group_errors_with_same_origin(errors); for error in &grouped_errors { let mut err = report_cannot_move_out_of(bccx, error.move_from.clone()); @@ -99,10 +99,11 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &Vec(errors: &Vec>) +fn group_errors_with_same_origin<'tcx>(errors: &[MoveError<'tcx>]) -> Vec> { let mut grouped_errors = Vec::new(); for error in errors { @@ -112,15 +113,15 @@ fn group_errors_with_same_origin<'tcx>(errors: &Vec>) fn append_to_grouped_errors<'tcx>(grouped_errors: &mut Vec>, error: &MoveError<'tcx>) { - let move_from_id = error.move_from.id; - debug!("append_to_grouped_errors(move_from_id={})", move_from_id); + let move_from_id = error.move_from.hir_id; + debug!("append_to_grouped_errors(move_from_id={:?})", move_from_id); let move_to = if error.move_to.is_some() { vec![error.move_to.clone().unwrap()] } else { Vec::new() }; for ge in &mut *grouped_errors { - if move_from_id == ge.move_from.id && error.move_to.is_some() { + if move_from_id == ge.move_from.hir_id && 
error.move_to.is_some() { debug!("appending move_to to list"); ge.move_to_places.extend(move_to); return @@ -140,7 +141,6 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &'a BorrowckCtxt<'a, 'tcx>, -> DiagnosticBuilder<'a> { match move_from.cat { Categorization::Deref(_, mc::BorrowedPtr(..)) | - Categorization::Deref(_, mc::Implicit(..)) | Categorization::Deref(_, mc::UnsafePtr(..)) | Categorization::StaticItem => { bccx.cannot_move_out_of( @@ -148,7 +148,7 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &'a BorrowckCtxt<'a, 'tcx>, } Categorization::Interior(ref b, mc::InteriorElement(ik)) => { bccx.cannot_move_out_of_interior_noncopy( - move_from.span, b.ty, ik == Kind::Index, Origin::Ast) + move_from.span, b.ty, Some(ik == Kind::Index), Origin::Ast) } Categorization::Downcast(ref b, _) | diff --git a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs index 5cfbe49f77f1..e332f6832754 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs @@ -30,7 +30,7 @@ pub enum RestrictionResult<'tcx> { pub fn compute_restrictions<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, span: Span, cause: euv::LoanCause, - cmt: mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, loan_region: ty::Region<'tcx>) -> RestrictionResult<'tcx> { let ctxt = RestrictionsContext { @@ -55,7 +55,7 @@ struct RestrictionsContext<'a, 'tcx: 'a> { impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { fn restrict(&self, - cmt: mc::cmt<'tcx>) -> RestrictionResult<'tcx> { + cmt: &mc::cmt_<'tcx>) -> RestrictionResult<'tcx> { debug!("restrict(cmt={:?})", cmt); let new_lp = |v: LoanPathKind<'tcx>| Rc::new(LoanPath::new(v, cmt.ty)); @@ -86,7 +86,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { // When we borrow the interior of an enum, we have to // ensure the enum itself is not mutated, because that // could cause the type of the memory to change. 
- self.restrict(cmt_base) + self.restrict(&cmt_base) } Categorization::Interior(cmt_base, interior) => { @@ -101,14 +101,16 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { }; let interior = interior.cleaned(); let base_ty = cmt_base.ty; - let result = self.restrict(cmt_base); + let result = self.restrict(&cmt_base); // Borrowing one union field automatically borrows all its fields. match base_ty.sty { ty::TyAdt(adt_def, _) if adt_def.is_union() => match result { RestrictionResult::Safe => RestrictionResult::Safe, RestrictionResult::SafeIf(base_lp, mut base_vec) => { - for field in &adt_def.non_enum_variant().fields { - let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() { + let field = InteriorKind::InteriorField( + mc::FieldIndex(i, field.ident.name) + ); let field_ty = if field == interior { cmt.ty } else { @@ -144,17 +146,17 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { // // Eventually we should make these non-special and // just rely on Deref implementation. - let result = self.restrict(cmt_base); + let result = self.restrict(&cmt_base); self.extend(result, &cmt, LpDeref(pk)) } - mc::Implicit(bk, lt) | mc::BorrowedPtr(bk, lt) => { + mc::BorrowedPtr(bk, lt) => { // R-Deref-[Mut-]Borrowed if !self.bccx.is_subregion_of(self.loan_region, lt) { self.bccx.report( BckError { span: self.span, cause: BorrowViolation(self.cause), - cmt: cmt_base, + cmt: &cmt_base, code: err_borrowed_pointer_too_short( self.loan_region, lt)}); return RestrictionResult::Safe; @@ -168,7 +170,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { // The referent can be aliased after the // references lifetime ends (by a newly-unfrozen // borrow). 
- let result = self.restrict(cmt_base); + let result = self.restrict(&cmt_base); self.extend(result, &cmt, LpDeref(pk)) } } @@ -182,7 +184,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { fn extend(&self, result: RestrictionResult<'tcx>, - cmt: &mc::cmt<'tcx>, + cmt: &mc::cmt_<'tcx>, elem: LoanPathElem<'tcx>) -> RestrictionResult<'tcx> { match result { RestrictionResult::Safe => RestrictionResult::Safe, diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 4529e4bab752..5b08400eb112 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -24,11 +24,7 @@ use rustc::hir::HirId; use rustc::hir::map as hir_map; use rustc::hir::map::blocks::FnLikeNode; use rustc::cfg; -use rustc::middle::dataflow::DataFlowContext; -use rustc::middle::dataflow::BitwiseOperator; -use rustc::middle::dataflow::DataFlowOperator; -use rustc::middle::dataflow::KillFrom; -use rustc::middle::borrowck::BorrowCheckResult; +use rustc::middle::borrowck::{BorrowCheckResult, SignalledError}; use rustc::hir::def_id::{DefId, LocalDefId}; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; @@ -37,13 +33,15 @@ use rustc::middle::mem_categorization::ImmutabilityBlame; use rustc::middle::region; use rustc::middle::free_region::RegionRelations; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::maps::Providers; +use rustc::ty::query::Providers; use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin}; +use rustc_mir::util::suggest_ref_mut; use rustc::util::nodemap::FxHashSet; -use std::cell::RefCell; +use std::cell::{Cell, RefCell}; use std::fmt; use std::rc::Rc; +use rustc_data_structures::sync::Lrc; use std::hash::{Hash, Hasher}; use syntax::ast; use syntax_pos::{MultiSpan, Span}; @@ -52,6 +50,8 @@ use errors::{DiagnosticBuilder, DiagnosticId}; use rustc::hir; use rustc::hir::intravisit::{self, Visitor}; +use dataflow::{DataFlowContext, BitwiseOperator, DataFlowOperator, 
KillFrom}; + pub mod check_loans; pub mod gather_loans; @@ -66,9 +66,9 @@ pub struct LoanDataFlowOperator; pub type LoanDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, LoanDataFlowOperator>; pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - for body_owner_def_id in tcx.body_owners() { + tcx.par_body_owners(|body_owner_def_id| { tcx.borrowck(body_owner_def_id); - } + }); } pub fn provide(providers: &mut Providers) { @@ -86,8 +86,10 @@ pub struct AnalysisData<'a, 'tcx: 'a> { } fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) - -> Rc + -> Lrc { + assert!(tcx.use_ast_borrowck() || tcx.migrate_borrowck()); + debug!("borrowck(body_owner_def_id={:?})", owner_def_id); let owner_id = tcx.hir.as_local_node_id(owner_def_id).unwrap(); @@ -99,8 +101,9 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) // those things (notably the synthesized constructors from // tuple structs/variants) do not have an associated body // and do not need borrowchecking. - return Rc::new(BorrowCheckResult { + return Lrc::new(BorrowCheckResult { used_mut_nodes: FxHashSet(), + signalled_any_error: SignalledError::NoErrorsSeen, }) } _ => { } @@ -117,6 +120,7 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) owner_def_id, body, used_mut_nodes: RefCell::new(FxHashSet()), + signalled_any_error: Cell::new(SignalledError::NoErrorsSeen), }; // Eventually, borrowck will always read the MIR, but at the @@ -127,7 +131,7 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) // Note that `mir_validated` is a "stealable" result; the // thief, `optimized_mir()`, forces borrowck, so we know that // is not yet stolen. - tcx.mir_validated(owner_def_id).borrow(); + ty::query::queries::mir_validated::ensure(tcx, owner_def_id); // option dance because you can't capture an uninitialized variable // by mut-ref. 
@@ -143,10 +147,14 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) { check_loans::check_loans(&mut bccx, &loan_dfcx, &flowed_moves, &all_loans, body); } - unused::check(&mut bccx, body); - Rc::new(BorrowCheckResult { + if !tcx.use_mir_borrowck() { + unused::check(&mut bccx, body); + } + + Lrc::new(BorrowCheckResult { used_mut_nodes: bccx.used_mut_nodes.into_inner(), + signalled_any_error: bccx.signalled_any_error.into_inner(), }) } @@ -170,7 +178,7 @@ fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tc if !force_analysis && move_data.is_empty() && all_loans.is_empty() { // large arrays of data inserted as constants can take a lot of // time and memory to borrow-check - see issue #36799. However, - // they don't have lvalues, so no borrow-check is actually needed. + // they don't have places, so no borrow-check is actually needed. // Recognize that case and skip borrow-checking. debug!("skipping loan propagation for {:?} because of no loans", body_id); return None; @@ -227,6 +235,7 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( owner_def_id, body, used_mut_nodes: RefCell::new(FxHashSet()), + signalled_any_error: Cell::new(SignalledError::NoErrorsSeen), }; let dataflow_data = build_borrowck_dataflow_data(&mut bccx, true, body_id, |_| cfg); @@ -243,39 +252,48 @@ pub struct BorrowckCtxt<'a, 'tcx: 'a> { // Some in `borrowck_fn` and cleared later tables: &'a ty::TypeckTables<'tcx>, - region_scope_tree: Rc, + region_scope_tree: Lrc, owner_def_id: DefId, body: &'tcx hir::Body, used_mut_nodes: RefCell>, + + signalled_any_error: Cell, } -impl<'b, 'tcx: 'b> BorrowckErrors for BorrowckCtxt<'b, 'tcx> { - fn struct_span_err_with_code<'a, S: Into>(&'a self, - sp: S, - msg: &str, - code: DiagnosticId) - -> DiagnosticBuilder<'a> + +impl<'a, 'tcx: 'a> BorrowckCtxt<'a, 'tcx> { + fn signal_error(&self) { + self.signalled_any_error.set(SignalledError::SawSomeError); + } +} + +impl<'a, 'b, 'tcx: 'b> BorrowckErrors<'a> for 
&'a BorrowckCtxt<'b, 'tcx> { + fn struct_span_err_with_code>(self, + sp: S, + msg: &str, + code: DiagnosticId) + -> DiagnosticBuilder<'a> { self.tcx.sess.struct_span_err_with_code(sp, msg, code) } - fn struct_span_err<'a, S: Into>(&'a self, - sp: S, - msg: &str) - -> DiagnosticBuilder<'a> + fn struct_span_err>(self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { self.tcx.sess.struct_span_err(sp, msg) } - fn cancel_if_wrong_origin<'a>(&'a self, - mut diag: DiagnosticBuilder<'a>, - o: Origin) - -> DiagnosticBuilder<'a> + fn cancel_if_wrong_origin(self, + mut diag: DiagnosticBuilder<'a>, + o: Origin) + -> DiagnosticBuilder<'a> { - if !o.should_emit_errors(self.tcx.sess.borrowck_mode()) { + if !o.should_emit_errors(self.tcx.borrowck_mode()) { self.tcx.sess.diagnostic().cancel(&mut diag); } diag @@ -369,7 +387,7 @@ const DOWNCAST_PRINTED_OPERATOR: &'static str = " as "; // is tracked is irrelevant here.) #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum InteriorKind { - InteriorField(mc::FieldName), + InteriorField(mc::FieldIndex), InteriorElement, } @@ -384,9 +402,9 @@ impl ToInteriorKind for mc::InteriorKind { } // This can be: -// - a pointer dereference (`*LV` in README.md) +// - a pointer dereference (`*P` in README.md) // - a field reference, with an optional definition of the containing -// enum variant (`LV.f` in README.md) +// enum variant (`P.f` in README.md) // `DefId` is present when the field is part of struct that is in // a variant of an enum. 
For instance in: // `enum E { X { foo: u32 }, Y { foo: u32 }}` @@ -402,7 +420,7 @@ fn closure_to_block(closure_id: LocalDefId, let closure_id = tcx.hir.local_def_id_to_node_id(closure_id); match tcx.hir.get(closure_id) { hir_map::NodeExpr(expr) => match expr.node { - hir::ExprClosure(.., body_id, _, _) => { + hir::ExprKind::Closure(.., body_id, _, _) => { body_id.node_id } _ => { @@ -496,7 +514,7 @@ impl<'a, 'tcx> LoanPath<'tcx> { // Avoid "cannot borrow immutable field `self.x` as mutable" as that implies that a field *can* be // mutable independently of the struct it belongs to. (#35937) -pub fn opt_loan_path_is_field<'tcx>(cmt: &mc::cmt<'tcx>) -> (Option>>, bool) { +pub fn opt_loan_path_is_field<'tcx>(cmt: &mc::cmt_<'tcx>) -> (Option>>, bool) { let new_lp = |v: LoanPathKind<'tcx>| Rc::new(LoanPath::new(v, cmt.ty)); match cmt.cat { @@ -544,7 +562,7 @@ pub fn opt_loan_path_is_field<'tcx>(cmt: &mc::cmt<'tcx>) -> (Option(cmt: &mc::cmt<'tcx>) -> Option>> { +pub fn opt_loan_path<'tcx>(cmt: &mc::cmt_<'tcx>) -> Option>> { opt_loan_path_is_field(cmt).0 } @@ -563,10 +581,10 @@ pub enum bckerr_code<'tcx> { // Combination of an error code and the categorization of the expression // that caused it #[derive(Debug, PartialEq)] -pub struct BckError<'tcx> { +pub struct BckError<'c, 'tcx: 'c> { span: Span, cause: AliasableViolationKind, - cmt: mc::cmt<'tcx>, + cmt: &'c mc::cmt_<'tcx>, code: bckerr_code<'tcx> } @@ -598,7 +616,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { region_rels.is_subregion_of(r_sub, r_sup) } - pub fn report(&self, err: BckError<'tcx>) { + pub fn report(&self, err: BckError<'a, 'tcx>) { // Catch and handle some particular cases. 
match (&err.code, &err.cause) { (&err_out_of_scope(&ty::ReScope(_), &ty::ReStatic, _), @@ -620,8 +638,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { use_kind: MovedValueUseKind, lp: &LoanPath<'tcx>, the_move: &move_data::Move, - moved_lp: &LoanPath<'tcx>, - _param_env: ty::ParamEnv<'tcx>) { + moved_lp: &LoanPath<'tcx>) { let (verb, verb_participle) = match use_kind { MovedInUse => ("use", "used"), MovedInCapture => ("capture", "captured"), @@ -638,6 +655,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { .span_label(use_span, format!("use of possibly uninitialized `{}`", self.loan_path_to_string(lp))) .emit(); + self.signal_error(); return; } _ => { @@ -676,7 +694,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { let mut err = self.cannot_act_on_moved_value(use_span, verb, msg, - &format!("{}", nl), + Some(nl.to_string()), Origin::Ast); let need_note = match lp.ty.sty { ty::TypeVariants::TyClosure(id, _) => { @@ -715,7 +733,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { move_data::Captured => (match self.tcx.hir.expect_expr(node_id).node { - hir::ExprClosure(.., fn_decl_span, _) => fn_decl_span, + hir::ExprKind::Closure(.., fn_decl_span, _) => fn_decl_span, ref r => bug!("Captured({:?}) maps to non-closure: {:?}", the_move.id, r), }, " (into closure)"), @@ -753,6 +771,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { // not considered particularly helpful. 
err.emit(); + self.signal_error(); } pub fn report_partial_reinitialization_of_uninitialized_structure( @@ -763,6 +782,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { &self.loan_path_to_string(lp), Origin::Ast) .emit(); + self.signal_error(); } pub fn report_reassigned_immutable_variable(&self, @@ -780,26 +800,10 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { self.loan_path_to_string(lp))); } err.emit(); + self.signal_error(); } - pub fn struct_span_err_with_code>(&self, - s: S, - msg: &str, - code: DiagnosticId) - -> DiagnosticBuilder<'a> { - self.tcx.sess.struct_span_err_with_code(s, msg, code) - } - - pub fn span_err_with_code>( - &self, - s: S, - msg: &str, - code: DiagnosticId, - ) { - self.tcx.sess.span_err_with_code(s, msg, code); - } - - fn report_bckerr(&self, err: &BckError<'tcx>) { + fn report_bckerr(&self, err: &BckError<'a, 'tcx>) { let error_span = err.span.clone(); match err.code { @@ -842,16 +846,39 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { if let mc::NoteClosureEnv(upvar_id) = err.cmt.note { let node_id = self.tcx.hir.hir_to_node_id(upvar_id.var_id); let sp = self.tcx.hir.span(node_id); - match self.tcx.sess.codemap().span_to_snippet(sp) { - Ok(snippet) => { + let fn_closure_msg = "`Fn` closures cannot capture their enclosing \ + environment for modifications"; + match (self.tcx.sess.codemap().span_to_snippet(sp), &err.cmt.cat) { + (_, &Categorization::Upvar(mc::Upvar { + kind: ty::ClosureKind::Fn, .. + })) => { + db.note(fn_closure_msg); + // we should point at the cause for this closure being + // identified as `Fn` (like in signature of method this + // closure was passed into) + } + (Ok(ref snippet), ref cat) => { let msg = &format!("consider making `{}` mutable", snippet); - db.span_suggestion(sp, msg, format!("mut {}", snippet)); + let suggestion = format!("mut {}", snippet); + + if let &Categorization::Deref(ref cmt, _) = cat { + if let Categorization::Upvar(mc::Upvar { + kind: ty::ClosureKind::Fn, .. 
+ }) = cmt.cat { + db.note(fn_closure_msg); + } else { + db.span_suggestion(sp, msg, suggestion); + } + } else { + db.span_suggestion(sp, msg, suggestion); + } } _ => { db.span_help(sp, "consider making this binding mutable"); } } } + db } BorrowViolation(euv::ClosureCapture(_)) => { @@ -872,9 +899,36 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } }; + // We add a special note about `IndexMut`, if the source of this error + // is the fact that `Index` is implemented, but `IndexMut` is not. Needing + // to implement two traits for "one operator" is not very intuitive for + // many programmers. + if err.cmt.note == mc::NoteIndex { + let node_id = self.tcx.hir.hir_to_node_id(err.cmt.hir_id); + let node = self.tcx.hir.get(node_id); + + // This pattern probably always matches. + if let hir_map::NodeExpr( + hir::Expr { node: hir::ExprKind::Index(lhs, _), ..} + ) = node { + let ty = self.tables.expr_ty(lhs); + + db.help(&format!( + "trait `IndexMut` is required to modify indexed content, but \ + it is not implemented for `{}`", + ty + )); + } + } + self.note_and_explain_mutbl_error(&mut db, &err, &error_span); - self.note_immutability_blame(&mut db, err.cmt.immutability_blame()); + self.note_immutability_blame( + &mut db, + err.cmt.immutability_blame(), + self.tcx.hir.hir_to_node_id(err.cmt.hir_id) + ); db.emit(); + self.signal_error(); } err_out_of_scope(super_scope, sub_scope, cause) => { let msg = match opt_loan_path(&err.cmt) { @@ -884,73 +938,6 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } }; - // When you have a borrow that lives across a yield, - // that reference winds up captured in the generator - // type. Regionck then constraints it to live as long - // as the generator itself. If that borrow is borrowing - // data owned by the generator, this winds up resulting in - // an `err_out_of_scope` error: - // - // ``` - // { - // let g = || { - // let a = &3; // this borrow is forced to ... 
-+ - // yield (); // | - // println!("{}", a); // | - // }; // | - // } <----------------------... live until here --------+ - // ``` - // - // To detect this case, we look for cases where the - // `super_scope` (lifetime of the value) is within the - // body, but the `sub_scope` is not. - debug!("err_out_of_scope: self.body.is_generator = {:?}", - self.body.is_generator); - let maybe_borrow_across_yield = if self.body.is_generator { - let body_scope = region::Scope::Node(self.body.value.hir_id.local_id); - debug!("err_out_of_scope: body_scope = {:?}", body_scope); - debug!("err_out_of_scope: super_scope = {:?}", super_scope); - debug!("err_out_of_scope: sub_scope = {:?}", sub_scope); - match (super_scope, sub_scope) { - (&ty::RegionKind::ReScope(value_scope), - &ty::RegionKind::ReScope(loan_scope)) => { - if { - // value_scope <= body_scope && - self.region_scope_tree.is_subscope_of(value_scope, body_scope) && - // body_scope <= loan_scope - self.region_scope_tree.is_subscope_of(body_scope, loan_scope) - } { - // We now know that this is a case - // that fits the bill described above: - // a borrow of something whose scope - // is within the generator, but the - // borrow is for a scope outside the - // generator. - // - // Now look within the scope of the of - // the value being borrowed (in the - // example above, that would be the - // block remainder that starts with - // `let a`) for a yield. We can cite - // that for the user. - self.region_scope_tree.yield_in_scope(value_scope) - } else { - None - } - } - _ => None, - } - } else { - None - }; - - if let Some((yield_span, _)) = maybe_borrow_across_yield { - debug!("err_out_of_scope: opt_yield_span = {:?}", yield_span); - self.cannot_borrow_across_generator_yield(error_span, yield_span, Origin::Ast) - .emit(); - return; - } - let mut db = self.path_does_not_live_long_enough(error_span, &msg, Origin::Ast); let value_kind = match err.cmt.cat { mc::Categorization::Rvalue(..) 
=> "temporary value", @@ -1046,16 +1033,20 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { let node_id = scope.node_id(self.tcx, &self.region_scope_tree); match self.tcx.hir.find(node_id) { Some(hir_map::NodeStmt(_)) => { - db.note("consider using a `let` binding to increase its lifetime"); + if *sub_scope != ty::ReStatic { + db.note("consider using a `let` binding to increase its lifetime"); + } + } _ => {} } } db.emit(); + self.signal_error(); } err_borrowed_pointer_too_short(loan_scope, ptr_scope) => { - let descr = self.cmt_to_path_or_string(&err.cmt); + let descr = self.cmt_to_path_or_string(err.cmt); let mut db = self.lifetime_too_short_for_reborrow(error_span, &descr, Origin::Ast); let descr = match opt_loan_path(&err.cmt) { Some(lp) => { @@ -1078,6 +1069,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { ""); db.emit(); + self.signal_error(); } } } @@ -1086,7 +1078,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { span: Span, kind: AliasableViolationKind, cause: mc::AliasableReason, - cmt: mc::cmt<'tcx>) { + cmt: &mc::cmt_<'tcx>) { let mut is_closure = false; let prefix = match kind { MutabilityViolation => { @@ -1113,22 +1105,12 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { }; match cause { - mc::AliasableStatic => { - // This happens when we have an `&mut` or assignment to a - // static. We should have already reported a mutability - // violation first, but may have continued compiling. - self.tcx.sess.delay_span_bug( - span, - &format!("aliasability violation for static `{}`", prefix) - ); - return; - } mc::AliasableStaticMut => { // This path cannot occur. `static mut X` is not checked // for aliasability violations. 
span_bug!(span, "aliasability violation for static mut `{}`", prefix) } - mc::AliasableBorrowed => {} + mc::AliasableStatic | mc::AliasableBorrowed => {} }; let blame = cmt.immutability_blame(); let mut err = match blame { @@ -1156,19 +1138,24 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { Origin::Ast) } }; - self.note_immutability_blame(&mut err, blame); + self.note_immutability_blame( + &mut err, + blame, + self.tcx.hir.hir_to_node_id(cmt.hir_id) + ); if is_closure { err.help("closures behind references must be called via `&mut`"); } err.emit(); + self.signal_error(); } /// Given a type, if it is an immutable reference, return a suggestion to make it mutable fn suggest_mut_for_immutable(&self, pty: &hir::Ty, is_implicit_self: bool) -> Option { - // Check wether the argument is an immutable reference + // Check whether the argument is an immutable reference debug!("suggest_mut_for_immutable({:?}, {:?})", pty, is_implicit_self); - if let hir::TyRptr(lifetime, hir::MutTy { + if let hir::TyKind::Rptr(lifetime, hir::MutTy { mutbl: hir::Mutability::MutImmutable, ref ty }) = pty.node { @@ -1235,36 +1222,24 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { fn note_immutability_blame(&self, db: &mut DiagnosticBuilder, - blame: Option) { + blame: Option, + error_node_id: ast::NodeId) { match blame { None => {} Some(ImmutabilityBlame::ClosureEnv(_)) => {} Some(ImmutabilityBlame::ImmLocal(node_id)) => { - let let_span = self.tcx.hir.span(node_id); - if let ty::BindByValue(..) = self.local_binding_mode(node_id) { - if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(let_span) { - let (_, is_implicit_self) = self.local_ty(node_id); - if is_implicit_self && snippet != "self" { - // avoid suggesting `mut &self`. 
- return - } - db.span_label( - let_span, - format!("consider changing this to `mut {}`", snippet) - ); - } - } + self.note_immutable_local(db, error_node_id, node_id) } Some(ImmutabilityBlame::LocalDeref(node_id)) => { - let let_span = self.tcx.hir.span(node_id); match self.local_binding_mode(node_id) { ty::BindByReference(..) => { - let snippet = self.tcx.sess.codemap().span_to_snippet(let_span); - if let Ok(snippet) = snippet { - db.span_label( + let let_span = self.tcx.hir.span(node_id); + let suggestion = suggest_ref_mut(self.tcx, let_span); + if let Some(replace_str) = suggestion { + db.span_suggestion( let_span, - format!("consider changing this to `{}`", - snippet.replace("ref ", "ref mut ")) + "use a mutable reference instead", + replace_str, ); } } @@ -1293,8 +1268,48 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } + // Suggest a fix when trying to mutably borrow an immutable local + // binding: either to make the binding mutable (if its type is + // not a mutable reference) or to avoid borrowing altogether + fn note_immutable_local(&self, + db: &mut DiagnosticBuilder, + borrowed_node_id: ast::NodeId, + binding_node_id: ast::NodeId) { + let let_span = self.tcx.hir.span(binding_node_id); + if let ty::BindByValue(..) = self.local_binding_mode(binding_node_id) { + if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(let_span) { + let (ty, is_implicit_self) = self.local_ty(binding_node_id); + if is_implicit_self && snippet != "self" { + // avoid suggesting `mut &self`. + return + } + if let Some(&hir::TyKind::Rptr( + _, + hir::MutTy { + mutbl: hir::MutMutable, + .. 
+ }, + )) = ty.map(|t| &t.node) + { + let borrow_expr_id = self.tcx.hir.get_parent_node(borrowed_node_id); + db.span_suggestion( + self.tcx.hir.span(borrow_expr_id), + "consider removing the `&mut`, as it is an \ + immutable binding to a mutable reference", + snippet + ); + } else { + db.span_label( + let_span, + format!("consider changing this to `mut {}`", snippet), + ); + } + } + } + } + fn report_out_of_scope_escaping_closure_capture(&self, - err: &BckError<'tcx>, + err: &BckError<'a, 'tcx>, capture_span: Span) { let cmt_path_or_string = self.cmt_to_path_or_string(&err.cmt); @@ -1302,7 +1317,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { let suggestion = match self.tcx.sess.codemap().span_to_snippet(err.span) { Ok(string) => format!("move {}", string), - Err(_) => format!("move || ") + Err(_) => "move || ".to_string() }; self.cannot_capture_in_long_lived_closure(err.span, @@ -1316,29 +1331,31 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { cmt_path_or_string), suggestion) .emit(); + self.signal_error(); } fn region_end_span(&self, region: ty::Region<'tcx>) -> Option { match *region { ty::ReScope(scope) => { - Some(scope.span(self.tcx, &self.region_scope_tree).end_point()) + Some(self.tcx.sess.codemap().end_point( + scope.span(self.tcx, &self.region_scope_tree))) } _ => None } } - fn note_and_explain_mutbl_error(&self, db: &mut DiagnosticBuilder, err: &BckError<'tcx>, + fn note_and_explain_mutbl_error(&self, db: &mut DiagnosticBuilder, err: &BckError<'a, 'tcx>, error_span: &Span) { match err.cmt.note { mc::NoteClosureEnv(upvar_id) | mc::NoteUpvarRef(upvar_id) => { // If this is an `Fn` closure, it simply can't mutate upvars. // If it's an `FnMut` closure, the original variable was declared immutable. // We need to determine which is the case here. - let kind = match err.cmt.upvar().unwrap().cat { + let kind = match err.cmt.upvar_cat().unwrap() { Categorization::Upvar(mc::Upvar { kind, .. 
}) => kind, _ => bug!() }; - if kind == ty::ClosureKind::Fn { + if *kind == ty::ClosureKind::Fn { let closure_node_id = self.tcx.hir.local_def_id_to_node_id(upvar_id.closure_expr_id); db.span_help(self.tcx.hir.span(closure_node_id), @@ -1389,18 +1406,10 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { out.push(')'); } - LpExtend(ref lp_base, _, LpInterior(_, InteriorField(fname))) => { + LpExtend(ref lp_base, _, LpInterior(_, InteriorField(mc::FieldIndex(_, info)))) => { self.append_autoderefd_loan_path_to_string(&lp_base, out); - match fname { - mc::NamedField(fname) => { - out.push('.'); - out.push_str(&fname.as_str()); - } - mc::PositionalField(idx) => { - out.push('.'); - out.push_str(&idx.to_string()); - } - } + out.push('.'); + out.push_str(&info.as_str()); } LpExtend(ref lp_base, _, LpInterior(_, InteriorElement)) => { @@ -1450,7 +1459,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { cmt.descriptive_string(self.tcx) } - pub fn cmt_to_path_or_string(&self, cmt: &mc::cmt<'tcx>) -> String { + pub fn cmt_to_path_or_string(&self, cmt: &mc::cmt_<'tcx>) -> String { match opt_loan_path(cmt) { Some(lp) => format!("`{}`", self.loan_path_to_string(&lp)), None => self.cmt_to_string(cmt), @@ -1475,8 +1484,7 @@ impl DataFlowOperator for LoanDataFlowOperator { impl<'tcx> fmt::Debug for InteriorKind { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - InteriorField(mc::NamedField(fld)) => write!(f, "{}", fld), - InteriorField(mc::PositionalField(i)) => write!(f, "#{}", i), + InteriorField(mc::FieldIndex(_, info)) => write!(f, "{}", info), InteriorElement => write!(f, "[]"), } } diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index 98de394ae396..979d71a0923c 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -13,17 +13,12 @@ pub use self::MoveKind::*; +use dataflow::{DataFlowContext, BitwiseOperator, DataFlowOperator, KillFrom}; + use borrowck::*; use 
rustc::cfg; -use rustc::middle::dataflow::DataFlowContext; -use rustc::middle::dataflow::BitwiseOperator; -use rustc::middle::dataflow::DataFlowOperator; -use rustc::middle::dataflow::KillFrom; -use rustc::middle::expr_use_visitor as euv; -use rustc::middle::expr_use_visitor::MutateMode; -use rustc::middle::mem_categorization as mc; use rustc::ty::{self, TyCtxt}; -use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use rustc::util::nodemap::FxHashMap; use std::cell::RefCell; use std::rc::Rc; @@ -52,9 +47,6 @@ pub struct MoveData<'tcx> { /// assigned dataflow bits, but we track them because they still /// kill move bits. pub path_assignments: RefCell>, - - /// Assignments to a variable or path, like `x = foo`, but not `x += foo`. - pub assignee_ids: RefCell>, } pub struct FlowedMoveData<'a, 'tcx: 'a> { @@ -152,9 +144,6 @@ pub struct Assignment { /// span of node where assignment occurs pub span: Span, - - /// id for l-value expression on lhs of assignment - pub assignee_id: hir::ItemLocalId, } #[derive(Clone, Copy)] @@ -343,8 +332,9 @@ impl<'a, 'tcx> MoveData<'tcx> { if let (&ty::TyAdt(adt_def, _), LpInterior(opt_variant_id, interior)) = (&base_lp.ty.sty, lp_elem) { if adt_def.is_union() { - for field in &adt_def.non_enum_variant().fields { - let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() { + let field = + InteriorKind::InteriorField(mc::FieldIndex(i, field.ident.name)); if field != interior { let sibling_lp_kind = LpExtend(base_lp.clone(), mutbl, LpInterior(opt_variant_id, field)); @@ -388,15 +378,14 @@ impl<'a, 'tcx> MoveData<'tcx> { pub fn add_assignment(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc>, assign_id: hir::ItemLocalId, - span: Span, - assignee_id: hir::ItemLocalId, - mode: euv::MutateMode) { + span: Span) { // Assigning to one union field automatically assigns to all its fields. 
if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind { if let ty::TyAdt(adt_def, _) = base_lp.ty.sty { if adt_def.is_union() { - for field in &adt_def.non_enum_variant().fields { - let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + for (i, field) in adt_def.non_enum_variant().fields.iter().enumerate() { + let field = + InteriorKind::InteriorField(mc::FieldIndex(i, field.ident.name)); let field_ty = if field == interior { lp.ty } else { @@ -406,39 +395,28 @@ impl<'a, 'tcx> MoveData<'tcx> { LpInterior(opt_variant_id, field)); let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); self.add_assignment_helper(tcx, sibling_lp, assign_id, - span, assignee_id, mode); + span); } return; } } } - self.add_assignment_helper(tcx, lp.clone(), assign_id, span, assignee_id, mode); + self.add_assignment_helper(tcx, lp.clone(), assign_id, span); } fn add_assignment_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc>, assign_id: hir::ItemLocalId, - span: Span, - assignee_id: hir::ItemLocalId, - mode: euv::MutateMode) { - debug!("add_assignment(lp={:?}, assign_id={:?}, assignee_id={:?}", - lp, assign_id, assignee_id); + span: Span) { + debug!("add_assignment(lp={:?}, assign_id={:?}", lp, assign_id); let path_index = self.move_path(tcx, lp.clone()); - match mode { - MutateMode::Init | MutateMode::JustWrite => { - self.assignee_ids.borrow_mut().insert(assignee_id); - } - MutateMode::WriteAndRead => { } - } - let assignment = Assignment { path: path_index, id: assign_id, span, - assignee_id, }; if self.is_var_path(path_index) { diff --git a/src/librustc_borrowck/borrowck/unused.rs b/src/librustc_borrowck/borrowck/unused.rs index ddee122d0a6b..88545c12415c 100644 --- a/src/librustc_borrowck/borrowck/unused.rs +++ b/src/librustc_borrowck/borrowck/unused.rs @@ -44,47 +44,45 @@ struct UnusedMutCx<'a, 'tcx: 'a> { impl<'a, 'tcx> UnusedMutCx<'a, 'tcx> { fn check_unused_mut_pat(&self, pats: &[P]) { let tcx = self.bccx.tcx; - let mut 
mutables = FxHashMap(); + let mut mutables: FxHashMap<_, Vec<_>> = FxHashMap(); for p in pats { - p.each_binding(|_, id, span, path1| { - let name = path1.node; - + p.each_binding(|_, hir_id, span, ident| { // Skip anything that looks like `_foo` - if name.as_str().starts_with("_") { - return + if ident.as_str().starts_with("_") { + return; } // Skip anything that looks like `&foo` or `&mut foo`, only look // for by-value bindings - let hir_id = tcx.hir.node_to_hir_id(id); - let bm = match self.bccx.tables.pat_binding_modes().get(hir_id) { - Some(&bm) => bm, - None => span_bug!(span, "missing binding mode"), - }; - match bm { - ty::BindByValue(hir::MutMutable) => {} - _ => return, - } + if let Some(&bm) = self.bccx.tables.pat_binding_modes().get(hir_id) { + match bm { + ty::BindByValue(hir::MutMutable) => {} + _ => return, + } - mutables.entry(name).or_insert(Vec::new()).push((id, hir_id, span)); + mutables.entry(ident.name).or_default().push((hir_id, span)); + } else { + tcx.sess.delay_span_bug(span, "missing binding mode"); + } }); } for (_name, ids) in mutables { // If any id for this name was used mutably then consider them all // ok, so move on to the next - if ids.iter().any(|&(_, ref id, _)| self.used_mut.contains(id)) { - continue + if ids.iter().any(|&(ref hir_id, _)| self.used_mut.contains(hir_id)) { + continue; } - let mut_span = tcx.sess.codemap().span_until_char(ids[0].2, ' '); + let (hir_id, span) = ids[0]; + let mut_span = tcx.sess.codemap().span_until_non_whitespace(span); // Ok, every name wasn't used mutably, so issue a warning that this // didn't need to be mutable. 
- tcx.struct_span_lint_node(UNUSED_MUT, - ids[0].0, - ids[0].2, - "variable does not need to be mutable") + tcx.struct_span_lint_hir(UNUSED_MUT, + hir_id, + span, + "variable does not need to be mutable") .span_suggestion_short(mut_span, "remove this `mut`", "".to_owned()) .emit(); } diff --git a/src/librustc/middle/dataflow.rs b/src/librustc_borrowck/dataflow.rs similarity index 98% rename from src/librustc/middle/dataflow.rs rename to src/librustc_borrowck/dataflow.rs index 5c86554f9079..75dee2b78fdd 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc_borrowck/dataflow.rs @@ -14,20 +14,20 @@ //! and thus uses bitvectors. Your job is simply to specify the so-called //! GEN and KILL bits for each expression. -use cfg; -use cfg::CFGIndex; -use ty::TyCtxt; +use rustc::cfg; +use rustc::cfg::CFGIndex; +use rustc::ty::TyCtxt; use std::io; use std::mem; use std::usize; use syntax::print::pprust::PrintState; -use rustc_data_structures::graph::OUTGOING; +use rustc_data_structures::graph::implementation::OUTGOING; -use util::nodemap::FxHashMap; -use hir; -use hir::intravisit::{self, IdRange}; -use hir::print as pprust; +use rustc::util::nodemap::FxHashMap; +use rustc::hir; +use rustc::hir::intravisit::{self, IdRange}; +use rustc::hir::print as pprust; #[derive(Copy, Clone, Debug)] @@ -181,7 +181,7 @@ fn build_local_id_to_index(body: Option<&hir::Body>, cfg.graph.each_node(|node_idx, node| { if let cfg::CFGNodeData::AST(id) = node.data { - index.entry(id).or_insert(vec![]).push(node_idx); + index.entry(id).or_default().push(node_idx); } true }); @@ -193,7 +193,7 @@ fn build_local_id_to_index(body: Option<&hir::Body>, fn add_entries_from_fn_body(index: &mut FxHashMap>, body: &hir::Body, entry: CFGIndex) { - use hir::intravisit::Visitor; + use rustc::hir::intravisit::Visitor; struct Formals<'a> { entry: CFGIndex, @@ -209,7 +209,7 @@ fn build_local_id_to_index(body: Option<&hir::Body>, } fn visit_pat(&mut self, p: &hir::Pat) { - 
self.index.entry(p.hir_id.local_id).or_insert(vec![]).push(self.entry); + self.index.entry(p.hir_id.local_id).or_default().push(self.entry); intravisit::walk_pat(self, p) } } diff --git a/src/librustc_borrowck/graphviz.rs b/src/librustc_borrowck/graphviz.rs index 22867ba5b55a..dddd6a354c11 100644 --- a/src/librustc_borrowck/graphviz.rs +++ b/src/librustc_borrowck/graphviz.rs @@ -21,7 +21,7 @@ use borrowck; use borrowck::{BorrowckCtxt, LoanPath}; use dot; use rustc::cfg::CFGIndex; -use rustc::middle::dataflow::{DataFlowOperator, DataFlowContext, EntryOrExit}; +use dataflow::{DataFlowOperator, DataFlowContext, EntryOrExit}; use std::rc::Rc; use dot::IntoCow; diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index be173db23a52..16da8c8a3b8b 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -11,18 +11,19 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] #![allow(non_camel_case_types)] -#![feature(from_ref)] -#![feature(match_default_bindings)] +#![cfg_attr(not(stage0), feature(nll))] #![feature(quote)] +#![recursion_limit="256"] + #[macro_use] extern crate log; extern crate syntax; extern crate syntax_pos; extern crate rustc_errors as errors; +extern crate rustc_data_structures; // for "clarity", rename the graphviz crate to dot; graphviz within `borrowck` // refers to the borrowck-specific graphviz adapter traits. 
@@ -38,4 +39,6 @@ mod borrowck; pub mod graphviz; +mod dataflow; + pub use borrowck::provide; diff --git a/src/librustc_codegen_llvm/Cargo.toml b/src/librustc_codegen_llvm/Cargo.toml new file mode 100644 index 000000000000..28fa49846b73 --- /dev/null +++ b/src/librustc_codegen_llvm/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_codegen_llvm" +version = "0.0.0" + +[lib] +name = "rustc_codegen_llvm" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +cc = "1.0.1" +num_cpus = "1.0" +rustc-demangle = "0.1.4" +rustc_llvm = { path = "../librustc_llvm" } + +[features] +# This is used to convince Cargo to separately cache builds of `rustc_codegen_llvm` +# when this option is enabled or not. That way we can build two, cache two +# artifacts, and have nice speedy rebuilds. +emscripten = ["rustc_llvm/emscripten"] diff --git a/src/librustc_codegen_llvm/README.md b/src/librustc_codegen_llvm/README.md new file mode 100644 index 000000000000..8d1c9a52b242 --- /dev/null +++ b/src/librustc_codegen_llvm/README.md @@ -0,0 +1,7 @@ +The `codegen` crate contains the code to convert from MIR into LLVM IR, +and then from LLVM IR into machine code. In general it contains code +that runs towards the end of the compilation process. + +For more information about how codegen works, see the [rustc guide]. + +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/codegen.html diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs new file mode 100644 index 000000000000..79343505c78a --- /dev/null +++ b/src/librustc_codegen_llvm/abi.rs @@ -0,0 +1,735 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{self, AttributePlace}; +use base; +use builder::{Builder, MemFlags}; +use common::{ty_fn_sig, C_usize}; +use context::CodegenCx; +use mir::place::PlaceRef; +use mir::operand::OperandValue; +use type_::Type; +use type_of::{LayoutLlvmExt, PointerKind}; +use value::Value; + +use rustc_target::abi::{LayoutOf, Size, TyLayout}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout; + +use libc::c_uint; + +pub use rustc_target::spec::abi::Abi; +pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; +pub use rustc_target::abi::call::*; + +macro_rules! for_each_kind { + ($flags: ident, $f: ident, $($kind: ident),+) => ({ + $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+ + }) +} + +trait ArgAttributeExt { + fn for_each_kind(&self, f: F) where F: FnMut(llvm::Attribute); +} + +impl ArgAttributeExt for ArgAttribute { + fn for_each_kind(&self, mut f: F) where F: FnMut(llvm::Attribute) { + for_each_kind!(self, f, + ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg) + } +} + +pub trait ArgAttributesExt { + fn apply_llfn(&self, idx: AttributePlace, llfn: &Value); + fn apply_callsite(&self, idx: AttributePlace, callsite: &Value); +} + +impl ArgAttributesExt for ArgAttributes { + fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) { + let mut regular = self.regular; + unsafe { + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableAttr(llfn, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; + } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentAttr(llfn, + idx.as_uint(), + align.abi() as u32); + } + regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); + } + } + + fn apply_callsite(&self, idx: AttributePlace, 
callsite: &Value) { + let mut regular = self.regular; + unsafe { + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; + } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, + idx.as_uint(), + align.abi() as u32); + } + regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); + } + } +} + +pub trait LlvmType { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type; +} + +impl LlvmType for Reg { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { + match self.kind { + RegKind::Integer => Type::ix(cx, self.size.bits()), + RegKind::Float => { + match self.size.bits() { + 32 => Type::f32(cx), + 64 => Type::f64(cx), + _ => bug!("unsupported float: {:?}", self) + } + } + RegKind::Vector => { + Type::vector(Type::i8(cx), self.size.bytes()) + } + } + } +} + +impl LlvmType for CastTarget { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { + let rest_ll_unit = self.rest.unit.llvm_type(cx); + let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 { + (0, 0) + } else { + (self.rest.total.bytes() / self.rest.unit.size.bytes(), + self.rest.total.bytes() % self.rest.unit.size.bytes()) + }; + + if self.prefix.iter().all(|x| x.is_none()) { + // Simplify to a single unit when there is no prefix and size <= unit size + if self.rest.total <= self.rest.unit.size { + return rest_ll_unit; + } + + // Simplify to array when all chunks are the same size and type + if rem_bytes == 0 { + return Type::array(rest_ll_unit, rest_count); + } + } + + // Create list of fields in the main structure + let mut args: Vec<_> = + self.prefix.iter().flat_map(|option_kind| option_kind.map( + |kind| Reg { kind: kind, size: self.prefix_chunk 
}.llvm_type(cx))) + .chain((0..rest_count).map(|_| rest_ll_unit)) + .collect(); + + // Append final integer + if rem_bytes != 0 { + // Only integers can be really split further. + assert_eq!(self.rest.unit.kind, RegKind::Integer); + args.push(Type::ix(cx, rem_bytes * 8)); + } + + Type::struct_(cx, &args, false) + } +} + +pub trait ArgTypeExt<'ll, 'tcx> { + fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; + fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>); + fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>); +} + +impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { + /// Get the LLVM type for a place of the original Rust type of + /// this argument/return, i.e. the result of `type_of::type_of`. + fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + self.layout.llvm_type(cx) + } + + /// Store a direct/indirect value described by this ArgType into a + /// place for the original Rust type of this argument/return. + /// Can be used for both storing formal arguments into Rust variables + /// or results of call/invoke instructions into their destinations. + fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) { + if self.is_ignore() { + return; + } + let cx = bx.cx; + if self.is_sized_indirect() { + OperandValue::Ref(val, None, self.layout.align).store(bx, dst) + } else if self.is_unsized_indirect() { + bug!("unsized ArgType must be handled through store_fn_arg"); + } else if let PassMode::Cast(cast) = self.mode { + // FIXME(eddyb): Figure out when the simpler Store is safe, clang + // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. 
+ let can_store_through_cast_ptr = false; + if can_store_through_cast_ptr { + let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to()); + bx.store(val, cast_dst, self.layout.align); + } else { + // The actual return type is a struct, but the ABI + // adaptation code has cast it into some scalar type. The + // code that follows is the only reliable way I have + // found to do a transform like i64 -> {i32,i32}. + // Basically we dump the data onto the stack then memcpy it. + // + // Other approaches I tried: + // - Casting rust ret pointer to the foreign type and using Store + // is (a) unsafe if size of foreign type > size of rust type and + // (b) runs afoul of strict aliasing rules, yielding invalid + // assembly under -O (specifically, the store gets removed). + // - Truncating foreign type to correct integral type and then + // bitcasting to the struct type yields invalid cast errors. + + // We instead thus allocate some scratch space... + let scratch_size = cast.size(cx); + let scratch_align = cast.align(cx); + let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align); + bx.lifetime_start(llscratch, scratch_size); + + // ...where we first store the value... + bx.store(val, llscratch, scratch_align); + + // ...and then memcpy it to the intended destination. + base::call_memcpy(bx, + bx.pointercast(dst.llval, Type::i8p(cx)), + bx.pointercast(llscratch, Type::i8p(cx)), + C_usize(cx, self.layout.size.bytes()), + self.layout.align.min(scratch_align), + MemFlags::empty()); + + bx.lifetime_end(llscratch, scratch_size); + } + } else { + OperandValue::Immediate(val).store(bx, dst); + } + } + + fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) { + let mut next = || { + let val = llvm::get_param(bx.llfn(), *idx as c_uint); + *idx += 1; + val + }; + match self.mode { + PassMode::Ignore => {}, + PassMode::Pair(..) 
=> { + OperandValue::Pair(next(), next()).store(bx, dst); + } + PassMode::Indirect(_, Some(_)) => { + OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst); + } + PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => { + self.store(bx, next(), dst); + } + } + } +} + +pub trait FnTypeExt<'tcx> { + fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) + -> Self; + fn new(cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> Self; + fn new_vtable(cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> Self; + fn new_internal( + cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>], + mk_arg_type: impl Fn(Ty<'tcx>, Option) -> ArgType<'tcx, Ty<'tcx>>, + ) -> Self; + fn adjust_for_abi(&mut self, + cx: &CodegenCx<'ll, 'tcx>, + abi: Abi); + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; + fn llvm_cconv(&self) -> llvm::CallConv; + fn apply_attrs_llfn(&self, llfn: &'ll Value); + fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value); +} + +impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { + fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) + -> Self { + let fn_ty = instance.ty(cx.tcx); + let sig = ty_fn_sig(cx, fn_ty); + let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + FnType::new(cx, sig, &[]) + } + + fn new(cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> Self { + FnType::new_internal(cx, sig, extra_args, |ty, _| { + ArgType::new(cx.layout_of(ty)) + }) + } + + fn new_vtable(cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> Self { + FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| { + let mut layout = cx.layout_of(ty); + // Don't pass the vtable, it's not an argument of the virtual fn. + // Instead, pass just the (thin pointer) first field of `*dyn Trait`. 
+ if arg_idx == Some(0) { + if layout.is_unsized() { + unimplemented!("by-value trait object is not \ + yet implemented in #![feature(unsized_locals)]"); + } + // FIXME(eddyb) `layout.field(cx, 0)` is not enough because e.g. + // `Box` has a few newtype wrappers around the raw + // pointer, so we'd have to "dig down" to find `*dyn Trait`. + let pointee = layout.ty.builtin_deref(true) + .unwrap_or_else(|| { + bug!("FnType::new_vtable: non-pointer self {:?}", layout) + }).ty; + let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee); + layout = cx.layout_of(fat_ptr_ty).field(cx, 0); + } + ArgType::new(layout) + }) + } + + fn new_internal( + cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>], + mk_arg_type: impl Fn(Ty<'tcx>, Option) -> ArgType<'tcx, Ty<'tcx>>, + ) -> Self { + debug!("FnType::new_internal({:?}, {:?})", sig, extra_args); + + use self::Abi::*; + let conv = match cx.sess().target.target.adjust_abi(sig.abi) { + RustIntrinsic | PlatformIntrinsic | + Rust | RustCall => Conv::C, + + // It's the ABI's job to select this, not us. + System => bug!("system abi should be selected elsewhere"), + + Stdcall => Conv::X86Stdcall, + Fastcall => Conv::X86Fastcall, + Vectorcall => Conv::X86VectorCall, + Thiscall => Conv::X86ThisCall, + C => Conv::C, + Unadjusted => Conv::C, + Win64 => Conv::X86_64Win64, + SysV64 => Conv::X86_64SysV, + Aapcs => Conv::ArmAapcs, + PtxKernel => Conv::PtxKernel, + Msp430Interrupt => Conv::Msp430Intr, + X86Interrupt => Conv::X86Intr, + AmdGpuKernel => Conv::AmdGpuKernel, + + // These API constants ought to be more specific... 
+ Cdecl => Conv::C, + }; + + let mut inputs = sig.inputs(); + let extra_args = if sig.abi == RustCall { + assert!(!sig.variadic && extra_args.is_empty()); + + match sig.inputs().last().unwrap().sty { + ty::TyTuple(ref tupled_arguments) => { + inputs = &sig.inputs()[0..sig.inputs().len() - 1]; + tupled_arguments + } + _ => { + bug!("argument to function with \"rust-call\" ABI \ + is not a tuple"); + } + } + } else { + assert!(sig.variadic || extra_args.is_empty()); + extra_args + }; + + let target = &cx.sess().target.target; + let win_x64_gnu = target.target_os == "windows" + && target.arch == "x86_64" + && target.target_env == "gnu"; + let linux_s390x = target.target_os == "linux" + && target.arch == "s390x" + && target.target_env == "gnu"; + let rust_abi = match sig.abi { + RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true, + _ => false + }; + + // Handle safe Rust thin and fat pointers. + let adjust_for_rust_scalar = |attrs: &mut ArgAttributes, + scalar: &layout::Scalar, + layout: TyLayout<'tcx, Ty<'tcx>>, + offset: Size, + is_return: bool| { + // Booleans are always an i1 that needs to be zero-extended. + if scalar.is_bool() { + attrs.set(ArgAttribute::ZExt); + return; + } + + // Only pointer types handled below. + if scalar.value != layout::Pointer { + return; + } + + if scalar.valid_range.start() < scalar.valid_range.end() { + if *scalar.valid_range.start() > 0 { + attrs.set(ArgAttribute::NonNull); + } + } + + if let Some(pointee) = layout.pointee_info_at(cx, offset) { + if let Some(kind) = pointee.safe { + attrs.pointee_size = pointee.size; + attrs.pointee_align = Some(pointee.align); + + // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions + // with align attributes, and those calls later block optimizations. 
+ if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes { + attrs.pointee_align = None; + } + + // `Box` pointer parameters never alias because ownership is transferred + // `&mut` pointer parameters never alias other parameters, + // or mutable global data + // + // `&T` where `T` contains no `UnsafeCell` is immutable, + // and can be marked as both `readonly` and `noalias`, as + // LLVM's definition of `noalias` is based solely on memory + // dependencies rather than pointer equality + let no_alias = match kind { + PointerKind::Shared => false, + PointerKind::UniqueOwned => true, + PointerKind::Frozen | + PointerKind::UniqueBorrowed => !is_return + }; + if no_alias { + attrs.set(ArgAttribute::NoAlias); + } + + if kind == PointerKind::Frozen && !is_return { + attrs.set(ArgAttribute::ReadOnly); + } + } + } + }; + + let arg_of = |ty: Ty<'tcx>, arg_idx: Option| { + let is_return = arg_idx.is_none(); + let mut arg = mk_arg_type(ty, arg_idx); + if arg.layout.is_zst() { + // For some forsaken reason, x86_64-pc-windows-gnu + // doesn't ignore zero-sized struct arguments. + // The same is true for s390x-unknown-linux-gnu. + if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) { + arg.mode = PassMode::Ignore; + } + } + + // FIXME(eddyb) other ABIs don't have logic for scalar pairs. 
+ if !is_return && rust_abi { + if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi { + let mut a_attrs = ArgAttributes::new(); + let mut b_attrs = ArgAttributes::new(); + adjust_for_rust_scalar(&mut a_attrs, + a, + arg.layout, + Size::ZERO, + false); + adjust_for_rust_scalar(&mut b_attrs, + b, + arg.layout, + a.value.size(cx).abi_align(b.value.align(cx)), + false); + arg.mode = PassMode::Pair(a_attrs, b_attrs); + return arg; + } + } + + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if let PassMode::Direct(ref mut attrs) = arg.mode { + adjust_for_rust_scalar(attrs, + scalar, + arg.layout, + Size::ZERO, + is_return); + } + } + + arg + }; + + let mut fn_ty = FnType { + ret: arg_of(sig.output(), None), + args: inputs.iter().chain(extra_args).enumerate().map(|(i, ty)| { + arg_of(ty, Some(i)) + }).collect(), + variadic: sig.variadic, + conv, + }; + fn_ty.adjust_for_abi(cx, sig.abi); + fn_ty + } + + fn adjust_for_abi(&mut self, + cx: &CodegenCx<'ll, 'tcx>, + abi: Abi) { + if abi == Abi::Unadjusted { return } + + if abi == Abi::Rust || abi == Abi::RustCall || + abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { + let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| { + if arg.is_ignore() { return; } + + match arg.layout.abi { + layout::Abi::Aggregate { .. } => {} + + // This is a fun case! The gist of what this is doing is + // that we want callers and callees to always agree on the + // ABI of how they pass SIMD arguments. If we were to *not* + // make these arguments indirect then they'd be immediates + // in LLVM, which means that they'd used whatever the + // appropriate ABI is for the callee and the caller. That + // means, for example, if the caller doesn't have AVX + // enabled but the callee does, then passing an AVX argument + // across this boundary would cause corrupt data to show up. 
+ // + // This problem is fixed by unconditionally passing SIMD + // arguments through memory between callers and callees + // which should get them all to agree on ABI regardless of + // target feature sets. Some more information about this + // issue can be found in #44367. + // + // Note that the platform intrinsic ABI is exempt here as + // that's how we connect up to LLVM and it's unstable + // anyway, we control all calls to it in libstd. + layout::Abi::Vector { .. } if abi != Abi::PlatformIntrinsic => { + arg.make_indirect(); + return + } + + _ => return + } + + let size = arg.layout.size; + if arg.layout.is_unsized() || size > layout::Pointer.size(cx) { + arg.make_indirect(); + } else { + // We want to pass small aggregates as immediates, but using + // a LLVM aggregate type for this leads to bad optimizations, + // so we pick an appropriately sized integer type instead. + arg.cast_to(Reg { + kind: RegKind::Integer, + size + }); + } + }; + fixup(&mut self.ret); + for arg in &mut self.args { + fixup(arg); + } + if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode { + attrs.set(ArgAttribute::StructRet); + } + return; + } + + if let Err(msg) = self.adjust_for_cabi(cx, abi) { + cx.sess().fatal(&msg); + } + } + + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + let args_capacity: usize = self.args.iter().map(|arg| + if arg.pad.is_some() { 1 } else { 0 } + + if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 } + ).sum(); + let mut llargument_tys = Vec::with_capacity( + if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity + ); + + let llreturn_ty = match self.ret.mode { + PassMode::Ignore => Type::void(cx), + PassMode::Direct(_) | PassMode::Pair(..) => { + self.ret.layout.immediate_llvm_type(cx) + } + PassMode::Cast(cast) => cast.llvm_type(cx), + PassMode::Indirect(..) 
=> { + llargument_tys.push(self.ret.memory_ty(cx).ptr_to()); + Type::void(cx) + } + }; + + for arg in &self.args { + // add padding + if let Some(ty) = arg.pad { + llargument_tys.push(ty.llvm_type(cx)); + } + + let llarg_ty = match arg.mode { + PassMode::Ignore => continue, + PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx), + PassMode::Pair(..) => { + llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true)); + llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true)); + continue; + } + PassMode::Indirect(_, Some(_)) => { + let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty); + let ptr_layout = cx.layout_of(ptr_ty); + llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true)); + llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true)); + continue; + } + PassMode::Cast(cast) => cast.llvm_type(cx), + PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(), + }; + llargument_tys.push(llarg_ty); + } + + if self.variadic { + Type::variadic_func(&llargument_tys, llreturn_ty) + } else { + Type::func(&llargument_tys, llreturn_ty) + } + } + + fn llvm_cconv(&self) -> llvm::CallConv { + match self.conv { + Conv::C => llvm::CCallConv, + Conv::AmdGpuKernel => llvm::AmdGpuKernel, + Conv::ArmAapcs => llvm::ArmAapcsCallConv, + Conv::Msp430Intr => llvm::Msp430Intr, + Conv::PtxKernel => llvm::PtxKernel, + Conv::X86Fastcall => llvm::X86FastcallCallConv, + Conv::X86Intr => llvm::X86_Intr, + Conv::X86Stdcall => llvm::X86StdcallCallConv, + Conv::X86ThisCall => llvm::X86_ThisCall, + Conv::X86VectorCall => llvm::X86_VectorCall, + Conv::X86_64SysV => llvm::X86_64_SysV, + Conv::X86_64Win64 => llvm::X86_64_Win64, + } + } + + fn apply_attrs_llfn(&self, llfn: &'ll Value) { + let mut i = 0; + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); + i += 1; + }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_llfn(llvm::AttributePlace::ReturnValue, 
llfn); + } + PassMode::Indirect(ref attrs, _) => apply(attrs), + _ => {} + } + for arg in &self.args { + if arg.pad.is_some() { + apply(&ArgAttributes::new()); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | + PassMode::Indirect(ref attrs, None) => apply(attrs), + PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { + apply(attrs); + apply(extra_attrs); + } + PassMode::Pair(ref a, ref b) => { + apply(a); + apply(b); + } + PassMode::Cast(_) => apply(&ArgAttributes::new()), + } + } + } + + fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { + let mut i = 0; + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); + i += 1; + }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite); + } + PassMode::Indirect(ref attrs, _) => apply(attrs), + _ => {} + } + if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi { + // If the value is a boolean, the range is 0..2 and that ultimately + // become 0..0 when the type becomes i1, which would be rejected + // by the LLVM verifier. + match scalar.value { + layout::Int(..) 
if !scalar.is_bool() => { + let range = scalar.valid_range_exclusive(bx.cx); + if range.start != range.end { + bx.range_metadata(callsite, range); + } + } + _ => {} + } + } + for arg in &self.args { + if arg.pad.is_some() { + apply(&ArgAttributes::new()); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | + PassMode::Indirect(ref attrs, None) => apply(attrs), + PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { + apply(attrs); + apply(extra_attrs); + } + PassMode::Pair(ref a, ref b) => { + apply(a); + apply(b); + } + PassMode::Cast(_) => apply(&ArgAttributes::new()), + } + } + + let cconv = self.llvm_cconv(); + if cconv != llvm::CCallConv { + llvm::SetInstructionCallConv(callsite, cconv); + } + } +} diff --git a/src/librustc_codegen_llvm/allocator.rs b/src/librustc_codegen_llvm/allocator.rs new file mode 100644 index 000000000000..0beb8a8844c9 --- /dev/null +++ b/src/librustc_codegen_llvm/allocator.rs @@ -0,0 +1,103 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::ffi::CString; + +use attributes; +use libc::c_uint; +use rustc::middle::allocator::AllocatorKind; +use rustc::ty::TyCtxt; +use rustc_allocator::{ALLOCATOR_METHODS, AllocatorTy}; + +use ModuleLlvm; +use llvm::{self, False, True}; + +pub(crate) unsafe fn codegen(tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { + let llcx = &*mods.llcx; + let llmod = mods.llmod(); + let usize = match &tcx.sess.target.target.target_pointer_width[..] 
{ + "16" => llvm::LLVMInt16TypeInContext(llcx), + "32" => llvm::LLVMInt32TypeInContext(llcx), + "64" => llvm::LLVMInt64TypeInContext(llcx), + tws => bug!("Unsupported target word size for int: {}", tws), + }; + let i8 = llvm::LLVMInt8TypeInContext(llcx); + let i8p = llvm::LLVMPointerType(i8, 0); + let void = llvm::LLVMVoidTypeInContext(llcx); + + for method in ALLOCATOR_METHODS { + let mut args = Vec::new(); + for ty in method.inputs.iter() { + match *ty { + AllocatorTy::Layout => { + args.push(usize); // size + args.push(usize); // align + } + AllocatorTy::Ptr => args.push(i8p), + AllocatorTy::Usize => args.push(usize), + + AllocatorTy::ResultPtr | + AllocatorTy::Unit => panic!("invalid allocator arg"), + } + } + let output = match method.output { + AllocatorTy::ResultPtr => Some(i8p), + AllocatorTy::Unit => None, + + AllocatorTy::Layout | + AllocatorTy::Usize | + AllocatorTy::Ptr => panic!("invalid allocator output"), + }; + let ty = llvm::LLVMFunctionType(output.unwrap_or(void), + args.as_ptr(), + args.len() as c_uint, + False); + let name = CString::new(format!("__rust_{}", method.name)).unwrap(); + let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, + name.as_ptr(), + ty); + + if tcx.sess.target.target.options.default_hidden_visibility { + llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); + } + if tcx.sess.target.target.options.requires_uwtable { + attributes::emit_uwtable(llfn, true); + } + + let callee = CString::new(kind.fn_name(method.name)).unwrap(); + let callee = llvm::LLVMRustGetOrInsertFunction(llmod, + callee.as_ptr(), + ty); + llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); + + let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, + llfn, + "entry\0".as_ptr() as *const _); + + let llbuilder = llvm::LLVMCreateBuilderInContext(llcx); + llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb); + let args = args.iter().enumerate().map(|(i, _)| { + llvm::LLVMGetParam(llfn, i as c_uint) + }).collect::>(); + let ret = 
llvm::LLVMRustBuildCall(llbuilder, + callee, + args.as_ptr(), + args.len() as c_uint, + None, + "\0".as_ptr() as *const _); + llvm::LLVMSetTailCall(ret, True); + if output.is_some() { + llvm::LLVMBuildRet(llbuilder, ret); + } else { + llvm::LLVMBuildRetVoid(llbuilder); + } + llvm::LLVMDisposeBuilder(llbuilder); + } +} diff --git a/src/librustc_trans/asm.rs b/src/librustc_codegen_llvm/asm.rs similarity index 89% rename from src/librustc_trans/asm.rs rename to src/librustc_codegen_llvm/asm.rs index c7be0c4e67d7..5d27f8eab3ec 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -8,13 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! # Translation of inline assembly. - -use llvm::{self, ValueRef}; +use llvm; use common::*; use type_::Type; use type_of::LayoutLlvmExt; use builder::Builder; +use value::Value; use rustc::hir; @@ -26,11 +25,11 @@ use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM -pub fn trans_inline_asm<'a, 'tcx>( - bx: &Builder<'a, 'tcx>, +pub fn codegen_inline_asm( + bx: &Builder<'a, 'll, 'tcx>, ia: &hir::InlineAsm, - outputs: Vec>, - mut inputs: Vec + outputs: Vec>, + mut inputs: Vec<&'ll Value> ) { let mut ext_constraints = vec![]; let mut output_types = vec![]; @@ -59,8 +58,9 @@ pub fn trans_inline_asm<'a, 'tcx>( // Default per-arch clobbers // Basically what clang does let arch_clobbers = match &bx.sess().target.target.arch[..] 
{ - "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], - _ => Vec::new() + "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], + "mips" | "mips64" => vec!["~{$1}"], + _ => Vec::new() }; let all_constraints = @@ -112,14 +112,14 @@ pub fn trans_inline_asm<'a, 'tcx>( let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx, key.as_ptr() as *const c_char, key.len() as c_uint); - let val: llvm::ValueRef = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1)); } } -pub fn trans_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ga: &hir::GlobalAsm) { let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); unsafe { diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs new file mode 100644 index 000000000000..2b64642b766a --- /dev/null +++ b/src/librustc_codegen_llvm/attributes.rs @@ -0,0 +1,307 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! Set and unset common attributes on LLVM values. 
+ +use std::ffi::CString; + +use rustc::hir::{CodegenFnAttrFlags, CodegenFnAttrs}; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::session::Session; +use rustc::session::config::Sanitizer; +use rustc::ty::TyCtxt; +use rustc::ty::query::Providers; +use rustc_data_structures::sync::Lrc; +use rustc_data_structures::fx::FxHashMap; +use rustc_target::spec::PanicStrategy; + +use attributes; +use llvm::{self, Attribute}; +use llvm::AttributePlace::Function; +use llvm_util; +pub use syntax::attr::{self, InlineAttr}; + +use context::CodegenCx; +use value::Value; + +/// Mark LLVM function to use provided inline heuristic. +#[inline] +pub fn inline(val: &'ll Value, inline: InlineAttr) { + use self::InlineAttr::*; + match inline { + Hint => Attribute::InlineHint.apply_llfn(Function, val), + Always => Attribute::AlwaysInline.apply_llfn(Function, val), + Never => Attribute::NoInline.apply_llfn(Function, val), + None => { + Attribute::InlineHint.unapply_llfn(Function, val); + Attribute::AlwaysInline.unapply_llfn(Function, val); + Attribute::NoInline.unapply_llfn(Function, val); + }, + }; +} + +/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function. +#[inline] +pub fn emit_uwtable(val: &'ll Value, emit: bool) { + Attribute::UWTable.toggle_llfn(Function, val, emit); +} + +/// Tell LLVM whether the function can or cannot unwind. +#[inline] +pub fn unwind(val: &'ll Value, can_unwind: bool) { + Attribute::NoUnwind.toggle_llfn(Function, val, !can_unwind); +} + +/// Tell LLVM whether it should optimize function for size. +#[inline] +#[allow(dead_code)] // possibly useful function +pub fn set_optimize_for_size(val: &'ll Value, optimize: bool) { + Attribute::OptimizeForSize.toggle_llfn(Function, val, optimize); +} + +/// Tell LLVM if this function should be 'naked', i.e. skip the epilogue and prologue. 
+#[inline] +pub fn naked(val: &'ll Value, is_naked: bool) { + Attribute::Naked.toggle_llfn(Function, val, is_naked); +} + +pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { + if cx.sess().must_not_eliminate_frame_pointers() { + llvm::AddFunctionAttrStringValue( + llfn, llvm::AttributePlace::Function, + const_cstr!("no-frame-pointer-elim"), const_cstr!("true")); + } +} + +pub fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { + // Only use stack probes if the target specification indicates that we + // should be using stack probes + if !cx.sess().target.target.options.stack_probes { + return + } + + // Currently stack probes seem somewhat incompatible with the address + // sanitizer. With asan we're already protected from stack overflow anyway + // so we don't really need stack probes regardless. + match cx.sess().opts.debugging_opts.sanitizer { + Some(Sanitizer::Address) => return, + _ => {} + } + + // probestack doesn't play nice either with pgo-gen. + if cx.sess().opts.debugging_opts.pgo_gen.is_some() { + return; + } + + // probestack doesn't play nice either with gcov profiling. + if cx.sess().opts.debugging_opts.profile { + return; + } + + // Flag our internal `__rust_probestack` function as the stack probe symbol. + // This is defined in the `compiler-builtins` crate for each architecture. 
+ llvm::AddFunctionAttrStringValue( + llfn, llvm::AttributePlace::Function, + const_cstr!("probe-stack"), const_cstr!("__rust_probestack")); +} + +pub fn llvm_target_features(sess: &Session) -> impl Iterator { + const RUSTC_SPECIFIC_FEATURES: &[&str] = &[ + "crt-static", + ]; + + let cmdline = sess.opts.cg.target_feature.split(',') + .filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s))); + sess.target.target.options.features.split(',') + .chain(cmdline) + .filter(|l| !l.is_empty()) +} + +pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { + let target_cpu = CString::new(cx.tcx.sess.target_cpu().to_string()).unwrap(); + llvm::AddFunctionAttrStringValue( + llfn, + llvm::AttributePlace::Function, + const_cstr!("target-cpu"), + target_cpu.as_c_str()); +} + +/// Composite function which sets LLVM attributes for function depending on its AST (#[attribute]) +/// attributes. +pub fn from_fn_attrs( + cx: &CodegenCx<'ll, '_>, + llfn: &'ll Value, + id: Option, +) { + let codegen_fn_attrs = id.map(|id| cx.tcx.codegen_fn_attrs(id)) + .unwrap_or(CodegenFnAttrs::new()); + + inline(llfn, codegen_fn_attrs.inline); + + // The `uwtable` attribute according to LLVM is: + // + // This attribute indicates that the ABI being targeted requires that an + // unwind table entry be produced for this function even if we can show + // that no exceptions passes by it. This is normally the case for the + // ELF x86-64 abi, but it can be disabled for some compilation units. + // + // Typically when we're compiling with `-C panic=abort` (which implies this + // `no_landing_pads` check) we don't need `uwtable` because we can't + // generate any exceptions! On Windows, however, exceptions include other + // events such as illegal instructions, segfaults, etc. This means that on + // Windows we end up still needing the `uwtable` attribute even if the `-C + // panic=abort` flag is passed. 
+ // + // You can also find more info on why Windows is whitelisted here in: + // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078 + if !cx.sess().no_landing_pads() || + cx.sess().target.target.options.requires_uwtable { + attributes::emit_uwtable(llfn, true); + } + + set_frame_pointer_elimination(cx, llfn); + set_probestack(cx, llfn); + + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) { + Attribute::Cold.apply_llfn(Function, llfn); + } + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { + naked(llfn, true); + } + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) { + Attribute::NoAlias.apply_llfn( + llvm::AttributePlace::ReturnValue, llfn); + } + + let can_unwind = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::UNWIND) { + Some(true) + } else if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND) { + Some(false) + + // Perhaps questionable, but we assume that anything defined + // *in Rust code* may unwind. Foreign items like `extern "C" { + // fn foo(); }` are assumed not to unwind **unless** they have + // a `#[unwind]` attribute. + } else if id.map(|id| !cx.tcx.is_foreign_item(id)).unwrap_or(false) { + Some(true) + } else { + None + }; + + match can_unwind { + Some(false) => attributes::unwind(llfn, false), + Some(true) if cx.tcx.sess.panic_strategy() == PanicStrategy::Unwind => { + attributes::unwind(llfn, true); + } + Some(true) | None => {} + } + + // Always annotate functions with the target-cpu they are compiled for. + // Without this, ThinLTO won't inline Rust functions into Clang generated + // functions (because Clang annotates functions this way too). + // NOTE: For now we just apply this if -Zcross-lang-lto is specified, since + // it introduce a little overhead and isn't really necessary otherwise. 
+ if cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() { + apply_target_cpu_attr(cx, llfn); + } + + let features = llvm_target_features(cx.tcx.sess) + .map(|s| s.to_string()) + .chain( + codegen_fn_attrs.target_features + .iter() + .map(|f| { + let feature = &*f.as_str(); + format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature)) + }) + ) + .collect::>() + .join(","); + + if !features.is_empty() { + let val = CString::new(features).unwrap(); + llvm::AddFunctionAttrStringValue( + llfn, llvm::AttributePlace::Function, + const_cstr!("target-features"), &val); + } + + // Note that currently the `wasm-import-module` doesn't do anything, but + // eventually LLVM 7 should read this and ferry the appropriate import + // module to the output file. + if let Some(id) = id { + if cx.tcx.sess.target.target.arch == "wasm32" { + if let Some(module) = wasm_import_module(cx.tcx, id) { + llvm::AddFunctionAttrStringValue( + llfn, + llvm::AttributePlace::Function, + const_cstr!("wasm-import-module"), + &module, + ); + } + } + } +} + +pub fn provide(providers: &mut Providers) { + providers.target_features_whitelist = |tcx, cnum| { + assert_eq!(cnum, LOCAL_CRATE); + if tcx.sess.opts.actually_rustdoc { + // rustdoc needs to be able to document functions that use all the features, so + // whitelist them all + Lrc::new(llvm_util::all_known_features() + .map(|(a, b)| (a.to_string(), b.map(|s| s.to_string()))) + .collect()) + } else { + Lrc::new(llvm_util::target_feature_whitelist(tcx.sess) + .iter() + .map(|&(a, b)| (a.to_string(), b.map(|s| s.to_string()))) + .collect()) + } + }; + + provide_extern(providers); +} + +pub fn provide_extern(providers: &mut Providers) { + providers.wasm_import_module_map = |tcx, cnum| { + // Build up a map from DefId to a `NativeLibrary` structure, where + // `NativeLibrary` internally contains information about + // `#[link(wasm_import_module = "...")]` for example. 
+ let native_libs = tcx.native_libraries(cnum); + let mut def_id_to_native_lib = FxHashMap(); + for lib in native_libs.iter() { + if let Some(id) = lib.foreign_module { + def_id_to_native_lib.insert(id, lib); + } + } + + let mut ret = FxHashMap(); + for lib in tcx.foreign_modules(cnum).iter() { + let module = def_id_to_native_lib + .get(&lib.def_id) + .and_then(|s| s.wasm_import_module); + let module = match module { + Some(s) => s, + None => continue, + }; + for id in lib.foreign_items.iter() { + assert_eq!(id.krate, cnum); + ret.insert(*id, module.to_string()); + } + } + + Lrc::new(ret) + }; +} + +fn wasm_import_module(tcx: TyCtxt, id: DefId) -> Option { + tcx.wasm_import_module_map(id.krate) + .get(&id) + .map(|s| CString::new(&s[..]).unwrap()) +} diff --git a/src/librustc_trans/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs similarity index 94% rename from src/librustc_trans/back/archive.rs rename to src/librustc_codegen_llvm/back/archive.rs index 609629bffb9d..af9efc6d7c41 100644 --- a/src/librustc_trans/back/archive.rs +++ b/src/librustc_codegen_llvm/back/archive.rs @@ -48,7 +48,7 @@ enum Addition { }, Archive { archive: ArchiveRO, - skip: Box bool>, + skip: Box bool>, }, } @@ -149,7 +149,7 @@ impl<'a> ArchiveBuilder<'a> { // Ignoring obj file starting with the crate name // as simple comparison is not enough - there // might be also an extra name suffix - let obj_start = format!("{}", name); + let obj_start = name.to_owned(); self.add_archive(rlib, move |fname: &str| { // Ignore bytecode/metadata files, no matter the name. 
@@ -226,10 +226,13 @@ impl<'a> ArchiveBuilder<'a> { } fn build_with_llvm(&mut self, kind: ArchiveKind) -> io::Result<()> { - let mut archives = Vec::new(); + let removals = mem::replace(&mut self.removals, Vec::new()); + let mut additions = mem::replace(&mut self.additions, Vec::new()); let mut strings = Vec::new(); let mut members = Vec::new(); - let removals = mem::replace(&mut self.removals, Vec::new()); + + let dst = CString::new(self.config.dst.to_str().unwrap())?; + let should_update_symbols = self.should_update_symbols; unsafe { if let Some(archive) = self.src_archive() { @@ -246,22 +249,22 @@ impl<'a> ArchiveBuilder<'a> { let name = CString::new(child_name)?; members.push(llvm::LLVMRustArchiveMemberNew(ptr::null(), name.as_ptr(), - child.raw())); + Some(child.raw))); strings.push(name); } } - for addition in mem::replace(&mut self.additions, Vec::new()) { + for addition in &mut additions { match addition { Addition::File { path, name_in_archive } => { let path = CString::new(path.to_str().unwrap())?; - let name = CString::new(name_in_archive)?; + let name = CString::new(name_in_archive.clone())?; members.push(llvm::LLVMRustArchiveMemberNew(path.as_ptr(), name.as_ptr(), - ptr::null_mut())); + None)); strings.push(path); strings.push(name); } - Addition::Archive { archive, mut skip } => { + Addition::Archive { archive, skip } => { for child in archive.iter() { let child = child.map_err(string_to_io_error)?; if !is_relevant_child(&child) { @@ -284,21 +287,18 @@ impl<'a> ArchiveBuilder<'a> { let name = CString::new(child_name)?; let m = llvm::LLVMRustArchiveMemberNew(ptr::null(), name.as_ptr(), - child.raw()); + Some(child.raw)); members.push(m); strings.push(name); } - archives.push(archive); } } } - let dst = self.config.dst.to_str().unwrap().as_bytes(); - let dst = CString::new(dst)?; let r = llvm::LLVMRustWriteArchive(dst.as_ptr(), members.len() as libc::size_t, - members.as_ptr(), - self.should_update_symbols, + members.as_ptr() as *const &_, + 
should_update_symbols, kind); let ret = if r.into_result().is_err() { let err = llvm::LLVMRustGetLastError(); diff --git a/src/librustc_trans/back/bytecode.rs b/src/librustc_codegen_llvm/back/bytecode.rs similarity index 92% rename from src/librustc_trans/back/bytecode.rs rename to src/librustc_codegen_llvm/back/bytecode.rs index 212d1aaf055d..9a3dd9d2f881 100644 --- a/src/librustc_trans/back/bytecode.rs +++ b/src/librustc_codegen_llvm/back/bytecode.rs @@ -108,37 +108,37 @@ pub struct DecodedBytecode<'a> { impl<'a> DecodedBytecode<'a> { pub fn new(data: &'a [u8]) -> Result, String> { if !data.starts_with(RLIB_BYTECODE_OBJECT_MAGIC) { - return Err(format!("magic bytecode prefix not found")) + return Err("magic bytecode prefix not found".to_string()) } let data = &data[RLIB_BYTECODE_OBJECT_MAGIC.len()..]; if !data.starts_with(&[RLIB_BYTECODE_OBJECT_VERSION, 0, 0, 0]) { - return Err(format!("wrong version prefix found in bytecode")) + return Err("wrong version prefix found in bytecode".to_string()) } let data = &data[4..]; if data.len() < 4 { - return Err(format!("bytecode corrupted")) + return Err("bytecode corrupted".to_string()) } let identifier_len = unsafe { u32::from_le(ptr::read_unaligned(data.as_ptr() as *const u32)) as usize }; let data = &data[4..]; if data.len() < identifier_len { - return Err(format!("bytecode corrupted")) + return Err("bytecode corrupted".to_string()) } let identifier = match str::from_utf8(&data[..identifier_len]) { Ok(s) => s, - Err(_) => return Err(format!("bytecode corrupted")) + Err(_) => return Err("bytecode corrupted".to_string()) }; let data = &data[identifier_len..]; if data.len() < 8 { - return Err(format!("bytecode corrupted")) + return Err("bytecode corrupted".to_string()) } let bytecode_len = unsafe { u64::from_le(ptr::read_unaligned(data.as_ptr() as *const u64)) as usize }; let data = &data[8..]; if data.len() < bytecode_len { - return Err(format!("bytecode corrupted")) + return Err("bytecode corrupted".to_string()) } let 
encoded_bytecode = &data[..bytecode_len]; diff --git a/src/librustc_codegen_llvm/back/command.rs b/src/librustc_codegen_llvm/back/command.rs new file mode 100644 index 000000000000..9ebbdd7c3c93 --- /dev/null +++ b/src/librustc_codegen_llvm/back/command.rs @@ -0,0 +1,175 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A thin wrapper around `Command` in the standard library which allows us to +//! read the arguments that are built up. + +use std::ffi::{OsStr, OsString}; +use std::fmt; +use std::io; +use std::mem; +use std::process::{self, Output}; + +use rustc_target::spec::LldFlavor; + +#[derive(Clone)] +pub struct Command { + program: Program, + args: Vec, + env: Vec<(OsString, OsString)>, +} + +#[derive(Clone)] +enum Program { + Normal(OsString), + CmdBatScript(OsString), + Lld(OsString, LldFlavor) +} + +impl Command { + pub fn new>(program: P) -> Command { + Command::_new(Program::Normal(program.as_ref().to_owned())) + } + + pub fn bat_script>(program: P) -> Command { + Command::_new(Program::CmdBatScript(program.as_ref().to_owned())) + } + + pub fn lld>(program: P, flavor: LldFlavor) -> Command { + Command::_new(Program::Lld(program.as_ref().to_owned(), flavor)) + } + + fn _new(program: Program) -> Command { + Command { + program, + args: Vec::new(), + env: Vec::new(), + } + } + + pub fn arg>(&mut self, arg: P) -> &mut Command { + self._arg(arg.as_ref()); + self + } + + pub fn args(&mut self, args: I) -> &mut Command + where I: IntoIterator, + I::Item: AsRef, + { + for arg in args { + self._arg(arg.as_ref()); + } + self + } + + fn _arg(&mut self, arg: &OsStr) { + self.args.push(arg.to_owned()); + } + + pub fn env(&mut self, key: K, value: V) -> 
&mut Command + where K: AsRef, + V: AsRef + { + self._env(key.as_ref(), value.as_ref()); + self + } + + fn _env(&mut self, key: &OsStr, value: &OsStr) { + self.env.push((key.to_owned(), value.to_owned())); + } + + pub fn output(&mut self) -> io::Result { + self.command().output() + } + + pub fn command(&self) -> process::Command { + let mut ret = match self.program { + Program::Normal(ref p) => process::Command::new(p), + Program::CmdBatScript(ref p) => { + let mut c = process::Command::new("cmd"); + c.arg("/c").arg(p); + c + } + Program::Lld(ref p, flavor) => { + let mut c = process::Command::new(p); + c.arg("-flavor").arg(match flavor { + LldFlavor::Wasm => "wasm", + LldFlavor::Ld => "gnu", + LldFlavor::Link => "link", + LldFlavor::Ld64 => "darwin", + }); + c + } + }; + ret.args(&self.args); + ret.envs(self.env.clone()); + return ret + } + + // extensions + + pub fn get_args(&self) -> &[OsString] { + &self.args + } + + pub fn take_args(&mut self) -> Vec { + mem::replace(&mut self.args, Vec::new()) + } + + /// Returns a `true` if we're pretty sure that this'll blow OS spawn limits, + /// or `false` if we should attempt to spawn and see what the OS says. + pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool { + // We mostly only care about Windows in this method, on Unix the limits + // can be gargantuan anyway so we're pretty unlikely to hit them + if cfg!(unix) { + return false + } + + // Right now LLD doesn't support the `@` syntax of passing an argument + // through files, so regardless of the platform we try to go to the OS + // on this one. + if let Program::Lld(..) = self.program { + return false + } + + // Ok so on Windows to spawn a process is 32,768 characters in its + // command line [1]. Unfortunately we don't actually have access to that + // as it's calculated just before spawning. Instead we perform a + // poor-man's guess as to how long our command line will be. We're + // assuming here that we don't have to escape every character... 
+ // + // Turns out though that `cmd.exe` has even smaller limits, 8192 + // characters [2]. Linkers can often be batch scripts (for example + // Emscripten, Gecko's current build system) which means that we're + // running through batch scripts. These linkers often just forward + // arguments elsewhere (and maybe tack on more), so if we blow 8192 + // bytes we'll typically cause them to blow as well. + // + // Basically as a result just perform an inflated estimate of what our + // command line will look like and test if it's > 8192 (we actually + // test against 6k to artificially inflate our estimate). If all else + // fails we'll fall back to the normal unix logic of testing the OS + // error code if we fail to spawn and automatically re-spawning the + // linker with smaller arguments. + // + // [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx + // [2]: https://blogs.msdn.microsoft.com/oldnewthing/20031210-00/?p=41553 + + let estimated_command_line_len = + self.args.iter().map(|a| a.len()).sum::(); + estimated_command_line_len > 1024 * 6 + } +} + +impl fmt::Debug for Command { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.command().fmt(f) + } +} diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs new file mode 100644 index 000000000000..37c99932f565 --- /dev/null +++ b/src/librustc_codegen_llvm/back/link.rs @@ -0,0 +1,1631 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use back::wasm; +use cc::windows_registry; +use super::archive::{ArchiveBuilder, ArchiveConfig}; +use super::bytecode::RLIB_BYTECODE_EXTENSION; +use super::linker::Linker; +use super::command::Command; +use super::rpath::RPathConfig; +use super::rpath; +use metadata::METADATA_FILENAME; +use rustc::session::config::{self, DebugInfo, OutputFilenames, OutputType, PrintRequest}; +use rustc::session::config::{RUST_CGU_EXT, Lto}; +use rustc::session::filesearch; +use rustc::session::search_paths::PathKind; +use rustc::session::Session; +use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; +use rustc::middle::dependency_format::Linkage; +use {CodegenResults, CrateInfo}; +use rustc::util::common::time; +use rustc_fs_util::fix_windows_verbatim_for_gcc; +use rustc::hir::def_id::CrateNum; +use tempfile::{Builder as TempFileBuilder, TempDir}; +use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor}; +use rustc_data_structures::fx::FxHashSet; +use context::get_reloc_model; +use llvm; + +use std::ascii; +use std::char; +use std::env; +use std::fmt; +use std::fs; +use std::io; +use std::iter; +use std::path::{Path, PathBuf}; +use std::process::{Output, Stdio}; +use std::str; +use syntax::attr; + +pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default_output_for_target, + invalid_output_for_target, build_link_meta, out_filename, + check_file_is_writeable}; + +// The third parameter is for env vars, used on windows to set up the +// path for MSVC to find its DLLs, and gcc to find its bundled +// toolchain +pub fn get_linker(sess: &Session) -> (PathBuf, Command) { + // If our linker looks like a batch script on Windows then to execute this + // we'll need to spawn `cmd` explicitly. This is primarily done to handle + // emscripten where the linker is `emcc.bat` and needs to be spawned as + // `cmd /c emcc.bat ...`. 
+ // + // This worked historically but is needed manually since #42436 (regression + // was tagged as #42791) and some more info can be found on #44443 for + // emscripten itself. + let cmd = |linker: &Path| { + if let Some(linker) = linker.to_str() { + if cfg!(windows) && linker.ends_with(".bat") { + return Command::bat_script(linker) + } + } + match sess.linker_flavor() { + LinkerFlavor::Lld(f) => Command::lld(linker, f), + _ => Command::new(linker), + + } + }; + + let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); + + let linker_path = sess.opts.cg.linker.as_ref().map(|s| &**s) + .or(sess.target.target.options.linker.as_ref().map(|s| s.as_ref())) + .unwrap_or(match sess.linker_flavor() { + LinkerFlavor::Msvc => { + msvc_tool.as_ref().map(|t| t.path()).unwrap_or("link.exe".as_ref()) + } + LinkerFlavor::Em if cfg!(windows) => "emcc.bat".as_ref(), + LinkerFlavor::Em => "emcc".as_ref(), + LinkerFlavor::Gcc => "cc".as_ref(), + LinkerFlavor::Ld => "ld".as_ref(), + LinkerFlavor::Lld(_) => "lld".as_ref(), + }); + + let mut cmd = cmd(linker_path); + + // The compiler's sysroot often has some bundled tools, so add it to the + // PATH for the child. + let mut new_path = sess.host_filesearch(PathKind::All) + .get_tools_search_paths(); + let mut msvc_changed_path = false; + if sess.target.target.options.is_like_msvc { + if let Some(ref tool) = msvc_tool { + cmd.args(tool.args()); + for &(ref k, ref v) in tool.env() { + if k == "PATH" { + new_path.extend(env::split_paths(v)); + msvc_changed_path = true; + } else { + cmd.env(k, v); + } + } + } + } + + if !msvc_changed_path { + if let Some(path) = env::var_os("PATH") { + new_path.extend(env::split_paths(&path)); + } + } + cmd.env("PATH", env::join_paths(new_path).unwrap()); + + (linker_path.to_path_buf(), cmd) +} + +pub fn remove(sess: &Session, path: &Path) { + match fs::remove_file(path) { + Ok(..) 
=> {} + Err(e) => { + sess.err(&format!("failed to remove {}: {}", + path.display(), + e)); + } + } +} + +/// Perform the linkage portion of the compilation phase. This will generate all +/// of the requested outputs for this compilation session. +pub(crate) fn link_binary(sess: &Session, + codegen_results: &CodegenResults, + outputs: &OutputFilenames, + crate_name: &str) -> Vec { + let mut out_filenames = Vec::new(); + for &crate_type in sess.crate_types.borrow().iter() { + // Ignore executable crates if we have -Z no-codegen, as they will error. + let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata); + if (sess.opts.debugging_opts.no_codegen || !sess.opts.output_types.should_codegen()) && + !output_metadata && + crate_type == config::CrateType::Executable { + continue; + } + + if invalid_output_for_target(sess, crate_type) { + bug!("invalid output type `{:?}` for target os `{}`", + crate_type, sess.opts.target_triple); + } + let mut out_files = link_binary_output(sess, + codegen_results, + crate_type, + outputs, + crate_name); + out_filenames.append(&mut out_files); + } + + // Remove the temporary object file and metadata if we aren't saving temps + if !sess.opts.cg.save_temps { + if sess.opts.output_types.should_codegen() && + !preserve_objects_for_their_debuginfo(sess) + { + for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) { + remove(sess, obj); + } + } + for obj in codegen_results.modules.iter().filter_map(|m| m.bytecode_compressed.as_ref()) { + remove(sess, obj); + } + if let Some(ref obj) = codegen_results.metadata_module.object { + remove(sess, obj); + } + if let Some(ref allocator) = codegen_results.allocator_module { + if let Some(ref obj) = allocator.object { + remove(sess, obj); + } + if let Some(ref bc) = allocator.bytecode_compressed { + remove(sess, bc); + } + } + } + + out_filenames +} + +/// Returns a boolean indicating whether we should preserve the object files on +/// the filesystem for 
their debug information. This is often useful with +/// split-dwarf like schemes. +fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { + // If the objects don't have debuginfo there's nothing to preserve. + if sess.opts.debuginfo == DebugInfo::None { + return false + } + + // If we're only producing artifacts that are archives, no need to preserve + // the objects as they're losslessly contained inside the archives. + let output_linked = sess.crate_types.borrow() + .iter() + .any(|x| *x != config::CrateType::Rlib && *x != config::CrateType::Staticlib); + if !output_linked { + return false + } + + // If we're on OSX then the equivalent of split dwarf is turned on by + // default. The final executable won't actually have any debug information + // except it'll have pointers to elsewhere. Historically we've always run + // `dsymutil` to "link all the dwarf together" but this is actually sort of + // a bummer for incremental compilation! (the whole point of split dwarf is + // that you don't do this sort of dwarf link). + // + // Basically as a result this just means that if we're on OSX and we're + // *not* running dsymutil then the object files are the only source of truth + // for debug information, so we must preserve them. + if sess.target.target.options.is_like_osx { + match sess.opts.debugging_opts.run_dsymutil { + // dsymutil is not being run, preserve objects + Some(false) => return true, + + // dsymutil is being run, no need to preserve the objects + Some(true) => return false, + + // The default historical behavior was to always run dsymutil, so + // we're preserving that temporarily, but we're likely to switch the + // default soon. 
+ None => return false, + } + } + + false +} + +fn filename_for_metadata(sess: &Session, crate_name: &str, outputs: &OutputFilenames) -> PathBuf { + let out_filename = outputs.single_output_file.clone() + .unwrap_or(outputs + .out_directory + .join(&format!("lib{}{}.rmeta", crate_name, sess.opts.cg.extra_filename))); + check_file_is_writeable(&out_filename, sess); + out_filename +} + +pub(crate) fn each_linked_rlib(sess: &Session, + info: &CrateInfo, + f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { + let crates = info.used_crates_static.iter(); + let fmts = sess.dependency_formats.borrow(); + let fmts = fmts.get(&config::CrateType::Executable) + .or_else(|| fmts.get(&config::CrateType::Staticlib)) + .or_else(|| fmts.get(&config::CrateType::Cdylib)) + .or_else(|| fmts.get(&config::CrateType::ProcMacro)); + let fmts = match fmts { + Some(f) => f, + None => return Err("could not find formats for rlibs".to_string()) + }; + for &(cnum, ref path) in crates { + match fmts.get(cnum.as_usize() - 1) { + Some(&Linkage::NotLinked) | + Some(&Linkage::IncludedFromDylib) => continue, + Some(_) => {} + None => return Err("could not find formats for rlibs".to_string()) + } + let name = &info.crate_name[&cnum]; + let path = match *path { + LibSource::Some(ref p) => p, + LibSource::MetadataOnly => { + return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", + name)) + } + LibSource::None => { + return Err(format!("could not find rlib for: `{}`", name)) + } + }; + f(cnum, &path); + } + Ok(()) +} + +/// Returns a boolean indicating whether the specified crate should be ignored +/// during LTO. +/// +/// Crates ignored during LTO are not lumped together in the "massive object +/// file" that we create and are linked in their normal rlib states. See +/// comments below for what crates do not participate in LTO. +/// +/// It's unusual for a crate to not participate in LTO. 
Typically only +/// compiler-specific and unstable crates have a reason to not participate in +/// LTO. +pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { + // If our target enables builtin function lowering in LLVM then the + // crates providing these functions don't participate in LTO (e.g. + // no_builtins or compiler builtins crates). + !sess.target.target.options.no_builtins && + (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum)) +} + +fn link_binary_output(sess: &Session, + codegen_results: &CodegenResults, + crate_type: config::CrateType, + outputs: &OutputFilenames, + crate_name: &str) -> Vec { + for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) { + check_file_is_writeable(obj, sess); + } + + let mut out_filenames = vec![]; + + if outputs.outputs.contains_key(&OutputType::Metadata) { + let out_filename = filename_for_metadata(sess, crate_name, outputs); + // To avoid races with another rustc process scanning the output directory, + // we need to write the file somewhere else and atomically move it to its + // final destination, with a `fs::rename` call. In order for the rename to + // always succeed, the temporary file needs to be on the same filesystem, + // which is why we create it inside the output directory specifically. 
+ let metadata_tmpdir = match TempFileBuilder::new() + .prefix("rmeta") + .tempdir_in(out_filename.parent().unwrap()) + { + Ok(tmpdir) => tmpdir, + Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)), + }; + let metadata = emit_metadata(sess, codegen_results, &metadata_tmpdir); + if let Err(e) = fs::rename(metadata, &out_filename) { + sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e)); + } + out_filenames.push(out_filename); + } + + let tmpdir = match TempFileBuilder::new().prefix("rustc").tempdir() { + Ok(tmpdir) => tmpdir, + Err(err) => sess.fatal(&format!("couldn't create a temp dir: {}", err)), + }; + + if outputs.outputs.should_codegen() { + let out_filename = out_filename(sess, crate_type, outputs, crate_name); + match crate_type { + config::CrateType::Rlib => { + link_rlib(sess, + codegen_results, + RlibFlavor::Normal, + &out_filename, + &tmpdir).build(); + } + config::CrateType::Staticlib => { + link_staticlib(sess, codegen_results, &out_filename, &tmpdir); + } + _ => { + link_natively(sess, crate_type, &out_filename, codegen_results, tmpdir.path()); + } + } + out_filenames.push(out_filename); + } + + if sess.opts.cg.save_temps { + let _ = tmpdir.into_path(); + } + + out_filenames +} + +fn archive_search_paths(sess: &Session) -> Vec { + let mut search = Vec::new(); + sess.target_filesearch(PathKind::Native).for_each_lib_search_path(|path, _| { + search.push(path.to_path_buf()); + }); + return search; +} + +fn archive_config<'a>(sess: &'a Session, + output: &Path, + input: Option<&Path>) -> ArchiveConfig<'a> { + ArchiveConfig { + sess, + dst: output.to_path_buf(), + src: input.map(|p| p.to_path_buf()), + lib_search_paths: archive_search_paths(sess), + } +} + +/// We use a temp directory here to avoid races between concurrent rustc processes, +/// such as builds in the same directory using the same filename for metadata while +/// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a +/// 
directory being searched for `extern crate` (observing an incomplete file). +/// The returned path is the temporary file containing the complete metadata. +fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir) + -> PathBuf { + let out_filename = tmpdir.path().join(METADATA_FILENAME); + let result = fs::write(&out_filename, &codegen_results.metadata.raw_data); + + if let Err(e) = result { + sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e)); + } + + out_filename +} + +enum RlibFlavor { + Normal, + StaticlibBase, +} + +// Create an 'rlib' +// +// An rlib in its current incarnation is essentially a renamed .a file. The +// rlib primarily contains the object file of the crate, but it also contains +// all of the object files from native libraries. This is done by unzipping +// native libraries and inserting all of the contents into this archive. +fn link_rlib<'a>(sess: &'a Session, + codegen_results: &CodegenResults, + flavor: RlibFlavor, + out_filename: &Path, + tmpdir: &TempDir) -> ArchiveBuilder<'a> { + info!("preparing rlib to {:?}", out_filename); + let mut ab = ArchiveBuilder::new(archive_config(sess, out_filename, None)); + + for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) { + ab.add_file(obj); + } + + // Note that in this loop we are ignoring the value of `lib.cfg`. That is, + // we may not be configured to actually include a static library if we're + // adding it here. That's because later when we consume this rlib we'll + // decide whether we actually needed the static library or not. + // + // To do this "correctly" we'd need to keep track of which libraries added + // which object files to the archive. We don't do that here, however. The + // #[link(cfg(..))] feature is unstable, though, and only intended to get + // liblibc working. 
In that sense the check below just indicates that if + // there are any libraries we want to omit object files for at link time we + // just exclude all custom object files. + // + // Eventually if we want to stabilize or flesh out the #[link(cfg(..))] + // feature then we'll need to figure out how to record what objects were + // loaded from the libraries found here and then encode that into the + // metadata of the rlib we're generating somehow. + for lib in codegen_results.crate_info.used_libraries.iter() { + match lib.kind { + NativeLibraryKind::NativeStatic => {} + NativeLibraryKind::NativeStaticNobundle | + NativeLibraryKind::NativeFramework | + NativeLibraryKind::NativeUnknown => continue, + } + if let Some(name) = lib.name { + ab.add_native_library(&name.as_str()); + } + } + + // After adding all files to the archive, we need to update the + // symbol table of the archive. + ab.update_symbols(); + + // Note that it is important that we add all of our non-object "magical + // files" *after* all of the object files in the archive. The reason for + // this is as follows: + // + // * When performing LTO, this archive will be modified to remove + // objects from above. The reason for this is described below. + // + // * When the system linker looks at an archive, it will attempt to + // determine the architecture of the archive in order to see whether its + // linkable. + // + // The algorithm for this detection is: iterate over the files in the + // archive. Skip magical SYMDEF names. Interpret the first file as an + // object file. Read architecture from the object file. + // + // * As one can probably see, if "metadata" and "foo.bc" were placed + // before all of the objects, then the architecture of this archive would + // not be correctly inferred once 'foo.o' is removed. + // + // Basically, all this means is that this code should not move above the + // code above. 
+ match flavor { + RlibFlavor::Normal => { + // Instead of putting the metadata in an object file section, rlibs + // contain the metadata in a separate file. + ab.add_file(&emit_metadata(sess, codegen_results, tmpdir)); + + // For LTO purposes, the bytecode of this library is also inserted + // into the archive. + for bytecode in codegen_results + .modules + .iter() + .filter_map(|m| m.bytecode_compressed.as_ref()) + { + ab.add_file(bytecode); + } + + // After adding all files to the archive, we need to update the + // symbol table of the archive. This currently dies on macOS (see + // #11162), and isn't necessary there anyway + if !sess.target.target.options.is_like_osx { + ab.update_symbols(); + } + } + + RlibFlavor::StaticlibBase => { + let obj = codegen_results.allocator_module + .as_ref() + .and_then(|m| m.object.as_ref()); + if let Some(obj) = obj { + ab.add_file(obj); + } + } + } + + ab +} + +// Create a static archive +// +// This is essentially the same thing as an rlib, but it also involves adding +// all of the upstream crates' objects into the archive. This will slurp in +// all of the native libraries of upstream dependencies as well. +// +// Additionally, there's no way for us to link dynamic libraries, so we warn +// about all dynamic library dependencies that they're not linked in. +// +// There's no need to include metadata in a static archive, so ensure to not +// link in the metadata object file (and also don't prepare the archive with a +// metadata file). 
+fn link_staticlib(sess: &Session, + codegen_results: &CodegenResults, + out_filename: &Path, + tempdir: &TempDir) { + let mut ab = link_rlib(sess, + codegen_results, + RlibFlavor::StaticlibBase, + out_filename, + tempdir); + let mut all_native_libs = vec![]; + + let res = each_linked_rlib(sess, &codegen_results.crate_info, &mut |cnum, path| { + let name = &codegen_results.crate_info.crate_name[&cnum]; + let native_libs = &codegen_results.crate_info.native_libraries[&cnum]; + + // Here when we include the rlib into our staticlib we need to make a + // decision whether to include the extra object files along the way. + // These extra object files come from statically included native + // libraries, but they may be cfg'd away with #[link(cfg(..))]. + // + // This unstable feature, though, only needs liblibc to work. The only + // use case there is where musl is statically included in liblibc.rlib, + // so if we don't want the included version we just need to skip it. As + // a result the logic here is that if *any* linked library is cfg'd away + // we just skip all object files. + // + // Clearly this is not sufficient for a general purpose feature, and + // we'd want to read from the library's metadata to determine which + // object files come from where and selectively skip them. 
+ let skip_object_files = native_libs.iter().any(|lib| { + lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib) + }); + ab.add_rlib(path, + &name.as_str(), + are_upstream_rust_objects_already_included(sess) && + !ignored_for_lto(sess, &codegen_results.crate_info, cnum), + skip_object_files).unwrap(); + + all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned()); + }); + if let Err(e) = res { + sess.fatal(&e); + } + + ab.update_symbols(); + ab.build(); + + if !all_native_libs.is_empty() { + if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) { + print_native_static_libs(sess, &all_native_libs); + } + } +} + +fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { + let lib_args: Vec<_> = all_native_libs.iter() + .filter(|l| relevant_lib(sess, l)) + .filter_map(|lib| { + let name = lib.name?; + match lib.kind { + NativeLibraryKind::NativeStaticNobundle | + NativeLibraryKind::NativeUnknown => { + if sess.target.target.options.is_like_msvc { + Some(format!("{}.lib", name)) + } else { + Some(format!("-l{}", name)) + } + }, + NativeLibraryKind::NativeFramework => { + // ld-only syntax, since there are no frameworks in MSVC + Some(format!("-framework {}", name)) + }, + // These are included, no need to print them + NativeLibraryKind::NativeStatic => None, + } + }) + .collect(); + if !lib_args.is_empty() { + sess.note_without_error("Link against the following native artifacts when linking \ + against this static library. The order and any duplication \ + can be significant on some platforms."); + // Prefix for greppability + sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" "))); + } +} + +// Create a dynamic library or executable +// +// This will invoke the system linker/cc to create the resulting file. This +// links to all upstream files as well. 
+fn link_natively(sess: &Session, + crate_type: config::CrateType, + out_filename: &Path, + codegen_results: &CodegenResults, + tmpdir: &Path) { + info!("preparing {:?} to {:?}", crate_type, out_filename); + let flavor = sess.linker_flavor(); + + // The invocations of cc share some flags across platforms + let (pname, mut cmd) = get_linker(sess); + + let root = sess.target_filesearch(PathKind::Native).get_lib_path(); + if let Some(args) = sess.target.target.options.pre_link_args.get(&flavor) { + cmd.args(args); + } + if let Some(args) = sess.target.target.options.pre_link_args_crt.get(&flavor) { + if sess.crt_static() { + cmd.args(args); + } + } + if let Some(ref args) = sess.opts.debugging_opts.pre_link_args { + cmd.args(args); + } + cmd.args(&sess.opts.debugging_opts.pre_link_arg); + + let pre_link_objects = if crate_type == config::CrateType::Executable { + &sess.target.target.options.pre_link_objects_exe + } else { + &sess.target.target.options.pre_link_objects_dll + }; + for obj in pre_link_objects { + cmd.arg(root.join(obj)); + } + + if crate_type == config::CrateType::Executable && sess.crt_static() { + for obj in &sess.target.target.options.pre_link_objects_exe_crt { + cmd.arg(root.join(obj)); + } + } + + if sess.target.target.options.is_like_emscripten { + cmd.arg("-s"); + cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort { + "DISABLE_EXCEPTION_CATCHING=1" + } else { + "DISABLE_EXCEPTION_CATCHING=0" + }); + } + + { + let mut linker = codegen_results.linker_info.to_linker(cmd, &sess); + link_args(&mut *linker, sess, crate_type, tmpdir, + out_filename, codegen_results); + cmd = linker.finalize(); + } + if let Some(args) = sess.target.target.options.late_link_args.get(&flavor) { + cmd.args(args); + } + for obj in &sess.target.target.options.post_link_objects { + cmd.arg(root.join(obj)); + } + if sess.crt_static() { + for obj in &sess.target.target.options.post_link_objects_crt { + cmd.arg(root.join(obj)); + } + } + if let Some(args) = 
sess.target.target.options.post_link_args.get(&flavor) { + cmd.args(args); + } + for &(ref k, ref v) in &sess.target.target.options.link_env { + cmd.env(k, v); + } + + if sess.opts.debugging_opts.print_link_args { + println!("{:?}", &cmd); + } + + // May have not found libraries in the right formats. + sess.abort_if_errors(); + + // Invoke the system linker + // + // Note that there's a terribly awful hack that really shouldn't be present + // in any compiler. Here an environment variable is supported to + // automatically retry the linker invocation if the linker looks like it + // segfaulted. + // + // Gee that seems odd, normally segfaults are things we want to know about! + // Unfortunately though in rust-lang/rust#38878 we're experiencing the + // linker segfaulting on Travis quite a bit which is causing quite a bit of + // pain to land PRs when they spuriously fail due to a segfault. + // + // The issue #38878 has some more debugging information on it as well, but + // this unfortunately looks like it's just a race condition in macOS's linker + // with some thread pool working in the background. It seems that no one + // currently knows a fix for this so in the meantime we're left with this... + info!("{:?}", &cmd); + let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok(); + let mut prog; + let mut i = 0; + loop { + i += 1; + prog = time(sess, "running linker", || { + exec_linker(sess, &mut cmd, out_filename, tmpdir) + }); + let output = match prog { + Ok(ref output) => output, + Err(_) => break, + }; + if output.status.success() { + break + } + let mut out = output.stderr.clone(); + out.extend(&output.stdout); + let out = String::from_utf8_lossy(&out); + + // Check to see if the link failed with "unrecognized command line option: + // '-no-pie'" for gcc or "unknown argument: '-no-pie'" for clang. If so, + // reperform the link step without the -no-pie option. 
This is safe because + // if the linker doesn't support -no-pie then it should not default to + // linking executables as pie. Different versions of gcc seem to use + // different quotes in the error message so don't check for them. + if sess.target.target.options.linker_is_gnu && + sess.linker_flavor() != LinkerFlavor::Ld && + (out.contains("unrecognized command line option") || + out.contains("unknown argument")) && + out.contains("-no-pie") && + cmd.get_args().iter().any(|e| e.to_string_lossy() == "-no-pie") { + info!("linker output: {:?}", out); + warn!("Linker does not support -no-pie command line option. Retrying without."); + for arg in cmd.take_args() { + if arg.to_string_lossy() != "-no-pie" { + cmd.arg(arg); + } + } + info!("{:?}", &cmd); + continue; + } + if !retry_on_segfault || i > 3 { + break + } + let msg_segv = "clang: error: unable to execute command: Segmentation fault: 11"; + let msg_bus = "clang: error: unable to execute command: Bus error: 10"; + if !(out.contains(msg_segv) || out.contains(msg_bus)) { + break + } + + warn!( + "looks like the linker segfaulted when we tried to call it, \ + automatically retrying again. 
cmd = {:?}, out = {}.", + cmd, + out, + ); + } + + match prog { + Ok(prog) => { + fn escape_string(s: &[u8]) -> String { + str::from_utf8(s).map(|s| s.to_owned()) + .unwrap_or_else(|_| { + let mut x = "Non-UTF-8 output: ".to_string(); + x.extend(s.iter() + .flat_map(|&b| ascii::escape_default(b)) + .map(|b| char::from_u32(b as u32).unwrap())); + x + }) + } + if !prog.status.success() { + let mut output = prog.stderr.clone(); + output.extend_from_slice(&prog.stdout); + sess.struct_err(&format!("linking with `{}` failed: {}", + pname.display(), + prog.status)) + .note(&format!("{:?}", &cmd)) + .note(&escape_string(&output)) + .emit(); + sess.abort_if_errors(); + } + info!("linker stderr:\n{}", escape_string(&prog.stderr)); + info!("linker stdout:\n{}", escape_string(&prog.stdout)); + }, + Err(e) => { + let linker_not_found = e.kind() == io::ErrorKind::NotFound; + + let mut linker_error = { + if linker_not_found { + sess.struct_err(&format!("linker `{}` not found", pname.display())) + } else { + sess.struct_err(&format!("could not exec the linker `{}`", pname.display())) + } + }; + + linker_error.note(&e.to_string()); + + if !linker_not_found { + linker_error.note(&format!("{:?}", &cmd)); + } + + linker_error.emit(); + + if sess.target.target.options.is_like_msvc && linker_not_found { + sess.note_without_error("the msvc targets depend on the msvc linker \ + but `link.exe` was not found"); + sess.note_without_error("please ensure that VS 2013, VS 2015 or VS 2017 \ + was installed with the Visual C++ option"); + } + sess.abort_if_errors(); + } + } + + + // On macOS, debuggers need this utility to get run to do some munging of + // the symbols. Note, though, that if the object files are being preserved + // for their debug information there's no need for us to run dsymutil. 
+ if sess.target.target.options.is_like_osx && + sess.opts.debuginfo != DebugInfo::None && + !preserve_objects_for_their_debuginfo(sess) + { + match Command::new("dsymutil").arg(out_filename).output() { + Ok(..) => {} + Err(e) => sess.fatal(&format!("failed to run dsymutil: {}", e)), + } + } + + if sess.opts.target_triple.triple() == "wasm32-unknown-unknown" { + wasm::rewrite_imports(&out_filename, &codegen_results.crate_info.wasm_imports); + } +} + +fn exec_linker(sess: &Session, cmd: &mut Command, out_filename: &Path, tmpdir: &Path) + -> io::Result +{ + // When attempting to spawn the linker we run a risk of blowing out the + // size limits for spawning a new process with respect to the arguments + // we pass on the command line. + // + // Here we attempt to handle errors from the OS saying "your list of + // arguments is too big" by reinvoking the linker again with an `@`-file + // that contains all the arguments. The theory is that this is then + // accepted on all linkers and the linker will read all its options out of + // there instead of looking at the command line. 
+ if !cmd.very_likely_to_exceed_some_spawn_limit() { + match cmd.command().stdout(Stdio::piped()).stderr(Stdio::piped()).spawn() { + Ok(child) => { + let output = child.wait_with_output(); + flush_linked_file(&output, out_filename)?; + return output; + } + Err(ref e) if command_line_too_big(e) => { + info!("command line to linker was too big: {}", e); + } + Err(e) => return Err(e) + } + } + + info!("falling back to passing arguments to linker via an @-file"); + let mut cmd2 = cmd.clone(); + let mut args = String::new(); + for arg in cmd2.take_args() { + args.push_str(&Escape { + arg: arg.to_str().unwrap(), + is_like_msvc: sess.target.target.options.is_like_msvc, + }.to_string()); + args.push_str("\n"); + } + let file = tmpdir.join("linker-arguments"); + let bytes = if sess.target.target.options.is_like_msvc { + let mut out = Vec::with_capacity((1 + args.len()) * 2); + // start the stream with a UTF-16 BOM + for c in iter::once(0xFEFF).chain(args.encode_utf16()) { + // encode in little endian + out.push(c as u8); + out.push((c >> 8) as u8); + } + out + } else { + args.into_bytes() + }; + fs::write(&file, &bytes)?; + cmd2.arg(format!("@{}", file.display())); + info!("invoking linker {:?}", cmd2); + let output = cmd2.output(); + flush_linked_file(&output, out_filename)?; + return output; + + #[cfg(unix)] + fn flush_linked_file(_: &io::Result, _: &Path) -> io::Result<()> { + Ok(()) + } + + #[cfg(windows)] + fn flush_linked_file(command_output: &io::Result, out_filename: &Path) + -> io::Result<()> + { + // On Windows, under high I/O load, output buffers are sometimes not flushed, + // even long after process exit, causing nasty, non-reproducible output bugs. + // + // File::sync_all() calls FlushFileBuffers() down the line, which solves the problem. 
+ // + // A full writeup of the original Chrome bug can be found at + // randomascii.wordpress.com/2018/02/25/compiler-bug-linker-bug-windows-kernel-bug/amp + + if let &Ok(ref out) = command_output { + if out.status.success() { + if let Ok(of) = fs::OpenOptions::new().write(true).open(out_filename) { + of.sync_all()?; + } + } + } + + Ok(()) + } + + #[cfg(unix)] + fn command_line_too_big(err: &io::Error) -> bool { + err.raw_os_error() == Some(::libc::E2BIG) + } + + #[cfg(windows)] + fn command_line_too_big(err: &io::Error) -> bool { + const ERROR_FILENAME_EXCED_RANGE: i32 = 206; + err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE) + } + + struct Escape<'a> { + arg: &'a str, + is_like_msvc: bool, + } + + impl<'a> fmt::Display for Escape<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.is_like_msvc { + // This is "documented" at + // https://msdn.microsoft.com/en-us/library/4xdcbak7.aspx + // + // Unfortunately there's not a great specification of the + // syntax I could find online (at least) but some local + // testing showed that this seemed sufficient-ish to catch + // at least a few edge cases. + write!(f, "\"")?; + for c in self.arg.chars() { + match c { + '"' => write!(f, "\\{}", c)?, + c => write!(f, "{}", c)?, + } + } + write!(f, "\"")?; + } else { + // This is documented at https://linux.die.net/man/1/ld, namely: + // + // > Options in file are separated by whitespace. A whitespace + // > character may be included in an option by surrounding the + // > entire option in either single or double quotes. Any + // > character (including a backslash) may be included by + // > prefixing the character to be included with a backslash. + // + // We put an argument on each line, so all we need to do is + // ensure the line is interpreted as one whole argument. 
+ for c in self.arg.chars() { + match c { + '\\' | + ' ' => write!(f, "\\{}", c)?, + c => write!(f, "{}", c)?, + } + } + } + Ok(()) + } + } +} + +fn link_args(cmd: &mut dyn Linker, + sess: &Session, + crate_type: config::CrateType, + tmpdir: &Path, + out_filename: &Path, + codegen_results: &CodegenResults) { + + // Linker plugins should be specified early in the list of arguments + cmd.cross_lang_lto(); + + // The default library location, we need this to find the runtime. + // The location of crates will be determined as needed. + let lib_path = sess.target_filesearch(PathKind::All).get_lib_path(); + + // target descriptor + let t = &sess.target.target; + + cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path)); + for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) { + cmd.add_object(obj); + } + cmd.output_filename(out_filename); + + if crate_type == config::CrateType::Executable && + sess.target.target.options.is_like_windows { + if let Some(ref s) = codegen_results.windows_subsystem { + cmd.subsystem(s); + } + } + + // If we're building a dynamic library then some platforms need to make sure + // that all symbols are exported correctly from the dynamic library. + if crate_type != config::CrateType::Executable || + sess.target.target.options.is_like_emscripten { + cmd.export_symbols(tmpdir, crate_type); + } + + // When linking a dynamic library, we put the metadata into a section of the + // executable. This metadata is in a separate object file from the main + // object file, so we link that in here. 
+ if crate_type == config::CrateType::Dylib || + crate_type == config::CrateType::ProcMacro { + if let Some(obj) = codegen_results.metadata_module.object.as_ref() { + cmd.add_object(obj); + } + } + + let obj = codegen_results.allocator_module + .as_ref() + .and_then(|m| m.object.as_ref()); + if let Some(obj) = obj { + cmd.add_object(obj); + } + + // Try to strip as much out of the generated object by removing unused + // sections if possible. See more comments in linker.rs + if !sess.opts.cg.link_dead_code { + let keep_metadata = crate_type == config::CrateType::Dylib; + cmd.gc_sections(keep_metadata); + } + + let used_link_args = &codegen_results.crate_info.link_args; + + if crate_type == config::CrateType::Executable { + let mut position_independent_executable = false; + + if t.options.position_independent_executables { + let empty_vec = Vec::new(); + let args = sess.opts.cg.link_args.as_ref().unwrap_or(&empty_vec); + let more_args = &sess.opts.cg.link_arg; + let mut args = args.iter().chain(more_args.iter()).chain(used_link_args.iter()); + + if get_reloc_model(sess) == llvm::RelocMode::PIC + && !sess.crt_static() && !args.any(|x| *x == "-static") { + position_independent_executable = true; + } + } + + if position_independent_executable { + cmd.position_independent_executable(); + } else { + // recent versions of gcc can be configured to generate position + // independent executables by default. We have to pass -no-pie to + // explicitly turn that off. Not applicable to ld. 
+ if sess.target.target.options.linker_is_gnu + && sess.linker_flavor() != LinkerFlavor::Ld { + cmd.no_position_independent_executable(); + } + } + } + + let relro_level = match sess.opts.debugging_opts.relro_level { + Some(level) => level, + None => t.options.relro_level, + }; + match relro_level { + RelroLevel::Full => { + cmd.full_relro(); + }, + RelroLevel::Partial => { + cmd.partial_relro(); + }, + RelroLevel::Off => { + cmd.no_relro(); + }, + RelroLevel::None => { + }, + } + + // Pass optimization flags down to the linker. + cmd.optimize(); + + // Pass debuginfo flags down to the linker. + cmd.debuginfo(); + + // We want to prevent the compiler from accidentally leaking in any system + // libraries, so we explicitly ask gcc to not link to any libraries by + // default. Note that this does not happen for windows because windows pulls + // in some large number of libraries and I couldn't quite figure out which + // subset we wanted. + if t.options.no_default_libraries { + cmd.no_default_libraries(); + } + + // Take careful note of the ordering of the arguments we pass to the linker + // here. Linkers will assume that things on the left depend on things to the + // right. Things on the right cannot depend on things on the left. This is + // all formally implemented in terms of resolving symbols (libs on the right + // resolve unknown symbols of libs on the left, but not vice versa). + // + // For this reason, we have organized the arguments we pass to the linker as + // such: + // + // 1. The local object that LLVM just generated + // 2. Local native libraries + // 3. Upstream rust libraries + // 4. Upstream native libraries + // + // The rationale behind this ordering is that those items lower down in the + // list can't depend on items higher up in the list. For example nothing can + // depend on what we just generated (e.g. that'd be a circular dependency). 
+ // Upstream rust libraries are not allowed to depend on our local native + // libraries as that would violate the structure of the DAG, in that + // scenario they are required to link to them as well in a shared fashion. + // + // Note that upstream rust libraries may contain native dependencies as + // well, but they also can't depend on what we just started to add to the + // link line. And finally upstream native libraries can't depend on anything + // in this DAG so far because they're only dylibs and dylibs can only depend + // on other dylibs (e.g. other native deps). + add_local_native_libraries(cmd, sess, codegen_results); + add_upstream_rust_crates(cmd, sess, codegen_results, crate_type, tmpdir); + add_upstream_native_libraries(cmd, sess, codegen_results, crate_type); + + // Tell the linker what we're doing. + if crate_type != config::CrateType::Executable { + cmd.build_dylib(out_filename); + } + if crate_type == config::CrateType::Executable && sess.crt_static() { + cmd.build_static_executable(); + } + + if sess.opts.debugging_opts.pgo_gen.is_some() { + cmd.pgo_gen(); + } + + // FIXME (#2397): At some point we want to rpath our guesses as to + // where extern libraries might live, based on the + // addl_lib_search_paths + if sess.opts.cg.rpath { + let sysroot = sess.sysroot(); + let target_triple = sess.opts.target_triple.triple(); + let mut get_install_prefix_lib_path = || { + let install_prefix = option_env!("CFG_PREFIX").expect("CFG_PREFIX"); + let tlib = filesearch::relative_target_lib_path(sysroot, target_triple); + let mut path = PathBuf::from(install_prefix); + path.push(&tlib); + + path + }; + let mut rpath_config = RPathConfig { + used_crates: &codegen_results.crate_info.used_crates_dynamic, + out_filename: out_filename.to_path_buf(), + has_rpath: sess.target.target.options.has_rpath, + is_like_osx: sess.target.target.options.is_like_osx, + linker_is_gnu: sess.target.target.options.linker_is_gnu, + get_install_prefix_lib_path: &mut 
get_install_prefix_lib_path, + }; + cmd.args(&rpath::get_rpath_flags(&mut rpath_config)); + } + + // Finally add all the linker arguments provided on the command line along + // with any #[link_args] attributes found inside the crate + if let Some(ref args) = sess.opts.cg.link_args { + cmd.args(args); + } + cmd.args(&sess.opts.cg.link_arg); + cmd.args(&used_link_args); +} + +// # Native library linking +// +// User-supplied library search paths (-L on the command line). These are +// the same paths used to find Rust crates, so some of them may have been +// added already by the previous crate linking code. This only allows them +// to be found at compile time so it is still entirely up to outside +// forces to make sure that library can be found at runtime. +// +// Also note that the native libraries linked here are only the ones located +// in the current crate. Upstream crates with native library dependencies +// may have their native library pulled in above. +fn add_local_native_libraries(cmd: &mut dyn Linker, + sess: &Session, + codegen_results: &CodegenResults) { + sess.target_filesearch(PathKind::All).for_each_lib_search_path(|path, k| { + match k { + PathKind::Framework => { cmd.framework_path(path); } + _ => { cmd.include_path(&fix_windows_verbatim_for_gcc(path)); } + } + }); + + let relevant_libs = codegen_results.crate_info.used_libraries.iter().filter(|l| { + relevant_lib(sess, l) + }); + + let search_path = archive_search_paths(sess); + for lib in relevant_libs { + let name = match lib.name { + Some(ref l) => l, + None => continue, + }; + match lib.kind { + NativeLibraryKind::NativeUnknown => cmd.link_dylib(&name.as_str()), + NativeLibraryKind::NativeFramework => cmd.link_framework(&name.as_str()), + NativeLibraryKind::NativeStaticNobundle => cmd.link_staticlib(&name.as_str()), + NativeLibraryKind::NativeStatic => cmd.link_whole_staticlib(&name.as_str(), + &search_path) + } + } +} + +// # Rust Crate linking +// +// Rust crates are not considered at all 
when creating an rlib output. All +// dependencies will be linked when producing the final output (instead of +// the intermediate rlib version) +fn add_upstream_rust_crates(cmd: &mut dyn Linker, + sess: &Session, + codegen_results: &CodegenResults, + crate_type: config::CrateType, + tmpdir: &Path) { + // All of the heavy lifting has previously been accomplished by the + // dependency_format module of the compiler. This is just crawling the + // output of that module, adding crates as necessary. + // + // Linking to a rlib involves just passing it to the linker (the linker + // will slurp up the object files inside), and linking to a dynamic library + // involves just passing the right -l flag. + + let formats = sess.dependency_formats.borrow(); + let data = formats.get(&crate_type).unwrap(); + + // Invoke get_used_crates to ensure that we get a topological sorting of + // crates. + let deps = &codegen_results.crate_info.used_crates_dynamic; + + // There's a few internal crates in the standard library (aka libcore and + // libstd) which actually have a circular dependence upon one another. This + // currently arises through "weak lang items" where libcore requires things + // like `rust_begin_unwind` but libstd ends up defining it. To get this + // circular dependence to work correctly in all situations we'll need to be + // sure to correctly apply the `--start-group` and `--end-group` options to + // GNU linkers, otherwise if we don't use any other symbol from the standard + // library it'll get discarded and the whole application won't link. + // + // In this loop we're calculating the `group_end`, after which crate to + // pass `--end-group` and `group_start`, before which crate to pass + // `--start-group`. We currently do this by passing `--end-group` after + // the first crate (when iterating backwards) that requires a lang item + // defined somewhere else. Once that's set then when we've defined all the + // necessary lang items we'll pass `--start-group`. 
+ // + // Note that this isn't amazing logic for now but it should do the trick + // for the current implementation of the standard library. + let mut group_end = None; + let mut group_start = None; + let mut end_with = FxHashSet(); + let info = &codegen_results.crate_info; + for &(cnum, _) in deps.iter().rev() { + if let Some(missing) = info.missing_lang_items.get(&cnum) { + end_with.extend(missing.iter().cloned()); + if end_with.len() > 0 && group_end.is_none() { + group_end = Some(cnum); + } + } + end_with.retain(|item| info.lang_item_to_crate.get(item) != Some(&cnum)); + if end_with.len() == 0 && group_end.is_some() { + group_start = Some(cnum); + break + } + } + + // If we didn't end up filling in all lang items from upstream crates then + // we'll be filling it in with our crate. This probably means we're the + // standard library itself, so skip this for now. + if group_end.is_some() && group_start.is_none() { + group_end = None; + } + + let mut compiler_builtins = None; + + for &(cnum, _) in deps.iter() { + if group_start == Some(cnum) { + cmd.group_start(); + } + + // We may not pass all crates through to the linker. Some crates may + // appear statically in an existing dylib, meaning we'll pick up all the + // symbols from the dylib. + let src = &codegen_results.crate_info.used_crate_source[&cnum]; + match data[cnum.as_usize() - 1] { + _ if codegen_results.crate_info.profiler_runtime == Some(cnum) => { + add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum); + } + _ if codegen_results.crate_info.sanitizer_runtime == Some(cnum) => { + link_sanitizer_runtime(cmd, sess, codegen_results, tmpdir, cnum); + } + // compiler-builtins are always placed last to ensure that they're + // linked correctly. 
+ _ if codegen_results.crate_info.compiler_builtins == Some(cnum) => { + assert!(compiler_builtins.is_none()); + compiler_builtins = Some(cnum); + } + Linkage::NotLinked | + Linkage::IncludedFromDylib => {} + Linkage::Static => { + add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum); + } + Linkage::Dynamic => { + add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0) + } + } + + if group_end == Some(cnum) { + cmd.group_end(); + } + } + + // compiler-builtins are always placed last to ensure that they're + // linked correctly. + // We must always link the `compiler_builtins` crate statically. Even if it + // was already "included" in a dylib (e.g. `libstd` when `-C prefer-dynamic` + // is used) + if let Some(cnum) = compiler_builtins { + add_static_crate(cmd, sess, codegen_results, tmpdir, crate_type, cnum); + } + + // Converts a library file-stem into a cc -l argument + fn unlib<'a>(config: &config::Config, stem: &'a str) -> &'a str { + if stem.starts_with("lib") && !config.target.options.is_like_windows { + &stem[3..] + } else { + stem + } + } + + // We must link the sanitizer runtime using -Wl,--whole-archive but since + // it's packed in a .rlib, it contains stuff that are not objects that will + // make the linker error. So we must remove those bits from the .rlib before + // linking it. + fn link_sanitizer_runtime(cmd: &mut dyn Linker, + sess: &Session, + codegen_results: &CodegenResults, + tmpdir: &Path, + cnum: CrateNum) { + let src = &codegen_results.crate_info.used_crate_source[&cnum]; + let cratepath = &src.rlib.as_ref().unwrap().0; + + if sess.target.target.options.is_like_osx { + // On Apple platforms, the sanitizer is always built as a dylib, and + // LLVM will link to `@rpath/*.dylib`, so we need to specify an + // rpath to the library as well (the rpath should be absolute, see + // PR #41352 for details). 
+ // + // FIXME: Remove this logic into librustc_*san once Cargo supports it + let rpath = cratepath.parent().unwrap(); + let rpath = rpath.to_str().expect("non-utf8 component in path"); + cmd.args(&["-Wl,-rpath".into(), "-Xlinker".into(), rpath.into()]); + } + + let dst = tmpdir.join(cratepath.file_name().unwrap()); + let cfg = archive_config(sess, &dst, Some(cratepath)); + let mut archive = ArchiveBuilder::new(cfg); + archive.update_symbols(); + + for f in archive.src_files() { + if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME { + archive.remove_file(&f); + continue + } + } + + archive.build(); + + cmd.link_whole_rlib(&dst); + } + + // Adds the static "rlib" versions of all crates to the command line. + // There's a bit of magic which happens here specifically related to LTO and + // dynamic libraries. Specifically: + // + // * For LTO, we remove upstream object files. + // * For dylibs we remove metadata and bytecode from upstream rlibs + // + // When performing LTO, almost(*) all of the bytecode from the upstream + // libraries has already been included in our object file output. As a + // result we need to remove the object files in the upstream libraries so + // the linker doesn't try to include them twice (or whine about duplicate + // symbols). We must continue to include the rest of the rlib, however, as + // it may contain static native libraries which must be linked in. + // + // (*) Crates marked with `#![no_builtins]` don't participate in LTO and + // their bytecode wasn't included. The object files in those libraries must + // still be passed to the linker. + // + // When making a dynamic library, linkers by default don't include any + // object files in an archive if they're not necessary to resolve the link. + // We basically want to convert the archive (rlib) to a dylib, though, so we + // *do* want everything included in the output, regardless of whether the + // linker thinks it's needed or not. 
As a result we must use the + // --whole-archive option (or the platform equivalent). When using this + // option the linker will fail if there are non-objects in the archive (such + // as our own metadata and/or bytecode). All in all, for rlibs to be + // entirely included in dylibs, we need to remove all non-object files. + // + // Note, however, that if we're not doing LTO or we're not producing a dylib + // (aka we're making an executable), we can just pass the rlib blindly to + // the linker (fast) because it's fine if it's not actually included as + // we're at the end of the dependency chain. + fn add_static_crate(cmd: &mut dyn Linker, + sess: &Session, + codegen_results: &CodegenResults, + tmpdir: &Path, + crate_type: config::CrateType, + cnum: CrateNum) { + let src = &codegen_results.crate_info.used_crate_source[&cnum]; + let cratepath = &src.rlib.as_ref().unwrap().0; + + // See the comment above in `link_staticlib` and `link_rlib` for why if + // there's a static library that's not relevant we skip all object + // files. 
+ let native_libs = &codegen_results.crate_info.native_libraries[&cnum]; + let skip_native = native_libs.iter().any(|lib| { + lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib) + }); + + if (!are_upstream_rust_objects_already_included(sess) || + ignored_for_lto(sess, &codegen_results.crate_info, cnum)) && + crate_type != config::CrateType::Dylib && + !skip_native { + cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath)); + return + } + + let dst = tmpdir.join(cratepath.file_name().unwrap()); + let name = cratepath.file_name().unwrap().to_str().unwrap(); + let name = &name[3..name.len() - 5]; // chop off lib/.rlib + + time(sess, &format!("altering {}.rlib", name), || { + let cfg = archive_config(sess, &dst, Some(cratepath)); + let mut archive = ArchiveBuilder::new(cfg); + archive.update_symbols(); + + let mut any_objects = false; + for f in archive.src_files() { + if f.ends_with(RLIB_BYTECODE_EXTENSION) || f == METADATA_FILENAME { + archive.remove_file(&f); + continue + } + + let canonical = f.replace("-", "_"); + let canonical_name = name.replace("-", "_"); + + // Look for `.rcgu.o` at the end of the filename to conclude + // that this is a Rust-related object file. + fn looks_like_rust(s: &str) -> bool { + let path = Path::new(s); + let ext = path.extension().and_then(|s| s.to_str()); + if ext != Some(OutputType::Object.extension()) { + return false + } + let ext2 = path.file_stem() + .and_then(|s| Path::new(s).extension()) + .and_then(|s| s.to_str()); + ext2 == Some(RUST_CGU_EXT) + } + + let is_rust_object = + canonical.starts_with(&canonical_name) && + looks_like_rust(&f); + + // If we've been requested to skip all native object files + // (those not generated by the rust compiler) then we can skip + // this file. See above for why we may want to do this. 
+ let skip_because_cfg_say_so = skip_native && !is_rust_object; + + // If we're performing LTO and this is a rust-generated object + // file, then we don't need the object file as it's part of the + // LTO module. Note that `#![no_builtins]` is excluded from LTO, + // though, so we let that object file slide. + let skip_because_lto = are_upstream_rust_objects_already_included(sess) && + is_rust_object && + (sess.target.target.options.no_builtins || + !codegen_results.crate_info.is_no_builtins.contains(&cnum)); + + if skip_because_cfg_say_so || skip_because_lto { + archive.remove_file(&f); + } else { + any_objects = true; + } + } + + if !any_objects { + return + } + archive.build(); + + // If we're creating a dylib, then we need to include the + // whole of each object in our archive into that artifact. This is + // because a `dylib` can be reused as an intermediate artifact. + // + // Note, though, that we don't want to include the whole of a + // compiler-builtins crate (e.g. compiler-rt) because it'll get + // repeatedly linked anyway. + if crate_type == config::CrateType::Dylib && + codegen_results.crate_info.compiler_builtins != Some(cnum) { + cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst)); + } else { + cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst)); + } + }); + } + + // Same thing as above, but for dynamic crates instead of static crates. + fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) { + // If we're performing LTO, then it should have been previously required + // that all upstream rust dependencies were available in an rlib format. 
+ assert!(!are_upstream_rust_objects_already_included(sess)); + + // Just need to tell the linker about where the library lives and + // what its name is + let parent = cratepath.parent(); + if let Some(dir) = parent { + cmd.include_path(&fix_windows_verbatim_for_gcc(dir)); + } + let filestem = cratepath.file_stem().unwrap().to_str().unwrap(); + cmd.link_rust_dylib(&unlib(&sess.target, filestem), + parent.unwrap_or(Path::new(""))); + } +} + +// Link in all of our upstream crates' native dependencies. Remember that +// all of these upstream native dependencies are all non-static +// dependencies. We've got two cases then: +// +// 1. The upstream crate is an rlib. In this case we *must* link in the +// native dependency because the rlib is just an archive. +// +// 2. The upstream crate is a dylib. In order to use the dylib, we have to +// have the dependency present on the system somewhere. Thus, we don't +// gain a whole lot from not linking in the dynamic dependency to this +// crate as well. +// +// The use case for this is a little subtle. In theory the native +// dependencies of a crate are purely an implementation detail of the crate +// itself, but the problem arises with generic and inlined functions. If a +// generic function calls a native function, then the generic function must +// be instantiated in the target crate, meaning that the native symbol must +// also be resolved in the target crate. +fn add_upstream_native_libraries(cmd: &mut dyn Linker, + sess: &Session, + codegen_results: &CodegenResults, + crate_type: config::CrateType) { + // Be sure to use a topological sorting of crates because there may be + // interdependencies between native libraries. When passing -nodefaultlibs, + // for example, almost all native libraries depend on libc, so we have to + // make sure that's all the way at the right (liblibc is near the base of + // the dependency chain). 
+ // + // This passes RequireStatic, but the actual requirement doesn't matter, + // we're just getting an ordering of crate numbers, we're not worried about + // the paths. + let formats = sess.dependency_formats.borrow(); + let data = formats.get(&crate_type).unwrap(); + + let crates = &codegen_results.crate_info.used_crates_static; + for &(cnum, _) in crates { + for lib in codegen_results.crate_info.native_libraries[&cnum].iter() { + let name = match lib.name { + Some(ref l) => l, + None => continue, + }; + if !relevant_lib(sess, &lib) { + continue + } + match lib.kind { + NativeLibraryKind::NativeUnknown => cmd.link_dylib(&name.as_str()), + NativeLibraryKind::NativeFramework => cmd.link_framework(&name.as_str()), + NativeLibraryKind::NativeStaticNobundle => { + // Link "static-nobundle" native libs only if the crate they originate from + // is being linked statically to the current crate. If it's linked dynamically + // or is an rlib already included via some other dylib crate, the symbols from + // native libs will have already been included in that dylib. + if data[cnum.as_usize() - 1] == Linkage::Static { + cmd.link_staticlib(&name.as_str()) + } + }, + // ignore statically included native libraries here as we've + // already included them when we included the rust library + // previously + NativeLibraryKind::NativeStatic => {} + } + } + } +} + +fn relevant_lib(sess: &Session, lib: &NativeLibrary) -> bool { + match lib.cfg { + Some(ref cfg) => attr::cfg_matches(cfg, &sess.parse_sess, None), + None => true, + } +} + +fn are_upstream_rust_objects_already_included(sess: &Session) -> bool { + match sess.lto() { + Lto::Yes | + Lto::Fat => true, + Lto::Thin => { + // If we defer LTO to the linker, we haven't run LTO ourselves, so + // any upstream object files have not been copied yet. 
+ !sess.opts.debugging_opts.cross_lang_lto.enabled() + } + Lto::No | + Lto::ThinLocal => false, + } +} diff --git a/src/librustc_codegen_llvm/back/linker.rs b/src/librustc_codegen_llvm/back/linker.rs new file mode 100644 index 000000000000..6311ab7c74c1 --- /dev/null +++ b/src/librustc_codegen_llvm/back/linker.rs @@ -0,0 +1,1085 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::collections::HashMap; +use std::ffi::{OsStr, OsString}; +use std::fs::{self, File}; +use std::io::prelude::*; +use std::io::{self, BufWriter}; +use std::path::{Path, PathBuf}; + +use back::archive; +use back::command::Command; +use back::symbol_export; +use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; +use rustc::middle::dependency_format::Linkage; +use rustc::session::Session; +use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, + CrossLangLto}; +use rustc::ty::TyCtxt; +use rustc_target::spec::{LinkerFlavor, LldFlavor}; +use serialize::{json, Encoder}; + +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it. 
+pub struct LinkerInfo { + exports: HashMap>, +} + +impl LinkerInfo { + pub fn new(tcx: TyCtxt) -> LinkerInfo { + LinkerInfo { + exports: tcx.sess.crate_types.borrow().iter().map(|&c| { + (c, exported_symbols(tcx, c)) + }).collect(), + } + } + + pub fn to_linker<'a>(&'a self, + cmd: Command, + sess: &'a Session) -> Box { + match sess.linker_flavor() { + LinkerFlavor::Lld(LldFlavor::Link) | + LinkerFlavor::Msvc => { + Box::new(MsvcLinker { + cmd, + sess, + info: self + }) as Box + } + LinkerFlavor::Em => { + Box::new(EmLinker { + cmd, + sess, + info: self + }) as Box + } + LinkerFlavor::Gcc => { + Box::new(GccLinker { + cmd, + sess, + info: self, + hinted_static: false, + is_ld: false, + }) as Box + } + + LinkerFlavor::Lld(LldFlavor::Ld) | + LinkerFlavor::Lld(LldFlavor::Ld64) | + LinkerFlavor::Ld => { + Box::new(GccLinker { + cmd, + sess, + info: self, + hinted_static: false, + is_ld: true, + }) as Box + } + + LinkerFlavor::Lld(LldFlavor::Wasm) => { + Box::new(WasmLd { + cmd, + sess, + }) as Box + } + } + } +} + +/// Linker abstraction used by back::link to build up the command to invoke a +/// linker. +/// +/// This trait is the total list of requirements needed by `back::link` and +/// represents the meaning of each option being passed down. This trait is then +/// used to dispatch on whether a GNU-like linker (generally `ld.exe`) or an +/// MSVC linker (e.g. `link.exe`) is being used. 
+pub trait Linker { + fn link_dylib(&mut self, lib: &str); + fn link_rust_dylib(&mut self, lib: &str, path: &Path); + fn link_framework(&mut self, framework: &str); + fn link_staticlib(&mut self, lib: &str); + fn link_rlib(&mut self, lib: &Path); + fn link_whole_rlib(&mut self, lib: &Path); + fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]); + fn include_path(&mut self, path: &Path); + fn framework_path(&mut self, path: &Path); + fn output_filename(&mut self, path: &Path); + fn add_object(&mut self, path: &Path); + fn gc_sections(&mut self, keep_metadata: bool); + fn position_independent_executable(&mut self); + fn no_position_independent_executable(&mut self); + fn full_relro(&mut self); + fn partial_relro(&mut self); + fn no_relro(&mut self); + fn optimize(&mut self); + fn pgo_gen(&mut self); + fn debuginfo(&mut self); + fn no_default_libraries(&mut self); + fn build_dylib(&mut self, out_filename: &Path); + fn build_static_executable(&mut self); + fn args(&mut self, args: &[String]); + fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType); + fn subsystem(&mut self, subsystem: &str); + fn group_start(&mut self); + fn group_end(&mut self); + fn cross_lang_lto(&mut self); + // Should have been finalize(self), but we don't support self-by-value on trait objects (yet?). + fn finalize(&mut self) -> Command; +} + +pub struct GccLinker<'a> { + cmd: Command, + sess: &'a Session, + info: &'a LinkerInfo, + hinted_static: bool, // Keeps track of the current hinting mode. 
+ // Link as ld + is_ld: bool, +} + +impl<'a> GccLinker<'a> { + /// Argument that must be passed *directly* to the linker + /// + /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used + fn linker_arg(&mut self, arg: S) -> &mut Self + where S: AsRef + { + if !self.is_ld { + let mut os = OsString::from("-Wl,"); + os.push(arg.as_ref()); + self.cmd.arg(os); + } else { + self.cmd.arg(arg); + } + self + } + + fn takes_hints(&self) -> bool { + !self.sess.target.target.options.is_like_osx + } + + // Some platforms take hints about whether a library is static or dynamic. + // For those that support this, we ensure we pass the option if the library + // was flagged "static" (most defaults are dynamic) to ensure that if + // libfoo.a and libfoo.so both exist that the right one is chosen. + fn hint_static(&mut self) { + if !self.takes_hints() { return } + if !self.hinted_static { + self.linker_arg("-Bstatic"); + self.hinted_static = true; + } + } + + fn hint_dynamic(&mut self) { + if !self.takes_hints() { return } + if self.hinted_static { + self.linker_arg("-Bdynamic"); + self.hinted_static = false; + } + } + + fn push_cross_lang_lto_args(&mut self, plugin_path: Option<&OsStr>) { + if let Some(plugin_path) = plugin_path { + let mut arg = OsString::from("-plugin="); + arg.push(plugin_path); + self.linker_arg(&arg); + } + + let opt_level = match self.sess.opts.optimize { + config::OptLevel::No => "O0", + config::OptLevel::Less => "O1", + config::OptLevel::Default => "O2", + config::OptLevel::Aggressive => "O3", + config::OptLevel::Size => "Os", + config::OptLevel::SizeMin => "Oz", + }; + + self.linker_arg(&format!("-plugin-opt={}", opt_level)); + self.linker_arg(&format!("-plugin-opt=mcpu={}", self.sess.target_cpu())); + + match self.sess.opts.cg.lto { + config::Lto::Thin | + config::Lto::ThinLocal => { + self.linker_arg("-plugin-opt=thin"); + } + config::Lto::Fat | + config::Lto::Yes | + config::Lto::No => { + // default to regular LTO + } + } + 
} +} + +impl<'a> Linker for GccLinker<'a> { + fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}",lib)); } + fn link_staticlib(&mut self, lib: &str) { + self.hint_static(); self.cmd.arg(format!("-l{}",lib)); + } + fn link_rlib(&mut self, lib: &Path) { self.hint_static(); self.cmd.arg(lib); } + fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); } + fn framework_path(&mut self, path: &Path) { self.cmd.arg("-F").arg(path); } + fn output_filename(&mut self, path: &Path) { self.cmd.arg("-o").arg(path); } + fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } + fn position_independent_executable(&mut self) { self.cmd.arg("-pie"); } + fn no_position_independent_executable(&mut self) { self.cmd.arg("-no-pie"); } + fn full_relro(&mut self) { self.linker_arg("-zrelro"); self.linker_arg("-znow"); } + fn partial_relro(&mut self) { self.linker_arg("-zrelro"); } + fn no_relro(&mut self) { self.linker_arg("-znorelro"); } + fn build_static_executable(&mut self) { self.cmd.arg("-static"); } + fn args(&mut self, args: &[String]) { self.cmd.args(args); } + + fn link_rust_dylib(&mut self, lib: &str, _path: &Path) { + self.hint_dynamic(); + self.cmd.arg(format!("-l{}",lib)); + } + + fn link_framework(&mut self, framework: &str) { + self.hint_dynamic(); + self.cmd.arg("-framework").arg(framework); + } + + // Here we explicitly ask that the entire archive is included into the + // result artifact. For more details see #15460, but the gist is that + // the linker will strip away any unused objects in the archive if we + // don't otherwise explicitly reference them. This can occur for + // libraries which are just providing bindings, libraries with generic + // functions, etc. 
+ fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]) { + self.hint_static(); + let target = &self.sess.target.target; + if !target.options.is_like_osx { + self.linker_arg("--whole-archive").cmd.arg(format!("-l{}",lib)); + self.linker_arg("--no-whole-archive"); + } else { + // -force_load is the macOS equivalent of --whole-archive, but it + // involves passing the full path to the library to link. + self.linker_arg("-force_load"); + let lib = archive::find_library(lib, search_path, &self.sess); + self.linker_arg(&lib); + } + } + + fn link_whole_rlib(&mut self, lib: &Path) { + self.hint_static(); + if self.sess.target.target.options.is_like_osx { + self.linker_arg("-force_load"); + self.linker_arg(&lib); + } else { + self.linker_arg("--whole-archive").cmd.arg(lib); + self.linker_arg("--no-whole-archive"); + } + } + + fn gc_sections(&mut self, keep_metadata: bool) { + // The dead_strip option to the linker specifies that functions and data + // unreachable by the entry point will be removed. This is quite useful + // with Rust's compilation model of compiling libraries at a time into + // one object file. For example, this brings hello world from 1.7MB to + // 458K. + // + // Note that this is done for both executables and dynamic libraries. We + // won't get much benefit from dylibs because LLVM will have already + // stripped away as much as it could. This has not been seen to impact + // link times negatively. + // + // -dead_strip can't be part of the pre_link_args because it's also used + // for partial linking when using multiple codegen units (-r). So we + // insert it here. + if self.sess.target.target.options.is_like_osx { + self.linker_arg("-dead_strip"); + } else if self.sess.target.target.options.is_like_solaris { + self.linker_arg("-zignore"); + + // If we're building a dylib, we don't use --gc-sections because LLVM + // has already done the best it can do, and we also don't want to + // eliminate the metadata. 
If we're building an executable, however, + // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67% + // reduction. + } else if !keep_metadata { + self.linker_arg("--gc-sections"); + } + } + + fn optimize(&mut self) { + if !self.sess.target.target.options.linker_is_gnu { return } + + // GNU-style linkers support optimization with -O. GNU ld doesn't + // need a numeric argument, but other linkers do. + if self.sess.opts.optimize == config::OptLevel::Default || + self.sess.opts.optimize == config::OptLevel::Aggressive { + self.linker_arg("-O1"); + } + } + + fn pgo_gen(&mut self) { + if !self.sess.target.target.options.linker_is_gnu { return } + + // If we're doing PGO generation stuff and on a GNU-like linker, use the + // "-u" flag to properly pull in the profiler runtime bits. + // + // This is because LLVM otherwise won't add the needed initialization + // for us on Linux (though the extra flag should be harmless if it + // does). + // + // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030. + // + // Though it may be worth to try to revert those changes upstream, since + // the overhead of the initialization should be minor. + self.cmd.arg("-u"); + self.cmd.arg("__llvm_profile_runtime"); + } + + fn debuginfo(&mut self) { + match self.sess.opts.debuginfo { + DebugInfo::None => { + // If we are building without debuginfo enabled and we were called with + // `-Zstrip-debuginfo-if-disabled=yes`, tell the linker to strip any debuginfo + // found when linking to get rid of symbols from libstd. 
+ match self.sess.opts.debugging_opts.strip_debuginfo_if_disabled { + Some(true) => { self.linker_arg("-S"); }, + _ => {}, + } + }, + _ => {}, + }; + } + + fn no_default_libraries(&mut self) { + if !self.is_ld { + self.cmd.arg("-nodefaultlibs"); + } + } + + fn build_dylib(&mut self, out_filename: &Path) { + // On mac we need to tell the linker to let this library be rpathed + if self.sess.target.target.options.is_like_osx { + self.cmd.arg("-dynamiclib"); + self.linker_arg("-dylib"); + + // Note that the `osx_rpath_install_name` option here is a hack + // purely to support rustbuild right now, we should get a more + // principled solution at some point to force the compiler to pass + // the right `-Wl,-install_name` with an `@rpath` in it. + if self.sess.opts.cg.rpath || + self.sess.opts.debugging_opts.osx_rpath_install_name { + self.linker_arg("-install_name"); + let mut v = OsString::from("@rpath/"); + v.push(out_filename.file_name().unwrap()); + self.linker_arg(&v); + } + } else { + self.cmd.arg("-shared"); + } + } + + fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) { + // If we're compiling a dylib, then we let symbol visibility in object + // files to take care of whether they're exported or not. + // + // If we're compiling a cdylib, however, we manually create a list of + // exported symbols to ensure we don't expose any more. The object files + // have far more public symbols than we actually want to export, so we + // hide them all here. 
+ if crate_type == CrateType::Dylib || + crate_type == CrateType::ProcMacro { + return + } + + let mut arg = OsString::new(); + let path = tmpdir.join("list"); + + debug!("EXPORTED SYMBOLS:"); + + if self.sess.target.target.options.is_like_osx { + // Write a plain, newline-separated list of symbols + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + for sym in self.info.exports[&crate_type].iter() { + debug!(" _{}", sym); + writeln!(f, "_{}", sym)?; + } + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); + } + } else { + // Write an LD version script + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + writeln!(f, "{{\n global:")?; + for sym in self.info.exports[&crate_type].iter() { + debug!(" {};", sym); + writeln!(f, " {};", sym)?; + } + writeln!(f, "\n local:\n *;\n}};")?; + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write version script: {}", e)); + } + } + + if self.sess.target.target.options.is_like_osx { + if !self.is_ld { + arg.push("-Wl,") + } + arg.push("-exported_symbols_list,"); + } else if self.sess.target.target.options.is_like_solaris { + if !self.is_ld { + arg.push("-Wl,") + } + arg.push("-M,"); + } else { + if !self.is_ld { + arg.push("-Wl,") + } + arg.push("--version-script="); + } + + arg.push(&path); + self.cmd.arg(arg); + } + + fn subsystem(&mut self, subsystem: &str) { + self.linker_arg("--subsystem"); + self.linker_arg(&subsystem); + } + + fn finalize(&mut self) -> Command { + self.hint_dynamic(); // Reset to default before returning the composed command line. 
+ let mut cmd = Command::new(""); + ::std::mem::swap(&mut cmd, &mut self.cmd); + cmd + } + + fn group_start(&mut self) { + if !self.sess.target.target.options.is_like_osx { + self.linker_arg("--start-group"); + } + } + + fn group_end(&mut self) { + if !self.sess.target.target.options.is_like_osx { + self.linker_arg("--end-group"); + } + } + + fn cross_lang_lto(&mut self) { + match self.sess.opts.debugging_opts.cross_lang_lto { + CrossLangLto::Disabled => { + // Nothing to do + } + CrossLangLto::LinkerPluginAuto => { + self.push_cross_lang_lto_args(None); + } + CrossLangLto::LinkerPlugin(ref path) => { + self.push_cross_lang_lto_args(Some(path.as_os_str())); + } + } + } +} + +pub struct MsvcLinker<'a> { + cmd: Command, + sess: &'a Session, + info: &'a LinkerInfo +} + +impl<'a> Linker for MsvcLinker<'a> { + fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); } + fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } + fn args(&mut self, args: &[String]) { self.cmd.args(args); } + + fn build_dylib(&mut self, out_filename: &Path) { + self.cmd.arg("/DLL"); + let mut arg: OsString = "/IMPLIB:".into(); + arg.push(out_filename.with_extension("dll.lib")); + self.cmd.arg(arg); + } + + fn build_static_executable(&mut self) { + // noop + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + // MSVC's ICF (Identical COMDAT Folding) link optimization is + // slow for Rust and thus we disable it by default when not in + // optimization build. + if self.sess.opts.optimize != config::OptLevel::No { + self.cmd.arg("/OPT:REF,ICF"); + } else { + // It is necessary to specify NOICF here, because /OPT:REF + // implies ICF by default. 
+ self.cmd.arg("/OPT:REF,NOICF"); + } + } + + fn link_dylib(&mut self, lib: &str) { + self.cmd.arg(&format!("{}.lib", lib)); + } + + fn link_rust_dylib(&mut self, lib: &str, path: &Path) { + // When producing a dll, the MSVC linker may not actually emit a + // `foo.lib` file if the dll doesn't actually export any symbols, so we + // check to see if the file is there and just omit linking to it if it's + // not present. + let name = format!("{}.dll.lib", lib); + if fs::metadata(&path.join(&name)).is_ok() { + self.cmd.arg(name); + } + } + + fn link_staticlib(&mut self, lib: &str) { + self.cmd.arg(&format!("{}.lib", lib)); + } + + fn position_independent_executable(&mut self) { + // noop + } + + fn no_position_independent_executable(&mut self) { + // noop + } + + fn full_relro(&mut self) { + // noop + } + + fn partial_relro(&mut self) { + // noop + } + + fn no_relro(&mut self) { + // noop + } + + fn no_default_libraries(&mut self) { + // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC + // as there's been trouble in the past of linking the C++ standard + // library required by LLVM. This likely needs to happen one day, but + // in general Windows is also a more controlled environment than + // Unix, so it's not necessarily as critical that this be implemented. + // + // Note that there are also some licensing worries about statically + // linking some libraries which require a specific agreement, so it may + // not ever be possible for us to pass this flag. 
+ } + + fn include_path(&mut self, path: &Path) { + let mut arg = OsString::from("/LIBPATH:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn output_filename(&mut self, path: &Path) { + let mut arg = OsString::from("/OUT:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn framework_path(&mut self, _path: &Path) { + bug!("frameworks are not supported on windows") + } + fn link_framework(&mut self, _framework: &str) { + bug!("frameworks are not supported on windows") + } + + fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { + // not supported? + self.link_staticlib(lib); + } + fn link_whole_rlib(&mut self, path: &Path) { + // not supported? + self.link_rlib(path); + } + fn optimize(&mut self) { + // Needs more investigation of `/OPT` arguments + } + + fn pgo_gen(&mut self) { + // Nothing needed here. + } + + fn debuginfo(&mut self) { + // This will cause the Microsoft linker to generate a PDB file + // from the CodeView line tables in the object files. + self.cmd.arg("/DEBUG"); + + // This will cause the Microsoft linker to embed .natvis info into the the PDB file + let sysroot = self.sess.sysroot(); + let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); + if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { + // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes + // on, the /NATVIS:... flags. 
LLVM 6 (or earlier) should at worst ignore + // them, eventually mooting this workaround, per this landed patch: + // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 + if let Some(ref linker_path) = self.sess.opts.cg.linker { + if let Some(linker_name) = Path::new(&linker_path).file_stem() { + if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { + self.sess.warn("not embedding natvis: lld-link may not support the flag"); + return; + } + } + } + for entry in natvis_dir { + match entry { + Ok(entry) => { + let path = entry.path(); + if path.extension() == Some("natvis".as_ref()) { + let mut arg = OsString::from("/NATVIS:"); + arg.push(path); + self.cmd.arg(arg); + } + }, + Err(err) => { + self.sess.warn(&format!("error enumerating natvis directory: {}", err)); + }, + } + } + } + } + + // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to + // export symbols from a dynamic library. When building a dynamic library, + // however, we're going to want some symbols exported, so this function + // generates a DEF file which lists all the symbols. + // + // The linker will read this `*.def` file and export all the symbols from + // the dynamic library. Note that this is not as simple as just exporting + // all the symbols in the current crate (as specified by `codegen.reachable`) + // but rather we also need to possibly export the symbols of upstream + // crates. Upstream rlibs may be linked statically to this dynamic library, + // in which case they may continue to transitively be used and hence need + // their symbols exported. + fn export_symbols(&mut self, + tmpdir: &Path, + crate_type: CrateType) { + let path = tmpdir.join("lib.def"); + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + + // Start off with the standard module name header and then go + // straight to exports. 
+ writeln!(f, "LIBRARY")?; + writeln!(f, "EXPORTS")?; + for symbol in self.info.exports[&crate_type].iter() { + debug!(" _{}", symbol); + writeln!(f, " {}", symbol)?; + } + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); + } + let mut arg = OsString::from("/DEF:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn subsystem(&mut self, subsystem: &str) { + // Note that previous passes of the compiler validated this subsystem, + // so we just blindly pass it to the linker. + self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); + + // Windows has two subsystems we're interested in right now, the console + // and windows subsystems. These both implicitly have different entry + // points (starting symbols). The console entry point starts with + // `mainCRTStartup` and the windows entry point starts with + // `WinMainCRTStartup`. These entry points, defined in system libraries, + // will then later probe for either `main` or `WinMain`, respectively to + // start the application. + // + // In Rust we just always generate a `main` function so we want control + // to always start there, so we force the entry point on the windows + // subsystem to be `mainCRTStartup` to get everything booted up + // correctly. 
+ // + // For more information see RFC #1665 + if subsystem == "windows" { + self.cmd.arg("/ENTRY:mainCRTStartup"); + } + } + + fn finalize(&mut self) -> Command { + let mut cmd = Command::new(""); + ::std::mem::swap(&mut cmd, &mut self.cmd); + cmd + } + + // MSVC doesn't need group indicators + fn group_start(&mut self) {} + fn group_end(&mut self) {} + + fn cross_lang_lto(&mut self) { + // Do nothing + } +} + +pub struct EmLinker<'a> { + cmd: Command, + sess: &'a Session, + info: &'a LinkerInfo +} + +impl<'a> Linker for EmLinker<'a> { + fn include_path(&mut self, path: &Path) { + self.cmd.arg("-L").arg(path); + } + + fn link_staticlib(&mut self, lib: &str) { + self.cmd.arg("-l").arg(lib); + } + + fn output_filename(&mut self, path: &Path) { + self.cmd.arg("-o").arg(path); + } + + fn add_object(&mut self, path: &Path) { + self.cmd.arg(path); + } + + fn link_dylib(&mut self, lib: &str) { + // Emscripten always links statically + self.link_staticlib(lib); + } + + fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { + // not supported? + self.link_staticlib(lib); + } + + fn link_whole_rlib(&mut self, lib: &Path) { + // not supported? 
+ self.link_rlib(lib); + } + + fn link_rust_dylib(&mut self, lib: &str, _path: &Path) { + self.link_dylib(lib); + } + + fn link_rlib(&mut self, lib: &Path) { + self.add_object(lib); + } + + fn position_independent_executable(&mut self) { + // noop + } + + fn no_position_independent_executable(&mut self) { + // noop + } + + fn full_relro(&mut self) { + // noop + } + + fn partial_relro(&mut self) { + // noop + } + + fn no_relro(&mut self) { + // noop + } + + fn args(&mut self, args: &[String]) { + self.cmd.args(args); + } + + fn framework_path(&mut self, _path: &Path) { + bug!("frameworks are not supported on Emscripten") + } + + fn link_framework(&mut self, _framework: &str) { + bug!("frameworks are not supported on Emscripten") + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + // noop + } + + fn optimize(&mut self) { + // Emscripten performs own optimizations + self.cmd.arg(match self.sess.opts.optimize { + OptLevel::No => "-O0", + OptLevel::Less => "-O1", + OptLevel::Default => "-O2", + OptLevel::Aggressive => "-O3", + OptLevel::Size => "-Os", + OptLevel::SizeMin => "-Oz" + }); + // Unusable until https://github.com/rust-lang/rust/issues/38454 is resolved + self.cmd.args(&["--memory-init-file", "0"]); + } + + fn pgo_gen(&mut self) { + // noop, but maybe we need something like the gnu linker? 
+ } + + fn debuginfo(&mut self) { + // Preserve names or generate source maps depending on debug info + self.cmd.arg(match self.sess.opts.debuginfo { + DebugInfo::None => "-g0", + DebugInfo::Limited => "-g3", + DebugInfo::Full => "-g4" + }); + } + + fn no_default_libraries(&mut self) { + self.cmd.args(&["-s", "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=[]"]); + } + + fn build_dylib(&mut self, _out_filename: &Path) { + bug!("building dynamic library is unsupported on Emscripten") + } + + fn build_static_executable(&mut self) { + // noop + } + + fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) { + let symbols = &self.info.exports[&crate_type]; + + debug!("EXPORTED SYMBOLS:"); + + self.cmd.arg("-s"); + + let mut arg = OsString::from("EXPORTED_FUNCTIONS="); + let mut encoded = String::new(); + + { + let mut encoder = json::Encoder::new(&mut encoded); + let res = encoder.emit_seq(symbols.len(), |encoder| { + for (i, sym) in symbols.iter().enumerate() { + encoder.emit_seq_elt(i, |encoder| { + encoder.emit_str(&("_".to_string() + sym)) + })?; + } + Ok(()) + }); + if let Err(e) = res { + self.sess.fatal(&format!("failed to encode exported symbols: {}", e)); + } + } + debug!("{}", encoded); + arg.push(encoded); + + self.cmd.arg(arg); + } + + fn subsystem(&mut self, _subsystem: &str) { + // noop + } + + fn finalize(&mut self) -> Command { + let mut cmd = Command::new(""); + ::std::mem::swap(&mut cmd, &mut self.cmd); + cmd + } + + // Appears not necessary on Emscripten + fn group_start(&mut self) {} + fn group_end(&mut self) {} + + fn cross_lang_lto(&mut self) { + // Do nothing + } +} + +fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { + let mut symbols = Vec::new(); + + let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); + for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + + let formats = 
tcx.sess.dependency_formats.borrow(); + let deps = formats[&crate_type].iter(); + + for (index, dep_format) in deps.enumerate() { + let cnum = CrateNum::new(index + 1); + // For each dependency that we are linking to statically ... + if *dep_format == Linkage::Static { + // ... we add its symbol list to our export list. + for &(symbol, level) in tcx.exported_symbols(cnum).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + } + } + + symbols +} + +pub struct WasmLd<'a> { + cmd: Command, + sess: &'a Session, +} + +impl<'a> Linker for WasmLd<'a> { + fn link_dylib(&mut self, lib: &str) { + self.cmd.arg("-l").arg(lib); + } + + fn link_staticlib(&mut self, lib: &str) { + self.cmd.arg("-l").arg(lib); + } + + fn link_rlib(&mut self, lib: &Path) { + self.cmd.arg(lib); + } + + fn include_path(&mut self, path: &Path) { + self.cmd.arg("-L").arg(path); + } + + fn framework_path(&mut self, _path: &Path) { + panic!("frameworks not supported") + } + + fn output_filename(&mut self, path: &Path) { + self.cmd.arg("-o").arg(path); + } + + fn add_object(&mut self, path: &Path) { + self.cmd.arg(path); + } + + fn position_independent_executable(&mut self) { + } + + fn full_relro(&mut self) { + } + + fn partial_relro(&mut self) { + } + + fn no_relro(&mut self) { + } + + fn build_static_executable(&mut self) { + } + + fn args(&mut self, args: &[String]) { + self.cmd.args(args); + } + + fn link_rust_dylib(&mut self, lib: &str, _path: &Path) { + self.cmd.arg("-l").arg(lib); + } + + fn link_framework(&mut self, _framework: &str) { + panic!("frameworks not supported") + } + + fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { + self.cmd.arg("-l").arg(lib); + } + + fn link_whole_rlib(&mut self, lib: &Path) { + self.cmd.arg(lib); + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + self.cmd.arg("--gc-sections"); + } + + fn optimize(&mut self) { + self.cmd.arg(match self.sess.opts.optimize { + 
OptLevel::No => "-O0", + OptLevel::Less => "-O1", + OptLevel::Default => "-O2", + OptLevel::Aggressive => "-O3", + // Currently LLD doesn't support `Os` and `Oz`, so pass through `O2` + // instead. + OptLevel::Size => "-O2", + OptLevel::SizeMin => "-O2" + }); + } + + fn pgo_gen(&mut self) { + } + + fn debuginfo(&mut self) { + } + + fn no_default_libraries(&mut self) { + } + + fn build_dylib(&mut self, _out_filename: &Path) { + } + + fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType) { + } + + fn subsystem(&mut self, _subsystem: &str) { + } + + fn no_position_independent_executable(&mut self) { + } + + fn finalize(&mut self) -> Command { + // There have been reports in the wild (rustwasm/wasm-bindgen#119) of + // using threads causing weird hangs and bugs. Disable it entirely as + // this isn't yet the bottleneck of compilation at all anyway. + self.cmd.arg("--no-threads"); + + // By default LLD only gives us one page of stack (64k) which is a + // little small. Default to a larger stack closer to other PC platforms + // (1MB) and users can always inject their own link-args to override this. + self.cmd.arg("-z").arg("stack-size=1048576"); + + // By default LLD's memory layout is: + // + // 1. First, a blank page + // 2. Next, all static data + // 3. Finally, the main stack (which grows down) + // + // This has the unfortunate consequence that on stack overflows you + // corrupt static data and can cause some exceedingly weird bugs. To + // help detect this a little sooner we instead request that the stack is + // placed before static data. + // + // This means that we'll generate slightly larger binaries as references + // to static data will take more bytes in the ULEB128 encoding, but + // stack overflow will be guaranteed to trap as it underflows instead of + // corrupting static data. + self.cmd.arg("--stack-first"); + + // FIXME we probably shouldn't pass this but instead pass an explicit + // whitelist of symbols we'll allow to be undefined. 
Unfortunately + // though we can't handle symbols like `log10` that LLVM injects at a + // super late date without actually parsing object files. For now let's + // stick to this and hopefully fix it before stabilization happens. + self.cmd.arg("--allow-undefined"); + + // For now we just never have an entry symbol + self.cmd.arg("--no-entry"); + + // Make the default table accessible + self.cmd.arg("--export-table"); + + let mut cmd = Command::new(""); + ::std::mem::swap(&mut cmd, &mut self.cmd); + cmd + } + + // Not needed for now with LLD + fn group_start(&mut self) {} + fn group_end(&mut self) {} + + fn cross_lang_lto(&mut self) { + // Do nothing for now + } +} diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs new file mode 100644 index 000000000000..56858a31efd2 --- /dev/null +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -0,0 +1,778 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; +use back::symbol_export; +use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; +use back::write::{self, DiagnosticHandlers}; +use errors::{FatalError, Handler}; +use llvm::archive_ro::ArchiveRO; +use llvm::{True, False}; +use llvm; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::middle::exported_symbols::SymbolExportLevel; +use rustc::session::config::{self, Lto}; +use rustc::util::common::time_ext; +use time_graph::Timeline; +use {ModuleCodegen, ModuleLlvm, ModuleKind, ModuleSource}; + +use libc; + +use std::ffi::CString; +use std::ptr; +use std::slice; +use std::sync::Arc; + +pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { + match crate_type { + config::CrateType::Executable | + config::CrateType::Staticlib | + config::CrateType::Cdylib => true, + + config::CrateType::Dylib | + config::CrateType::Rlib | + config::CrateType::ProcMacro => false, + } +} + +pub(crate) enum LtoModuleCodegen { + Fat { + module: Option, + _serialized_bitcode: Vec, + }, + + Thin(ThinModule), +} + +impl LtoModuleCodegen { + pub fn name(&self) -> &str { + match *self { + LtoModuleCodegen::Fat { .. } => "everything", + LtoModuleCodegen::Thin(ref m) => m.name(), + } + } + + /// Optimize this module within the given codegen context. + /// + /// This function is unsafe as it'll return a `ModuleCodegen` still + /// points to LLVM data structures owned by this `LtoModuleCodegen`. + /// It's intended that the module returned is immediately code generated and + /// dropped, and then this LTO module is dropped. + pub(crate) unsafe fn optimize(&mut self, + cgcx: &CodegenContext, + timeline: &mut Timeline) + -> Result + { + match *self { + LtoModuleCodegen::Fat { ref mut module, .. 
} => { + let module = module.take().unwrap(); + { + let config = cgcx.config(module.kind); + let llmod = module.llvm().unwrap().llmod(); + let tm = &*module.llvm().unwrap().tm; + run_pass_manager(cgcx, tm, llmod, config, false); + timeline.record("fat-done"); + } + Ok(module) + } + LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline), + } + } + + /// A "gauge" of how costly it is to optimize this module, used to sort + /// biggest modules first. + pub fn cost(&self) -> u64 { + match *self { + // Only one module with fat LTO, so the cost doesn't matter. + LtoModuleCodegen::Fat { .. } => 0, + LtoModuleCodegen::Thin(ref m) => m.cost(), + } + } +} + +pub(crate) fn run(cgcx: &CodegenContext, + modules: Vec, + timeline: &mut Timeline) + -> Result, FatalError> +{ + let diag_handler = cgcx.create_diag_handler(); + let export_threshold = match cgcx.lto { + // We're just doing LTO for our one crate + Lto::ThinLocal => SymbolExportLevel::Rust, + + // We're doing LTO for the entire crate graph + Lto::Yes | Lto::Fat | Lto::Thin => { + symbol_export::crates_export_threshold(&cgcx.crate_types) + } + + Lto::No => panic!("didn't request LTO but we're doing LTO"), + }; + + let symbol_filter = &|&(ref name, level): &(String, SymbolExportLevel)| { + if level.is_below_threshold(export_threshold) { + let mut bytes = Vec::with_capacity(name.len() + 1); + bytes.extend(name.bytes()); + Some(CString::new(bytes).unwrap()) + } else { + None + } + }; + let exported_symbols = cgcx.exported_symbols + .as_ref().expect("needs exported symbols for LTO"); + let mut symbol_white_list = exported_symbols[&LOCAL_CRATE] + .iter() + .filter_map(symbol_filter) + .collect::>(); + timeline.record("whitelist"); + info!("{} symbols to preserve in this crate", symbol_white_list.len()); + + // If we're performing LTO for the entire crate graph, then for each of our + // upstream dependencies, find the corresponding rlib and load the bitcode + // from the archive. 
+ // + // We save off all the bytecode and LLVM module ids for later processing + // with either fat or thin LTO + let mut upstream_modules = Vec::new(); + if cgcx.lto != Lto::ThinLocal { + if cgcx.opts.cg.prefer_dynamic { + diag_handler.struct_err("cannot prefer dynamic linking when performing LTO") + .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ + supported with LTO") + .emit(); + return Err(FatalError) + } + + // Make sure we actually can run LTO + for crate_type in cgcx.crate_types.iter() { + if !crate_type_allows_lto(*crate_type) { + let e = diag_handler.fatal("lto can only be run for executables, cdylibs and \ + static library outputs"); + return Err(e) + } + } + + for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { + let exported_symbols = cgcx.exported_symbols + .as_ref().expect("needs exported symbols for LTO"); + symbol_white_list.extend( + exported_symbols[&cnum] + .iter() + .filter_map(symbol_filter)); + + let archive = ArchiveRO::open(&path).expect("wanted an rlib"); + let bytecodes = archive.iter().filter_map(|child| { + child.ok().and_then(|c| c.name().map(|name| (name, c))) + }).filter(|&(name, _)| name.ends_with(RLIB_BYTECODE_EXTENSION)); + for (name, data) in bytecodes { + info!("adding bytecode {}", name); + let bc_encoded = data.data(); + + let (bc, id) = time_ext(cgcx.time_passes, None, &format!("decode {}", name), || { + match DecodedBytecode::new(bc_encoded) { + Ok(b) => Ok((b.bytecode(), b.identifier().to_string())), + Err(e) => Err(diag_handler.fatal(&e)), + } + })?; + let bc = SerializedModule::FromRlib(bc); + upstream_modules.push((bc, CString::new(id).unwrap())); + } + timeline.record(&format!("load: {}", path.display())); + } + } + + let arr = symbol_white_list.iter().map(|c| c.as_ptr()).collect::>(); + match cgcx.lto { + Lto::Yes | // `-C lto` == fat LTO by default + Lto::Fat => { + fat_lto(cgcx, &diag_handler, modules, upstream_modules, &arr, timeline) + } + Lto::Thin | + Lto::ThinLocal => { + if 
cgcx.opts.debugging_opts.cross_lang_lto.enabled() { + unreachable!("We should never reach this case if the LTO step \ + is deferred to the linker"); + } + thin_lto(&diag_handler, modules, upstream_modules, &arr, timeline) + } + Lto::No => unreachable!(), + } +} + +fn fat_lto(cgcx: &CodegenContext, + diag_handler: &Handler, + mut modules: Vec, + mut serialized_modules: Vec<(SerializedModule, CString)>, + symbol_white_list: &[*const libc::c_char], + timeline: &mut Timeline) + -> Result, FatalError> +{ + info!("going for a fat lto"); + + // Find the "costliest" module and merge everything into that codegen unit. + // All the other modules will be serialized and reparsed into the new + // context, so this hopefully avoids serializing and parsing the largest + // codegen unit. + // + // Additionally use a regular module as the base here to ensure that various + // file copy operations in the backend work correctly. The only other kind + // of module here should be an allocator one, and if your crate is smaller + // than the allocator module then the size doesn't really matter anyway. + let (_, costliest_module) = modules.iter() + .enumerate() + .filter(|&(_, module)| module.kind == ModuleKind::Regular) + .map(|(i, module)| { + let cost = unsafe { + llvm::LLVMRustModuleCost(module.llvm().unwrap().llmod()) + }; + (cost, i) + }) + .max() + .expect("must be codegen'ing at least one module"); + let module = modules.remove(costliest_module); + let mut serialized_bitcode = Vec::new(); + { + let (llcx, llmod) = { + let llvm = module.llvm().expect("can't lto pre-codegened modules"); + (&llvm.llcx, llvm.llmod()) + }; + info!("using {:?} as a base module", module.name); + + // The linking steps below may produce errors and diagnostics within LLVM + // which we'd like to handle and print, so set up our diagnostic handlers + // (which get unregistered when they go out of scope below). 
+ let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + + // For all other modules we codegened we'll need to link them into our own + // bitcode. All modules were codegened in their own LLVM context, however, + // and we want to move everything to the same LLVM context. Currently the + // way we know of to do that is to serialize them to a string and them parse + // them later. Not great but hey, that's why it's "fat" LTO, right? + for module in modules { + let llvm = module.llvm().expect("can't lto pre-codegened modules"); + let buffer = ModuleBuffer::new(llvm.llmod()); + let llmod_id = CString::new(&module.name[..]).unwrap(); + serialized_modules.push((SerializedModule::Local(buffer), llmod_id)); + } + + // For all serialized bitcode files we parse them and link them in as we did + // above, this is all mostly handled in C++. Like above, though, we don't + // know much about the memory management here so we err on the side of being + // save and persist everything with the original module. 
+ let mut linker = Linker::new(llmod); + for (bc_decoded, name) in serialized_modules { + info!("linking {:?}", name); + time_ext(cgcx.time_passes, None, &format!("ll link {:?}", name), || { + let data = bc_decoded.data(); + linker.add(&data).map_err(|()| { + let msg = format!("failed to load bc of {:?}", name); + write::llvm_err(&diag_handler, msg) + }) + })?; + timeline.record(&format!("link {:?}", name)); + serialized_bitcode.push(bc_decoded); + } + drop(linker); + cgcx.save_temp_bitcode(&module, "lto.input"); + + // Internalize everything that *isn't* in our whitelist to help strip out + // more modules and such + unsafe { + let ptr = symbol_white_list.as_ptr(); + llvm::LLVMRustRunRestrictionPass(llmod, + ptr as *const *const libc::c_char, + symbol_white_list.len() as libc::size_t); + cgcx.save_temp_bitcode(&module, "lto.after-restriction"); + } + + if cgcx.no_landing_pads { + unsafe { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + } + cgcx.save_temp_bitcode(&module, "lto.after-nounwind"); + } + timeline.record("passes"); + } + + Ok(vec![LtoModuleCodegen::Fat { + module: Some(module), + _serialized_bitcode: serialized_bitcode, + }]) +} + +struct Linker<'a>(&'a mut llvm::Linker<'a>); + +impl Linker<'a> { + fn new(llmod: &'a llvm::Module) -> Self { + unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) } + } + + fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> { + unsafe { + if llvm::LLVMRustLinkerAdd(self.0, + bytecode.as_ptr() as *const libc::c_char, + bytecode.len()) { + Ok(()) + } else { + Err(()) + } + } + } +} + +impl Drop for Linker<'a> { + fn drop(&mut self) { + unsafe { llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _)); } + } +} + +/// Prepare "thin" LTO to get run on these modules. +/// +/// The general structure of ThinLTO is quite different from the structure of +/// "fat" LTO above. 
With "fat" LTO all LLVM modules in question are merged into +/// one giant LLVM module, and then we run more optimization passes over this +/// big module after internalizing most symbols. Thin LTO, on the other hand, +/// avoid this large bottleneck through more targeted optimization. +/// +/// At a high level Thin LTO looks like: +/// +/// 1. Prepare a "summary" of each LLVM module in question which describes +/// the values inside, cost of the values, etc. +/// 2. Merge the summaries of all modules in question into one "index" +/// 3. Perform some global analysis on this index +/// 4. For each module, use the index and analysis calculated previously to +/// perform local transformations on the module, for example inlining +/// small functions from other modules. +/// 5. Run thin-specific optimization passes over each module, and then code +/// generate everything at the end. +/// +/// The summary for each module is intended to be quite cheap, and the global +/// index is relatively quite cheap to create as well. As a result, the goal of +/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more +/// situations. For example one cheap optimization is that we can parallelize +/// all codegen modules, easily making use of all the cores on a machine. +/// +/// With all that in mind, the function here is designed at specifically just +/// calculating the *index* for ThinLTO. This index will then be shared amongst +/// all of the `LtoModuleCodegen` units returned below and destroyed once +/// they all go out of scope. 
+fn thin_lto(diag_handler: &Handler, + modules: Vec, + serialized_modules: Vec<(SerializedModule, CString)>, + symbol_white_list: &[*const libc::c_char], + timeline: &mut Timeline) + -> Result, FatalError> +{ + unsafe { + info!("going for that thin, thin LTO"); + + let mut thin_buffers = Vec::new(); + let mut module_names = Vec::new(); + let mut thin_modules = Vec::new(); + + // FIXME: right now, like with fat LTO, we serialize all in-memory + // modules before working with them and ThinLTO. We really + // shouldn't do this, however, and instead figure out how to + // extract a summary from an in-memory module and then merge that + // into the global index. It turns out that this loop is by far + // the most expensive portion of this small bit of global + // analysis! + for (i, module) in modules.iter().enumerate() { + info!("local module: {} - {}", i, module.name); + let llvm = module.llvm().expect("can't lto precodegened module"); + let name = CString::new(module.name.clone()).unwrap(); + let buffer = ThinBuffer::new(llvm.llmod()); + thin_modules.push(llvm::ThinLTOModule { + identifier: name.as_ptr(), + data: buffer.data().as_ptr(), + len: buffer.data().len(), + }); + thin_buffers.push(buffer); + module_names.push(name); + timeline.record(&module.name); + } + + // FIXME: All upstream crates are deserialized internally in the + // function below to extract their summary and modules. Note that + // unlike the loop above we *must* decode and/or read something + // here as these are all just serialized files on disk. An + // improvement, however, to make here would be to store the + // module summary separately from the actual module itself. Right + // now this is store in one large bitcode file, and the entire + // file is deflate-compressed. We could try to bypass some of the + // decompression by storing the index uncompressed and only + // lazily decompressing the bytecode if necessary. 
+ // + // Note that truly taking advantage of this optimization will + // likely be further down the road. We'd have to implement + // incremental ThinLTO first where we could actually avoid + // looking at upstream modules entirely sometimes (the contents, + // we must always unconditionally look at the index). + let mut serialized = Vec::new(); + for (module, name) in serialized_modules { + info!("foreign module {:?}", name); + thin_modules.push(llvm::ThinLTOModule { + identifier: name.as_ptr(), + data: module.data().as_ptr(), + len: module.data().len(), + }); + serialized.push(module); + module_names.push(name); + } + + // Delegate to the C++ bindings to create some data here. Once this is a + // tried-and-true interface we may wish to try to upstream some of this + // to LLVM itself, right now we reimplement a lot of what they do + // upstream... + let data = llvm::LLVMRustCreateThinLTOData( + thin_modules.as_ptr(), + thin_modules.len() as u32, + symbol_white_list.as_ptr(), + symbol_white_list.len() as u32, + ).ok_or_else(|| { + write::llvm_err(&diag_handler, "failed to prepare thin LTO context".to_string()) + })?; + + let data = ThinData(data); + info!("thin LTO data created"); + timeline.record("data"); + + // Throw our data in an `Arc` as we'll be sharing it across threads. We + // also put all memory referenced by the C++ data (buffers, ids, etc) + // into the arc as well. After this we'll create a thin module + // codegen per module in this data. + let shared = Arc::new(ThinShared { + data, + thin_buffers, + serialized_modules: serialized, + module_names, + }); + Ok((0..shared.module_names.len()).map(|i| { + LtoModuleCodegen::Thin(ThinModule { + shared: shared.clone(), + idx: i, + }) + }).collect()) + } +} + +fn run_pass_manager(cgcx: &CodegenContext, + tm: &llvm::TargetMachine, + llmod: &llvm::Module, + config: &ModuleConfig, + thin: bool) { + // Now we have one massive module inside of llmod. 
Time to run the + // LTO-specific optimization passes that LLVM provides. + // + // This code is based off the code found in llvm's LTO code generator: + // tools/lto/LTOCodeGenerator.cpp + debug!("running the pass manager"); + unsafe { + let pm = llvm::LLVMCreatePassManager(); + llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod); + + if config.verify_llvm_ir { + let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); + llvm::LLVMRustAddPass(pm, pass.unwrap()); + } + + // When optimizing for LTO we don't actually pass in `-O0`, but we force + // it to always happen at least with `-O1`. + // + // With ThinLTO we mess around a lot with symbol visibility in a way + // that will actually cause linking failures if we optimize at O0 which + // notable is lacking in dead code elimination. To ensure we at least + // get some optimizations and correctly link we forcibly switch to `-O1` + // to get dead code elimination. + // + // Note that in general this shouldn't matter too much as you typically + // only turn on ThinLTO when you're compiling with optimizations + // otherwise. 
+ let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = match opt_level { + llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less, + level => level, + }; + with_llvm_pmb(llmod, config, opt_level, false, &mut |b| { + if thin { + if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) { + panic!("this version of LLVM does not support ThinLTO"); + } + } else { + llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm, + /* Internalize = */ False, + /* RunInliner = */ True); + } + }); + + if config.verify_llvm_ir { + let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); + llvm::LLVMRustAddPass(pm, pass.unwrap()); + } + + time_ext(cgcx.time_passes, None, "LTO passes", || + llvm::LLVMRunPassManager(pm, llmod)); + + llvm::LLVMDisposePassManager(pm); + } + debug!("lto done"); +} + +pub enum SerializedModule { + Local(ModuleBuffer), + FromRlib(Vec), +} + +impl SerializedModule { + fn data(&self) -> &[u8] { + match *self { + SerializedModule::Local(ref m) => m.data(), + SerializedModule::FromRlib(ref m) => m, + } + } +} + +pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); + +unsafe impl Send for ModuleBuffer {} +unsafe impl Sync for ModuleBuffer {} + +impl ModuleBuffer { + pub fn new(m: &llvm::Module) -> ModuleBuffer { + ModuleBuffer(unsafe { + llvm::LLVMRustModuleBufferCreate(m) + }) + } + + pub fn data(&self) -> &[u8] { + unsafe { + let ptr = llvm::LLVMRustModuleBufferPtr(self.0); + let len = llvm::LLVMRustModuleBufferLen(self.0); + slice::from_raw_parts(ptr, len) + } + } +} + +impl Drop for ModuleBuffer { + fn drop(&mut self) { + unsafe { llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _)); } + } +} + +pub struct ThinModule { + shared: Arc, + idx: usize, +} + +struct ThinShared { + data: ThinData, + thin_buffers: Vec, + serialized_modules: Vec, + module_names: Vec, +} + +struct ThinData(&'static mut llvm::ThinLTOData); + +unsafe impl Send for ThinData {} +unsafe impl Sync for 
ThinData {} + +impl Drop for ThinData { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _)); + } + } +} + +pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer); + +unsafe impl Send for ThinBuffer {} +unsafe impl Sync for ThinBuffer {} + +impl ThinBuffer { + pub fn new(m: &llvm::Module) -> ThinBuffer { + unsafe { + let buffer = llvm::LLVMRustThinLTOBufferCreate(m); + ThinBuffer(buffer) + } + } + + pub fn data(&self) -> &[u8] { + unsafe { + let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; + let len = llvm::LLVMRustThinLTOBufferLen(self.0); + slice::from_raw_parts(ptr, len) + } + } +} + +impl Drop for ThinBuffer { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _)); + } + } +} + +impl ThinModule { + fn name(&self) -> &str { + self.shared.module_names[self.idx].to_str().unwrap() + } + + fn cost(&self) -> u64 { + // Yes, that's correct, we're using the size of the bytecode as an + // indicator for how costly this codegen unit is. + self.data().len() as u64 + } + + fn data(&self) -> &[u8] { + let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); + a.unwrap_or_else(|| { + let len = self.shared.thin_buffers.len(); + self.shared.serialized_modules[self.idx - len].data() + }) + } + + unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) + -> Result + { + let diag_handler = cgcx.create_diag_handler(); + let tm = (cgcx.tm_factory)().map_err(|e| { + write::llvm_err(&diag_handler, e) + })?; + + // Right now the implementation we've got only works over serialized + // modules, so we create a fresh new LLVM context and parse the module + // into that context. One day, however, we may do this for upstream + // crates but for locally codegened modules we may be able to reuse + // that LLVM Context and Module. 
+ let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + llcx, + self.data().as_ptr(), + self.data().len(), + self.shared.module_names[self.idx].as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for thin LTO module".to_string(); + write::llvm_err(&diag_handler, msg) + })? as *const _; + let module = ModuleCodegen { + source: ModuleSource::Codegened(ModuleLlvm { + llmod_raw, + llcx, + tm, + }), + name: self.name().to_string(), + kind: ModuleKind::Regular, + }; + { + let llmod = module.llvm().unwrap().llmod(); + cgcx.save_temp_bitcode(&module, "thin-lto-input"); + + // Before we do much else find the "main" `DICompileUnit` that we'll be + // using below. If we find more than one though then rustc has changed + // in a way we're not ready for, so generate an ICE by returning + // an error. + let mut cu1 = ptr::null_mut(); + let mut cu2 = ptr::null_mut(); + llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); + if !cu2.is_null() { + let msg = "multiple source DICompileUnits found".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + + // Like with "fat" LTO, get some better optimizations if landing pads + // are disabled by removing all landing pads. + if cgcx.no_landing_pads { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind"); + timeline.record("nounwind"); + } + + // Up next comes the per-module local analyses that we do for Thin LTO. + // Each of these functions is basically copied from the LLVM + // implementation and then tailored to suit this implementation. Ideally + // each of these would be supported by upstream LLVM but that's perhaps + // a patch for another day! 
+ // + // You can find some more comments about these functions in the LLVM + // bindings we've got (currently `PassWrapper.cpp`) + if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&module, "thin-lto-after-rename"); + timeline.record("rename"); + if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve"); + timeline.record("resolve"); + if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize"); + timeline.record("internalize"); + if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + cgcx.save_temp_bitcode(&module, "thin-lto-after-import"); + timeline.record("import"); + + // Ok now this is a bit unfortunate. This is also something you won't + // find upstream in LLVM's ThinLTO passes! This is a hack for now to + // work around bugs in LLVM. + // + // First discovered in #45511 it was found that as part of ThinLTO + // importing passes LLVM will import `DICompileUnit` metadata + // information across modules. This means that we'll be working with one + // LLVM module that has multiple `DICompileUnit` instances in it (a + // bunch of `llvm.dbg.cu` members). 
Unfortunately there's a number of + // bugs in LLVM's backend which generates invalid DWARF in a situation + // like this: + // + // https://bugs.llvm.org/show_bug.cgi?id=35212 + // https://bugs.llvm.org/show_bug.cgi?id=35562 + // + // While the first bug there is fixed the second ended up causing #46346 + // which was basically a resurgence of #45511 after LLVM's bug 35212 was + // fixed. + // + // This function below is a huge hack around this problem. The function + // below is defined in `PassWrapper.cpp` and will basically "merge" + // all `DICompileUnit` instances in a module. Basically it'll take all + // the objects, rewrite all pointers of `DISubprogram` to point to the + // first `DICompileUnit`, and then delete all the other units. + // + // This is probably mangling to the debug info slightly (but hopefully + // not too much) but for now at least gets LLVM to emit valid DWARF (or + // so it appears). Hopefully we can remove this once upstream bugs are + // fixed in LLVM. + llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); + cgcx.save_temp_bitcode(&module, "thin-lto-after-patch"); + timeline.record("patch"); + + // Alright now that we've done everything related to the ThinLTO + // analysis it's time to run some optimizations! Here we use the same + // `run_pass_manager` as the "fat" LTO above except that we tell it to + // populate a thin-specific pass manager, which presumably LLVM treats a + // little differently. 
+ info!("running thin lto passes over {}", module.name); + let config = cgcx.config(module.kind); + run_pass_manager(cgcx, module.llvm().unwrap().tm, llmod, config, true); + cgcx.save_temp_bitcode(&module, "thin-lto-after-pm"); + timeline.record("thin-done"); + } + + Ok(module) + } +} diff --git a/src/librustc_trans/back/rpath.rs b/src/librustc_codegen_llvm/back/rpath.rs similarity index 96% rename from src/librustc_trans/back/rpath.rs rename to src/librustc_codegen_llvm/back/rpath.rs index 8e5e7d376488..2c3a143646c2 100644 --- a/src/librustc_trans/back/rpath.rs +++ b/src/librustc_codegen_llvm/back/rpath.rs @@ -22,7 +22,7 @@ pub struct RPathConfig<'a> { pub is_like_osx: bool, pub has_rpath: bool, pub linker_is_gnu: bool, - pub get_install_prefix_lib_path: &'a mut FnMut() -> PathBuf, + pub get_install_prefix_lib_path: &'a mut dyn FnMut() -> PathBuf, } pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec { @@ -114,8 +114,8 @@ fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String let mut output = cwd.join(&config.out_filename); output.pop(); let output = fs::canonicalize(&output).unwrap_or(output); - let relative = path_relative_from(&lib, &output) - .expect(&format!("couldn't create relative path from {:?} to {:?}", output, lib)); + let relative = path_relative_from(&lib, &output).unwrap_or_else(|| + panic!("couldn't create relative path from {:?} to {:?}", output, lib)); // FIXME (#9639): This needs to handle non-utf8 paths format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path")) @@ -152,9 +152,7 @@ fn path_relative_from(path: &Path, base: &Path) -> Option { (Some(_), Some(b)) if b == Component::ParentDir => return None, (Some(a), Some(_)) => { comps.push(Component::ParentDir); - for _ in itb { - comps.push(Component::ParentDir); - } + comps.extend(itb.map(|_| Component::ParentDir)); comps.push(a); comps.extend(ita.by_ref()); break; diff --git a/src/librustc_codegen_llvm/back/symbol_export.rs 
b/src/librustc_codegen_llvm/back/symbol_export.rs new file mode 100644 index 000000000000..edb1da0b5582 --- /dev/null +++ b/src/librustc_codegen_llvm/back/symbol_export.rs @@ -0,0 +1,394 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc_data_structures::sync::Lrc; +use std::sync::Arc; + +use monomorphize::Instance; +use rustc::hir; +use rustc::hir::CodegenFnAttrFlags; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX}; +use rustc_data_structures::fingerprint::Fingerprint; +use rustc::middle::exported_symbols::{SymbolExportLevel, ExportedSymbol, metadata_symbol_name}; +use rustc::session::config; +use rustc::ty::{TyCtxt, SymbolName}; +use rustc::ty::query::Providers; +use rustc::ty::subst::Substs; +use rustc::util::nodemap::{FxHashMap, DefIdMap}; +use rustc_allocator::ALLOCATOR_METHODS; +use rustc_data_structures::indexed_vec::IndexVec; +use std::collections::hash_map::Entry::*; + +pub type ExportedSymbols = FxHashMap< + CrateNum, + Arc>, +>; + +pub fn threshold(tcx: TyCtxt) -> SymbolExportLevel { + crates_export_threshold(&tcx.sess.crate_types.borrow()) +} + +fn crate_export_threshold(crate_type: config::CrateType) -> SymbolExportLevel { + match crate_type { + config::CrateType::Executable | + config::CrateType::Staticlib | + config::CrateType::ProcMacro | + config::CrateType::Cdylib => SymbolExportLevel::C, + config::CrateType::Rlib | + config::CrateType::Dylib => SymbolExportLevel::Rust, + } +} + +pub fn crates_export_threshold(crate_types: &[config::CrateType]) + -> SymbolExportLevel { + if crate_types.iter().any(|&crate_type| { + crate_export_threshold(crate_type) == SymbolExportLevel::Rust + }) { + SymbolExportLevel::Rust 
+ } else { + SymbolExportLevel::C + } +} + +fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cnum: CrateNum) + -> Lrc> +{ + assert_eq!(cnum, LOCAL_CRATE); + + if !tcx.sess.opts.output_types.should_codegen() { + return Lrc::new(DefIdMap()) + } + + // Check to see if this crate is a "special runtime crate". These + // crates, implementation details of the standard library, typically + // have a bunch of `pub extern` and `#[no_mangle]` functions as the + // ABI between them. We don't want their symbols to have a `C` + // export level, however, as they're just implementation details. + // Down below we'll hardwire all of the symbols to the `Rust` export + // level instead. + let special_runtime_crate = tcx.is_panic_runtime(LOCAL_CRATE) || + tcx.is_compiler_builtins(LOCAL_CRATE); + + let mut reachable_non_generics: DefIdMap<_> = tcx.reachable_set(LOCAL_CRATE).0 + .iter() + .filter_map(|&node_id| { + // We want to ignore some FFI functions that are not exposed from + // this crate. Reachable FFI functions can be lumped into two + // categories: + // + // 1. Those that are included statically via a static library + // 2. Those included otherwise (e.g. dynamically or via a framework) + // + // Although our LLVM module is not literally emitting code for the + // statically included symbols, it's an export of our library which + // needs to be passed on to the linker and encoded in the metadata. + // + // As a result, if this id is an FFI item (foreign item) then we only + // let it through if it's included statically. + match tcx.hir.get(node_id) { + hir::map::NodeForeignItem(..) => { + let def_id = tcx.hir.local_def_id(node_id); + if tcx.is_statically_included_foreign_item(def_id) { + Some(def_id) + } else { + None + } + } + + // Only consider nodes that actually have exported symbols. + hir::map::NodeItem(&hir::Item { + node: hir::ItemKind::Static(..), + .. + }) | + hir::map::NodeItem(&hir::Item { + node: hir::ItemKind::Fn(..), .. 
+ }) | + hir::map::NodeImplItem(&hir::ImplItem { + node: hir::ImplItemKind::Method(..), + .. + }) => { + let def_id = tcx.hir.local_def_id(node_id); + let generics = tcx.generics_of(def_id); + if !generics.requires_monomorphization(tcx) && + // Functions marked with #[inline] are only ever codegened + // with "internal" linkage and are never exported. + !Instance::mono(tcx, def_id).def.requires_local(tcx) { + Some(def_id) + } else { + None + } + } + + _ => None + } + }) + .map(|def_id| { + let export_level = if special_runtime_crate { + let name = tcx.symbol_name(Instance::mono(tcx, def_id)).as_str(); + // We can probably do better here by just ensuring that + // it has hidden visibility rather than public + // visibility, as this is primarily here to ensure it's + // not stripped during LTO. + // + // In general though we won't link right if these + // symbols are stripped, and LTO currently strips them. + if &*name == "rust_eh_personality" || + &*name == "rust_eh_register_frames" || + &*name == "rust_eh_unregister_frames" { + SymbolExportLevel::C + } else { + SymbolExportLevel::Rust + } + } else { + symbol_export_level(tcx, def_id) + }; + debug!("EXPORTED SYMBOL (local): {} ({:?})", + tcx.symbol_name(Instance::mono(tcx, def_id)), + export_level); + (def_id, export_level) + }) + .collect(); + + if let Some(id) = *tcx.sess.derive_registrar_fn.get() { + let def_id = tcx.hir.local_def_id(id); + reachable_non_generics.insert(def_id, SymbolExportLevel::C); + } + + if let Some(id) = *tcx.sess.plugin_registrar_fn.get() { + let def_id = tcx.hir.local_def_id(id); + reachable_non_generics.insert(def_id, SymbolExportLevel::C); + } + + Lrc::new(reachable_non_generics) +} + +fn is_reachable_non_generic_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> bool { + let export_threshold = threshold(tcx); + + if let Some(&level) = tcx.reachable_non_generics(def_id.krate).get(&def_id) { + level.is_below_threshold(export_threshold) + } else { + false + } +} + 
+fn is_reachable_non_generic_provider_extern<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> bool { + tcx.reachable_non_generics(def_id.krate).contains_key(&def_id) +} + +fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cnum: CrateNum) + -> Arc, + SymbolExportLevel)>> +{ + assert_eq!(cnum, LOCAL_CRATE); + + if !tcx.sess.opts.output_types.should_codegen() { + return Arc::new(vec![]) + } + + let mut symbols: Vec<_> = tcx.reachable_non_generics(LOCAL_CRATE) + .iter() + .map(|(&def_id, &level)| { + (ExportedSymbol::NonGeneric(def_id), level) + }) + .collect(); + + if tcx.sess.entry_fn.borrow().is_some() { + let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new("main")); + + symbols.push((exported_symbol, SymbolExportLevel::C)); + } + + if tcx.sess.allocator_kind.get().is_some() { + for method in ALLOCATOR_METHODS { + let symbol_name = format!("__rust_{}", method.name); + let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name)); + + symbols.push((exported_symbol, SymbolExportLevel::Rust)); + } + } + + if tcx.sess.opts.debugging_opts.pgo_gen.is_some() { + // These are weak symbols that point to the profile version and the + // profile name, which need to be treated as exported so LTO doesn't nix + // them. 
+ const PROFILER_WEAK_SYMBOLS: [&'static str; 2] = [ + "__llvm_profile_raw_version", + "__llvm_profile_filename", + ]; + for sym in &PROFILER_WEAK_SYMBOLS { + let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym)); + symbols.push((exported_symbol, SymbolExportLevel::C)); + } + } + + if tcx.sess.crate_types.borrow().contains(&config::CrateType::Dylib) { + let symbol_name = metadata_symbol_name(tcx); + let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(&symbol_name)); + + symbols.push((exported_symbol, SymbolExportLevel::Rust)); + } + + if tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics() { + use rustc::mir::mono::{Linkage, Visibility, MonoItem}; + use rustc::ty::InstanceDef; + + // Normally, we require that shared monomorphizations are not hidden, + // because if we want to re-use a monomorphization from a Rust dylib, it + // needs to be exported. + // However, on platforms that don't allow for Rust dylibs, having + // external linkage is enough for monomorphization to be linked to. + let need_visibility = tcx.sess.target.target.options.dynamic_linking && + !tcx.sess.target.target.options.only_cdylib; + + let (_, cgus) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); + + for (mono_item, &(linkage, visibility)) in cgus.iter() + .flat_map(|cgu| cgu.items().iter()) { + if linkage != Linkage::External { + // We can only re-use things with external linkage, otherwise + // we'll get a linker error + continue + } + + if need_visibility && visibility == Visibility::Hidden { + // If we potentially share things from Rust dylibs, they must + // not be hidden + continue + } + + if let &MonoItem::Fn(Instance { + def: InstanceDef::Item(def_id), + substs, + }) = mono_item { + if substs.types().next().is_some() { + symbols.push((ExportedSymbol::Generic(def_id, substs), + SymbolExportLevel::Rust)); + } + } + } + } + + // Sort so we get a stable incr. comp. hash. 
+ symbols.sort_unstable_by(|&(ref symbol1, ..), &(ref symbol2, ..)| { + symbol1.compare_stable(tcx, symbol2) + }); + + Arc::new(symbols) +} + +fn upstream_monomorphizations_provider<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cnum: CrateNum) + -> Lrc, CrateNum>>>> +{ + debug_assert!(cnum == LOCAL_CRATE); + + let cnums = tcx.all_crate_nums(LOCAL_CRATE); + + let mut instances: DefIdMap> = DefIdMap(); + + let cnum_stable_ids: IndexVec = { + let mut cnum_stable_ids = IndexVec::from_elem_n(Fingerprint::ZERO, + cnums.len() + 1); + + for &cnum in cnums.iter() { + cnum_stable_ids[cnum] = tcx.def_path_hash(DefId { + krate: cnum, + index: CRATE_DEF_INDEX, + }).0; + } + + cnum_stable_ids + }; + + for &cnum in cnums.iter() { + for &(ref exported_symbol, _) in tcx.exported_symbols(cnum).iter() { + if let &ExportedSymbol::Generic(def_id, substs) = exported_symbol { + let substs_map = instances.entry(def_id).or_default(); + + match substs_map.entry(substs) { + Occupied(mut e) => { + // If there are multiple monomorphizations available, + // we select one deterministically. 
+ let other_cnum = *e.get(); + if cnum_stable_ids[other_cnum] > cnum_stable_ids[cnum] { + e.insert(cnum); + } + } + Vacant(e) => { + e.insert(cnum); + } + } + } + } + } + + Lrc::new(instances.into_iter() + .map(|(key, value)| (key, Lrc::new(value))) + .collect()) +} + +fn upstream_monomorphizations_for_provider<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> Option, CrateNum>>> +{ + debug_assert!(!def_id.is_local()); + tcx.upstream_monomorphizations(LOCAL_CRATE) + .get(&def_id) + .cloned() +} + +fn is_unreachable_local_definition_provider(tcx: TyCtxt, def_id: DefId) -> bool { + if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { + !tcx.reachable_set(LOCAL_CRATE).0.contains(&node_id) + } else { + bug!("is_unreachable_local_definition called with non-local DefId: {:?}", + def_id) + } +} + +pub fn provide(providers: &mut Providers) { + providers.reachable_non_generics = reachable_non_generics_provider; + providers.is_reachable_non_generic = is_reachable_non_generic_provider_local; + providers.exported_symbols = exported_symbols_provider_local; + providers.upstream_monomorphizations = upstream_monomorphizations_provider; + providers.is_unreachable_local_definition = is_unreachable_local_definition_provider; +} + +pub fn provide_extern(providers: &mut Providers) { + providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern; + providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider; +} + +fn symbol_export_level(tcx: TyCtxt, sym_def_id: DefId) -> SymbolExportLevel { + // We export anything that's not mangled at the "C" layer as it probably has + // to do with ABI concerns. We do not, however, apply such treatment to + // special symbols in the standard library for various plumbing between + // core/std/allocators/etc. 
For example symbols used to hook up allocation + // are not considered for export + let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id); + let is_extern = codegen_fn_attrs.contains_extern_indicator(); + let std_internal = + codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL); + + if is_extern && !std_internal { + SymbolExportLevel::C + } else { + SymbolExportLevel::Rust + } +} diff --git a/src/librustc_codegen_llvm/back/wasm.rs b/src/librustc_codegen_llvm/back/wasm.rs new file mode 100644 index 000000000000..f37854b7bcae --- /dev/null +++ b/src/librustc_codegen_llvm/back/wasm.rs @@ -0,0 +1,220 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fs; +use std::path::Path; +use std::str; + +use rustc_data_structures::fx::FxHashMap; +use serialize::leb128; + +// https://webassembly.github.io/spec/core/binary/modules.html#binary-importsec +const WASM_IMPORT_SECTION_ID: u8 = 2; + +const WASM_EXTERNAL_KIND_FUNCTION: u8 = 0; +const WASM_EXTERNAL_KIND_TABLE: u8 = 1; +const WASM_EXTERNAL_KIND_MEMORY: u8 = 2; +const WASM_EXTERNAL_KIND_GLOBAL: u8 = 3; + +/// Rewrite the module imports are listed from in a wasm module given the field +/// name to module name mapping in `import_map`. +/// +/// LLVM 6 which we're using right now doesn't have the ability to configure the +/// module a wasm symbol is import from. Rather all imported symbols come from +/// the bland `"env"` module unconditionally. Furthermore we'd *also* need +/// support in LLD for preserving these import modules, which it unfortunately +/// currently does not. 
+/// +/// This function is intended as a hack for now where we manually rewrite the +/// wasm output by LLVM to have the correct import modules listed. The +/// `#[link(wasm_import_module = "...")]` attribute in Rust translates to the +/// module that each symbol is imported from, so here we manually go through the +/// wasm file, decode it, rewrite imports, and then rewrite the wasm module. +/// +/// Support for this was added to LLVM in +/// https://github.com/llvm-mirror/llvm/commit/0f32e1365, although support still +/// needs to be added, tracked at https://bugs.llvm.org/show_bug.cgi?id=37168 +pub fn rewrite_imports(path: &Path, import_map: &FxHashMap) { + if import_map.len() == 0 { + return + } + + let wasm = fs::read(path).expect("failed to read wasm output"); + let mut ret = WasmEncoder::new(); + ret.data.extend(&wasm[..8]); + + // skip the 8 byte wasm/version header + for (id, raw) in WasmSections(WasmDecoder::new(&wasm[8..])) { + ret.byte(id); + if id == WASM_IMPORT_SECTION_ID { + info!("rewriting import section"); + let data = rewrite_import_section( + &mut WasmDecoder::new(raw), + import_map, + ); + ret.bytes(&data); + } else { + info!("carry forward section {}, {} bytes long", id, raw.len()); + ret.bytes(raw); + } + } + + fs::write(path, &ret.data).expect("failed to write wasm output"); + + fn rewrite_import_section( + wasm: &mut WasmDecoder, + import_map: &FxHashMap, + ) + -> Vec + { + let mut dst = WasmEncoder::new(); + let n = wasm.u32(); + dst.u32(n); + info!("rewriting {} imports", n); + for _ in 0..n { + rewrite_import_entry(wasm, &mut dst, import_map); + } + return dst.data + } + + fn rewrite_import_entry(wasm: &mut WasmDecoder, + dst: &mut WasmEncoder, + import_map: &FxHashMap) { + // More info about the binary format here is available at: + // https://webassembly.github.io/spec/core/binary/modules.html#import-section + // + // Note that you can also find the whole point of existence of this + // function here, where we map the `module` name to 
a different one if + // we've got one listed. + let module = wasm.str(); + let field = wasm.str(); + let new_module = if module == "env" { + import_map.get(field).map(|s| &**s).unwrap_or(module) + } else { + module + }; + info!("import rewrite ({} => {}) / {}", module, new_module, field); + dst.str(new_module); + dst.str(field); + let kind = wasm.byte(); + dst.byte(kind); + match kind { + WASM_EXTERNAL_KIND_FUNCTION => dst.u32(wasm.u32()), + WASM_EXTERNAL_KIND_TABLE => { + dst.byte(wasm.byte()); // element_type + dst.limits(wasm.limits()); + } + WASM_EXTERNAL_KIND_MEMORY => dst.limits(wasm.limits()), + WASM_EXTERNAL_KIND_GLOBAL => { + dst.byte(wasm.byte()); // content_type + dst.bool(wasm.bool()); // mutable + } + b => panic!("unknown kind: {}", b), + } + } +} + +struct WasmSections<'a>(WasmDecoder<'a>); + +impl<'a> Iterator for WasmSections<'a> { + type Item = (u8, &'a [u8]); + + fn next(&mut self) -> Option<(u8, &'a [u8])> { + if self.0.data.len() == 0 { + return None + } + + // see https://webassembly.github.io/spec/core/binary/modules.html#sections + let id = self.0.byte(); + let section_len = self.0.u32(); + info!("new section {} / {} bytes", id, section_len); + let section = self.0.skip(section_len as usize); + Some((id, section)) + } +} + +struct WasmDecoder<'a> { + data: &'a [u8], +} + +impl<'a> WasmDecoder<'a> { + fn new(data: &'a [u8]) -> WasmDecoder<'a> { + WasmDecoder { data } + } + + fn byte(&mut self) -> u8 { + self.skip(1)[0] + } + + fn u32(&mut self) -> u32 { + let (n, l1) = leb128::read_u32_leb128(self.data); + self.data = &self.data[l1..]; + return n + } + + fn skip(&mut self, amt: usize) -> &'a [u8] { + let (data, rest) = self.data.split_at(amt); + self.data = rest; + data + } + + fn str(&mut self) -> &'a str { + let len = self.u32(); + str::from_utf8(self.skip(len as usize)).unwrap() + } + + fn bool(&mut self) -> bool { + self.byte() == 1 + } + + fn limits(&mut self) -> (u32, Option) { + let has_max = self.bool(); + (self.u32(), if has_max { 
Some(self.u32()) } else { None }) + } +} + +struct WasmEncoder { + data: Vec, +} + +impl WasmEncoder { + fn new() -> WasmEncoder { + WasmEncoder { data: Vec::new() } + } + + fn u32(&mut self, val: u32) { + leb128::write_u32_leb128(&mut self.data, val); + } + + fn byte(&mut self, val: u8) { + self.data.push(val); + } + + fn bytes(&mut self, val: &[u8]) { + self.u32(val.len() as u32); + self.data.extend_from_slice(val); + } + + fn str(&mut self, val: &str) { + self.bytes(val.as_bytes()) + } + + fn bool(&mut self, b: bool) { + self.byte(b as u8); + } + + fn limits(&mut self, limits: (u32, Option)) { + self.bool(limits.1.is_some()); + self.u32(limits.0); + if let Some(c) = limits.1 { + self.u32(c); + } + } +} diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs new file mode 100644 index 000000000000..b62836ec255d --- /dev/null +++ b/src/librustc_codegen_llvm/back/write.rs @@ -0,0 +1,2435 @@ +// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use attributes; +use back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; +use back::lto::{self, ModuleBuffer, ThinBuffer}; +use back::link::{self, get_linker, remove}; +use back::command::Command; +use back::linker::LinkerInfo; +use back::symbol_export::ExportedSymbols; +use base; +use consts; +use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir}; +use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; +use rustc::middle::cstore::{LinkMeta, EncodedMetadata}; +use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::Session; +use rustc::util::nodemap::FxHashMap; +use time_graph::{self, TimeGraph, Timeline}; +use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; +use {CodegenResults, ModuleSource, ModuleCodegen, CompiledModule, ModuleKind}; +use CrateInfo; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::ty::TyCtxt; +use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry}; +use rustc_fs_util::{path2cstr, link_or_copy}; +use rustc_data_structures::small_c_str::SmallCStr; +use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; +use errors::emitter::{Emitter}; +use syntax::attr; +use syntax::ext::hygiene::Mark; +use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; +use type_::Type; +use context::{is_pie_binary, get_reloc_model}; +use common::{C_bytes_in_context, val_ty}; +use jobserver::{Client, Acquired}; +use rustc_demangle; + +use std::any::Any; +use std::ffi::{CString, CStr}; +use std::fs; +use std::io::{self, Write}; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::Arc; +use std::sync::mpsc::{channel, Sender, Receiver}; +use std::slice; +use std::time::Instant; +use std::thread; +use libc::{c_uint, c_void, c_char, size_t}; + +pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 7] = [ + ("pic", llvm::RelocMode::PIC), + ("static", 
llvm::RelocMode::Static), + ("default", llvm::RelocMode::Default), + ("dynamic-no-pic", llvm::RelocMode::DynamicNoPic), + ("ropi", llvm::RelocMode::ROPI), + ("rwpi", llvm::RelocMode::RWPI), + ("ropi-rwpi", llvm::RelocMode::ROPI_RWPI), +]; + +pub const CODE_GEN_MODEL_ARGS: &[(&str, llvm::CodeModel)] = &[ + ("small", llvm::CodeModel::Small), + ("kernel", llvm::CodeModel::Kernel), + ("medium", llvm::CodeModel::Medium), + ("large", llvm::CodeModel::Large), +]; + +pub const TLS_MODEL_ARGS : [(&'static str, llvm::ThreadLocalMode); 4] = [ + ("global-dynamic", llvm::ThreadLocalMode::GeneralDynamic), + ("local-dynamic", llvm::ThreadLocalMode::LocalDynamic), + ("initial-exec", llvm::ThreadLocalMode::InitialExec), + ("local-exec", llvm::ThreadLocalMode::LocalExec), +]; + +pub fn llvm_err(handler: &errors::Handler, msg: String) -> FatalError { + match llvm::last_error() { + Some(err) => handler.fatal(&format!("{}: {}", msg, err)), + None => handler.fatal(&msg), + } +} + +pub fn write_output_file( + handler: &errors::Handler, + target: &'ll llvm::TargetMachine, + pm: &llvm::PassManager<'ll>, + m: &'ll llvm::Module, + output: &Path, + file_type: llvm::FileType) -> Result<(), FatalError> { + unsafe { + let output_c = path2cstr(output); + let result = llvm::LLVMRustWriteOutputFile( + target, pm, m, output_c.as_ptr(), file_type); + if result.into_result().is_err() { + let msg = format!("could not write output to {}", output.display()); + Err(llvm_err(handler, msg)) + } else { + Ok(()) + } + } +} + +fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { + match optimize { + config::OptLevel::No => llvm::CodeGenOptLevel::None, + config::OptLevel::Less => llvm::CodeGenOptLevel::Less, + config::OptLevel::Default => llvm::CodeGenOptLevel::Default, + config::OptLevel::Aggressive => llvm::CodeGenOptLevel::Aggressive, + _ => llvm::CodeGenOptLevel::Default, + } +} + +fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { + match optimize { + 
config::OptLevel::Size => llvm::CodeGenOptSizeDefault, + config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, + _ => llvm::CodeGenOptSizeNone, + } +} + +pub fn create_target_machine( + sess: &Session, + find_features: bool, +) -> &'static mut llvm::TargetMachine { + target_machine_factory(sess, find_features)().unwrap_or_else(|err| { + llvm_err(sess.diagnostic(), err).raise() + }) +} + +// If find_features is true this won't access `sess.crate_types` by assuming +// that `is_pie_binary` is false. When we discover LLVM target features +// `sess.crate_types` is uninitialized so we cannot access it. +pub fn target_machine_factory(sess: &Session, find_features: bool) + -> Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync> +{ + let reloc_model = get_reloc_model(sess); + + let opt_level = get_llvm_opt_level(sess.opts.optimize); + let use_softfp = sess.opts.cg.soft_float; + + let ffunction_sections = sess.target.target.options.function_sections; + let fdata_sections = ffunction_sections; + + let code_model_arg = sess.opts.cg.code_model.as_ref().or( + sess.target.target.options.code_model.as_ref(), + ); + + let code_model = match code_model_arg { + Some(s) => { + match CODE_GEN_MODEL_ARGS.iter().find(|arg| arg.0 == s) { + Some(x) => x.1, + _ => { + sess.err(&format!("{:?} is not a valid code model", + code_model_arg)); + sess.abort_if_errors(); + bug!(); + } + } + } + None => llvm::CodeModel::None, + }; + + let singlethread = sess.target.target.options.singlethread; + + let triple = SmallCStr::new(&sess.target.target.llvm_target); + let cpu = SmallCStr::new(sess.target_cpu()); + let features = attributes::llvm_target_features(sess) + .collect::>() + .join(","); + let features = CString::new(features).unwrap(); + let is_pie_binary = !find_features && is_pie_binary(sess); + let trap_unreachable = sess.target.target.options.trap_unreachable; + + let asm_comments = sess.asm_comments(); + + Arc::new(move || { + let tm = unsafe { + 
llvm::LLVMRustCreateTargetMachine( + triple.as_ptr(), cpu.as_ptr(), features.as_ptr(), + code_model, + reloc_model, + opt_level, + use_softfp, + is_pie_binary, + ffunction_sections, + fdata_sections, + trap_unreachable, + singlethread, + asm_comments, + ) + }; + + tm.ok_or_else(|| { + format!("Could not create LLVM TargetMachine for triple: {}", + triple.to_str().unwrap()) + }) + }) +} + +/// Module-specific configuration for `optimize_and_codegen`. +pub struct ModuleConfig { + /// Names of additional optimization passes to run. + passes: Vec, + /// Some(level) to optimize at a certain level, or None to run + /// absolutely no optimizations (used for the metadata module). + pub opt_level: Option, + + /// Some(level) to optimize binary size, or None to not affect program size. + opt_size: Option, + + pgo_gen: Option, + pgo_use: String, + + // Flags indicating which outputs to produce. + emit_no_opt_bc: bool, + emit_bc: bool, + emit_bc_compressed: bool, + emit_lto_bc: bool, + emit_ir: bool, + emit_asm: bool, + emit_obj: bool, + // Miscellaneous flags. These are mostly copied from command-line + // options. + pub verify_llvm_ir: bool, + no_prepopulate_passes: bool, + no_builtins: bool, + time_passes: bool, + vectorize_loop: bool, + vectorize_slp: bool, + merge_functions: bool, + inline_threshold: Option, + // Instead of creating an object file by doing LLVM codegen, just + // make the object file bitcode. Provides easy compatibility with + // emscripten's ecc compiler, when used as the linker. 
+ obj_is_bitcode: bool, + no_integrated_as: bool, + embed_bitcode: bool, + embed_bitcode_marker: bool, +} + +impl ModuleConfig { + fn new(passes: Vec) -> ModuleConfig { + ModuleConfig { + passes, + opt_level: None, + opt_size: None, + + pgo_gen: None, + pgo_use: String::new(), + + emit_no_opt_bc: false, + emit_bc: false, + emit_bc_compressed: false, + emit_lto_bc: false, + emit_ir: false, + emit_asm: false, + emit_obj: false, + obj_is_bitcode: false, + embed_bitcode: false, + embed_bitcode_marker: false, + no_integrated_as: false, + + verify_llvm_ir: false, + no_prepopulate_passes: false, + no_builtins: false, + time_passes: false, + vectorize_loop: false, + vectorize_slp: false, + merge_functions: false, + inline_threshold: None + } + } + + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { + self.verify_llvm_ir = sess.verify_llvm_ir(); + self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; + self.no_builtins = no_builtins || sess.target.target.options.no_builtins; + self.time_passes = sess.time_passes(); + self.inline_threshold = sess.opts.cg.inline_threshold; + self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || + sess.opts.debugging_opts.cross_lang_lto.enabled(); + let embed_bitcode = sess.target.target.options.embed_bitcode || + sess.opts.debugging_opts.embed_bitcode; + if embed_bitcode { + match sess.opts.optimize { + config::OptLevel::No | + config::OptLevel::Less => { + self.embed_bitcode_marker = embed_bitcode; + } + _ => self.embed_bitcode = embed_bitcode, + } + } + + // Copy what clang does by turning on loop vectorization at O2 and + // slp vectorization at O3. Otherwise configure other optimization aspects + // of this pass manager builder. + // Turn off vectorization for emscripten, as it's not very well supported. 
+ self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && + (sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive) && + !sess.target.target.options.is_like_emscripten; + + self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && + sess.opts.optimize == config::OptLevel::Aggressive && + !sess.target.target.options.is_like_emscripten; + + self.merge_functions = sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive; + } +} + +/// Assembler name and command used by codegen when no_integrated_as is enabled +struct AssemblerCommand { + name: PathBuf, + cmd: Command, +} + +/// Additional resources used by optimize_and_codegen (not module specific) +#[derive(Clone)] +pub struct CodegenContext { + // Resouces needed when running LTO + pub time_passes: bool, + pub lto: Lto, + pub no_landing_pads: bool, + pub save_temps: bool, + pub fewer_names: bool, + pub exported_symbols: Option>, + pub opts: Arc, + pub crate_types: Vec, + pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, + output_filenames: Arc, + regular_module_config: Arc, + metadata_module_config: Arc, + allocator_module_config: Arc, + pub tm_factory: Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync>, + pub msvc_imps_needed: bool, + pub target_pointer_width: String, + debuginfo: config::DebugInfo, + + // Number of cgus excluding the allocator/metadata modules + pub total_cgus: usize, + // Handler to use for diagnostics produced during codegen. + pub diag_emitter: SharedEmitter, + // LLVM passes added by plugins. + pub plugin_passes: Vec, + // LLVM optimizations for which we want to print remarks. 
+ pub remark: Passes, + // Worker thread number + pub worker: usize, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + pub incr_comp_session_dir: Option, + // Channel back to the main control thread to send messages to + coordinator_send: Sender>, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. + time_graph: Option, + // The assembler command if no_integrated_as option is enabled, None otherwise + assembler_cmd: Option>, +} + +impl CodegenContext { + pub fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } + + pub(crate) fn config(&self, kind: ModuleKind) -> &ModuleConfig { + match kind { + ModuleKind::Regular => &self.regular_module_config, + ModuleKind::Metadata => &self.metadata_module_config, + ModuleKind::Allocator => &self.allocator_module_config, + } + } + + pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { + if !self.save_temps { + return + } + unsafe { + let ext = format!("{}.bc", name); + let cgu = Some(&module.name[..]); + let path = self.output_filenames.temp_path_ext(&ext, cgu); + let cstr = path2cstr(&path); + let llmod = module.llvm().unwrap().llmod(); + llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); + } + } +} + +pub struct DiagnosticHandlers<'a> { + data: *mut (&'a CodegenContext, &'a Handler), + llcx: &'a llvm::Context, +} + +impl<'a> DiagnosticHandlers<'a> { + pub fn new(cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: &'a llvm::Context) -> Self { + let data = Box::into_raw(Box::new((cgcx, handler))); + unsafe { + llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data as *mut _); + llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data as *mut _); + } + DiagnosticHandlers { data, llcx } + } +} + +impl<'a> Drop for DiagnosticHandlers<'a> { + fn drop(&mut self) { + use std::ptr::null_mut; + unsafe 
{ + llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut()); + llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut()); + drop(Box::from_raw(self.data)); + } + } +} + +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, + msg: &'b str, + cookie: c_uint) { + cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); +} + +unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, + user: *const c_void, + cookie: c_uint) { + if user.is_null() { + return + } + let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); + + let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) + .expect("non-UTF8 SMDiagnostic"); + + report_inline_asm(cgcx, &msg, cookie); +} + +unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) { + if user.is_null() { + return + } + let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); + + match llvm::diagnostic::Diagnostic::unpack(info) { + llvm::diagnostic::InlineAsm(inline) => { + report_inline_asm(cgcx, + &llvm::twine_to_string(inline.message), + inline.cookie); + } + + llvm::diagnostic::Optimization(opt) => { + let enabled = match cgcx.remark { + Passes::All => true, + Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name), + }; + + if enabled { + diag_handler.note_without_error(&format!("optimization {} for {} at {}:{}:{}: {}", + opt.kind.describe(), + opt.pass_name, + opt.filename, + opt.line, + opt.column, + opt.message)); + } + } + llvm::diagnostic::PGO(diagnostic_ref) | + llvm::diagnostic::Linker(diagnostic_ref) => { + let msg = llvm::build_string(|s| { + llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s) + }).expect("non-UTF8 diagnostic"); + diag_handler.warn(&msg); + } + llvm::diagnostic::UnknownDiagnostic(..) => {}, + } +} + +// Unsafe due to LLVM calls. 
+unsafe fn optimize(cgcx: &CodegenContext, + diag_handler: &Handler, + module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline) + -> Result<(), FatalError> +{ + let (llmod, llcx, tm) = match module.source { + ModuleSource::Codegened(ref llvm) => (llvm.llmod(), &*llvm.llcx, &*llvm.tm), + ModuleSource::Preexisting(_) => { + bug!("optimize_and_codegen: called with ModuleSource::Preexisting") + } + }; + + let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + + let module_name = module.name.clone(); + let module_name = Some(&module_name[..]); + + if config.emit_no_opt_bc { + let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name); + let out = path2cstr(&out); + llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); + } + + if config.opt_level.is_some() { + // Create the two optimizing pass managers. These mirror what clang + // does, and are by populated by LLVM's default PassManagerBuilder. + // Each manager has a different set of passes, but they also share + // some common passes. + let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod); + let mpm = llvm::LLVMCreatePassManager(); + + { + // If we're verifying or linting, add them to the function pass + // manager. 
+ let addpass = |pass_name: &str| { + let pass_name = SmallCStr::new(pass_name); + let pass = match llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr()) { + Some(pass) => pass, + None => return false, + }; + let pass_manager = match llvm::LLVMRustPassKind(pass) { + llvm::PassKind::Function => &*fpm, + llvm::PassKind::Module => &*mpm, + llvm::PassKind::Other => { + diag_handler.err("Encountered LLVM pass kind we can't handle"); + return true + }, + }; + llvm::LLVMRustAddPass(pass_manager, pass); + true + }; + + if config.verify_llvm_ir { assert!(addpass("verify")); } + + // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need + // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise + // we'll get errors in LLVM. + let using_thin_buffers = llvm::LLVMRustThinLTOAvailable() && (config.emit_bc + || config.obj_is_bitcode || config.emit_bc_compressed || config.embed_bitcode); + let mut have_name_anon_globals_pass = false; + if !config.no_prepopulate_passes { + llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod); + llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod); + let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || + (cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled()); + have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto; + if using_thin_buffers && !prepare_for_thin_lto { + assert!(addpass("name-anon-globals")); + have_name_anon_globals_pass = true; + } + with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| { + llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm); + llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm); + }) + } + + for pass in &config.passes { + if !addpass(pass) { + diag_handler.warn(&format!("unknown pass `{}`, ignoring", + pass)); + } + if pass == "name-anon-globals" { + have_name_anon_globals_pass = 
true; + } + } + + for pass in &cgcx.plugin_passes { + if !addpass(pass) { + diag_handler.err(&format!("a plugin asked for LLVM pass \ + `{}` but LLVM does not \ + recognize it", pass)); + } + if pass == "name-anon-globals" { + have_name_anon_globals_pass = true; + } + } + + if using_thin_buffers && !have_name_anon_globals_pass { + // As described above, this will probably cause an error in LLVM + if config.no_prepopulate_passes { + diag_handler.err("The current compilation is going to use thin LTO buffers \ + without running LLVM's NameAnonGlobals pass. \ + This will likely cause errors in LLVM. Consider adding \ + -C passes=name-anon-globals to the compiler command line."); + } else { + bug!("We are using thin LTO buffers without running the NameAnonGlobals pass. \ + This will likely cause errors in LLVM and shoud never happen."); + } + } + } + + diag_handler.abort_if_errors(); + + // Finally, run the actual optimization passes + time_ext(config.time_passes, + None, + &format!("llvm function passes [{}]", module_name.unwrap()), + || { + llvm::LLVMRustRunFunctionPassManager(fpm, llmod) + }); + timeline.record("fpm"); + time_ext(config.time_passes, + None, + &format!("llvm module passes [{}]", module_name.unwrap()), + || { + llvm::LLVMRunPassManager(mpm, llmod) + }); + + // Deallocate managers that we're now done with + llvm::LLVMDisposePassManager(fpm); + llvm::LLVMDisposePassManager(mpm); + } + Ok(()) +} + +fn generate_lto_work(cgcx: &CodegenContext, + modules: Vec) + -> Vec<(WorkItem, u64)> +{ + let mut timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + "generate lto") + }).unwrap_or(Timeline::noop()); + let lto_modules = lto::run(cgcx, modules, &mut timeline) + .unwrap_or_else(|e| e.raise()); + + lto_modules.into_iter().map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }).collect() +} + +unsafe fn codegen(cgcx: &CodegenContext, + diag_handler: &Handler, + module: 
ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline) + -> Result +{ + timeline.record("codegen"); + { + let (llmod, llcx, tm) = match module.source { + ModuleSource::Codegened(ref llvm) => (llvm.llmod(), &*llvm.llcx, &*llvm.tm), + ModuleSource::Preexisting(_) => { + bug!("codegen: called with ModuleSource::Preexisting") + } + }; + let module_name = module.name.clone(); + let module_name = Some(&module_name[..]); + let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + + if cgcx.msvc_imps_needed { + create_msvc_imps(cgcx, llcx, llmod); + } + + // A codegen-specific pass manager is used to generate object + // files for an LLVM module. + // + // Apparently each of these pass managers is a one-shot kind of + // thing, so we create a new one for each type of output. The + // pass manager passed to the closure should be ensured to not + // escape the closure itself, and the manager should only be + // used once. + unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, + llmod: &'ll llvm::Module, + no_builtins: bool, + f: F) -> R + where F: FnOnce(&'ll mut PassManager<'ll>) -> R, + { + let cpm = llvm::LLVMCreatePassManager(); + llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod); + llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins); + f(cpm) + } + + // If we don't have the integrated assembler, then we need to emit asm + // from LLVM and use `gcc` to create the object file. + let asm_to_obj = config.emit_obj && config.no_integrated_as; + + // Change what we write and cleanup based on whether obj files are + // just llvm bitcode. In that case write bitcode, and possibly + // delete the bitcode if it wasn't requested. 
Don't generate the + // machine code, instead copy the .o file from the .bc + let write_bc = config.emit_bc || config.obj_is_bitcode; + let rm_bc = !config.emit_bc && config.obj_is_bitcode; + let write_obj = config.emit_obj && !config.obj_is_bitcode && !asm_to_obj; + let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode; + + let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); + let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); + + + if write_bc || config.emit_bc_compressed || config.embed_bitcode { + let thin; + let old; + let data = if llvm::LLVMRustThinLTOAvailable() { + thin = ThinBuffer::new(llmod); + thin.data() + } else { + old = ModuleBuffer::new(llmod); + old.data() + }; + timeline.record("make-bc"); + + if write_bc { + if let Err(e) = fs::write(&bc_out, data) { + diag_handler.err(&format!("failed to write bytecode: {}", e)); + } + timeline.record("write-bc"); + } + + if config.embed_bitcode { + embed_bitcode(cgcx, llcx, llmod, Some(data)); + timeline.record("embed-bc"); + } + + if config.emit_bc_compressed { + let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION); + let data = bytecode::encode(&module.name, data); + if let Err(e) = fs::write(&dst, data) { + diag_handler.err(&format!("failed to write bytecode: {}", e)); + } + timeline.record("compress-bc"); + } + } else if config.embed_bitcode_marker { + embed_bitcode(cgcx, llcx, llmod, None); + } + + time_ext(config.time_passes, None, &format!("codegen passes [{}]", module_name.unwrap()), + || -> Result<(), FatalError> { + if config.emit_ir { + let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name); + let out = path2cstr(&out); + + extern "C" fn demangle_callback(input_ptr: *const c_char, + input_len: size_t, + output_ptr: *mut c_char, + output_len: size_t) -> size_t { + let input = unsafe { + slice::from_raw_parts(input_ptr as *const u8, input_len as usize) + }; + + let input = match str::from_utf8(input) { + 
Ok(s) => s, + Err(_) => return 0, + }; + + let output = unsafe { + slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize) + }; + let mut cursor = io::Cursor::new(output); + + let demangled = match rustc_demangle::try_demangle(input) { + Ok(d) => d, + Err(_) => return 0, + }; + + if let Err(_) = write!(cursor, "{:#}", demangled) { + // Possible only if provided buffer is not big enough + return 0; + } + + cursor.position() as size_t + } + + with_codegen(tm, llmod, config.no_builtins, |cpm| { + llvm::LLVMRustPrintModule(cpm, llmod, out.as_ptr(), demangle_callback); + llvm::LLVMDisposePassManager(cpm); + }); + timeline.record("ir"); + } + + if config.emit_asm || asm_to_obj { + let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); + + // We can't use the same module for asm and binary output, because that triggers + // various errors like invalid IR or broken binaries, so we might have to clone the + // module to produce the asm output + let llmod = if config.emit_obj { + llvm::LLVMCloneModule(llmod) + } else { + llmod + }; + with_codegen(tm, llmod, config.no_builtins, |cpm| { + write_output_file(diag_handler, tm, cpm, llmod, &path, + llvm::FileType::AssemblyFile) + })?; + timeline.record("asm"); + } + + if write_obj { + with_codegen(tm, llmod, config.no_builtins, |cpm| { + write_output_file(diag_handler, tm, cpm, llmod, &obj_out, + llvm::FileType::ObjectFile) + })?; + timeline.record("obj"); + } else if asm_to_obj { + let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); + run_assembler(cgcx, diag_handler, &assembly, &obj_out); + timeline.record("asm_to_obj"); + + if !config.emit_asm && !cgcx.save_temps { + drop(fs::remove_file(&assembly)); + } + } + + Ok(()) + })?; + + if copy_bc_to_obj { + debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); + if let Err(e) = link_or_copy(&bc_out, &obj_out) { + diag_handler.err(&format!("failed to copy bitcode to object file: {}", e)); + } + } + + if rm_bc 
{ + debug!("removing_bitcode {:?}", bc_out); + if let Err(e) = fs::remove_file(&bc_out) { + diag_handler.err(&format!("failed to remove bitcode: {}", e)); + } + } + + drop(handlers); + } + Ok(module.into_compiled_module(config.emit_obj, + config.emit_bc, + config.emit_bc_compressed, + &cgcx.output_filenames)) +} + +/// Embed the bitcode of an LLVM module in the LLVM module itself. +/// +/// This is done primarily for iOS where it appears to be standard to compile C +/// code at least with `-fembed-bitcode` which creates two sections in the +/// executable: +/// +/// * __LLVM,__bitcode +/// * __LLVM,__cmdline +/// +/// It appears *both* of these sections are necessary to get the linker to +/// recognize what's going on. For us though we just always throw in an empty +/// cmdline section. +/// +/// Furthermore debug/O1 builds don't actually embed bitcode but rather just +/// embed an empty section. +/// +/// Basically all of this is us attempting to follow in the footsteps of clang +/// on iOS. See #35968 for lots more info. 
+unsafe fn embed_bitcode(cgcx: &CodegenContext, + llcx: &llvm::Context, + llmod: &llvm::Module, + bitcode: Option<&[u8]>) { + let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llglobal = llvm::LLVMAddGlobal( + llmod, + val_ty(llconst), + "rustc.embedded.module\0".as_ptr() as *const _, + ); + llvm::LLVMSetInitializer(llglobal, llconst); + + let is_apple = cgcx.opts.target_triple.triple().contains("-ios") || + cgcx.opts.target_triple.triple().contains("-darwin"); + + let section = if is_apple { + "__LLVM,__bitcode\0" + } else { + ".llvmbc\0" + }; + llvm::LLVMSetSection(llglobal, section.as_ptr() as *const _); + llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); + llvm::LLVMSetGlobalConstant(llglobal, llvm::True); + + let llconst = C_bytes_in_context(llcx, &[]); + let llglobal = llvm::LLVMAddGlobal( + llmod, + val_ty(llconst), + "rustc.embedded.cmdline\0".as_ptr() as *const _, + ); + llvm::LLVMSetInitializer(llglobal, llconst); + let section = if is_apple { + "__LLVM,__cmdline\0" + } else { + ".llvmcmd\0" + }; + llvm::LLVMSetSection(llglobal, section.as_ptr() as *const _); + llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); +} + +pub(crate) struct CompiledModules { + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, +} + +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateType::Rlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} + +pub fn start_async_codegen(tcx: TyCtxt, + time_graph: Option, + link: LinkMeta, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize) + -> OngoingCodegen { + let sess = tcx.sess; + let crate_name = tcx.crate_name(LOCAL_CRATE); + let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); + let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, + "windows_subsystem"); + let windows_subsystem = 
subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let linker_info = LinkerInfo::new(tcx); + let crate_info = CrateInfo::new(tcx); + + // Figure out what we actually need to build. + let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(vec![]); + let mut allocator_config = ModuleConfig::new(vec![]); + + if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { + match *sanitizer { + Sanitizer::Address => { + modules_config.passes.push("asan".to_owned()); + modules_config.passes.push("asan-module".to_owned()); + } + Sanitizer::Memory => { + modules_config.passes.push("msan".to_owned()) + } + Sanitizer::Thread => { + modules_config.passes.push("tsan".to_owned()) + } + _ => {} + } + } + + if sess.opts.debugging_opts.profile { + modules_config.passes.push("insert-gcov-profiling".to_owned()) + } + + modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); + modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); + + modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize)); + modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize)); + + // Save all versions of the bytecode if we're saving our temporaries. + if sess.opts.cg.save_temps { + modules_config.emit_no_opt_bc = true; + modules_config.emit_bc = true; + modules_config.emit_lto_bc = true; + metadata_config.emit_bc = true; + allocator_config.emit_bc = true; + } + + // Emit compressed bitcode files for the crate if we're emitting an rlib. + // Whenever an rlib is created, the bitcode is inserted into the archive in + // order to allow LTO against it. 
+ if need_crate_bitcode_for_rlib(sess) { + modules_config.emit_bc_compressed = true; + allocator_config.emit_bc_compressed = true; + } + + modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + tcx.sess.target.target.options.no_integrated_as; + + for output_type in sess.opts.output_types.keys() { + match *output_type { + OutputType::Bitcode => { modules_config.emit_bc = true; } + OutputType::LlvmAssembly => { modules_config.emit_ir = true; } + OutputType::Assembly => { + modules_config.emit_asm = true; + // If we're not using the LLVM assembler, this function + // could be invoked specially with output_type_assembly, so + // in this case we still want the metadata object file. + if !sess.opts.output_types.contains_key(&OutputType::Assembly) { + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + } + } + OutputType::Object => { modules_config.emit_obj = true; } + OutputType::Metadata => { metadata_config.emit_obj = true; } + OutputType::Exe => { + modules_config.emit_obj = true; + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + }, + OutputType::Mir => {} + OutputType::DepInfo => {} + } + } + + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); + + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. 
+ metadata_config.time_passes = false; + allocator_config.time_passes = false; + + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (codegen_worker_send, codegen_worker_receive) = channel(); + + let coordinator_thread = start_executing_work(tcx, + &crate_info, + shared_emitter, + codegen_worker_send, + coordinator_receive, + total_cgus, + sess.jobserver.clone(), + time_graph.clone(), + Arc::new(modules_config), + Arc::new(metadata_config), + Arc::new(allocator_config)); + + OngoingCodegen { + crate_name, + link, + metadata, + windows_subsystem, + linker_info, + crate_info, + + time_graph, + coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), + codegen_worker_receive, + shared_emitter_main, + future: coordinator_thread, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + } +} + +fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( + sess: &Session, + compiled_modules: &CompiledModules +) -> FxHashMap { + let mut work_products = FxHashMap::default(); + + if sess.opts.incremental.is_none() { + return work_products; + } + + for module in compiled_modules.modules.iter() { + let mut files = vec![]; + + if let Some(ref path) = module.object { + files.push((WorkProductFileKind::Object, path.clone())); + } + if let Some(ref path) = module.bytecode { + files.push((WorkProductFileKind::Bytecode, path.clone())); + } + if let Some(ref path) = module.bytecode_compressed { + files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); + } + + if let Some((id, product)) = + copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { + work_products.insert(id, product); + } + } + + work_products +} + +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + let mut user_wants_bitcode = false; + let mut user_wants_objects = false; + + // Produce final compile outputs. 
+ let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); + } + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if compiled_modules.modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. + let module_name = Some(&compiled_modules.modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. + remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) + } + } + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in crate_output.outputs.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. 
There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. + copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} + } + } + + // Clean up unwanted temporary files. + + // We create the following files by default: + // - #crate#.#module-name#.bc + // - #crate#.#module-name#.o + // - #crate#.crate.metadata.bc + // - #crate#.crate.metadata.o + // - #crate#.o (linked from crate.##.o) + // - #crate#.bc (copied from crate.##.bc) + // We may create additional files if requested by the user (through + // `-C save-temps` or `--emit=` flags). + + if !sess.opts.cg.save_temps { + // Remove the temporary .#module-name#.o objects. If the user didn't + // explicitly request bitcode (with --emit=bc), and the bitcode is not + // needed for building an rlib, then we must remove .#module-name#.bc as + // well. + + // Specific rules for keeping .#module-name#.bc: + // - If the user requested bitcode (`user_wants_bitcode`), and + // codegen_units > 1, then keep it. + // - If the user requested bitcode but codegen_units == 1, then we + // can toss .#module-name#.bc because we copied it to .bc earlier. + // - If we're not building an rlib and the user didn't request + // bitcode, then delete .#module-name#.bc. + // If you change how this works, also update back::link::link_rlib, + // where .#module-name#.bc files are (maybe) deleted after making an + // rlib. 
+ let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + + let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; + + let keep_numbered_objects = needs_crate_object || + (user_wants_objects && sess.codegen_units() > 1); + + for module in compiled_modules.modules.iter() { + if let Some(ref path) = module.object { + if !keep_numbered_objects { + remove(sess, path); + } + } + + if let Some(ref path) = module.bytecode { + if !keep_numbered_bitcode { + remove(sess, path); + } + } + } + + if !user_wants_bitcode { + if let Some(ref path) = compiled_modules.metadata_module.bytecode { + remove(sess, &path); + } + + if let Some(ref allocator_module) = compiled_modules.allocator_module { + if let Some(ref path) = allocator_module.bytecode { + remove(sess, path); + } + } + } + } + + // We leave the following files around by default: + // - #crate#.o + // - #crate#.crate.metadata.o + // - #crate#.bc + // These are used in linking steps and will be cleaned up afterward. 
+} + +pub(crate) fn dump_incremental_data(codegen_results: &CodegenResults) { + println!("[incremental] Re-using {} out of {} modules", + codegen_results.modules.iter().filter(|m| m.pre_existing).count(), + codegen_results.modules.len()); +} + +enum WorkItem { + Optimize(ModuleCodegen), + LTO(lto::LtoModuleCodegen), +} + +impl WorkItem { + fn kind(&self) -> ModuleKind { + match *self { + WorkItem::Optimize(ref m) => m.kind, + WorkItem::LTO(_) => ModuleKind::Regular, + } + } + + fn name(&self) -> String { + match *self { + WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), + WorkItem::LTO(ref m) => format!("lto: {}", m.name()), + } + } +} + +enum WorkItemResult { + Compiled(CompiledModule), + NeedsLTO(ModuleCodegen), +} + +fn execute_work_item(cgcx: &CodegenContext, + work_item: WorkItem, + timeline: &mut Timeline) + -> Result +{ + let diag_handler = cgcx.create_diag_handler(); + let config = cgcx.config(work_item.kind()); + let module = match work_item { + WorkItem::Optimize(module) => module, + WorkItem::LTO(mut lto) => { + unsafe { + let module = lto.optimize(cgcx, timeline)?; + let module = codegen(cgcx, &diag_handler, module, config, timeline)?; + return Ok(WorkItemResult::Compiled(module)) + } + } + }; + let module_name = module.name.clone(); + + let pre_existing = match module.source { + ModuleSource::Codegened(_) => None, + ModuleSource::Preexisting(ref wp) => Some(wp.clone()), + }; + + if let Some(wp) = pre_existing { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let name = &module.name; + let mut object = None; + let mut bytecode = None; + let mut bytecode_compressed = None; + for (kind, saved_file) in wp.saved_files { + let obj_out = match kind { + WorkProductFileKind::Object => { + let path = cgcx.output_filenames.temp_path(OutputType::Object, Some(name)); + object = Some(path.clone()); + path + } + WorkProductFileKind::Bytecode => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, 
Some(name)); + bytecode = Some(path.clone()); + path + } + WorkProductFileKind::BytecodeCompressed => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, Some(name)) + .with_extension(RLIB_BYTECODE_EXTENSION); + bytecode_compressed = Some(path.clone()); + path + } + }; + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + module.name, + source_file, + obj_out.display()); + match link_or_copy(&source_file, &obj_out) { + Ok(_) => { } + Err(err) => { + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); + } + } + } + assert_eq!(object.is_some(), config.emit_obj); + assert_eq!(bytecode.is_some(), config.emit_bc); + assert_eq!(bytecode_compressed.is_some(), config.emit_bc_compressed); + + Ok(WorkItemResult::Compiled(CompiledModule { + name: module_name, + kind: ModuleKind::Regular, + pre_existing: true, + object, + bytecode, + bytecode_compressed, + })) + } else { + debug!("llvm-optimizing {:?}", module_name); + + unsafe { + optimize(cgcx, &diag_handler, &module, config, timeline)?; + + let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); + + // After we've done the initial round of optimizations we need to + // decide whether to synchronously codegen this module or ship it + // back to the coordinator thread for further LTO processing (which + // has to wait for all the initial modules to be optimized). + // + // Here we dispatch based on the `cgcx.lto` and kind of module we're + // codegenning... + let needs_lto = match cgcx.lto { + Lto::No => false, + + // If the linker does LTO, we don't have to do it. Note that we + // keep doing full LTO, if it is requested, as not to break the + // assumption that the output will be a single module. + Lto::Thin | Lto::ThinLocal if linker_does_lto => false, + + // Here we've got a full crate graph LTO requested. 
We ignore + // this, however, if the crate type is only an rlib as there's + // no full crate graph to process, that'll happen later. + // + // This use case currently comes up primarily for targets that + // require LTO so the request for LTO is always unconditionally + // passed down to the backend, but we don't actually want to do + // anything about it yet until we've got a final product. + Lto::Yes | Lto::Fat | Lto::Thin => { + cgcx.crate_types.len() != 1 || + cgcx.crate_types[0] != config::CrateType::Rlib + } + + // When we're automatically doing ThinLTO for multi-codegen-unit + // builds we don't actually want to LTO the allocator modules if + // it shows up. This is due to various linker shenanigans that + // we'll encounter later. + // + // Additionally here's where we also factor in the current LLVM + // version. If it doesn't support ThinLTO we skip this. + Lto::ThinLocal => { + module.kind != ModuleKind::Allocator && + llvm::LLVMRustThinLTOAvailable() + } + }; + + // Metadata modules never participate in LTO regardless of the lto + // settings. 
+ let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; + + if needs_lto { + Ok(WorkItemResult::NeedsLTO(module)) + } else { + let module = codegen(cgcx, &diag_handler, module, config, timeline)?; + Ok(WorkItemResult::Compiled(module)) + } + } + } +} + +enum Message { + Token(io::Result), + NeedsLTO { + result: ModuleCodegen, + worker_id: usize, + }, + Done { + result: Result, + worker_id: usize, + }, + CodegenDone { + llvm_work_item: WorkItem, + cost: u64, + }, + CodegenComplete, + CodegenItem, +} + +struct Diagnostic { + msg: String, + code: Option, + lvl: Level, +} + +#[derive(PartialEq, Clone, Copy, Debug)] +enum MainThreadWorkerState { + Idle, + Codegenning, + LLVMing, +} + +fn start_executing_work(tcx: TyCtxt, + crate_info: &CrateInfo, + shared_emitter: SharedEmitter, + codegen_worker_send: Sender, + coordinator_receive: Receiver>, + total_cgus: usize, + jobserver: Client, + time_graph: Option, + modules_config: Arc, + metadata_config: Arc, + allocator_config: Arc) + -> thread::JoinHandle> { + let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); + let sess = tcx.sess; + + // Compute the set of symbols we need to retain when doing LTO (if we need to) + let exported_symbols = { + let mut exported_symbols = FxHashMap(); + + let copy_symbols = |cnum| { + let symbols = tcx.exported_symbols(cnum) + .iter() + .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) + .collect(); + Arc::new(symbols) + }; + + match sess.lto() { + Lto::No => None, + Lto::ThinLocal => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + Some(Arc::new(exported_symbols)) + } + Lto::Yes | Lto::Fat | Lto::Thin => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + for &cnum in tcx.crates().iter() { + exported_symbols.insert(cnum, copy_symbols(cnum)); + } + Some(Arc::new(exported_symbols)) + } + } + }; + + // First up, convert our jobserver into a helper thread so we can use normal + // mpsc channels to manage our messages and such. 
+ // After we've requested tokens then we'll, when we can, + // get tokens on `coordinator_receive` which will + // get managed in the main loop below. + let coordinator_send2 = coordinator_send.clone(); + let helper = jobserver.into_helper_thread(move |token| { + drop(coordinator_send2.send(Box::new(Message::Token(token)))); + }).expect("failed to spawn helper thread"); + + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { + if link::ignored_for_lto(sess, crate_info, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let assembler_cmd = if modules_config.no_integrated_as { + // HACK: currently we use linker (gcc) as our assembler + let (name, mut cmd) = get_linker(sess); + cmd.args(&sess.target.target.options.asm_args); + Some(Arc::new(AssemblerCommand { + name, + cmd, + })) + } else { + None + }; + + let cgcx = CodegenContext { + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto, + lto: sess.lto(), + no_landing_pads: sess.no_landing_pads(), + fewer_names: sess.fewer_names(), + save_temps: sess.opts.cg.save_temps, + opts: Arc::new(sess.opts.clone()), + time_passes: sess.time_passes(), + exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + coordinator_send, + diag_emitter: shared_emitter.clone(), + time_graph, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + tm_factory: target_machine_factory(tcx.sess, false), + total_cgus, + msvc_imps_needed: msvc_imps_needed(tcx), + target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), + debuginfo: tcx.sess.opts.debuginfo, + assembler_cmd, + }; + + // This is the "main loop" of parallel work 
happening for parallel codegen.
+    // It's here that we manage parallelism, schedule work, and work with
+    // messages coming from clients.
+    //
+    // There are a few environmental pre-conditions that shape how the system
+    // is set up:
+    //
+    // - Error reporting only can happen on the main thread because that's the
+    //   only place where we have access to the compiler `Session`.
+    // - LLVM work can be done on any thread.
+    // - Codegen can only happen on the main thread.
+    // - Each thread doing substantial work must be in possession of a `Token`
+    //   from the `Jobserver`.
+    // - The compiler process always holds one `Token`. Any additional `Tokens`
+    //   have to be requested from the `Jobserver`.
+    //
+    // Error Reporting
+    // ===============
+    // The error reporting restriction is handled separately from the rest: We
+    // set up a `SharedEmitter` that holds an open channel to the main thread.
+    // When an error occurs on any thread, the shared emitter will send the
+    // error message to the receiver main thread (`SharedEmitterMain`). The
+    // main thread will periodically query this error message queue and emit
+    // any error messages it has received. It might even abort compilation if
+    // it has received a fatal error. In this case we rely on all other threads
+    // being torn down automatically with the main thread.
+    // Since the main thread will often be busy doing codegen work, error
+    // reporting will be somewhat delayed, since the message queue can only be
+    // checked in between two work packages.
+    //
+    // Work Processing Infrastructure
+    // ==============================
+    // The work processing infrastructure knows three major actors:
+    //
+    // - the coordinator thread,
+    // - the main thread, and
+    // - LLVM worker threads
+    //
+    // The coordinator thread is running a message loop. It instructs the main
+    // thread about what work to do when, and it will spawn off LLVM worker
+    // threads as open LLVM WorkItems become available.
+    //
+    // The job of the main thread is to codegen CGUs into LLVM work packages
+    // (since the main thread is the only thread that can do this). The main
+    // thread will block until it receives a message from the coordinator, upon
+    // which it will codegen one CGU, send it to the coordinator and block
+    // again. This way the coordinator can control what the main thread is
+    // doing.
+    //
+    // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
+    // available, it will spawn off a new LLVM worker thread and let it process
+    // that WorkItem. When a LLVM worker thread is done with its WorkItem,
+    // it will just shut down, which also frees all resources associated with
+    // the given LLVM module, and sends a message to the coordinator that the
+    // WorkItem has been completed.
+    //
+    // Work Scheduling
+    // ===============
+    // The scheduler's goal is to minimize the time it takes to complete all
+    // work there is, however, we also want to keep memory consumption low
+    // if possible. These two goals are at odds with each other: If memory
+    // consumption were not an issue, we could just let the main thread produce
+    // LLVM WorkItems at full speed, assuring maximal utilization of
+    // Tokens/LLVM worker threads. However, since codegen usually is faster
+    // than LLVM processing, the queue of LLVM WorkItems would fill up and each
+    // WorkItem potentially holds on to a substantial amount of memory.
+    //
+    // So the actual goal is to always produce just enough LLVM WorkItems as
+    // not to starve our LLVM worker threads. That means, once we have enough
+    // WorkItems in our queue, we can block the main thread, so it does not
+    // produce more until we need them.
+    //
+    // Doing LLVM Work on the Main Thread
+    // ----------------------------------
+    // Since the main thread owns the compiler process's implicit `Token`, it is
+    // wasteful to keep it blocked without doing any work.
Therefore, what we do
+    // in this case is: We spawn off an additional LLVM worker thread that helps
+    // reduce the queue. The work it is doing corresponds to the implicit
+    // `Token`. The coordinator will mark the main thread as being busy with
+    // LLVM work. (The actual work happens on another OS thread but we just care
+    // about `Tokens`, not actual threads).
+    //
+    // When any LLVM worker thread finishes while the main thread is marked as
+    // "busy with LLVM work", we can do a little switcheroo: We give the Token
+    // of the just finished thread to the LLVM worker thread that is working on
+    // behalf of the main thread's implicit Token, thus freeing up the main
+    // thread again. The coordinator can then again decide what the main thread
+    // should do. This allows the coordinator to make decisions at more points
+    // in time.
+    //
+    // Striking a Balance between Throughput and Memory Consumption
+    // ------------------------------------------------------------
+    // Since our two goals, (1) use as many Tokens as possible and (2) keep
+    // memory consumption as low as possible, are in conflict with each other,
+    // we have to find a trade off between them. Right now, the goal is to keep
+    // all workers busy, which means that no worker should find the queue empty
+    // when it is ready to start.
+    // How do we achieve this? Good question :) We actually never know how
+    // many `Tokens` are potentially available so it's hard to say how much to
+    // fill up the queue before switching the main thread to LLVM work. Also we
+    // currently don't have a means to estimate how long a running LLVM worker
+    // will still be busy with its current WorkItem. However, we know the
+    // maximal count of available Tokens that makes sense (=the number of CPU
+    // cores), so we can take a conservative guess. The heuristic we use here
+    // is implemented in the `queue_full_enough()` function.
+    //
+    // Some Background on Jobservers
+    // -----------------------------
+    // It's worth also touching on the management of parallelism here. We don't
+    // want to just spawn a thread per work item because while that's optimal
+    // parallelism it may overload a system with too many threads or violate our
+    // configuration for the maximum amount of cpu to use for this process. To
+    // manage this we use the `jobserver` crate.
+    //
+    // Job servers are an artifact of GNU make and are used to manage
+    // parallelism between processes. A jobserver is a glorified IPC semaphore
+    // basically. Whenever we want to run some work we acquire the semaphore,
+    // and whenever we're done with that work we release the semaphore. In this
+    // manner we can ensure that the maximum number of parallel workers is
+    // capped at any one point in time.
+    //
+    // LTO and the coordinator thread
+    // ------------------------------
+    //
+    // The final job the coordinator thread is responsible for is managing LTO
+    // and how that works. When LTO is requested what we'll do is collect all
+    // optimized LLVM modules into a local vector on the coordinator. Once all
+    // modules have been codegened and optimized we hand this to the `lto`
+    // module for further optimization. The `lto` module will return back a list
+    // of more modules to work on, which the coordinator will continue to spawn
+    // work for.
+    //
+    // Each LLVM module is automatically sent back to the coordinator for LTO if
+    // necessary. There are already optimizations in place to avoid sending work
+    // back to the coordinator if LTO isn't requested.
+ return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + + let max_workers = ::num_cpus::get(); + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id + } + }; + + // This is where we collect codegen units that have gone all the way + // through codegen and LLVM. + let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + let mut needs_lto = Vec::new(); + let mut started_lto = false; + + // This flag tracks whether all items have gone through codegens + let mut codegen_done = false; + + // This is the queue of LLVM work items that still need processing. + let mut work_items = Vec::<(WorkItem, u64)>::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. + let mut tokens = Vec::new(); + + let mut main_thread_worker_state = MainThreadWorkerState::Idle; + let mut running = 0; + + let mut llvm_start_time = None; + + // Run the message loop while there's still anything that needs message + // processing: + while !codegen_done || + work_items.len() > 0 || + running > 0 || + needs_lto.len() > 0 || + main_thread_worker_state != MainThreadWorkerState::Idle { + + // While there are still CGUs to be codegened, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For codegenning more CGU or for running them through LLVM. 
+ if !codegen_done { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, codegen more items: + if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { + panic!("Could not send Message::CodegenItem to main thread") + } + main_thread_worker_state = MainThreadWorkerState::Codegenning; + } else { + // The queue is full enough to not let the worker + // threads starve. Use the implicit Token to do some + // LLVM work too. + let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else { + // If we've finished everything related to normal codegen + // then it must be the case that we've got some LTO work to do. + // Perform the serial work here of figuring out what we're + // going to LTO and then push a bunch of work items onto our + // queue to do LTO + if work_items.len() == 0 && + running == 0 && + main_thread_worker_state == MainThreadWorkerState::Idle { + assert!(!started_lto); + assert!(needs_lto.len() > 0); + started_lto = true; + let modules = mem::replace(&mut needs_lto, Vec::new()); + for (work, cost) in generate_lto_work(&cgcx, modules) { + let insertion_index = work_items + .binary_search_by_key(&cost, |&(_, cost)| cost) + .unwrap_or_else(|e| e); + work_items.insert(insertion_index, (work, cost)); + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + } + } + + // In this branch, we know that everything has been codegened, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. 
+ match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } else { + // There is no unstarted work, so let the main thread + // take over for a running worker. Otherwise the + // implicit token would just go to waste. + // We reduce the `running` counter by one. The + // `tokens.truncate()` below will take care of + // giving the Token back. + debug_assert!(running > 0); + running -= 1; + main_thread_worker_state = MainThreadWorkerState::LLVMing; + } + } + MainThreadWorkerState::Codegenning => { + bug!("codegen worker should not be codegenning after \ + codegen was already completed") + } + MainThreadWorkerState::LLVMing => { + // Already making good use of that token + } + } + } + + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while work_items.len() > 0 && running < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + + maybe_start_llvm_timer(cgcx.config(item.kind()), + &mut llvm_start_time); + + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + + spawn_work(cgcx, item); + running += 1; + } + + // Relinquish accidentally acquired extra tokens + tokens.truncate(running); + + let msg = coordinator_receive.recv().unwrap(); + match *msg.downcast::().ok().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. 
+ Message::Token(token) => { + match token { + Ok(token) => { + tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to codegen demand. + main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!("{}", msg) + } + } + } + + Message::CodegenDone { llvm_work_item, cost } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. + let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); + + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + Message::CodegenComplete => { + codegen_done = true; + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. 
We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. + Message::Done { result: Ok(compiled_module), worker_id } => { + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } + } + Message::NeedsLTO { result, worker_id } => { + assert!(!started_lto); + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + needs_lto.push(result); + } + Message::Done { result: Err(()), worker_id: _ } => { + shared_emitter.fatal("aborting due to worker thread failure"); + // Exit the coordinator thread + return Err(()) + } + Message::CodegenItem => { + bug!("the coordinator should not receive codegen requests") + } + } + } + + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. + set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); + } + + // Regardless of what order these modules completed in, report them to + // the backend in the same order every time to ensure that we're handing + // out deterministic results. 
+ compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); + + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); + + Ok(CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module, + allocator_module: compiled_allocator_module, + }) + }); + + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of codegen + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. + items_in_queue > 0 && + items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } + + fn maybe_start_llvm_timer(config: &ModuleConfig, + llvm_start_time: &mut Option) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. + if config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } + } + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + +fn spawn_work(cgcx: CodegenContext, work: WorkItem) { + let depth = time_depth(); + + thread::spawn(move || { + set_time_depth(depth); + + // Set up a destructor which will fire off a message that we're done as + // we exit. 
+ struct Bomb { + coordinator_send: Sender>, + result: Option, + worker_id: usize, + } + impl Drop for Bomb { + fn drop(&mut self) { + let worker_id = self.worker_id; + let msg = match self.result.take() { + Some(WorkItemResult::Compiled(m)) => { + Message::Done { result: Ok(m), worker_id } + } + Some(WorkItemResult::NeedsLTO(m)) => { + Message::NeedsLTO { result: m, worker_id } + } + None => Message::Done { result: Err(()), worker_id } + }; + drop(self.coordinator_send.send(Box::new(msg))); + } + } + + let mut bomb = Bomb { + coordinator_send: cgcx.coordinator_send.clone(), + result: None, + worker_id: cgcx.worker, + }; + + // Execute the work itself, and if it finishes successfully then flag + // ourselves as a success as well. + // + // Note that we ignore any `FatalError` coming out of `execute_work_item`, + // as a diagnostic was already sent off to the main thread - just + // surface that there was an error in this worker. + bomb.result = { + let timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND, + &work.name()) + }); + let mut timeline = timeline.unwrap_or(Timeline::noop()); + execute_work_item(&cgcx, work, &mut timeline).ok() + }; + }); +} + +pub fn run_assembler(cgcx: &CodegenContext, handler: &Handler, assembly: &Path, object: &Path) { + let assembler = cgcx.assembler_cmd + .as_ref() + .expect("cgcx.assembler_cmd is missing?"); + + let pname = &assembler.name; + let mut cmd = assembler.cmd.clone(); + cmd.arg("-c").arg("-o").arg(object).arg(assembly); + debug!("{:?}", cmd); + + match cmd.output() { + Ok(prog) => { + if !prog.status.success() { + let mut note = prog.stderr.clone(); + note.extend_from_slice(&prog.stdout); + + handler.struct_err(&format!("linking with `{}` failed: {}", + pname.display(), + prog.status)) + .note(&format!("{:?}", &cmd)) + .note(str::from_utf8(¬e[..]).unwrap()) + .emit(); + handler.abort_if_errors(); + } + }, + Err(e) => { + handler.err(&format!("could not 
exec the linker `{}`: {}", pname.display(), e)); + handler.abort_if_errors(); + } + } +} + +pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, + config: &ModuleConfig, + opt_level: llvm::CodeGenOptLevel, + prepare_for_thin_lto: bool, + f: &mut dyn FnMut(&llvm::PassManagerBuilder)) { + use std::ptr; + + // Create the PassManagerBuilder for LLVM. We configure it with + // reasonable defaults and prepare it to actually populate the pass + // manager. + let builder = llvm::LLVMPassManagerBuilderCreate(); + let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone); + let inline_threshold = config.inline_threshold; + + let pgo_gen_path = config.pgo_gen.as_ref().map(|s| { + let s = if s.is_empty() { "default_%m.profraw" } else { s }; + CString::new(s.as_bytes()).unwrap() + }); + + let pgo_use_path = if config.pgo_use.is_empty() { + None + } else { + Some(CString::new(config.pgo_use.as_bytes()).unwrap()) + }; + + llvm::LLVMRustConfigurePassManagerBuilder( + builder, + opt_level, + config.merge_functions, + config.vectorize_slp, + config.vectorize_loop, + prepare_for_thin_lto, + pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()), + pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()), + ); + + llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32); + + if opt_size != llvm::CodeGenOptSizeNone { + llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1); + } + + llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins); + + // Here we match what clang does (kinda). For O0 we only inline + // always-inline functions (but don't add lifetime intrinsics), at O1 we + // inline with lifetime intrinsics, and O2+ we add an inliner with a + // thresholds copied from clang. + match (opt_level, opt_size, inline_threshold) { + (.., Some(t)) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32); + } + (llvm::CodeGenOptLevel::Aggressive, ..) 
=> { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275); + } + (_, llvm::CodeGenOptSizeDefault, _) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75); + } + (_, llvm::CodeGenOptSizeAggressive, _) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25); + } + (llvm::CodeGenOptLevel::None, ..) => { + llvm::LLVMRustAddAlwaysInlinePass(builder, false); + } + (llvm::CodeGenOptLevel::Less, ..) => { + llvm::LLVMRustAddAlwaysInlinePass(builder, true); + } + (llvm::CodeGenOptLevel::Default, ..) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225); + } + (llvm::CodeGenOptLevel::Other, ..) => { + bug!("CodeGenOptLevel::Other selected") + } + } + + f(builder); + llvm::LLVMPassManagerBuilderDispose(builder); +} + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender, +} + +pub struct SharedEmitterMain { + receiver: Receiver, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + fn fatal(&self, msg: &str) { + drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: 
&Session, blocking: bool) { + loop { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { + let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + code.clone(), + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} + +pub struct OngoingCodegen { + crate_name: Symbol, + link: LinkMeta, + metadata: EncodedMetadata, + windows_subsystem: Option, + linker_info: LinkerInfo, + crate_info: CrateInfo, + time_graph: Option, + coordinator_send: Sender>, + codegen_worker_receive: Receiver, + shared_emitter_main: SharedEmitterMain, + future: thread::JoinHandle>, + output_filenames: Arc, +} + +impl OngoingCodegen { + pub(crate) fn join( + self, + sess: &Session + ) -> (CodegenResults, FxHashMap) { + self.shared_emitter_main.check(sess, true); + let compiled_modules = match self.future.join() { + Ok(Ok(compiled_modules)) => compiled_modules, + Ok(Err(())) => { + sess.abort_if_errors(); + panic!("expected abort due to worker thread errors") + }, + Err(_) => { + sess.fatal("Error during codegen/LLVM phase."); + } + }; + + sess.abort_if_errors(); + + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + + let work_products = copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, + 
&compiled_modules); + + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? + if sess.codegen_units() == 1 && sess.time_llvm_passes() { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + + (CodegenResults { + crate_name: self.crate_name, + link: self.link, + metadata: self.metadata, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + crate_info: self.crate_info, + + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + metadata_module: compiled_modules.metadata_module, + }, work_products) + } + + pub(crate) fn submit_pre_codegened_module_to_llvm(&self, + tcx: TyCtxt, + module: ModuleCodegen) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + + // These are generally cheap and won't through off scheduling. + let cost = 0; + submit_codegened_module_to_llvm(tcx, module, cost); + } + + pub fn codegen_finished(&self, tcx: TyCtxt) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + drop(self.coordinator_send.send(Box::new(Message::CodegenComplete))); + } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } + + pub fn wait_for_signal_to_codegen_item(&self) { + match self.codegen_worker_receive.recv() { + Ok(Message::CodegenItem) => { + // Nothing to do + } + Ok(_) => panic!("unexpected message"), + Err(_) => { + // One of the LLVM threads must have panicked, fall through so + // error handling can be reached. 
+ } + } + } +} + +pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt, + module: ModuleCodegen, + cost: u64) { + let llvm_work_item = WorkItem::Optimize(module); + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { + llvm_work_item, + cost, + }))); +} + +fn msvc_imps_needed(tcx: TyCtxt) -> bool { + // This should never be true (because it's not supported). If it is true, + // something is wrong with commandline arg validation. + assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.opts.cg.prefer_dynamic)); + + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() +} + +// Create a `__imp_ = &symbol` global for every public static `symbol`. +// This is required to satisfy `dllimport` references to static data in .rlibs +// when using MSVC linker. We do this only for data, as linker can fix up +// code references on its own. +// See #26591, #27438 +fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module) { + if !cgcx.msvc_imps_needed { + return + } + // The x86 ABI seems to require that leading underscores are added to symbol + // names, so we need an extra underscore on 32-bit. There's also a leading + // '\x01' here which disables LLVM's symbol mangling (e.g. no extra + // underscores added in front). 
+ let prefix = if cgcx.target_pointer_width == "32" { + "\x01__imp__" + } else { + "\x01__imp_" + }; + unsafe { + let i8p_ty = Type::i8p_llcx(llcx); + let globals = base::iter_globals(llmod) + .filter(|&val| { + llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage && + llvm::LLVMIsDeclaration(val) == 0 + }) + .map(move |val| { + let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); + let mut imp_name = prefix.as_bytes().to_vec(); + imp_name.extend(name.to_bytes()); + let imp_name = CString::new(imp_name).unwrap(); + (imp_name, val) + }) + .collect::>(); + for (imp_name, val) in globals { + let imp = llvm::LLVMAddGlobal(llmod, + i8p_ty, + imp_name.as_ptr() as *const _); + llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty)); + llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); + } + } +} diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs new file mode 100644 index 000000000000..0330a0598268 --- /dev/null +++ b/src/librustc_codegen_llvm/base.rs @@ -0,0 +1,1343 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Codegen the completed AST to the LLVM IR. +//! +//! Some functions here, such as codegen_block and codegen_expr, return a value -- +//! the result of the codegen to LLVM -- while others, such as codegen_fn +//! and mono_item, are called only for the side effect of adding a +//! particular definition to the LLVM IR output we're producing. +//! +//! Hopefully useful general knowledge about codegen: +//! +//! * There's no way to find out the Ty type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). 
You can, instead, find out its llvm::Type by calling val_ty, +//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, +//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. + +use super::ModuleLlvm; +use super::ModuleSource; +use super::ModuleCodegen; +use super::ModuleKind; + +use abi; +use back::link; +use back::write::{self, OngoingCodegen}; +use llvm::{self, TypeKind, get_param}; +use metadata; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::middle::lang_items::StartFnLangItem; +use rustc::middle::weak_lang_items; +use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder}; +use rustc::middle::cstore::{EncodedMetadata}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; +use rustc::ty::query::Providers; +use rustc::dep_graph::{DepNode, DepConstructor}; +use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; +use rustc::middle::exported_symbols; +use rustc::util::common::{time, print_time_passes_entry}; +use rustc::util::profiling::ProfileCategory; +use rustc::session::config::{self, DebugInfo, EntryFnType}; +use rustc::session::Session; +use rustc_incremental; +use allocator; +use mir::place::PlaceRef; +use attributes; +use builder::{Builder, MemFlags}; +use callee; +use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; +use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; +use rustc_mir::monomorphize::item::DefPathBasedNames; +use common::{self, C_struct_in_context, C_array, val_ty}; +use consts; +use context::CodegenCx; +use debuginfo; +use declare; +use meth; +use mir; +use monomorphize::Instance; +use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; +use rustc_codegen_utils::symbol_names_test; +use time_graph; +use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt}; +use type_::Type; +use type_of::LayoutLlvmExt; +use rustc::util::nodemap::{FxHashMap, FxHashSet, DefIdSet}; 
+use CrateInfo; +use rustc_data_structures::small_c_str::SmallCStr; +use rustc_data_structures::sync::Lrc; + +use std::any::Any; +use std::ffi::CString; +use std::sync::Arc; +use std::time::{Instant, Duration}; +use std::i32; +use std::cmp; +use std::sync::mpsc; +use syntax_pos::Span; +use syntax_pos::symbol::InternedString; +use syntax::attr; +use rustc::hir::{self, CodegenFnAttrs}; + +use value::Value; + +use mir::operand::OperandValue; + +use rustc_codegen_utils::check_for_rustc_errors_attr; + +pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll> { + cx: &'a CodegenCx<'ll, 'tcx>, + name: Option, + istart: usize, +} + +impl StatRecorder<'a, 'll, 'tcx> { + pub fn new(cx: &'a CodegenCx<'ll, 'tcx>, name: String) -> Self { + let istart = cx.stats.borrow().n_llvm_insns; + StatRecorder { + cx, + name: Some(name), + istart, + } + } +} + +impl Drop for StatRecorder<'a, 'll, 'tcx> { + fn drop(&mut self) { + if self.cx.sess().codegen_stats() { + let mut stats = self.cx.stats.borrow_mut(); + let iend = stats.n_llvm_insns; + stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); + stats.n_fns += 1; + // Reset LLVM insn count to avoid compound costs. 
+ stats.n_llvm_insns = self.istart; + } + } +} + +pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, + signed: bool) + -> llvm::IntPredicate { + match op { + hir::BinOpKind::Eq => llvm::IntEQ, + hir::BinOpKind::Ne => llvm::IntNE, + hir::BinOpKind::Lt => if signed { llvm::IntSLT } else { llvm::IntULT }, + hir::BinOpKind::Le => if signed { llvm::IntSLE } else { llvm::IntULE }, + hir::BinOpKind::Gt => if signed { llvm::IntSGT } else { llvm::IntUGT }, + hir::BinOpKind::Ge => if signed { llvm::IntSGE } else { llvm::IntUGE }, + op => { + bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ + found {:?}", + op) + } + } +} + +pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate { + match op { + hir::BinOpKind::Eq => llvm::RealOEQ, + hir::BinOpKind::Ne => llvm::RealUNE, + hir::BinOpKind::Lt => llvm::RealOLT, + hir::BinOpKind::Le => llvm::RealOLE, + hir::BinOpKind::Gt => llvm::RealOGT, + hir::BinOpKind::Ge => llvm::RealOGE, + op => { + bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ + found {:?}", + op); + } + } +} + +pub fn compare_simd_types( + bx: &Builder<'a, 'll, 'tcx>, + lhs: &'ll Value, + rhs: &'ll Value, + t: Ty<'tcx>, + ret_ty: &'ll Type, + op: hir::BinOpKind +) -> &'ll Value { + let signed = match t.sty { + ty::TyFloat(_) => { + let cmp = bin_op_to_fcmp_predicate(op); + return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); + }, + ty::TyUint(_) => false, + ty::TyInt(_) => true, + _ => bug!("compare_simd_types: invalid SIMD type"), + }; + + let cmp = bin_op_to_icmp_predicate(op, signed); + // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension + // to get the correctly sized type. This will compile to a single instruction + // once the IR is converted to assembly if the SIMD instruction is supported + // by the target architecture. + bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) +} + +/// Retrieve the information we are losing (making dynamic) in an unsizing +/// adjustment. 
+/// +/// The `old_info` argument is a bit funny. It is intended for use +/// in an upcast, where the new vtable for an object will be derived +/// from the old one. +pub fn unsized_info( + cx: &CodegenCx<'ll, 'tcx>, + source: Ty<'tcx>, + target: Ty<'tcx>, + old_info: Option<&'ll Value>, +) -> &'ll Value { + let (source, target) = cx.tcx.struct_lockstep_tails(source, target); + match (&source.sty, &target.sty) { + (&ty::TyArray(_, len), &ty::TySlice(_)) => { + C_usize(cx, len.unwrap_usize(cx.tcx)) + } + (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + old_info.expect("unsized_info: missing old info for trait upcast") + } + (_, &ty::TyDynamic(ref data, ..)) => { + let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target)) + .field(cx, abi::FAT_PTR_EXTRA); + consts::ptrcast(meth::get_vtable(cx, source, data.principal()), + vtable_ptr.llvm_type(cx)) + } + _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", + source, + target), + } +} + +/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. +pub fn unsize_thin_ptr( + bx: &Builder<'a, 'll, 'tcx>, + src: &'ll Value, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (&'ll Value, &'ll Value) { + debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); + match (&src_ty.sty, &dst_ty.sty) { + (&ty::TyRef(_, a, _), + &ty::TyRef(_, b, _)) | + (&ty::TyRef(_, a, _), + &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. 
})) => { + assert!(bx.cx.type_is_sized(a)); + let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) + } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); + assert!(bx.cx.type_is_sized(a)); + let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) + } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bx.cx.layout_of(src_ty); + let dst_layout = bx.cx.layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bx.cx, i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bx.cx, i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)), + bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true))) + } + _ => bug!("unsize_thin_ptr: called on bad types"), + } +} + +/// Coerce `src`, which is a reference to a value of type `src_ty`, +/// to a value of type `dst_ty` and store the result in `dst` +pub fn coerce_unsized_into( + bx: &Builder<'a, 'll, 'tcx>, + src: PlaceRef<'ll, 'tcx>, + dst: PlaceRef<'ll, 'tcx> +) { + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; + let coerce_ptr = || { + let (base, info) = match src.load(bx).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. 
&'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR); + (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) => bug!() + }; + OperandValue::Pair(base, info).store(bx, dst); + }; + match (&src_ty.sty, &dst_ty.sty) { + (&ty::TyRef(..), &ty::TyRef(..)) | + (&ty::TyRef(..), &ty::TyRawPtr(..)) | + (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => { + coerce_ptr() + } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + coerce_ptr() + } + + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + assert_eq!(def_a, def_b); + + for i in 0..def_a.variants[0].fields.len() { + let src_f = src.project_field(bx, i); + let dst_f = dst.project_field(bx, i); + + if dst_f.layout.is_zst() { + continue; + } + + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout, + src_f.align.min(dst_f.align), MemFlags::empty()); + } else { + coerce_unsized_into(bx, src_f, dst_f); + } + } + } + _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", + src_ty, + dst_ty), + } +} + +pub fn cast_shift_expr_rhs( + cx: &Builder<'_, 'll, '_>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value +) -> &'ll Value { + cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) +} + +fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, + lhs: &'ll Value, + rhs: &'ll Value, + trunc: F, + zext: G) + -> &'ll Value + where F: FnOnce(&'ll Value, &'ll Type) -> &'ll Value, + G: FnOnce(&'ll Value, &'ll Type) -> &'ll Value +{ + // Shifts may have any size int on the rhs + if op.is_shift() { + let mut rhs_llty = val_ty(rhs); + let mut lhs_llty = val_ty(lhs); + if rhs_llty.kind() == TypeKind::Vector { + rhs_llty = rhs_llty.element_type() + } + if lhs_llty.kind() == TypeKind::Vector { + lhs_llty = 
lhs_llty.element_type() + } + let rhs_sz = rhs_llty.int_width(); + let lhs_sz = lhs_llty.int_width(); + if lhs_sz < rhs_sz { + trunc(rhs, lhs_llty) + } else if lhs_sz > rhs_sz { + // FIXME (#1877: If shifting by negative + // values becomes not undefined then this is wrong. + zext(rhs, lhs_llty) + } else { + rhs + } + } else { + rhs + } +} + +/// Returns whether this session's target will use SEH-based unwinding. +/// +/// This is only true for MSVC targets, and even then the 64-bit MSVC target +/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as +/// 64-bit MinGW) instead of "full SEH". +pub fn wants_msvc_seh(sess: &Session) -> bool { + sess.target.target.options.is_like_msvc +} + +pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) { + let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume"); + bx.call(assume_intrinsic, &[val], None); +} + +pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value { + if val_ty(val) == Type::i1(bx.cx) { + bx.zext(val, Type::i8(bx.cx)) + } else { + val + } +} + +pub fn to_immediate( + bx: &Builder<'_, 'll, '_>, + val: &'ll Value, + layout: layout::TyLayout, +) -> &'ll Value { + if let layout::Abi::Scalar(ref scalar) = layout.abi { + return to_immediate_scalar(bx, val, scalar); + } + val +} + +pub fn to_immediate_scalar( + bx: &Builder<'_, 'll, '_>, + val: &'ll Value, + scalar: &layout::Scalar, +) -> &'ll Value { + if scalar.is_bool() { + return bx.trunc(val, Type::i1(bx.cx)); + } + val +} + +pub fn call_memcpy( + bx: &Builder<'_, 'll, '_>, + dst: &'ll Value, + src: &'ll Value, + n_bytes: &'ll Value, + align: Align, + flags: MemFlags, +) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
+ let val = bx.load(src, align); + let ptr = bx.pointercast(dst, val_ty(val).ptr_to()); + bx.store_with_flags(val, ptr, align, flags); + return; + } + let cx = bx.cx; + let ptr_width = &cx.sess().target.target.target_pointer_width; + let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); + let memcpy = cx.get_intrinsic(&key); + let src_ptr = bx.pointercast(src, Type::i8p(cx)); + let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); + let size = bx.intcast(n_bytes, cx.isize_ty, false); + let align = C_i32(cx, align.abi() as i32); + let volatile = C_bool(cx, flags.contains(MemFlags::VOLATILE)); + bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); +} + +pub fn memcpy_ty( + bx: &Builder<'_, 'll, 'tcx>, + dst: &'ll Value, + src: &'ll Value, + layout: TyLayout<'tcx>, + align: Align, + flags: MemFlags, +) { + let size = layout.size.bytes(); + if size == 0 { + return; + } + + call_memcpy(bx, dst, src, C_usize(bx.cx, size), align, flags); +} + +pub fn call_memset( + bx: &Builder<'_, 'll, '_>, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: &'ll Value, + volatile: bool, +) -> &'ll Value { + let ptr_width = &bx.cx.sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key); + let volatile = C_bool(bx.cx, volatile); + bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) +} + +pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) { + let _s = if cx.sess().codegen_stats() { + let mut instance_name = String::new(); + DefPathBasedNames::new(cx.tcx, true, true) + .push_def_path(instance.def_id(), &mut instance_name); + Some(StatRecorder::new(cx, instance_name)) + } else { + None + }; + + // this is an info! to allow collecting monomorphization statistics + // and to allow finding the last function before LLVM aborts from + // release builds. 
+ info!("codegen_instance({})", instance); + + let fn_ty = instance.ty(cx.tcx); + let sig = common::ty_fn_sig(cx, fn_ty); + let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + + let lldecl = match cx.instances.borrow().get(&instance) { + Some(&val) => val, + None => bug!("Instance `{:?}` not already declared", instance) + }; + + cx.stats.borrow_mut().n_closures += 1; + + let mir = cx.tcx.instance_mir(instance.def); + mir::codegen_mir(cx, lldecl, &mir, instance, sig); +} + +pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { + let sect = match attrs.link_section { + Some(name) => name, + None => return, + }; + unsafe { + let buf = SmallCStr::new(§.as_str()); + llvm::LLVMSetSection(llval, buf.as_ptr()); + } +} + +/// Create the `main` function which will initialize the rust runtime and call +/// users main function. +fn maybe_create_entry_wrapper(cx: &CodegenCx) { + let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { + Some((id, span, _)) => { + (cx.tcx.hir.local_def_id(id), span) + } + None => return, + }; + + let instance = Instance::mono(cx.tcx, main_def_id); + + if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) { + // We want to create the wrapper in the same codegen unit as Rust's main + // function. + return; + } + + let main_llfn = callee::get_fn(cx, instance); + + let et = cx.sess().entry_fn.get().map(|e| e.2); + match et { + Some(EntryFnType::Main) => create_entry_fn(cx, span, main_llfn, main_def_id, true), + Some(EntryFnType::Start) => create_entry_fn(cx, span, main_llfn, main_def_id, false), + None => {} // Do nothing. 
+ } + + fn create_entry_fn( + cx: &CodegenCx<'ll, '_>, + sp: Span, + rust_main: &'ll Value, + rust_main_def_id: DefId, + use_start_lang_item: bool, + ) { + let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); + + let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); + // Given that `main()` has no arguments, + // then its return type cannot have + // late-bound regions, since late-bound + // regions must appear in the argument + // listing. + let main_ret_ty = cx.tcx.erase_regions( + &main_ret_ty.no_late_bound_regions().unwrap(), + ); + + if declare::get_defined_value(cx, "main").is_some() { + // FIXME: We should be smart and show a better diagnostic here. + cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") + .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") + .emit(); + cx.sess().abort_if_errors(); + bug!(); + } + let llfn = declare::declare_cfn(cx, "main", llfty); + + // `main` should respect same config for frame pointer elimination as rest of code + attributes::set_frame_pointer_elimination(cx, llfn); + attributes::apply_target_cpu_attr(cx, llfn); + + let bx = Builder::new_block(cx, llfn, "top"); + + debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx); + + // Params from native main() used as args for rust start function + let param_argc = get_param(llfn, 0); + let param_argv = get_param(llfn, 1); + let arg_argc = bx.intcast(param_argc, cx.isize_ty, true); + let arg_argv = param_argv; + + let (start_fn, args) = if use_start_lang_item { + let start_def_id = cx.tcx.require_lang_item(StartFnLangItem); + let start_fn = callee::resolve_and_get_fn( + cx, + start_def_id, + cx.tcx.intern_substs(&[main_ret_ty.into()]), + ); + (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()), + arg_argc, arg_argv]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![arg_argc, arg_argv]) + }; + + let result = bx.call(start_fn, &args, None); + 
bx.ret(bx.intcast(result, Type::c_int(cx), true)); + } +} + +fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, + llvm_module: &ModuleLlvm, + link_meta: &LinkMeta) + -> EncodedMetadata { + use std::io::Write; + use flate2::Compression; + use flate2::write::DeflateEncoder; + + let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod()); + + #[derive(PartialEq, Eq, PartialOrd, Ord)] + enum MetadataKind { + None, + Uncompressed, + Compressed + } + + let kind = tcx.sess.crate_types.borrow().iter().map(|ty| { + match *ty { + config::CrateType::Executable | + config::CrateType::Staticlib | + config::CrateType::Cdylib => MetadataKind::None, + + config::CrateType::Rlib => MetadataKind::Uncompressed, + + config::CrateType::Dylib | + config::CrateType::ProcMacro => MetadataKind::Compressed, + } + }).max().unwrap_or(MetadataKind::None); + + if kind == MetadataKind::None { + return EncodedMetadata::new(); + } + + let metadata = tcx.encode_metadata(link_meta); + if kind == MetadataKind::Uncompressed { + return metadata; + } + + assert!(kind == MetadataKind::Compressed); + let mut compressed = tcx.metadata_encoding_version(); + DeflateEncoder::new(&mut compressed, Compression::fast()) + .write_all(&metadata.raw_data).unwrap(); + + let llmeta = C_bytes_in_context(metadata_llcx, &compressed); + let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false); + let name = exported_symbols::metadata_symbol_name(tcx); + let buf = CString::new(name).unwrap(); + let llglobal = unsafe { + llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst), buf.as_ptr()) + }; + unsafe { + llvm::LLVMSetInitializer(llglobal, llconst); + let section_name = metadata::metadata_section_name(&tcx.sess.target.target); + let name = SmallCStr::new(section_name); + llvm::LLVMSetSection(llglobal, name.as_ptr()); + + // Also generate a .section directive to force no + // flags, at least for ELF outputs, so that the + // metadata doesn't get loaded into memory. 
+ let directive = format!(".section {}", section_name); + let directive = CString::new(directive).unwrap(); + llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr()) + } + return metadata; +} + +pub struct ValueIter<'ll> { + cur: Option<&'ll Value>, + step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>, +} + +impl Iterator for ValueIter<'ll> { + type Item = &'ll Value; + + fn next(&mut self) -> Option<&'ll Value> { + let old = self.cur; + if let Some(old) = old { + self.cur = unsafe { (self.step)(old) }; + } + old + } +} + +pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> { + unsafe { + ValueIter { + cur: llvm::LLVMGetFirstGlobal(llmod), + step: llvm::LLVMGetNextGlobal, + } + } +} + +pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver>) + -> OngoingCodegen { + + check_for_rustc_errors_attr(tcx); + + if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { + if unsafe { !llvm::LLVMRustThinLTOAvailable() } { + tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); + } + } + + if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || + !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && + unsafe { !llvm::LLVMRustPGOAvailable() } + { + tcx.sess.fatal("this compiler's LLVM does not support PGO"); + } + + let crate_hash = tcx.crate_hash(LOCAL_CRATE); + let link_meta = link::build_link_meta(crate_hash); + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); + + // Codegen the metadata. 
+ tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + + let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("metadata")).as_str() + .to_string(); + let metadata_llvm_module = ModuleLlvm::new(tcx.sess, &metadata_cgu_name); + let metadata = time(tcx.sess, "write metadata", || { + write_metadata(tcx, &metadata_llvm_module, &link_meta) + }); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); + + let metadata_module = ModuleCodegen { + name: metadata_cgu_name, + source: ModuleSource::Codegened(metadata_llvm_module), + kind: ModuleKind::Metadata, + }; + + let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; + + // Skip crate items and just output metadata in -Z no-codegen mode. + if tcx.sess.opts.debugging_opts.no_codegen || + !tcx.sess.opts.output_types.should_codegen() { + let ongoing_codegen = write::start_async_codegen( + tcx, + time_graph.clone(), + link_meta, + metadata, + rx, + 1); + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + ongoing_codegen.codegen_finished(tcx); + + assert_and_save_dep_graph(tcx); + + ongoing_codegen.check_for_errors(tcx.sess); + + return ongoing_codegen; + } + + // Run the monomorphization collector and partition the collected items into + // codegen units. + let codegen_units = + tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; + let codegen_units = (*codegen_units).clone(); + + // Force all codegen_unit queries so they are already either red or green + // when compile_codegen_unit accesses them. We are not able to re-execute + // the codegen_unit query from just the DepNode, so an unknown color would + // lead to having to re-execute compile_codegen_unit, possibly + // unnecessarily. 
+ if tcx.dep_graph.is_fully_enabled() { + for cgu in &codegen_units { + tcx.codegen_unit(cgu.name().clone()); + } + } + + let ongoing_codegen = write::start_async_codegen( + tcx, + time_graph.clone(), + link_meta, + metadata, + rx, + codegen_units.len()); + + // Codegen an allocator shim, if necessary. + // + // If the crate doesn't have an `allocator_kind` set then there's definitely + // no shim to generate. Otherwise we also check our dependency graph for all + // our output crate types. If anything there looks like its a `Dynamic` + // linkage, then it's already got an allocator shim and we'll be using that + // one instead. If nothing exists then it's our job to generate the + // allocator! + let any_dynamic_crate = tcx.sess.dependency_formats.borrow() + .iter() + .any(|(_, list)| { + use rustc::middle::dependency_format::Linkage; + list.iter().any(|linkage| { + match linkage { + Linkage::Dynamic => true, + _ => false, + } + }) + }); + let allocator_module = if any_dynamic_crate { + None + } else if let Some(kind) = *tcx.sess.allocator_kind.get() { + let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("allocator")).as_str() + .to_string(); + let modules = ModuleLlvm::new(tcx.sess, &llmod_id); + time(tcx.sess, "write allocator module", || { + unsafe { + allocator::codegen(tcx, &modules, kind) + } + }); + + Some(ModuleCodegen { + name: llmod_id, + source: ModuleSource::Codegened(modules), + kind: ModuleKind::Allocator, + }) + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); + } + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. 
+ let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + codegen_units + }; + + let mut total_codegen_time = Duration::new(0, 0); + let mut all_stats = Stats::default(); + + for cgu in codegen_units.into_iter() { + ongoing_codegen.wait_for_signal_to_codegen_item(); + ongoing_codegen.check_for_errors(tcx.sess); + + // First, if incremental compilation is enabled, we try to re-use the + // codegen unit from the cache. + if tcx.dep_graph.is_fully_enabled() { + let cgu_id = cgu.work_product_id(); + + // Check whether there is a previous work-product we can + // re-use. Not only must the file exist, and the inputs not + // be dirty, but the hash of the symbols we will generate must + // be the same. + if let Some(buf) = tcx.dep_graph.previous_work_product(&cgu_id) { + let dep_node = &DepNode::new(tcx, + DepConstructor::CompileCodegenUnit(cgu.name().clone())); + + // We try to mark the DepNode::CompileCodegenUnit green. If we + // succeed it means that none of the dependencies has changed + // and we can safely re-use. + if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, dep_node) { + let module = ModuleCodegen { + name: cgu.name().to_string(), + source: ModuleSource::Preexisting(buf), + kind: ModuleKind::Regular, + }; + tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true); + write::submit_codegened_module_to_llvm(tcx, module, 0); + // Continue to next cgu, this one is done. + continue + } + } else { + // This can happen if files were deleted from the cache + // directory for some reason. We just re-compile then. 
+ } + } + + let _timing_guard = time_graph.as_ref().map(|time_graph| { + time_graph.start(write::CODEGEN_WORKER_TIMELINE, + write::CODEGEN_WORK_PACKAGE_KIND, + &format!("codegen {}", cgu.name())) + }); + let start_time = Instant::now(); + all_stats.extend(tcx.compile_codegen_unit(*cgu.name())); + total_codegen_time += start_time.elapsed(); + ongoing_codegen.check_for_errors(tcx.sess); + } + + ongoing_codegen.codegen_finished(tcx); + + // Since the main thread is sometimes blocked during codegen, we keep track + // -Ztime-passes output manually. + print_time_passes_entry(tcx.sess.time_passes(), + "codegen to LLVM IR", + total_codegen_time); + + if tcx.sess.opts.incremental.is_some() { + ::rustc_incremental::assert_module_sources::assert_module_sources(tcx); + } + + symbol_names_test::report_symbol_names(tcx); + + if tcx.sess.codegen_stats() { + println!("--- codegen stats ---"); + println!("n_glues_created: {}", all_stats.n_glues_created); + println!("n_null_glues: {}", all_stats.n_null_glues); + println!("n_real_glues: {}", all_stats.n_real_glues); + + println!("n_fns: {}", all_stats.n_fns); + println!("n_inlines: {}", all_stats.n_inlines); + println!("n_closures: {}", all_stats.n_closures); + println!("fn stats:"); + all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); + for &(ref name, insns) in all_stats.fn_stats.iter() { + println!("{} insns, {}", insns, *name); + } + } + + if tcx.sess.count_llvm_insns() { + for (k, v) in all_stats.llvm_insns.iter() { + println!("{:7} {}", *v, *k); + } + } + + ongoing_codegen.check_for_errors(tcx.sess); + + assert_and_save_dep_graph(tcx); + ongoing_codegen +} + +fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + time(tcx.sess, + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess, + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx)); +} + +fn collect_and_partition_mono_items<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cnum: CrateNum, +) -> (Arc, Arc>>>) +{ 
+ assert_eq!(cnum, LOCAL_CRATE); + + let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { + Some(ref s) => { + let mode_string = s.to_lowercase(); + let mode_string = mode_string.trim(); + if mode_string == "eager" { + MonoItemCollectionMode::Eager + } else { + if mode_string != "lazy" { + let message = format!("Unknown codegen-item collection mode '{}'. \ + Falling back to 'lazy' mode.", + mode_string); + tcx.sess.warn(&message); + } + + MonoItemCollectionMode::Lazy + } + } + None => { + if tcx.sess.opts.cg.link_dead_code { + MonoItemCollectionMode::Eager + } else { + MonoItemCollectionMode::Lazy + } + } + }; + + let (items, inlining_map) = + time(tcx.sess, "monomorphization collection", || { + collector::collect_crate_mono_items(tcx, collection_mode) + }); + + tcx.sess.abort_if_errors(); + + ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); + + let strategy = if tcx.sess.opts.incremental.is_some() { + PartitioningStrategy::PerModule + } else { + PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) + }; + + let codegen_units = time(tcx.sess, "codegen unit partitioning", || { + partitioning::partition(tcx, + items.iter().cloned(), + strategy, + &inlining_map) + .into_iter() + .map(Arc::new) + .collect::>() + }); + + let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { + match *mono_item { + MonoItem::Fn(ref instance) => Some(instance.def_id()), + MonoItem::Static(def_id) => Some(def_id), + _ => None, + } + }).collect(); + + if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { + let mut item_to_cgus: FxHashMap<_, Vec<_>> = FxHashMap(); + + for cgu in &codegen_units { + for (&mono_item, &linkage) in cgu.items() { + item_to_cgus.entry(mono_item) + .or_default() + .push((cgu.name().clone(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = i.to_string(tcx); + output.push_str(" @@"); + let mut empty = Vec::new(); + let cgus = 
item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); + cgus.dedup(); + for &(ref cgu_name, (linkage, _)) in cgus.iter() { + output.push_str(" "); + output.push_str(&cgu_name.as_str()); + + let linkage_abbrev = match linkage { + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => "OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", + }; + + output.push_str("["); + output.push_str(linkage_abbrev); + output.push_str("]"); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("MONO_ITEM {}", item); + } + } + + (Arc::new(mono_items), Arc::new(codegen_units)) +} + +impl CrateInfo { + pub fn new(tcx: TyCtxt) -> CrateInfo { + let mut info = CrateInfo { + panic_runtime: None, + compiler_builtins: None, + profiler_runtime: None, + sanitizer_runtime: None, + is_no_builtins: FxHashSet(), + native_libraries: FxHashMap(), + used_libraries: tcx.native_libraries(LOCAL_CRATE), + link_args: tcx.link_args(LOCAL_CRATE), + crate_name: FxHashMap(), + used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), + used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), + used_crate_source: FxHashMap(), + wasm_imports: FxHashMap(), + lang_item_to_crate: FxHashMap(), + missing_lang_items: FxHashMap(), + }; + let lang_items = tcx.lang_items(); + + let load_wasm_items = tcx.sess.crate_types.borrow() + .iter() + .any(|c| *c != config::CrateType::Rlib) && + tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; + + if load_wasm_items { + info.load_wasm_imports(tcx, LOCAL_CRATE); + } + + for &cnum in tcx.crates().iter() { + 
info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); + info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); + info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); + if tcx.is_panic_runtime(cnum) { + info.panic_runtime = Some(cnum); + } + if tcx.is_compiler_builtins(cnum) { + info.compiler_builtins = Some(cnum); + } + if tcx.is_profiler_runtime(cnum) { + info.profiler_runtime = Some(cnum); + } + if tcx.is_sanitizer_runtime(cnum) { + info.sanitizer_runtime = Some(cnum); + } + if tcx.is_no_builtins(cnum) { + info.is_no_builtins.insert(cnum); + } + if load_wasm_items { + info.load_wasm_imports(tcx, cnum); + } + let missing = tcx.missing_lang_items(cnum); + for &item in missing.iter() { + if let Ok(id) = lang_items.require(item) { + info.lang_item_to_crate.insert(item, id.krate); + } + } + + // No need to look for lang items that are whitelisted and don't + // actually need to exist. + let missing = missing.iter() + .cloned() + .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) + .collect(); + info.missing_lang_items.insert(cnum, missing); + } + + return info + } + + fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { + for (&id, module) in tcx.wasm_import_module_map(cnum).iter() { + let instance = Instance::mono(tcx, id); + let import_name = tcx.symbol_name(instance); + self.wasm_imports.insert(import_name.to_string(), module.clone()); + } + } +} + +fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { + let (all_mono_items, _) = + tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all_mono_items.contains(&id) +} + +fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: InternedString) -> Stats { + let cgu = tcx.codegen_unit(cgu); + + let start_time = Instant::now(); + let (stats, module) = module_codegen(tcx, cgu); + let time_to_codegen = start_time.elapsed(); + + // We assume that the cost to run LLVM on a CGU is proportional to + // the time we needed for codegenning it. 
+ let cost = time_to_codegen.as_secs() * 1_000_000_000 + + time_to_codegen.subsec_nanos() as u64; + + write::submit_codegened_module_to_llvm(tcx, + module, + cost); + return stats; + + fn module_codegen<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: Arc>) + -> (Stats, ModuleCodegen) + { + let cgu_name = cgu.name().to_string(); + + // Instantiate monomorphizations without filling out definitions yet... + let llvm_module = ModuleLlvm::new(tcx.sess, &cgu_name); + let stats = { + let cx = CodegenCx::new(tcx, cgu, &llvm_module); + let mono_items = cx.codegen_unit + .items_in_deterministic_order(cx.tcx); + for &(mono_item, (linkage, visibility)) in &mono_items { + mono_item.predefine(&cx, linkage, visibility); + } + + // ... and now that we have everything pre-defined, fill out those definitions. + for &(mono_item, _) in &mono_items { + mono_item.define(&cx); + } + + // If this codegen unit contains the main function, also create the + // wrapper here + maybe_create_entry_wrapper(&cx); + + // Run replace-all-uses-with for statics that need it + for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { + unsafe { + let bitcast = llvm::LLVMConstPointerCast(new_g, val_ty(old_g)); + llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMDeleteGlobal(old_g); + } + } + + // Create the llvm.used variable + // This variable has type [N x i8*] and is stored in the llvm.metadata section + if !cx.used_statics.borrow().is_empty() { + let name = const_cstr!("llvm.used"); + let section = const_cstr!("llvm.metadata"); + let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); + + unsafe { + let g = llvm::LLVMAddGlobal(cx.llmod, + val_ty(array), + name.as_ptr()); + llvm::LLVMSetInitializer(g, array); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); + llvm::LLVMSetSection(g, section.as_ptr()); + } + } + + // Finalize debuginfo + if cx.sess().opts.debuginfo != DebugInfo::None { + debuginfo::finalize(&cx); + } + + cx.stats.into_inner() + }; + + (stats, 
ModuleCodegen { + name: cgu_name, + source: ModuleSource::Codegened(llvm_module), + kind: ModuleKind::Regular, + }) + } +} + +pub fn provide(providers: &mut Providers) { + providers.collect_and_partition_mono_items = + collect_and_partition_mono_items; + + providers.is_codegened_item = is_codegened_item; + + providers.codegen_unit = |tcx, name| { + let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all.iter() + .find(|cgu| *cgu.name() == name) + .cloned() + .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) + }; + providers.compile_codegen_unit = compile_codegen_unit; + + provide_extern(providers); +} + +pub fn provide_extern(providers: &mut Providers) { + providers.dllimport_foreign_items = |tcx, krate| { + let module_map = tcx.foreign_modules(krate); + let module_map = module_map.iter() + .map(|lib| (lib.def_id, lib)) + .collect::>(); + + let dllimports = tcx.native_libraries(krate) + .iter() + .filter(|lib| { + if lib.kind != cstore::NativeLibraryKind::NativeUnknown { + return false + } + let cfg = match lib.cfg { + Some(ref cfg) => cfg, + None => return true, + }; + attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) + }) + .filter_map(|lib| lib.foreign_module) + .map(|id| &module_map[&id]) + .flat_map(|module| module.foreign_items.iter().cloned()) + .collect(); + Lrc::new(dllimports) + }; + + providers.is_dllimport_foreign_item = |tcx, def_id| { + tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) + }; +} + +pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { + match linkage { + Linkage::External => llvm::Linkage::ExternalLinkage, + Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage, + Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage, + Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage, + Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage, + Linkage::WeakODR => llvm::Linkage::WeakODRLinkage, + Linkage::Appending => llvm::Linkage::AppendingLinkage, + Linkage::Internal => 
llvm::Linkage::InternalLinkage, + Linkage::Private => llvm::Linkage::PrivateLinkage, + Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage, + Linkage::Common => llvm::Linkage::CommonLinkage, + } +} + +pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { + match linkage { + Visibility::Default => llvm::Visibility::Default, + Visibility::Hidden => llvm::Visibility::Hidden, + Visibility::Protected => llvm::Visibility::Protected, + } +} + +// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement +// the HashStable trait. Normally DepGraph::with_task() calls are +// hidden behind queries, but CGU creation is a special case in two +// ways: (1) it's not a query and (2) CGU are output nodes, so their +// Fingerprints are not actually needed. It remains to be clarified +// how exactly this case will be handled in the red/green system but +// for now we content ourselves with providing a no-op HashStable +// implementation for CGUs. +mod temp_stable_hash_impls { + use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, + HashStable}; + use ModuleCodegen; + + impl HashStable for ModuleCodegen { + fn hash_stable(&self, + _: &mut HCX, + _: &mut StableHasher) { + // do nothing + } + } +} diff --git a/src/librustc_trans/build.rs b/src/librustc_codegen_llvm/build.rs similarity index 100% rename from src/librustc_trans/build.rs rename to src/librustc_codegen_llvm/build.rs diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs new file mode 100644 index 000000000000..cfbc2ab90072 --- /dev/null +++ b/src/librustc_codegen_llvm/builder.rs @@ -0,0 +1,1215 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; +use llvm::{IntPredicate, RealPredicate, False, OperandBundleDef}; +use llvm::{self, BasicBlock}; +use common::*; +use type_::Type; +use value::Value; +use libc::{c_uint, c_char}; +use rustc::ty::TyCtxt; +use rustc::ty::layout::{Align, Size}; +use rustc::session::{config, Session}; +use rustc_data_structures::small_c_str::SmallCStr; + +use std::borrow::Cow; +use std::ops::Range; +use std::ptr; + +// All Builders must have an llfn associated with them +#[must_use] +pub struct Builder<'a, 'll: 'a, 'tcx: 'll> { + pub llbuilder: &'ll mut llvm::Builder<'ll>, + pub cx: &'a CodegenCx<'ll, 'tcx>, +} + +impl Drop for Builder<'a, 'll, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _)); + } + } +} + +// This is a really awful way to get a zero-length c-string, but better (and a +// lot more efficient) than doing str::as_c_str("", ...) every time. +fn noname() -> *const c_char { + static CNULL: c_char = 0; + &CNULL +} + +bitflags! { + pub struct MemFlags: u8 { + const VOLATILE = 1 << 0; + const NONTEMPORAL = 1 << 1; + const UNALIGNED = 1 << 2; + } +} + +impl Builder<'a, 'll, 'tcx> { + pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self { + let bx = Builder::with_cx(cx); + let llbb = unsafe { + let name = SmallCStr::new(name); + llvm::LLVMAppendBasicBlockInContext( + cx.llcx, + llfn, + name.as_ptr() + ) + }; + bx.position_at_end(llbb); + bx + } + + pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + // Create a fresh builder from the crate context. 
+ let llbuilder = unsafe { + llvm::LLVMCreateBuilderInContext(cx.llcx) + }; + Builder { + llbuilder, + cx, + } + } + + pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx> { + Builder::new_block(self.cx, self.llfn(), name) + } + + pub fn sess(&self) -> &Session { + self.cx.sess() + } + + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.cx.tcx + } + + pub fn llfn(&self) -> &'ll Value { + unsafe { + llvm::LLVMGetBasicBlockParent(self.llbb()) + } + } + + pub fn llbb(&self) -> &'ll BasicBlock { + unsafe { + llvm::LLVMGetInsertBlock(self.llbuilder) + } + } + + fn count_insn(&self, category: &str) { + if self.cx.sess().codegen_stats() { + self.cx.stats.borrow_mut().n_llvm_insns += 1; + } + if self.cx.sess().count_llvm_insns() { + *self.cx.stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; + } + } + + pub fn set_value_name(&self, value: &'ll Value, name: &str) { + let cname = SmallCStr::new(name); + unsafe { + llvm::LLVMSetValueName(value, cname.as_ptr()); + } + } + + pub fn position_at_end(&self, llbb: &'ll BasicBlock) { + unsafe { + llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); + } + } + + pub fn position_at_start(&self, llbb: &'ll BasicBlock) { + unsafe { + llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); + } + } + + pub fn ret_void(&self) { + self.count_insn("retvoid"); + unsafe { + llvm::LLVMBuildRetVoid(self.llbuilder); + } + } + + pub fn ret(&self, v: &'ll Value) { + self.count_insn("ret"); + unsafe { + llvm::LLVMBuildRet(self.llbuilder, v); + } + } + + pub fn br(&self, dest: &'ll BasicBlock) { + self.count_insn("br"); + unsafe { + llvm::LLVMBuildBr(self.llbuilder, dest); + } + } + + pub fn cond_br( + &self, + cond: &'ll Value, + then_llbb: &'ll BasicBlock, + else_llbb: &'ll BasicBlock, + ) { + self.count_insn("condbr"); + unsafe { + llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb); + } + } + + pub fn switch( + &self, + v: &'ll Value, + else_llbb: &'ll 
BasicBlock, + num_cases: usize, + ) -> &'ll Value { + unsafe { + llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) + } + } + + pub fn invoke(&self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + self.count_insn("invoke"); + + debug!("Invoke {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("invoke", llfn, args); + let bundle = bundle.map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildInvoke(self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + bundle, + noname()) + } + } + + pub fn unreachable(&self) { + self.count_insn("unreachable"); + unsafe { + llvm::LLVMBuildUnreachable(self.llbuilder); + } + } + + /* Arithmetic */ + pub fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("add"); + unsafe { + llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fadd"); + unsafe { + llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fadd"); + unsafe { + let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + + pub fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("sub"); + unsafe { + llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fsub"); + unsafe { + llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fsub"); + unsafe { + let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + 
} + } + + pub fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("mul"); + unsafe { + llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fmul"); + unsafe { + llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fmul"); + unsafe { + let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + + + pub fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("udiv"); + unsafe { + llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("exactudiv"); + unsafe { + llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("sdiv"); + unsafe { + llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("exactsdiv"); + unsafe { + llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fdiv"); + unsafe { + llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fdiv"); + unsafe { + let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + + pub fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("urem"); + unsafe { + llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn srem(&self, lhs: &'ll Value, rhs: &'ll 
Value) -> &'ll Value { + self.count_insn("srem"); + unsafe { + llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("frem"); + unsafe { + llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("frem"); + unsafe { + let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + + pub fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("shl"); + unsafe { + llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("lshr"); + unsafe { + llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("ashr"); + unsafe { + llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("and"); + unsafe { + llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("or"); + unsafe { + llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("xor"); + unsafe { + llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) + } + } + + pub fn neg(&self, v: &'ll Value) -> &'ll Value { + self.count_insn("neg"); + unsafe { + llvm::LLVMBuildNeg(self.llbuilder, v, noname()) + } + } + + pub fn fneg(&self, v: &'ll Value) -> &'ll Value { + self.count_insn("fneg"); + unsafe { + llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) + } + } + + pub fn not(&self, v: &'ll Value) -> &'ll Value { + self.count_insn("not"); + unsafe { + 
llvm::LLVMBuildNot(self.llbuilder, v, noname()) + } + } + + pub fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + let bx = Builder::with_cx(self.cx); + bx.position_at_start(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn()) + }); + bx.dynamic_alloca(ty, name, align) + } + + pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + self.count_insn("alloca"); + unsafe { + let alloca = if name.is_empty() { + llvm::LLVMBuildAlloca(self.llbuilder, ty, noname()) + } else { + let name = SmallCStr::new(name); + llvm::LLVMBuildAlloca(self.llbuilder, ty, + name.as_ptr()) + }; + llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); + alloca + } + } + + pub fn array_alloca(&self, + ty: &'ll Type, + len: &'ll Value, + name: &str, + align: Align) -> &'ll Value { + self.count_insn("alloca"); + unsafe { + let alloca = if name.is_empty() { + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, noname()) + } else { + let name = SmallCStr::new(name); + llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, + name.as_ptr()) + }; + llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); + alloca + } + } + + pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + self.count_insn("load"); + unsafe { + let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); + llvm::LLVMSetAlignment(load, align.abi() as c_uint); + load + } + } + + pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + self.count_insn("load.volatile"); + unsafe { + let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); + llvm::LLVMSetVolatile(insn, llvm::True); + insn + } + } + + pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value { + self.count_insn("load.atomic"); + unsafe { + let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? 
+ // However, 64-bit atomic loads on `i686-apple-darwin` appear to + // require `___atomic_load` with ABI-alignment, so it's staying. + llvm::LLVMSetAlignment(load, align.pref() as c_uint); + load + } + } + + + pub fn range_metadata(&self, load: &'ll Value, range: Range) { + unsafe { + let llty = val_ty(load); + let v = [ + C_uint_big(llty, range.start), + C_uint_big(llty, range.end) + ]; + + llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, + v.as_ptr(), + v.len() as c_uint)); + } + } + + pub fn nonnull_metadata(&self, load: &'ll Value) { + unsafe { + llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + } + } + + pub fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + self.store_with_flags(val, ptr, align, MemFlags::empty()) + } + + pub fn store_with_flags( + &self, + val: &'ll Value, + ptr: &'ll Value, + align: Align, + flags: MemFlags, + ) -> &'ll Value { + debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); + self.count_insn("store"); + let ptr = self.check_store(val, ptr); + unsafe { + let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); + let align = if flags.contains(MemFlags::UNALIGNED) { + 1 + } else { + align.abi() as c_uint + }; + llvm::LLVMSetAlignment(store, align); + if flags.contains(MemFlags::VOLATILE) { + llvm::LLVMSetVolatile(store, llvm::True); + } + if flags.contains(MemFlags::NONTEMPORAL) { + // According to LLVM [1] building a nontemporal store must + // *always* point to a metadata value of the integer 1. 
+ // + // [1]: http://llvm.org/docs/LangRef.html#store-instruction + let one = C_i32(self.cx, 1); + let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); + llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); + } + store + } + } + + pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, + order: AtomicOrdering, align: Align) { + debug!("Store {:?} -> {:?}", val, ptr); + self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); + unsafe { + let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? + // Also see `atomic_load` for more context. + llvm::LLVMSetAlignment(store, align.pref() as c_uint); + } + } + + pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + self.count_insn("gep"); + unsafe { + llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), + indices.len() as c_uint, noname()) + } + } + + pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + self.count_insn("inboundsgep"); + unsafe { + llvm::LLVMBuildInBoundsGEP( + self.llbuilder, ptr, indices.as_ptr(), indices.len() as c_uint, noname()) + } + } + + pub fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + + /* Casts */ + pub fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("trunc"); + unsafe { + llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("sext"); + unsafe { + llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, 
noname()) + } + } + + pub fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fptoui"); + unsafe { + llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fptosi"); + unsafe { + llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) + } + } + + pub fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("uitofp"); + unsafe { + llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("sitofp"); + unsafe { + llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fptrunc"); + unsafe { + llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("fpext"); + unsafe { + llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("ptrtoint"); + unsafe { + llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("inttoptr"); + unsafe { + llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("bitcast"); + unsafe { + llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); + unsafe { + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + } + } + + pub fn intcast(&self, val: &'ll Value, dest_ty: 
&'ll Type, is_signed: bool) -> &'ll Value { + self.count_insn("intcast"); + unsafe { + llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) + } + } + + /* Comparisons */ + pub fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("icmp"); + unsafe { + llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) + } + } + + pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("fcmp"); + unsafe { + llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) + } + } + + /* Miscellaneous instructions */ + pub fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { + self.count_insn("emptyphi"); + unsafe { + llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) + } + } + + pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + assert_eq!(vals.len(), bbs.len()); + let phi = self.empty_phi(ty); + self.count_insn("addincoming"); + unsafe { + llvm::LLVMAddIncoming(phi, vals.as_ptr(), + bbs.as_ptr(), + vals.len() as c_uint); + phi + } + } + + pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, + inputs: &[&'ll Value], output: &'ll Type, + volatile: bool, alignstack: bool, + dia: AsmDialect) -> &'ll Value { + self.count_insn("inlineasm"); + + let volatile = if volatile { llvm::True } + else { llvm::False }; + let alignstack = if alignstack { llvm::True } + else { llvm::False }; + + let argtys = inputs.iter().map(|v| { + debug!("Asm Input Type: {:?}", *v); + val_ty(*v) + }).collect::>(); + + debug!("Asm Output Type: {:?}", output); + let fty = Type::func(&argtys[..], output); + unsafe { + let v = llvm::LLVMRustInlineAsm( + fty, asm, cons, volatile, alignstack, dia); + self.call(v, inputs, None) + } + } + + pub fn call(&self, llfn: &'ll Value, args: &[&'ll Value], + bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, 
+ args); + + let args = self.check_call("call", llfn, args); + let bundle = bundle.map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), + args.len() as c_uint, bundle, noname()) + } + } + + pub fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("minnum"); + unsafe { + let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); + instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") + } + } + pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("maxnum"); + unsafe { + let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); + instr.expect("LLVMRustBuildMaxNum is not available in LLVM version < 6.0") + } + } + + pub fn select( + &self, cond: &'ll Value, + then_val: &'ll Value, + else_val: &'ll Value, + ) -> &'ll Value { + self.count_insn("select"); + unsafe { + llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, noname()) + } + } + + #[allow(dead_code)] + pub fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + self.count_insn("vaarg"); + unsafe { + llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) + } + } + + pub fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + self.count_insn("extractelement"); + unsafe { + llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) + } + } + + pub fn insert_element( + &self, vec: &'ll Value, + elt: &'ll Value, + idx: &'ll Value, + ) -> &'ll Value { + self.count_insn("insertelement"); + unsafe { + llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) + } + } + + pub fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + self.count_insn("shufflevector"); + unsafe { + llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) + } + } + + pub fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + unsafe { + let elt_ty = val_ty(elt); + let 
undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); + let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); + let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); + self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) + } + } + + pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fadd_fast"); + unsafe { + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. + let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) + .expect("LLVMRustBuildVectorReduceFAdd is not available in LLVM version < 5.0"); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmul_fast"); + unsafe { + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. + let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) + .expect("LLVMRustBuildVectorReduceFMul is not available in LLVM version < 5.0"); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.add"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); + instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.mul"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); + instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.and"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); + 
instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.or"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); + instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.xor"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); + instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); + instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); + instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) + .expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0"); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) + .expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0"); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn 
vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.min"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); + instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") + } + } + pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.max"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); + instr.expect("LLVMRustBuildVectorReduceMax is not available in LLVM version < 5.0") + } + } + + pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("extractvalue"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname()) + } + } + + pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, + idx: u64) -> &'ll Value { + self.count_insn("insertvalue"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, + noname()) + } + } + + pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, + num_clauses: usize) -> &'ll Value { + self.count_insn("landingpad"); + unsafe { + llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, + num_clauses as c_uint, noname()) + } + } + + pub fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + unsafe { + llvm::LLVMAddClause(landing_pad, clause); + } + } + + pub fn set_cleanup(&self, landing_pad: &'ll Value) { + self.count_insn("setcleanup"); + unsafe { + llvm::LLVMSetCleanup(landing_pad, llvm::True); + } + } + + pub fn resume(&self, exn: &'ll Value) -> &'ll Value { + self.count_insn("resume"); + unsafe { + llvm::LLVMBuildResume(self.llbuilder, exn) + } + } + + pub fn cleanup_pad(&self, + parent: Option<&'ll Value>, + args: &[&'ll Value]) -> &'ll Value { + self.count_insn("cleanuppad"); + let name = 
const_cstr!("cleanuppad"); + let ret = unsafe { + llvm::LLVMRustBuildCleanupPad(self.llbuilder, + parent, + args.len() as c_uint, + args.as_ptr(), + name.as_ptr()) + }; + ret.expect("LLVM does not have support for cleanuppad") + } + + pub fn cleanup_ret( + &self, cleanup: &'ll Value, + unwind: Option<&'ll BasicBlock>, + ) -> &'ll Value { + self.count_insn("cleanupret"); + let ret = unsafe { + llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind) + }; + ret.expect("LLVM does not have support for cleanupret") + } + + pub fn catch_pad(&self, + parent: &'ll Value, + args: &[&'ll Value]) -> &'ll Value { + self.count_insn("catchpad"); + let name = const_cstr!("catchpad"); + let ret = unsafe { + llvm::LLVMRustBuildCatchPad(self.llbuilder, parent, + args.len() as c_uint, args.as_ptr(), + name.as_ptr()) + }; + ret.expect("LLVM does not have support for catchpad") + } + + pub fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + self.count_insn("catchret"); + let ret = unsafe { + llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) + }; + ret.expect("LLVM does not have support for catchret") + } + + pub fn catch_switch( + &self, + parent: Option<&'ll Value>, + unwind: Option<&'ll BasicBlock>, + num_handlers: usize, + ) -> &'ll Value { + self.count_insn("catchswitch"); + let name = const_cstr!("catchswitch"); + let ret = unsafe { + llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind, + num_handlers as c_uint, + name.as_ptr()) + }; + ret.expect("LLVM does not have support for catchswitch") + } + + pub fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + unsafe { + llvm::LLVMRustAddHandler(catch_switch, handler); + } + } + + pub fn set_personality_fn(&self, personality: &'ll Value) { + unsafe { + llvm::LLVMSetPersonalityFn(self.llfn(), personality); + } + } + + // Atomic Operations + pub fn atomic_cmpxchg( + &self, + dst: &'ll Value, + cmp: &'ll Value, + src: &'ll Value, + order: AtomicOrdering, + 
failure_order: AtomicOrdering, + weak: llvm::Bool, + ) -> &'ll Value { + unsafe { + llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, + order, failure_order, weak) + } + } + pub fn atomic_rmw( + &self, + op: AtomicRmwBinOp, + dst: &'ll Value, + src: &'ll Value, + order: AtomicOrdering, + ) -> &'ll Value { + unsafe { + llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False) + } + } + + pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { + unsafe { + llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); + } + } + + pub fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + unsafe { + llvm::LLVMAddCase(s, on_val, dest) + } + } + + pub fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + self.count_insn("addincoming"); + unsafe { + llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + } + } + + pub fn set_invariant_load(&self, load: &'ll Value) { + unsafe { + llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + } + } + + /// Returns the ptr value that should be used for storing `val`. + fn check_store<'b>(&self, + val: &'ll Value, + ptr: &'ll Value) -> &'ll Value { + let dest_ptr_ty = val_ty(ptr); + let stored_ty = val_ty(val); + let stored_ptr_ty = stored_ty.ptr_to(); + + assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); + + if dest_ptr_ty == stored_ptr_ty { + ptr + } else { + debug!("Type mismatch in store. \ + Expected {:?}, got {:?}; inserting bitcast", + dest_ptr_ty, stored_ptr_ty); + self.bitcast(ptr, stored_ptr_ty) + } + } + + /// Returns the args that should be used for a call to `llfn`. 
+ fn check_call<'b>(&self, + typ: &str, + llfn: &'ll Value, + args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { + let mut fn_ty = val_ty(llfn); + // Strip off pointers + while fn_ty.kind() == llvm::TypeKind::Pointer { + fn_ty = fn_ty.element_type(); + } + + assert!(fn_ty.kind() == llvm::TypeKind::Function, + "builder::{} not passed a function, but {:?}", typ, fn_ty); + + let param_tys = fn_ty.func_params(); + + let all_args_match = param_tys.iter() + .zip(args.iter().map(|&v| val_ty(v))) + .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); + + if all_args_match { + return Cow::Borrowed(args); + } + + let casted_args: Vec<_> = param_tys.into_iter() + .zip(args.iter()) + .enumerate() + .map(|(i, (expected_ty, &actual_val))| { + let actual_ty = val_ty(actual_val); + if expected_ty != actual_ty { + debug!("Type mismatch in function call of {:?}. \ + Expected {:?} for param {}, got {:?}; injecting bitcast", + llfn, expected_ty, i, actual_ty); + self.bitcast(actual_val, expected_ty) + } else { + actual_val + } + }) + .collect(); + + return Cow::Owned(casted_args); + } + + pub fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); + } + + pub fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); + } + + /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations + /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + /// and the intrinsic for `lt` and passes them to `emit`, which is in + /// charge of generating code to call the passed intrinsic on whatever + /// block of generated code is targeted for the intrinsic. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
+ fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { + if self.cx.sess().opts.optimize == config::OptLevel::No { + return; + } + + let size = size.bytes(); + if size == 0 { + return; + } + + let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); + + let ptr = self.pointercast(ptr, Type::i8p(self.cx)); + self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); + } +} diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs new file mode 100644 index 000000000000..2e90f95fa8e2 --- /dev/null +++ b/src/librustc_codegen_llvm/callee.rs @@ -0,0 +1,222 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Handles codegen of callees as well as other call-related +//! things. Callees are a superset of normal rust values and sometimes +//! have different representations. In particular, top-level fn items +//! and methods are represented as just a fn ptr and not a full +//! closure. + +use attributes; +use common::{self, CodegenCx}; +use consts; +use declare; +use llvm; +use monomorphize::Instance; +use type_of::LayoutLlvmExt; +use value::Value; + +use rustc::hir::def_id::DefId; +use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::LayoutOf; +use rustc::ty::subst::Substs; + +/// Codegens a reference to a fn/method item, monomorphizing and +/// inlining as it goes. 
+/// +/// # Parameters +/// +/// - `cx`: the crate context +/// - `instance`: the instance to be instantiated +pub fn get_fn( + cx: &CodegenCx<'ll, 'tcx>, + instance: Instance<'tcx>, +) -> &'ll Value { + let tcx = cx.tcx; + + debug!("get_fn(instance={:?})", instance); + + assert!(!instance.substs.needs_infer()); + assert!(!instance.substs.has_escaping_regions()); + assert!(!instance.substs.has_param_types()); + + let fn_ty = instance.ty(cx.tcx); + if let Some(&llfn) = cx.instances.borrow().get(&instance) { + return llfn; + } + + let sym = tcx.symbol_name(instance).as_str(); + debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym); + + // Create a fn pointer with the substituted signature. + let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(cx, fn_ty)); + let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx); + + let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) { + // This is subtle and surprising, but sometimes we have to bitcast + // the resulting fn pointer. The reason has to do with external + // functions. If you have two crates that both bind the same C + // library, they may not use precisely the same types: for + // example, they will probably each declare their own structs, + // which are distinct types from LLVM's point of view (nominal + // types). + // + // Now, if those two crates are linked into an application, and + // they contain inlined code, you can wind up with a situation + // where both of those functions wind up being loaded into this + // application simultaneously. In that case, the same function + // (from LLVM's point of view) requires two types. But of course + // LLVM won't allow one function to have two types. + // + // What we currently do, therefore, is declare the function with + // one of the two types (whichever happens to come first) and then + // bitcast as needed when the function is referenced to make sure + // it has the type we expect. 
+ // + // This can occur on either a crate-local or crate-external + // reference. It also occurs when testing libcore and in some + // other weird situations. Annoying. + if common::val_ty(llfn) != llptrty { + debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); + consts::ptrcast(llfn, llptrty) + } else { + debug!("get_fn: not casting pointer!"); + llfn + } + } else { + let llfn = declare::declare_fn(cx, &sym, fn_ty); + assert_eq!(common::val_ty(llfn), llptrty); + debug!("get_fn: not casting pointer!"); + + if instance.def.is_inline(tcx) { + attributes::inline(llfn, attributes::InlineAttr::Hint); + } + attributes::from_fn_attrs(cx, llfn, Some(instance.def.def_id())); + + let instance_def_id = instance.def_id(); + + // Apply an appropriate linkage/visibility value to our item that we + // just declared. + // + // This is sort of subtle. Inside our codegen unit we started off + // compilation by predefining all our own `MonoItem` instances. That + // is, everything we're codegenning ourselves is already defined. That + // means that anything we're actually codegenning in this codegen unit + // will have hit the above branch in `get_declared_value`. As a result, + // we're guaranteed here that we're declaring a symbol that won't get + // defined, or in other words we're referencing a value from another + // codegen unit or even another crate. + // + // So because this is a foreign value we blanket apply an external + // linkage directive because it's coming from a different object file. + // The visibility here is where it gets tricky. This symbol could be + // referencing some foreign crate or foreign library (an `extern` + // block) in which case we want to leave the default visibility. We may + // also, though, have multiple codegen units. It could be a + // monomorphization, in which case its expected visibility depends on + // whether we are sharing generics or not. 
The important thing here is + // that the visibility we apply to the declaration is the same one that + // has been applied to the definition (wherever that definition may be). + unsafe { + llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage); + + let is_generic = instance.substs.types().next().is_some(); + + if is_generic { + // This is a monomorphization. Its expected visibility depends + // on whether we are in share-generics mode. + + if cx.tcx.sess.opts.share_generics() { + // We are in share_generics mode. + + if instance_def_id.is_local() { + // This is a definition from the current crate. If the + // definition is unreachable for downstream crates or + // the current crate does not re-export generics, the + // definition of the instance will have been declared + // as `hidden`. + if cx.tcx.is_unreachable_local_definition(instance_def_id) || + !cx.tcx.local_crate_exports_generics() { + llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); + } + } else { + // This is a monomorphization of a generic function + // defined in an upstream crate. + if cx.tcx.upstream_monomorphizations_for(instance_def_id) + .map(|set| set.contains_key(instance.substs)) + .unwrap_or(false) { + // This is instantiated in another crate. It cannot + // be `hidden`. + } else { + // This is a local instantiation of an upstream definition. + // If the current crate does not re-export it + // (because it is a C library or an executable), it + // will have been declared `hidden`. 
+ if !cx.tcx.local_crate_exports_generics() { + llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); + } + } + } + } else { + // When not sharing generics, all instances are in the same + // crate and have hidden visibility + llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); + } + } else { + // This is a non-generic function + if cx.tcx.is_codegened_item(instance_def_id) { + // This is a function that is instantiated in the local crate + + if instance_def_id.is_local() { + // This is function that is defined in the local crate. + // If it is not reachable, it is hidden. + if !cx.tcx.is_reachable_non_generic(instance_def_id) { + llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); + } + } else { + // This is a function from an upstream crate that has + // been instantiated here. These are always hidden. + llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); + } + } + } + } + + if cx.use_dll_storage_attrs && + tcx.is_dllimport_foreign_item(instance_def_id) + { + unsafe { + llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); + } + } + + llfn + }; + + cx.instances.borrow_mut().insert(instance, llfn); + + llfn +} + +pub fn resolve_and_get_fn( + cx: &CodegenCx<'ll, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> &'ll Value { + get_fn( + cx, + ty::Instance::resolve( + cx.tcx, + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs new file mode 100644 index 000000000000..51fc610408b5 --- /dev/null +++ b/src/librustc_codegen_llvm/common.rs @@ -0,0 +1,458 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_camel_case_types, non_snake_case)] + +//! Code that is useful in various codegen modules. + +use llvm::{self, TypeKind}; +use llvm::{True, False, Bool, OperandBundleDef}; +use rustc::hir::def_id::DefId; +use rustc::middle::lang_items::LangItem; +use abi; +use base; +use builder::Builder; +use consts; +use declare; +use type_::Type; +use type_of::LayoutLlvmExt; +use value::Value; + +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; +use rustc::hir; + +use libc::{c_uint, c_char}; +use std::iter; + +use rustc_target::spec::abi::Abi; +use syntax::symbol::LocalInternedString; +use syntax_pos::{Span, DUMMY_SP}; + +pub use context::CodegenCx; + +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) +} + +pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) +} + +pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) +} + +/* +* A note on nomenclature of linking: "extern", "foreign", and "upcall". +* +* An "extern" is an LLVM symbol we wind up emitting an undefined external +* reference to. This means "we don't have the thing in this compilation unit, +* please make sure you link it in at runtime". This could be a reference to +* C code found in a C library, or rust code found in a rust crate. +* +* Most "externs" are implicitly declared (automatically) as a result of a +* user declaring an extern _module_ dependency; this causes the rust driver +* to locate an extern crate, scan its compilation metadata, and emit extern +* declarations for any symbols used by the declaring crate. +* +* A "foreign" is an extern that references C (or other non-rust ABI) code. 
+* There is no metadata to scan for extern references so in these cases either +* a header-digester like bindgen, or manual function prototypes, have to +* serve as declarators. So these are usually given explicitly as prototype +* declarations, in rust code, with ABI attributes on them noting which ABI to +* link via. +* +* An "upcall" is a foreign call generated by the compiler (not corresponding +* to any user-written call in the code) into the runtime library, to perform +* some helper task such as bringing a task to life, allocating memory, etc. +* +*/ + +/// A structure representing an active landing pad for the duration of a basic +/// block. +/// +/// Each `Block` may contain an instance of this, indicating whether the block +/// is part of a landing pad or not. This is used to make decision about whether +/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to +/// use `invoke`) and also about various function call metadata. +/// +/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is +/// just a bunch of `None` instances (not too interesting), but for MSVC +/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data. +/// When inside of a landing pad, each function call in LLVM IR needs to be +/// annotated with which landing pad it's a part of. This is accomplished via +/// the `OperandBundleDef` value created for MSVC landing pads. +pub struct Funclet<'ll> { + cleanuppad: &'ll Value, + operand: OperandBundleDef<'ll>, +} + +impl Funclet<'ll> { + pub fn new(cleanuppad: &'ll Value) -> Self { + Funclet { + cleanuppad, + operand: OperandBundleDef::new("funclet", &[cleanuppad]), + } + } + + pub fn cleanuppad(&self) -> &'ll Value { + self.cleanuppad + } + + pub fn bundle(&self) -> &OperandBundleDef<'ll> { + &self.operand + } +} + +pub fn val_ty(v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) + } +} + +// LLVM constant constructors. 
+pub fn C_null(t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstNull(t) + } +} + +pub fn C_undef(t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMGetUndef(t) + } +} + +pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i as u64, True) + } +} + +pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i, False) + } +} + +pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { + unsafe { + let words = [u as u64, (u >> 64) as u64]; + llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + } +} + +pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value { + C_uint(Type::i1(cx), val as u64) +} + +pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value { + C_int(Type::i32(cx), i as i64) +} + +pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value { + C_uint(Type::i32(cx), i as u64) +} + +pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { + C_uint(Type::i64(cx), i) +} + +pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { + let bit_size = cx.data_layout().pointer_size.bits(); + if bit_size < 64 { + // make sure it doesn't overflow + assert!(i < (1<<bit_size)); + } + + C_uint(cx.isize_ty, i) +} + +pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value { + C_uint(Type::i8(cx), i as u64) +} + + +// This is a 'c-like' raw string, which differs from +// our boxed-and-length-annotated strings.
+pub fn C_cstr( + cx: &CodegenCx<'ll, '_>, + s: LocalInternedString, + null_terminated: bool, +) -> &'ll Value { + unsafe { + if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) { + return llval; + } + + let sc = llvm::LLVMConstStringInContext(cx.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = cx.generate_local_symbol_name("str"); + let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + cx.const_cstr_cache.borrow_mut().insert(s, g); + g + } +} + +// NB: Do not use `do_spill_noroot` to make this into a constant string, or +// you will be kicked off fast isel. See issue #4352 for an example of this. +pub fn C_str_slice(cx: &CodegenCx<'ll, '_>, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(C_cstr(cx, s, false), + cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to()); + C_fat_ptr(cx, cs, C_usize(cx, len as u64)) +} + +pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + C_struct(cx, &[ptr, meta], false) +} + +pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value { + C_struct_in_context(cx.llcx, elts, packed) +} + +pub fn C_struct_in_context( + llcx: &'ll llvm::Context, + elts: &[&'ll Value], + packed: bool, +) -> &'ll Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) + } +} + +pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } +} + +pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), 
elts.len() as c_uint); + } +} + +pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value { + C_bytes_in_context(cx.llcx, bytes) +} + +pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } +} + +pub fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); + + r + } +} + +pub fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } + } +} + +pub fn const_to_uint(v: &'ll Value) -> u64 { + unsafe { + llvm::LLVMConstIntGetZExtValue(v) + } +} + +pub fn is_const_integral(v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantInt(v).is_some() + } +} + +pub fn is_const_real(v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } +} + + +#[inline] +fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { + ((hi as u128) << 64) | (lo as u128) +} + +pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option<u128> { + unsafe { + if is_const_integral(v) { + let (mut lo, mut hi) = (0u64, 0u64); + let success = llvm::LLVMRustConstInt128Get(v, sign_ext, + &mut hi, &mut lo); + if success { + Some(hi_lo_to_u128(lo, hi)) + } else { + None + } + } else { + None + } + } +} + +pub fn langcall(tcx: TyCtxt, + span: Option<Span>, + msg: &str, + li: LangItem) + -> DefId { + match tcx.lang_items().require(li) { + Ok(id) => id, + Err(s) => { + let msg = format!("{} {}", msg, s); + match span { + Some(span) =>
tcx.sess.span_fatal(span, &msg[..]), + None => tcx.sess.fatal(&msg[..]), + } + } + } +} + +// To avoid UB from LLVM, these two functions mask RHS with an +// appropriate mask unconditionally (i.e. the fallback behavior for +// all shifts). For 32- and 64-bit types, this matches the semantics +// of Java. (See related discussion on #1877 and #10183.) + +pub fn build_unchecked_lshift( + bx: &Builder<'a, 'll, 'tcx>, + lhs: &'ll Value, + rhs: &'ll Value +) -> &'ll Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + bx.shl(lhs, rhs) +} + +pub fn build_unchecked_rshift( + bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value +) -> &'ll Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + let is_signed = lhs_t.is_signed(); + if is_signed { + bx.ashr(lhs, rhs) + } else { + bx.lshr(lhs, rhs) + } +} + +fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value { + let rhs_llty = val_ty(rhs); + bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) +} + +pub fn shift_mask_val( + bx: &Builder<'a, 'll, 'tcx>, + llty: &'ll Type, + mask_llty: &'ll Type, + invert: bool +) -> &'ll Value { + let kind = llty.kind(); + match kind { + TypeKind::Integer => { + // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. 
+ let val = llty.int_width() - 1; + if invert { + C_int(mask_llty, !val as i64) + } else { + C_uint(mask_llty, val) + } + }, + TypeKind::Vector => { + let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert); + bx.vector_splat(mask_llty.vector_length(), mask) + }, + _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), + } +} + +pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + ty: Ty<'tcx>) + -> ty::PolyFnSig<'tcx> +{ + match ty.sty { + ty::TyFnDef(..) | + // Shims currently have type TyFnPtr. Not sure this should remain. + ty::TyFnPtr(_) => ty.fn_sig(cx.tcx), + ty::TyClosure(def_id, substs) => { + let tcx = cx.tcx; + let sig = substs.closure_sig(def_id, tcx); + + let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); + sig.map_bound(|sig| tcx.mk_fn_sig( + iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), + sig.output(), + sig.variadic, + sig.unsafety, + sig.abi + )) + } + ty::TyGenerator(def_id, substs, _) => { + let tcx = cx.tcx; + let sig = substs.poly_sig(def_id, cx.tcx); + + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); + let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); + + sig.map_bound(|sig| { + let state_did = tcx.lang_items().gen_state().unwrap(); + let state_adt_ref = tcx.adt_def(state_did); + let state_substs = tcx.intern_substs(&[ + sig.yield_ty.into(), + sig.return_ty.into(), + ]); + let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); + + tcx.mk_fn_sig(iter::once(env_ty), + ret_ty, + false, + hir::Unsafety::Normal, + Abi::Rust + ) + }) + } + _ => bug!("unexpected type {:?} to ty_fn_sig", ty) + } +} diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs new file mode 100644 index 000000000000..770a22ad6584 --- /dev/null +++ b/src/librustc_codegen_llvm/consts.rs @@ -0,0 +1,438 @@ +// Copyright 2012 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use libc::c_uint; +use llvm::{self, SetUnnamedAddr, True}; +use rustc::hir::def_id::DefId; +use rustc::hir::map as hir_map; +use debuginfo; +use base; +use monomorphize::MonoItem; +use common::{CodegenCx, val_ty}; +use declare; +use monomorphize::Instance; +use syntax_pos::Span; +use syntax_pos::symbol::LocalInternedString; +use type_::Type; +use type_of::LayoutLlvmExt; +use value::Value; +use rustc::ty::{self, Ty}; + +use rustc::ty::layout::{Align, LayoutOf}; + +use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; + +use std::ffi::{CStr, CString}; + +pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstPointerCast(val, ty) + } +} + +pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstBitCast(val, ty) + } +} + +fn set_global_alignment(cx: &CodegenCx<'ll, '_>, + gv: &'ll Value, + mut align: Align) { + // The target may require greater alignment for globals than the type does. + // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, + // which can force it to be smaller. Rust doesn't support this yet. 
+ if let Some(min) = cx.sess().target.target.options.min_global_align { + match ty::layout::Align::from_bits(min, min) { + Ok(min) => align = align.max(min), + Err(err) => { + cx.sess().err(&format!("invalid minimum global alignment: {}", err)); + } + } + } + unsafe { + llvm::LLVMSetAlignment(gv, align.abi() as u32); + } +} + +pub fn addr_of_mut( + cx: &CodegenCx<'ll, '_>, + cv: &'ll Value, + align: Align, + kind: Option<&str>, +) -> &'ll Value { + unsafe { + let gv = match kind { + Some(kind) if !cx.tcx.sess.fewer_names() => { + let name = cx.generate_local_symbol_name(kind); + let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); + }); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + gv + }, + _ => declare::define_private_global(cx, val_ty(cv)), + }; + llvm::LLVMSetInitializer(gv, cv); + set_global_alignment(cx, gv, align); + SetUnnamedAddr(gv, true); + gv + } +} + +pub fn addr_of( + cx: &CodegenCx<'ll, '_>, + cv: &'ll Value, + align: Align, + kind: Option<&str>, +) -> &'ll Value { + if let Some(&gv) = cx.const_globals.borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } + } + return gv; + } + let gv = addr_of_mut(cx, cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + cx.const_globals.borrow_mut().insert(cv, gv); + gv +} + +pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value { + let instance = Instance::mono(cx.tcx, def_id); + if let Some(&g) = cx.instances.borrow().get(&instance) { + return g; + } + + let defined_in_current_codegen_unit = cx.codegen_unit + .items() + .contains_key(&MonoItem::Static(def_id)); + assert!(!defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics 
defined in the same CGU, but did not for `{:?}`", + def_id); + + let ty = instance.ty(cx.tcx); + let sym = cx.tcx.symbol_name(instance).as_str(); + + debug!("get_static: sym={} instance={:?}", sym, instance); + + let g = if let Some(id) = cx.tcx.hir.as_local_node_id(def_id) { + + let llty = cx.layout_of(ty).llvm_type(cx); + let (g, attrs) = match cx.tcx.hir.get(id) { + hir_map::NodeItem(&hir::Item { + ref attrs, span, node: hir::ItemKind::Static(..), .. + }) => { + if declare::get_declared_value(cx, &sym[..]).is_some() { + span_bug!(span, "Conflicting symbol names for static?"); + } + + let g = declare::define_global(cx, &sym[..], llty).unwrap(); + + if !cx.tcx.is_reachable_non_generic(def_id) { + unsafe { + llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); + } + } + + (g, attrs) + } + + hir_map::NodeForeignItem(&hir::ForeignItem { + ref attrs, span, node: hir::ForeignItemKind::Static(..), .. + }) => { + let fn_attrs = cx.tcx.codegen_fn_attrs(def_id); + (check_and_apply_linkage(cx, &fn_attrs, ty, sym, Some(span)), attrs) + } + + item => bug!("get_static: expected static, found {:?}", item) + }; + + debug!("get_static: sym={} attrs={:?}", sym, attrs); + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local_mode(g, cx.tls_model); + } + } + + g + } else { + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + debug!("get_static: sym={} item_attr={:?}", sym, cx.tcx.item_attrs(def_id)); + + let attrs = cx.tcx.codegen_fn_attrs(def_id); + let g = check_and_apply_linkage(cx, &attrs, ty, sym, None); + + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. 
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, cx.tls_model); + } + + let needs_dll_storage_attr = + cx.use_dll_storage_attrs && !cx.tcx.is_foreign_item(def_id) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the attrs. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!(!(cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + cx.tcx.sess.target.target.options.is_like_msvc && + cx.tcx.sess.opts.cg.prefer_dynamic)); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e. it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to + // make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !cx.tcx.is_codegened_item(def_id) { + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + } + g + }; + + if cx.use_dll_storage_attrs && cx.tcx.is_dllimport_foreign_item(def_id) { + // For foreign (native) libs we know the exact storage type to use. 
+ unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + + cx.instances.borrow_mut().insert(instance, g); + g +} + +fn check_and_apply_linkage( + cx: &CodegenCx<'ll, 'tcx>, + attrs: &CodegenFnAttrs, + ty: Ty<'tcx>, + sym: LocalInternedString, + span: Option +) -> &'ll Value { + let llty = cx.layout_of(ty).llvm_type(cx); + if let Some(linkage) = attrs.linkage { + debug!("get_static: sym={} linkage={:?}", sym, linkage); + + // If this is a static with a linkage specified, then we need to handle + // it a little specially. The typesystem prevents things like &T and + // extern "C" fn() from being non-null, so we can't just declare a + // static and call it a day. Some linkages (like weak) will make it such + // that the static actually has a null value. + let llty2 = match ty.sty { + ty::TyRawPtr(ref mt) => cx.layout_of(mt.ty).llvm_type(cx), + _ => { + if span.is_some() { + cx.sess().span_fatal(span.unwrap(), "must have type `*const T` or `*mut T`") + } else { + bug!("must have type `*const T` or `*mut T`") + } + } + }; + unsafe { + // Declare a symbol `foo` with the desired linkage. + let g1 = declare::declare_global(cx, &sym, llty2); + llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); + + // Declare an internal global `extern_with_linkage_foo` which + // is initialized with the address of `foo`. If `foo` is + // discarded during linking (for example, if `foo` has weak + // linkage and there are no definitions), then + // `extern_with_linkage_foo` will instead be initialized to + // zero. 
+ let mut real_name = "_rust_extern_with_linkage_".to_string(); + real_name.push_str(&sym); + let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{ + if span.is_some() { + cx.sess().span_fatal( + span.unwrap(), + &format!("symbol `{}` is already defined", &sym) + ) + } else { + bug!("symbol `{}` is already defined", &sym) + } + }); + llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage); + llvm::LLVMSetInitializer(g2, g1); + g2 + } + } else { + // Generate an external declaration. + // FIXME(nagisa): investigate whether it can be changed into define_global + declare::declare_global(cx, &sym, llty) + } +} + +pub fn codegen_static<'a, 'tcx>( + cx: &CodegenCx<'a, 'tcx>, + def_id: DefId, + is_mutable: bool, +) { + unsafe { + let attrs = cx.tcx.codegen_fn_attrs(def_id); + + let (v, alloc) = match ::mir::codegen_static_initializer(cx, def_id) { + Ok(v) => v, + // Error has already been reported + Err(_) => return, + }; + + let g = get_static(cx, def_id); + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let mut val_llty = val_ty(v); + let v = if val_llty == Type::i1(cx) { + val_llty = Type::i8(cx); + llvm::LLVMConstZExt(v, val_llty) + } else { + v + }; + + let instance = Instance::mono(cx.tcx, def_id); + let ty = instance.ty(cx.tcx); + let llty = cx.layout_of(ty).llvm_type(cx); + let g = if val_llty == llty { + g + } else { + // If we created the global with the wrong type, + // correct the type. 
+ let empty_string = const_cstr!(""); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); + + let linkage = llvm::LLVMRustGetLinkage(g); + let visibility = llvm::LLVMRustGetVisibility(g); + + let new_g = llvm::LLVMRustGetOrInsertGlobal( + cx.llmod, name_string.as_ptr(), val_llty); + + llvm::LLVMRustSetLinkage(new_g, linkage); + llvm::LLVMRustSetVisibility(new_g, visibility); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + cx.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(cx, g, cx.align_of(ty)); + llvm::LLVMSetInitializer(g, v); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. + if !is_mutable { + if cx.type_is_freeze(ty) { + llvm::LLVMSetGlobalConstant(g, llvm::True); + } + } + + debuginfo::create_global_var_metadata(cx, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, cx.tls_model); + + // Do not allow LLVM to change the alignment of a TLS on macOS. + // + // By default a global's alignment can be freely increased. + // This allows LLVM to generate more performant instructions + // e.g. using load-aligned into a SIMD register. + // + // However, on macOS 10.10 or below, the dynamic linker does not + // respect any alignment given on the TLS (radar 24221680). + // This will violate the alignment assumption, and causing segfault at runtime. + // + // This bug is very easy to trigger. In `println!` and `panic!`, + // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, + // which the values would be `mem::replace`d on initialization. 
+ // The implementation of `mem::replace` will use SIMD + // whenever the size is 32 bytes or higher. LLVM notices SIMD is used + // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, + // which macOS's dyld disregarded and causing crashes + // (see issues #51794, #51758, #50867, #48866 and #44056). + // + // To workaround the bug, we trick LLVM into not increasing + // the global's alignment by explicitly assigning a section to it + // (equivalent to automatically generating a `#[link_section]` attribute). + // See the comment in the `GlobalValue::canIncreaseAlignment()` function + // of `lib/IR/Globals.cpp` for why this works. + // + // When the alignment is not increased, the optimized `mem::replace` + // will use load-unaligned instructions instead, and thus avoiding the crash. + // + // We could remove this hack whenever we decide to drop macOS 10.10 support. + if cx.tcx.sess.target.target.options.is_like_osx { + let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") + } else { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") + }; + llvm::LLVMSetSection(g, sect_name.as_ptr()); + } + } + + + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. 
+ if cx.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if let Some(section) = attrs.link_section { + let section = llvm::LLVMMDStringInContext( + cx.llcx, + section.as_str().as_ptr() as *const _, + section.as_str().len() as c_uint, + ); + let alloc = llvm::LLVMMDStringInContext( + cx.llcx, + alloc.bytes.as_ptr() as *const _, + alloc.bytes.len() as c_uint, + ); + let data = [section, alloc]; + let meta = llvm::LLVMMDNodeInContext(cx.llcx, data.as_ptr(), 2); + llvm::LLVMAddNamedMetadataOperand( + cx.llmod, + "wasm.custom_sections\0".as_ptr() as *const _, + meta, + ); + } + } else { + base::set_link_section(g, &attrs); + } + + if attrs.flags.contains(CodegenFnAttrFlags::USED) { + // This static will be stored in the llvm.used variable which is an array of i8* + let cast = llvm::LLVMConstPointerCast(g, Type::i8p(cx)); + cx.used_statics.borrow_mut().push(cast); + } + } +} diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs new file mode 100644 index 000000000000..32ea2c5eb8f3 --- /dev/null +++ b/src/librustc_codegen_llvm/context.rs @@ -0,0 +1,776 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use attributes; +use common; +use llvm; +use rustc::dep_graph::DepGraphSafe; +use rustc::hir; +use debuginfo; +use callee; +use base; +use declare; +use monomorphize::Instance; +use value::Value; + +use monomorphize::partitioning::CodegenUnit; +use type_::Type; +use type_of::PointeeInfo; + +use rustc_data_structures::base_n; +use rustc_data_structures::small_c_str::SmallCStr; +use rustc::mir::mono::Stats; +use rustc::session::config::{self, DebugInfo}; +use rustc::session::Session; +use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::util::nodemap::FxHashMap; +use rustc_target::spec::{HasTargetSpec, Target}; + +use std::ffi::CStr; +use std::cell::{Cell, RefCell}; +use std::iter; +use std::str; +use std::sync::Arc; +use syntax::symbol::LocalInternedString; +use abi::Abi; + +/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM +/// `llvm::Context` so that several compilation units may be optimized in parallel. +/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. +pub struct CodegenCx<'a, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + pub check_overflow: bool, + pub use_dll_storage_attrs: bool, + pub tls_model: llvm::ThreadLocalMode, + + pub llmod: &'a llvm::Module, + pub llcx: &'a llvm::Context, + pub stats: RefCell<Stats>, + pub codegen_unit: Arc<CodegenUnit<'tcx>>, + + /// Cache instances of monomorphic and polymorphic items + pub instances: RefCell<FxHashMap<Instance<'tcx>, &'a Value>>, + /// Cache generated vtables + pub vtables: RefCell<FxHashMap<(Ty<'tcx>, + Option<ty::PolyExistentialTraitRef<'tcx>>), &'a Value>>, + /// Cache of constant strings, + pub const_cstr_cache: RefCell<FxHashMap<LocalInternedString, &'a Value>>, + + /// Reverse-direction for const ptrs cast from globals. + /// Key is a Value holding a *T, + /// Val is a Value holding a *[T]. + /// + /// Needed because LLVM loses pointer->pointee association + /// when we ptrcast, and we have to ptrcast during codegen + /// of a [T] const because we form a slice, a (*T,usize) pair, not + /// a pointer to an LLVM array type.
Similar for trait objects. + pub const_unsized: RefCell>, + + /// Cache of emitted const globals (value -> global) + pub const_globals: RefCell>, + + /// List of globals for static variables which need to be passed to the + /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete. + /// (We have to make sure we don't invalidate any Values referring + /// to constants.) + pub statics_to_rauw: RefCell>, + + /// Statics that will be placed in the llvm.used variable + /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details + pub used_statics: RefCell>, + + pub lltypes: RefCell, Option), &'a Type>>, + pub scalar_lltypes: RefCell, &'a Type>>, + pub pointee_infos: RefCell, Size), Option>>, + pub isize_ty: &'a Type, + + pub dbg_cx: Option>, + + eh_personality: Cell>, + eh_unwind_resume: Cell>, + pub rust_try_fn: Cell>, + + intrinsics: RefCell>, + + /// A counter that is used for generating local symbol names + local_gen_sym_counter: Cell, +} + +impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> { +} + +pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { + let reloc_model_arg = match sess.opts.cg.relocation_model { + Some(ref s) => &s[..], + None => &sess.target.target.options.relocation_model[..], + }; + + match ::back::write::RELOC_MODEL_ARGS.iter().find( + |&&arg| arg.0 == reloc_model_arg) { + Some(x) => x.1, + _ => { + sess.err(&format!("{:?} is not a valid relocation mode", + reloc_model_arg)); + sess.abort_if_errors(); + bug!(); + } + } +} + +fn get_tls_model(sess: &Session) -> llvm::ThreadLocalMode { + let tls_model_arg = match sess.opts.debugging_opts.tls_model { + Some(ref s) => &s[..], + None => &sess.target.target.options.tls_model[..], + }; + + match ::back::write::TLS_MODEL_ARGS.iter().find( + |&&arg| arg.0 == tls_model_arg) { + Some(x) => x.1, + _ => { + sess.err(&format!("{:?} is not a valid TLS model", + tls_model_arg)); + sess.abort_if_errors(); + bug!(); + } + } +} + +fn is_any_library(sess: &Session) -> 
bool { + sess.crate_types.borrow().iter().any(|ty| { + *ty != config::CrateType::Executable + }) +} + +pub fn is_pie_binary(sess: &Session) -> bool { + !is_any_library(sess) && get_reloc_model(sess) == llvm::RelocMode::PIC +} + +pub unsafe fn create_module( + sess: &Session, + llcx: &'ll llvm::Context, + mod_name: &str, +) -> &'ll llvm::Module { + let mod_name = SmallCStr::new(mod_name); + let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx); + + // Ensure the data-layout values hardcoded remain the defaults. + if sess.target.target.options.is_builtin { + let tm = ::back::write::create_target_machine(sess, false); + llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm); + llvm::LLVMRustDisposeTargetMachine(tm); + + let data_layout = llvm::LLVMGetDataLayout(llmod); + let data_layout = str::from_utf8(CStr::from_ptr(data_layout).to_bytes()) + .ok().expect("got a non-UTF8 data-layout from LLVM"); + + // Unfortunately LLVM target specs change over time, and right now we + // don't have proper support to work with any more than one + // `data_layout` than the one that is in the rust-lang/rust repo. If + // this compiler is configured against a custom LLVM, we may have a + // differing data layout, even though we should update our own to use + // that one. + // + // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we + // disable this check entirely as we may be configured with something + // that has a different target layout. + // + // Unsure if this will actually cause breakage when rustc is configured + // as such. 
+ // + // FIXME(#34960) + let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or(""); + let custom_llvm_used = cfg_llvm_root.trim() != ""; + + if !custom_llvm_used && sess.target.target.data_layout != data_layout { + bug!("data-layout for builtin `{}` target, `{}`, \ + differs from LLVM default, `{}`", + sess.target.target.llvm_target, + sess.target.target.data_layout, + data_layout); + } + } + + let data_layout = SmallCStr::new(&sess.target.target.data_layout); + llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr()); + + let llvm_target = SmallCStr::new(&sess.target.target.llvm_target); + llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr()); + + if is_pie_binary(sess) { + llvm::LLVMRustSetModulePIELevel(llmod); + } + + llmod +} + +impl<'a, 'tcx> CodegenCx<'a, 'tcx> { + crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, + codegen_unit: Arc>, + llvm_module: &'a ::ModuleLlvm) + -> CodegenCx<'a, 'tcx> { + // An interesting part of Windows which MSVC forces our hand on (and + // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` + // attributes in LLVM IR as well as native dependencies (in C these + // correspond to `__declspec(dllimport)`). + // + // Whenever a dynamic library is built by MSVC it must have its public + // interface specified by functions tagged with `dllexport` or otherwise + // they're not available to be linked against. This poses a few problems + // for the compiler, some of which are somewhat fundamental, but we use + // the `use_dll_storage_attrs` variable below to attach the `dllexport` + // attribute to all LLVM functions that are exported e.g. they're + // already tagged with external linkage). This is suboptimal for a few + // reasons: + // + // * If an object file will never be included in a dynamic library, + // there's no need to attach the dllexport attribute. Most object + // files in Rust are not destined to become part of a dll as binaries + // are statically linked by default. 
+ // * If the compiler is emitting both an rlib and a dylib, the same + // source object file is currently used but with MSVC this may be less + // feasible. The compiler may be able to get around this, but it may + // involve some invasive changes to deal with this. + // + // The flipside of this situation is that whenever you link to a dll and + // you import a function from it, the import should be tagged with + // `dllimport`. At this time, however, the compiler does not emit + // `dllimport` for any declarations other than constants (where it is + // required), which is again suboptimal for even more reasons! + // + // * Calling a function imported from another dll without using + // `dllimport` causes the linker/compiler to have extra overhead (one + // `jmp` instruction on x86) when calling the function. + // * The same object file may be used in different circumstances, so a + // function may be imported from a dll if the object is linked into a + // dll, but it may be just linked against if linked into an rlib. + // * The compiler has no knowledge about whether native functions should + // be tagged dllimport or not. + // + // For now the compiler takes the perf hit (I do not have any numbers to + // this effect) by marking very little as `dllimport` and praying the + // linker will take care of everything. Fixing this problem will likely + // require adding a few attributes to Rust itself (feature gated at the + // start) and then strongly recommending static linkage on MSVC! 
+ let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc; + + let check_overflow = tcx.sess.overflow_checks(); + + let tls_model = get_tls_model(&tcx.sess); + + let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod()); + + let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None { + let dctx = debuginfo::CrateDebugContext::new(llmod); + debuginfo::metadata::compile_unit_metadata(tcx, + &codegen_unit.name().as_str(), + &dctx); + Some(dctx) + } else { + None + }; + + let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits()); + + CodegenCx { + tcx, + check_overflow, + use_dll_storage_attrs, + tls_model, + llmod, + llcx, + stats: RefCell::new(Stats::default()), + codegen_unit, + instances: RefCell::new(FxHashMap()), + vtables: RefCell::new(FxHashMap()), + const_cstr_cache: RefCell::new(FxHashMap()), + const_unsized: RefCell::new(FxHashMap()), + const_globals: RefCell::new(FxHashMap()), + statics_to_rauw: RefCell::new(Vec::new()), + used_statics: RefCell::new(Vec::new()), + lltypes: RefCell::new(FxHashMap()), + scalar_lltypes: RefCell::new(FxHashMap()), + pointee_infos: RefCell::new(FxHashMap()), + isize_ty, + dbg_cx, + eh_personality: Cell::new(None), + eh_unwind_resume: Cell::new(None), + rust_try_fn: Cell::new(None), + intrinsics: RefCell::new(FxHashMap()), + local_gen_sym_counter: Cell::new(0), + } + } +} + +impl<'b, 'tcx> CodegenCx<'b, 'tcx> { + pub fn sess<'a>(&'a self) -> &'a Session { + &self.tcx.sess + } + + pub fn get_intrinsic(&self, key: &str) -> &'b Value { + if let Some(v) = self.intrinsics.borrow().get(key).cloned() { + return v; + } + match declare_intrinsic(self, key) { + Some(v) => return v, + None => bug!("unknown intrinsic '{}'", key) + } + } + + /// Generate a new symbol name with the given prefix. This symbol name must + /// only be used for definitions with `internal` or `private` linkage. 
+ pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + let idx = self.local_gen_sym_counter.get(); + self.local_gen_sym_counter.set(idx + 1); + // Include a '.' character, so there can be no accidental conflicts with + // user defined names + let mut name = String::with_capacity(prefix.len() + 6); + name.push_str(prefix); + name.push_str("."); + base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); + name + } + + pub fn eh_personality(&self) -> &'b Value { + // The exception handling personality function. + // + // If our compilation unit has the `eh_personality` lang item somewhere + // within it, then we just need to codegen that. Otherwise, we're + // building an rlib which will depend on some upstream implementation of + // this function, so we just codegen a generic reference to it. We don't + // specify any of the types for the function, we just make it a symbol + // that LLVM can later use. + // + // Note that MSVC is a little special here in that we don't use the + // `eh_personality` lang item at all. Currently LLVM has support for + // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the + // *name of the personality function* to decide what kind of unwind side + // tables/landing pads to emit. It looks like Dwarf is used by default, + // injecting a dependency on the `_Unwind_Resume` symbol for resuming + // an "exception", but for MSVC we want to force SEH. This means that we + // can't actually have the personality function be our standard + // `rust_eh_personality` function, but rather we wired it up to the + // CRT's custom personality function, which forces LLVM to consider + // landing pads as "landing pads for SEH". 
+ if let Some(llpersonality) = self.eh_personality.get() { + return llpersonality + } + let tcx = self.tcx; + let llfn = match tcx.lang_items().eh_personality() { + Some(def_id) if !base::wants_msvc_seh(self.sess()) => { + callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) + } + _ => { + let name = if base::wants_msvc_seh(self.sess()) { + "__CxxFrameHandler3" + } else { + "rust_eh_personality" + }; + let fty = Type::variadic_func(&[], Type::i32(self)); + declare::declare_cfn(self, name, fty) + } + }; + attributes::apply_target_cpu_attr(self, llfn); + self.eh_personality.set(Some(llfn)); + llfn + } + + // Returns a Value of the "eh_unwind_resume" lang item if one is defined, + // otherwise declares it as an external function. + pub fn eh_unwind_resume(&self) -> &'b Value { + use attributes; + let unwresume = &self.eh_unwind_resume; + if let Some(llfn) = unwresume.get() { + return llfn; + } + + let tcx = self.tcx; + assert!(self.sess().target.target.options.custom_unwind_resume); + if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { + let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); + unwresume.set(Some(llfn)); + return llfn; + } + + let ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig( + iter::once(tcx.mk_mut_ptr(tcx.types.u8)), + tcx.types.never, + false, + hir::Unsafety::Unsafe, + Abi::C + ))); + + let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty); + attributes::unwind(llfn, true); + attributes::apply_target_cpu_attr(self, llfn); + unwresume.set(Some(llfn)); + llfn + } + + pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + common::type_needs_drop(self.tcx, ty) + } + + pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + common::type_is_sized(self.tcx, ty) + } + + pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { + common::type_is_freeze(self.tcx, ty) + } + + pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { + use syntax_pos::DUMMY_SP; + if ty.is_sized(self.tcx.at(DUMMY_SP), 
ty::ParamEnv::reveal_all()) { + return false; + } + + let tail = self.tcx.struct_tail(ty); + match tail.sty { + ty::TyForeign(..) => false, + ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true, + _ => bug!("unexpected unsized tail: {:?}", tail.sty), + } + } +} + +impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx> { + fn data_layout(&self) -> &ty::layout::TargetDataLayout { + &self.tcx.data_layout + } +} + +impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx> { + fn target_spec(&self) -> &Target { + &self.tcx.sess.target.target + } +} + +impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { + self.tcx + } +} + +impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { + type Ty = Ty<'tcx>; + type TyLayout = TyLayout<'tcx>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)) + .unwrap_or_else(|e| match e { + LayoutError::SizeOverflow(_) => self.sess().fatal(&e.to_string()), + _ => bug!("failed to get layout for `{}`: {}", ty, e) + }) + } +} + +/// Declare any llvm intrinsics that you might need +fn declare_intrinsic(cx: &CodegenCx<'ll, '_>, key: &str) -> Option<&'ll Value> { + macro_rules! ifn { + ($name:expr, fn() -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + cx.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn(...) 
-> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + cx.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f, false); + cx.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + } + macro_rules! mk_struct { + ($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false)) + } + + let i8p = Type::i8p(cx); + let void = Type::void(cx); + let i1 = Type::i1(cx); + let t_i8 = Type::i8(cx); + let t_i16 = Type::i16(cx); + let t_i32 = Type::i32(cx); + let t_i64 = Type::i64(cx); + let t_i128 = Type::i128(cx); + let t_f32 = Type::f32(cx); + let t_f64 = Type::f64(cx); + + let t_v2f32 = Type::vector(t_f32, 2); + let t_v4f32 = Type::vector(t_f32, 4); + let t_v8f32 = Type::vector(t_f32, 8); + let t_v16f32 = Type::vector(t_f32, 16); + + let t_v2f64 = Type::vector(t_f64, 2); + let t_v4f64 = Type::vector(t_f64, 4); + let t_v8f64 = Type::vector(t_f64, 8); + + ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + + ifn!("llvm.trap", fn() -> void); + ifn!("llvm.debugtrap", fn() -> void); + ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + + 
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); + ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); + ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); + ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); + ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); + ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); + ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); + + ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); + ifn!("llvm.cos.v2f32", 
fn(t_v2f32) -> t_v2f32); + ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); + ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log10.f64", 
fn(t_f64) -> t_f64); + ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + 
ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); + ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); + ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); + ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.round.f32", fn(t_f32) -> t_f32); + ifn!("llvm.round.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); + ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); + ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); + ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); + ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); + ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bswap.i128", 
fn(t_i128) -> t_i128); + + ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); + ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> 
mk_struct!{t_i8, i1}); + ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); + ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + + ifn!("llvm.expect.i1", fn(i1, i1) -> i1); + ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.localescape", fn(...) -> void); + ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); + ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + + ifn!("llvm.assume", fn(i1) -> void); + ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + + if cx.sess().opts.debuginfo != DebugInfo::None { + ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void); + ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void); + } + return None; +} diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs new file mode 100644 index 000000000000..a76f1d50fa7b --- /dev/null +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -0,0 +1,138 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::{FunctionDebugContext, FunctionDebugContextData}; +use super::metadata::file_metadata; +use super::utils::{DIB, span_start}; + +use llvm; +use llvm::debuginfo::DIScope; +use common::CodegenCx; +use rustc::mir::{Mir, SourceScope}; + +use libc::c_uint; + +use syntax_pos::Pos; + +use rustc_data_structures::bitvec::BitArray; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; + +use syntax_pos::BytePos; + +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope<'ll> { + pub scope_metadata: Option<&'ll DIScope>, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} + +impl MirDebugScope<'ll> { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_none() + } +} + +/// Produce DIScope DIEs for each MIR Scope which has variables defined in it. +/// If debuginfo is disabled, the returned vector is empty. +pub fn create_mir_scopes( + cx: &CodegenCx<'ll, '_>, + mir: &Mir, + debug_context: &FunctionDebugContext<'ll>, +) -> IndexVec> { + let null_scope = MirDebugScope { + scope_metadata: None, + file_start_pos: BytePos(0), + file_end_pos: BytePos(0) + }; + let mut scopes = IndexVec::from_elem(null_scope, &mir.source_scopes); + + let debug_context = match *debug_context { + FunctionDebugContext::RegularContext(ref data) => data, + FunctionDebugContext::DebugInfoDisabled | + FunctionDebugContext::FunctionWithoutDebugInfo => { + return scopes; + } + }; + + // Find all the scopes with variables defined in them. 
+ let mut has_variables = BitArray::new(mir.source_scopes.len()); + for var in mir.vars_iter() { + let decl = &mir.local_decls[var]; + has_variables.insert(decl.visibility_scope); + } + + // Instantiate all scopes. + for idx in 0..mir.source_scopes.len() { + let scope = SourceScope::new(idx); + make_mir_scope(cx, &mir, &has_variables, debug_context, scope, &mut scopes); + } + + scopes +} + +fn make_mir_scope(cx: &CodegenCx<'ll, '_>, + mir: &Mir, + has_variables: &BitArray, + debug_context: &FunctionDebugContextData<'ll>, + scope: SourceScope, + scopes: &mut IndexVec>) { + if scopes[scope].is_valid() { + return; + } + + let scope_data = &mir.source_scopes[scope]; + let parent_scope = if let Some(parent) = scope_data.parent_scope { + make_mir_scope(cx, mir, has_variables, debug_context, parent, scopes); + scopes[parent] + } else { + // The root is the function itself. + let loc = span_start(cx, mir.span); + scopes[scope] = MirDebugScope { + scope_metadata: Some(debug_context.fn_metadata), + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; + return; + }; + + if !has_variables.contains(scope) { + // Do not create a DIScope if there are no variables + // defined in this MIR Scope, to avoid debuginfo bloat. + + // However, we don't skip creating a nested scope if + // our parent is the root, because we might want to + // put arguments in the root and not have shadowing. 
+ if parent_scope.scope_metadata.unwrap() != debug_context.fn_metadata { + scopes[scope] = parent_scope; + return; + } + } + + let loc = span_start(cx, scope_data.span); + let file_metadata = file_metadata(cx, + &loc.file.name, + debug_context.defining_crate); + + let scope_metadata = unsafe { + Some(llvm::LLVMRustDIBuilderCreateLexicalBlock( + DIB(cx), + parent_scope.scope_metadata.unwrap(), + file_metadata, + loc.line as c_uint, + loc.col.to_usize() as c_uint)) + }; + scopes[scope] = MirDebugScope { + scope_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; +} diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_codegen_llvm/debuginfo/doc.rs similarity index 99% rename from src/librustc_trans/debuginfo/doc.rs rename to src/librustc_codegen_llvm/debuginfo/doc.rs index cbecc0eb7d1b..ce0476b07eb4 100644 --- a/src/librustc_trans/debuginfo/doc.rs +++ b/src/librustc_codegen_llvm/debuginfo/doc.rs @@ -131,9 +131,9 @@ //! when generating prologue instructions we have to make sure that we don't //! emit source location information until the 'real' function body begins. For //! this reason, source location emission is disabled by default for any new -//! function being translated and is only activated after a call to the third +//! function being codegened and is only activated after a call to the third //! function from the list above, `start_emitting_source_locations()`. This -//! function should be called right before regularly starting to translate the +//! function should be called right before regularly starting to codegen the //! top-level block of the given function. //! //! 
There is one exception to the above rule: `llvm.dbg.declare` instruction diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs similarity index 88% rename from src/librustc_trans/debuginfo/gdb.rs rename to src/librustc_codegen_llvm/debuginfo/gdb.rs index 03e7c63dbca3..f6faddb894ff 100644 --- a/src/librustc_trans/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -15,10 +15,10 @@ use llvm; use common::{C_bytes, CodegenCx, C_i32}; use builder::Builder; use declare; +use rustc::session::config::DebugInfo; use type_::Type; -use rustc::session::config::NoDebugInfo; +use value::Value; -use std::ptr; use syntax::attr; @@ -40,8 +40,8 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { /// Allocates the global variable responsible for the .debug_gdb_scripts binary /// section. -pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx) - -> llvm::ValueRef { +pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) + -> &'ll Value { let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0"; let section_var_name = &c_section_var_name[..c_section_var_name.len()-1]; @@ -50,12 +50,12 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx) c_section_var_name.as_ptr() as *const _) }; - if section_var == ptr::null_mut() { + section_var.unwrap_or_else(|| { let section_name = b".debug_gdb_scripts\0"; let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array(&Type::i8(cx), + let llvm_type = Type::array(Type::i8(cx), section_contents.len() as u64); let section_var = declare::define_global(cx, section_var_name, @@ -72,9 +72,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx) llvm::LLVMSetAlignment(section_var, 1); section_var } - } else { - section_var - } + }) } pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool { @@ -83,7 +81,6 @@ pub fn 
needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool { "omit_gdb_pretty_printer_section"); !omit_gdb_pretty_printer_section && - !cx.sess().target.target.options.is_like_osx && - !cx.sess().target.target.options.is_like_windows && - cx.sess().opts.debuginfo != NoDebugInfo + cx.sess().opts.debuginfo != DebugInfo::None && + cx.sess().target.target.options.emit_debug_gdb_scripts } diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs new file mode 100644 index 000000000000..223fa75723cf --- /dev/null +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -0,0 +1,1820 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use self::RecursiveTypeDescription::*; +use self::MemberDescriptionFactory::*; +use self::EnumDiscriminantInfo::*; + +use super::utils::{debug_context, DIB, span_start, + get_namespace_for_item, create_DIArray, is_node_local_to_unit}; +use super::namespace::mangled_name_of_instance; +use super::type_names::compute_debuginfo_type_name; +use super::{CrateDebugContext}; +use abi; +use value::Value; + +use llvm; +use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, + DICompositeType, DILexicalBlock, DIFlags}; + +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; +use rustc::hir::CodegenFnAttrFlags; +use rustc::hir::def::CtorKind; +use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; +use rustc::ich::NodeIdHashingMode; +use rustc_data_structures::fingerprint::Fingerprint; +use rustc::ty::Instance; +use common::CodegenCx; +use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, LayoutOf, PrimitiveExt, Size, TyLayout}; +use 
rustc::session::config; +use rustc::util::nodemap::FxHashMap; +use rustc_fs_util::path2cstr; +use rustc_data_structures::small_c_str::SmallCStr; + +use libc::{c_uint, c_longlong}; +use std::ffi::CString; +use std::fmt::{self, Write}; +use std::hash::{Hash, Hasher}; +use std::iter; +use std::ptr; +use std::path::{Path, PathBuf}; +use syntax::ast; +use syntax::symbol::{Interner, InternedString, Symbol}; +use syntax_pos::{self, Span, FileName}; + +impl PartialEq for llvm::Metadata { + fn eq(&self, other: &Self) -> bool { + self as *const _ == other as *const _ + } +} + +impl Eq for llvm::Metadata {} + +impl Hash for llvm::Metadata { + fn hash(&self, hasher: &mut H) { + (self as *const Self).hash(hasher); + } +} + +impl fmt::Debug for llvm::Metadata { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + (self as *const Self).fmt(f) + } +} + +// From DWARF 5. +// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1 +const DW_LANG_RUST: c_uint = 0x1c; +#[allow(non_upper_case_globals)] +const DW_ATE_boolean: c_uint = 0x02; +#[allow(non_upper_case_globals)] +const DW_ATE_float: c_uint = 0x04; +#[allow(non_upper_case_globals)] +const DW_ATE_signed: c_uint = 0x05; +#[allow(non_upper_case_globals)] +const DW_ATE_unsigned: c_uint = 0x07; +#[allow(non_upper_case_globals)] +const DW_ATE_unsigned_char: c_uint = 0x08; + +pub const UNKNOWN_LINE_NUMBER: c_uint = 0; +pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0; + +pub const NO_SCOPE_METADATA: Option<&DIScope> = None; + +#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] +pub struct UniqueTypeId(ast::Name); + +// The TypeMap is where the CrateDebugContext holds the type metadata nodes +// created so far. The metadata nodes are indexed by UniqueTypeId, and, for +// faster lookup, also by Ty. The TypeMap is responsible for creating +// UniqueTypeIds. +pub struct TypeMap<'ll, 'tcx> { + // The UniqueTypeIds created so far + unique_id_interner: Interner, + // A map from UniqueTypeId to debuginfo metadata for that type. 
This is a 1:1 mapping. + unique_id_to_metadata: FxHashMap, + // A map from types to debuginfo metadata. This is a N:1 mapping. + type_to_metadata: FxHashMap, &'ll DIType>, + // A map from types to UniqueTypeId. This is a N:1 mapping. + type_to_unique_id: FxHashMap, UniqueTypeId> +} + +impl TypeMap<'ll, 'tcx> { + pub fn new() -> Self { + TypeMap { + unique_id_interner: Interner::new(), + type_to_metadata: FxHashMap(), + unique_id_to_metadata: FxHashMap(), + type_to_unique_id: FxHashMap(), + } + } + + // Adds a Ty to metadata mapping to the TypeMap. The method will fail if + // the mapping already exists. + fn register_type_with_metadata( + &mut self, + type_: Ty<'tcx>, + metadata: &'ll DIType, + ) { + if self.type_to_metadata.insert(type_, metadata).is_some() { + bug!("Type metadata for Ty '{}' is already in the TypeMap!", type_); + } + } + + // Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will + // fail if the mapping already exists. + fn register_unique_id_with_metadata( + &mut self, + unique_type_id: UniqueTypeId, + metadata: &'ll DIType, + ) { + if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() { + bug!("Type metadata for unique id '{}' is already in the TypeMap!", + self.get_unique_type_id_as_string(unique_type_id)); + } + } + + fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option<&'ll DIType> { + self.type_to_metadata.get(&type_).cloned() + } + + fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option<&'ll DIType> { + self.unique_id_to_metadata.get(&unique_type_id).cloned() + } + + // Get the string representation of a UniqueTypeId. This method will fail if + // the id is unknown. + fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> &str { + let UniqueTypeId(interner_key) = unique_type_id; + self.unique_id_interner.get(interner_key) + } + + // Get the UniqueTypeId for the given type. 
If the UniqueTypeId for the given + // type has been requested before, this is just a table lookup. Otherwise an + // ID will be generated and stored for later lookup. + fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx>, + type_: Ty<'tcx>) -> UniqueTypeId { + // Let's see if we already have something in the cache + match self.type_to_unique_id.get(&type_).cloned() { + Some(unique_type_id) => return unique_type_id, + None => { /* generate one */} + }; + + // The hasher we are using to generate the UniqueTypeId. We want + // something that provides more than the 64 bits of the DefaultHasher. + let mut hasher = StableHasher::::new(); + let mut hcx = cx.tcx.create_stable_hashing_context(); + let type_ = cx.tcx.erase_regions(&type_); + hcx.while_hashing_spans(false, |hcx| { + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + type_.hash_stable(hcx, &mut hasher); + }); + }); + let unique_type_id = hasher.finish().to_hex(); + + let key = self.unique_id_interner.intern(&unique_type_id); + self.type_to_unique_id.insert(type_, UniqueTypeId(key)); + + return UniqueTypeId(key); + } + + // Get the UniqueTypeId for an enum variant. Enum variants are not really + // types of their own, so they need special handling. We still need a + // UniqueTypeId for them, since to debuginfo they *are* real types. + fn get_unique_type_id_of_enum_variant<'a>(&mut self, + cx: &CodegenCx<'a, 'tcx>, + enum_type: Ty<'tcx>, + variant_name: &str) + -> UniqueTypeId { + let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type); + let enum_variant_type_id = format!("{}::{}", + self.get_unique_type_id_as_string(enum_type_id), + variant_name); + let interner_key = self.unique_id_interner.intern(&enum_variant_type_id); + UniqueTypeId(interner_key) + } +} + +// A description of some recursive type. 
It can either be already finished (as +// with FinalMetadata) or it is not yet finished, but contains all information +// needed to generate the missing parts of the description. See the +// documentation section on Recursive Types at the top of this file for more +// information. +enum RecursiveTypeDescription<'ll, 'tcx> { + UnfinishedMetadata { + unfinished_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + metadata_stub: &'ll DICompositeType, + member_description_factory: MemberDescriptionFactory<'ll, 'tcx>, + }, + FinalMetadata(&'ll DICompositeType) +} + +fn create_and_register_recursive_type_forward_declaration( + cx: &CodegenCx<'ll, 'tcx>, + unfinished_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + metadata_stub: &'ll DICompositeType, + member_description_factory: MemberDescriptionFactory<'ll, 'tcx>, +) -> RecursiveTypeDescription<'ll, 'tcx> { + + // Insert the stub into the TypeMap in order to allow for recursive references + let mut type_map = debug_context(cx).type_map.borrow_mut(); + type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub); + type_map.register_type_with_metadata(unfinished_type, metadata_stub); + + UnfinishedMetadata { + unfinished_type, + unique_type_id, + metadata_stub, + member_description_factory, + } +} + +impl RecursiveTypeDescription<'ll, 'tcx> { + // Finishes up the description of the type in question (mostly by providing + // descriptions of the fields of the given type) and returns the final type + // metadata. + fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> { + match *self { + FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false), + UnfinishedMetadata { + unfinished_type, + unique_type_id, + metadata_stub, + ref member_description_factory, + } => { + // Make sure that we have a forward declaration of the type in + // the TypeMap so that recursive references are possible. 
This + // will always be the case if the RecursiveTypeDescription has + // been properly created through the + // create_and_register_recursive_type_forward_declaration() + // function. + { + let type_map = debug_context(cx).type_map.borrow(); + if type_map.find_metadata_for_unique_id(unique_type_id).is_none() || + type_map.find_metadata_for_type(unfinished_type).is_none() { + bug!("Forward declaration of potentially recursive type \ + '{:?}' was not found in TypeMap!", + unfinished_type); + } + } + + // ... then create the member descriptions ... + let member_descriptions = + member_description_factory.create_member_descriptions(cx); + + // ... and attach them to the stub to complete it. + set_members_of_composite_type(cx, + metadata_stub, + member_descriptions); + return MetadataCreationResult::new(metadata_stub, true); + } + } + } +} + +// Returns from the enclosing function if the type metadata with the given +// unique id can be found in the type map +macro_rules! return_if_metadata_created_in_meantime { + ($cx: expr, $unique_type_id: expr) => ( + match debug_context($cx).type_map + .borrow() + .find_metadata_for_unique_id($unique_type_id) { + Some(metadata) => return MetadataCreationResult::new(metadata, true), + None => { /* proceed normally */ } + } + ) +} + +fn fixed_vec_metadata( + cx: &CodegenCx<'ll, 'tcx>, + unique_type_id: UniqueTypeId, + array_or_slice_type: Ty<'tcx>, + element_type: Ty<'tcx>, + span: Span, +) -> MetadataCreationResult<'ll> { + let element_type_metadata = type_metadata(cx, element_type, span); + + return_if_metadata_created_in_meantime!(cx, unique_type_id); + + let (size, align) = cx.size_and_align_of(array_or_slice_type); + + let upper_bound = match array_or_slice_type.sty { + ty::TyArray(_, len) => { + len.unwrap_usize(cx.tcx) as c_longlong + } + _ => -1 + }; + + let subrange = unsafe { + Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) + }; + + let subscripts = create_DIArray(DIB(cx), &[subrange]); + let 
metadata = unsafe { + llvm::LLVMRustDIBuilderCreateArrayType( + DIB(cx), + size.bits(), + align.abi_bits() as u32, + element_type_metadata, + subscripts) + }; + + return MetadataCreationResult::new(metadata, false); +} + +fn vec_slice_metadata( + cx: &CodegenCx<'ll, 'tcx>, + slice_ptr_type: Ty<'tcx>, + element_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span, +) -> MetadataCreationResult<'ll> { + let data_ptr_type = cx.tcx.mk_imm_ptr(element_type); + + let data_ptr_metadata = type_metadata(cx, data_ptr_type, span); + + return_if_metadata_created_in_meantime!(cx, unique_type_id); + + let slice_type_name = compute_debuginfo_type_name(cx, slice_ptr_type, true); + + let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type); + let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx.types.usize); + + let member_descriptions = vec![ + MemberDescription { + name: "data_ptr".to_string(), + type_metadata: data_ptr_metadata, + offset: Size::ZERO, + size: pointer_size, + align: pointer_align, + flags: DIFlags::FlagZero, + }, + MemberDescription { + name: "length".to_string(), + type_metadata: type_metadata(cx, cx.tcx.types.usize, span), + offset: pointer_size, + size: usize_size, + align: usize_align, + flags: DIFlags::FlagZero, + }, + ]; + + let file_metadata = unknown_file_metadata(cx); + + let metadata = composite_type_metadata(cx, + slice_ptr_type, + &slice_type_name[..], + unique_type_id, + member_descriptions, + NO_SCOPE_METADATA, + file_metadata, + span); + MetadataCreationResult::new(metadata, false) +} + +fn subroutine_type_metadata( + cx: &CodegenCx<'ll, 'tcx>, + unique_type_id: UniqueTypeId, + signature: ty::PolyFnSig<'tcx>, + span: Span, +) -> MetadataCreationResult<'ll> { + let signature = cx.tcx.normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &signature, + ); + + let signature_metadata: Vec<_> = iter::once( + // return type + match signature.output().sty { + ty::TyTuple(ref tys) if tys.is_empty() => None, + _ => 
Some(type_metadata(cx, signature.output(), span)) + } + ).chain( + // regular arguments + signature.inputs().iter().map(|argument_type| { + Some(type_metadata(cx, argument_type, span)) + }) + ).collect(); + + return_if_metadata_created_in_meantime!(cx, unique_type_id); + + return MetadataCreationResult::new( + unsafe { + llvm::LLVMRustDIBuilderCreateSubroutineType( + DIB(cx), + unknown_file_metadata(cx), + create_DIArray(DIB(cx), &signature_metadata[..])) + }, + false); +} + +// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill- +// defined concept. For the case of an actual trait pointer (i.e., Box, +// &Trait), trait_object_type should be the whole thing (e.g, Box) and +// trait_type should be the actual trait (e.g., Trait). Where the trait is part +// of a DST struct, there is no trait_object_type and the results of this +// function will be a little bit weird. +fn trait_pointer_metadata( + cx: &CodegenCx<'ll, 'tcx>, + trait_type: Ty<'tcx>, + trait_object_type: Option>, + unique_type_id: UniqueTypeId, +) -> &'ll DIType { + // The implementation provided here is a stub. It makes sure that the trait + // type is assigned the correct name, size, namespace, and source location. + // But it does not describe the trait's methods. + + let containing_scope = match trait_type.sty { + ty::TyDynamic(ref data, ..) 
=> if let Some(principal) = data.principal() { + let def_id = principal.def_id(); + Some(get_namespace_for_item(cx, def_id)) + } else { + NO_SCOPE_METADATA + }, + _ => { + bug!("debuginfo: Unexpected trait-object type in \ + trait_pointer_metadata(): {:?}", + trait_type); + } + }; + + let trait_object_type = trait_object_type.unwrap_or(trait_type); + let trait_type_name = + compute_debuginfo_type_name(cx, trait_object_type, false); + + let file_metadata = unknown_file_metadata(cx); + + let layout = cx.layout_of(cx.tcx.mk_mut_ptr(trait_type)); + + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + + let data_ptr_field = layout.field(cx, 0); + let vtable_field = layout.field(cx, 1); + let member_descriptions = vec![ + MemberDescription { + name: "pointer".to_string(), + type_metadata: type_metadata(cx, + cx.tcx.mk_mut_ptr(cx.tcx.types.u8), + syntax_pos::DUMMY_SP), + offset: layout.fields.offset(0), + size: data_ptr_field.size, + align: data_ptr_field.align, + flags: DIFlags::FlagArtificial, + }, + MemberDescription { + name: "vtable".to_string(), + type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), + offset: layout.fields.offset(1), + size: vtable_field.size, + align: vtable_field.align, + flags: DIFlags::FlagArtificial, + }, + ]; + + composite_type_metadata(cx, + trait_object_type, + &trait_type_name[..], + unique_type_id, + member_descriptions, + containing_scope, + file_metadata, + syntax_pos::DUMMY_SP) +} + +pub fn type_metadata( + cx: &CodegenCx<'ll, 'tcx>, + t: Ty<'tcx>, + usage_site_span: Span, +) -> &'ll DIType { + // Get the unique type id of this type. + let unique_type_id = { + let mut type_map = debug_context(cx).type_map.borrow_mut(); + // First, try to find the type in TypeMap. If we have seen it before, we + // can exit early here. 
+ match type_map.find_metadata_for_type(t) { + Some(metadata) => { + return metadata; + }, + None => { + // The Ty is not in the TypeMap but maybe we have already seen + // an equivalent type (e.g. only differing in region arguments). + // In order to find out, generate the unique type id and look + // that up. + let unique_type_id = type_map.get_unique_type_id_of_type(cx, t); + match type_map.find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => { + // There is already an equivalent type in the TypeMap. + // Register this Ty as an alias in the cache and + // return the cached metadata. + type_map.register_type_with_metadata(t, metadata); + return metadata; + }, + None => { + // There really is no type metadata for this type, so + // proceed by creating it. + unique_type_id + } + } + } + } + }; + + debug!("type_metadata: {:?}", t); + + let ptr_metadata = |ty: Ty<'tcx>| { + match ty.sty { + ty::TySlice(typ) => { + Ok(vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span)) + } + ty::TyStr => { + Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id, usage_site_span)) + } + ty::TyDynamic(..) 
=> { + Ok(MetadataCreationResult::new( + trait_pointer_metadata(cx, ty, Some(t), unique_type_id), + false)) + } + _ => { + let pointee_metadata = type_metadata(cx, ty, usage_site_span); + + match debug_context(cx).type_map + .borrow() + .find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => return Err(metadata), + None => { /* proceed normally */ } + }; + + Ok(MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata), + false)) + } + } + }; + + let MetadataCreationResult { metadata, already_stored_in_typemap } = match t.sty { + ty::TyNever | + ty::TyBool | + ty::TyChar | + ty::TyInt(_) | + ty::TyUint(_) | + ty::TyFloat(_) => { + MetadataCreationResult::new(basic_type_metadata(cx, t), false) + } + ty::TyTuple(ref elements) if elements.is_empty() => { + MetadataCreationResult::new(basic_type_metadata(cx, t), false) + } + ty::TyArray(typ, _) | + ty::TySlice(typ) => { + fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span) + } + ty::TyStr => { + fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8, usage_site_span) + } + ty::TyDynamic(..) => { + MetadataCreationResult::new( + trait_pointer_metadata(cx, t, None, unique_type_id), + false) + } + ty::TyForeign(..) => { + MetadataCreationResult::new( + foreign_type_metadata(cx, t, unique_type_id), + false) + } + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | + ty::TyRef(_, ty, _) => { + match ptr_metadata(ty) { + Ok(res) => res, + Err(metadata) => return metadata, + } + } + ty::TyAdt(def, _) if def.is_box() => { + match ptr_metadata(t.boxed_ty()) { + Ok(res) => res, + Err(metadata) => return metadata, + } + } + ty::TyFnDef(..) 
| ty::TyFnPtr(_) => { + let fn_metadata = subroutine_type_metadata(cx, + unique_type_id, + t.fn_sig(cx.tcx), + usage_site_span).metadata; + match debug_context(cx).type_map + .borrow() + .find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => return metadata, + None => { /* proceed normally */ } + }; + + // This is actually a function pointer, so wrap it in pointer DI + MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false) + + } + ty::TyClosure(def_id, substs) => { + let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx).collect(); + prepare_tuple_metadata(cx, + t, + &upvar_tys, + unique_type_id, + usage_site_span).finalize(cx) + } + ty::TyGenerator(def_id, substs, _) => { + let upvar_tys : Vec<_> = substs.field_tys(def_id, cx.tcx).map(|t| { + cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t) + }).collect(); + prepare_tuple_metadata(cx, + t, + &upvar_tys, + unique_type_id, + usage_site_span).finalize(cx) + } + ty::TyAdt(def, ..) => match def.adt_kind() { + AdtKind::Struct => { + prepare_struct_metadata(cx, + t, + unique_type_id, + usage_site_span).finalize(cx) + } + AdtKind::Union => { + prepare_union_metadata(cx, + t, + unique_type_id, + usage_site_span).finalize(cx) + } + AdtKind::Enum => { + prepare_enum_metadata(cx, + t, + def.did, + unique_type_id, + usage_site_span).finalize(cx) + } + }, + ty::TyTuple(ref elements) => { + prepare_tuple_metadata(cx, + t, + &elements[..], + unique_type_id, + usage_site_span).finalize(cx) + } + _ => { + bug!("debuginfo: unexpected type in type_metadata: {:?}", t) + } + }; + + { + let mut type_map = debug_context(cx).type_map.borrow_mut(); + + if already_stored_in_typemap { + // Also make sure that we already have a TypeMap entry for the unique type id. 
+ let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => metadata, + None => { + span_bug!(usage_site_span, + "Expected type metadata for unique \ + type id '{}' to already be in \ + the debuginfo::TypeMap but it \ + was not. (Ty = {})", + type_map.get_unique_type_id_as_string(unique_type_id), + t); + } + }; + + match type_map.find_metadata_for_type(t) { + Some(metadata) => { + if metadata != metadata_for_uid { + span_bug!(usage_site_span, + "Mismatch between Ty and \ + UniqueTypeId maps in \ + debuginfo::TypeMap. \ + UniqueTypeId={}, Ty={}", + type_map.get_unique_type_id_as_string(unique_type_id), + t); + } + } + None => { + type_map.register_type_with_metadata(t, metadata); + } + } + } else { + type_map.register_type_with_metadata(t, metadata); + type_map.register_unique_id_with_metadata(unique_type_id, metadata); + } + } + + metadata +} + +pub fn file_metadata(cx: &CodegenCx<'ll, '_>, + file_name: &FileName, + defining_crate: CrateNum) -> &'ll DIFile { + debug!("file_metadata: file_name: {}, defining_crate: {}", + file_name, + defining_crate); + + let directory = if defining_crate == LOCAL_CRATE { + &cx.sess().working_dir.0 + } else { + // If the path comes from an upstream crate we assume it has been made + // independent of the compiler's working directory one way or another. 
+ Path::new("") + }; + + file_metadata_raw(cx, &file_name.to_string(), &directory.to_string_lossy()) +} + +pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { + file_metadata_raw(cx, "", "") +} + +fn file_metadata_raw(cx: &CodegenCx<'ll, '_>, + file_name: &str, + directory: &str) + -> &'ll DIFile { + let key = (Symbol::intern(file_name), Symbol::intern(directory)); + + if let Some(file_metadata) = debug_context(cx).created_files.borrow().get(&key) { + return *file_metadata; + } + + debug!("file_metadata: file_name: {}, directory: {}", file_name, directory); + + let file_name = SmallCStr::new(file_name); + let directory = SmallCStr::new(directory); + + let file_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFile(DIB(cx), + file_name.as_ptr(), + directory.as_ptr()) + }; + + let mut created_files = debug_context(cx).created_files.borrow_mut(); + created_files.insert(key, file_metadata); + file_metadata +} + +fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { + debug!("basic_type_metadata: {:?}", t); + + let (name, encoding) = match t.sty { + ty::TyNever => ("!", DW_ATE_unsigned), + ty::TyTuple(ref elements) if elements.is_empty() => + ("()", DW_ATE_unsigned), + ty::TyBool => ("bool", DW_ATE_boolean), + ty::TyChar => ("char", DW_ATE_unsigned_char), + ty::TyInt(int_ty) => { + (int_ty.ty_to_string(), DW_ATE_signed) + }, + ty::TyUint(uint_ty) => { + (uint_ty.ty_to_string(), DW_ATE_unsigned) + }, + ty::TyFloat(float_ty) => { + (float_ty.ty_to_string(), DW_ATE_float) + }, + _ => bug!("debuginfo::basic_type_metadata - t is invalid type") + }; + + let (size, align) = cx.size_and_align_of(t); + let name = SmallCStr::new(name); + let ty_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateBasicType( + DIB(cx), + name.as_ptr(), + size.bits(), + align.abi_bits() as u32, + encoding) + }; + + return ty_metadata; +} + +fn foreign_type_metadata( + cx: &CodegenCx<'ll, 'tcx>, + t: Ty<'tcx>, + unique_type_id: UniqueTypeId, +) -> &'ll 
DIType { + debug!("foreign_type_metadata: {:?}", t); + + let name = compute_debuginfo_type_name(cx, t, false); + create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA) +} + +fn pointer_type_metadata( + cx: &CodegenCx<'ll, 'tcx>, + pointer_type: Ty<'tcx>, + pointee_type_metadata: &'ll DIType, +) -> &'ll DIType { + let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type); + let name = compute_debuginfo_type_name(cx, pointer_type, false); + let name = SmallCStr::new(&name); + unsafe { + llvm::LLVMRustDIBuilderCreatePointerType( + DIB(cx), + pointee_type_metadata, + pointer_size.bits(), + pointer_align.abi_bits() as u32, + name.as_ptr()) + } +} + +pub fn compile_unit_metadata(tcx: TyCtxt, + codegen_unit_name: &str, + debug_context: &CrateDebugContext<'ll, '_>) + -> &'ll DIDescriptor { + let mut name_in_debuginfo = match tcx.sess.local_crate_source_file { + Some(ref path) => path.clone(), + None => PathBuf::from(&*tcx.crate_name(LOCAL_CRATE).as_str()), + }; + + // The OSX linker has an idiosyncrasy where it will ignore some debuginfo + // if multiple object files with the same DW_AT_name are linked together. + // As a workaround we generate unique names for each object file. Those do + // not correspond to an actual source file but that should be harmless. + if tcx.sess.target.target.options.is_like_osx { + name_in_debuginfo.push("@"); + name_in_debuginfo.push(codegen_unit_name); + } + + debug!("compile_unit_metadata: {:?}", name_in_debuginfo); + // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice. 
+ let producer = format!("clang LLVM (rustc version {})", + (option_env!("CFG_VERSION")).expect("CFG_VERSION")); + + let name_in_debuginfo = name_in_debuginfo.to_string_lossy(); + let name_in_debuginfo = SmallCStr::new(&name_in_debuginfo); + let work_dir = SmallCStr::new(&tcx.sess.working_dir.0.to_string_lossy()); + let producer = CString::new(producer).unwrap(); + let flags = "\0"; + let split_name = "\0"; + + unsafe { + let file_metadata = llvm::LLVMRustDIBuilderCreateFile( + debug_context.builder, name_in_debuginfo.as_ptr(), work_dir.as_ptr()); + + let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit( + debug_context.builder, + DW_LANG_RUST, + file_metadata, + producer.as_ptr(), + tcx.sess.opts.optimize != config::OptLevel::No, + flags.as_ptr() as *const _, + 0, + split_name.as_ptr() as *const _); + + if tcx.sess.opts.debugging_opts.profile { + let cu_desc_metadata = llvm::LLVMRustMetadataAsValue(debug_context.llcontext, + unit_metadata); + + let gcov_cu_info = [ + path_to_mdstring(debug_context.llcontext, + &tcx.output_filenames(LOCAL_CRATE).with_extension("gcno")), + path_to_mdstring(debug_context.llcontext, + &tcx.output_filenames(LOCAL_CRATE).with_extension("gcda")), + cu_desc_metadata, + ]; + let gcov_metadata = llvm::LLVMMDNodeInContext(debug_context.llcontext, + gcov_cu_info.as_ptr(), + gcov_cu_info.len() as c_uint); + + let llvm_gcov_ident = const_cstr!("llvm.gcov"); + llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, + llvm_gcov_ident.as_ptr(), + gcov_metadata); + } + + return unit_metadata; + }; + + fn path_to_mdstring(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value { + let path_str = path2cstr(path); + unsafe { + llvm::LLVMMDStringInContext(llcx, + path_str.as_ptr(), + path_str.as_bytes().len() as c_uint) + } + } +} + +struct MetadataCreationResult<'ll> { + metadata: &'ll DIType, + already_stored_in_typemap: bool +} + +impl MetadataCreationResult<'ll> { + fn new(metadata: &'ll DIType, already_stored_in_typemap: bool) -> Self { + 
MetadataCreationResult { + metadata, + already_stored_in_typemap, + } + } +} + +// Description of a type member, which can either be a regular field (as in +// structs or tuples) or an enum variant. +#[derive(Debug)] +struct MemberDescription<'ll> { + name: String, + type_metadata: &'ll DIType, + offset: Size, + size: Size, + align: Align, + flags: DIFlags, +} + +// A factory for MemberDescriptions. It produces a list of member descriptions +// for some record-like type. MemberDescriptionFactories are used to defer the +// creation of type member descriptions in order to break cycles arising from +// recursive type definitions. +enum MemberDescriptionFactory<'ll, 'tcx> { + StructMDF(StructMemberDescriptionFactory<'tcx>), + TupleMDF(TupleMemberDescriptionFactory<'tcx>), + EnumMDF(EnumMemberDescriptionFactory<'ll, 'tcx>), + UnionMDF(UnionMemberDescriptionFactory<'tcx>), + VariantMDF(VariantMemberDescriptionFactory<'ll, 'tcx>) +} + +impl MemberDescriptionFactory<'ll, 'tcx> { + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + -> Vec> { + match *self { + StructMDF(ref this) => { + this.create_member_descriptions(cx) + } + TupleMDF(ref this) => { + this.create_member_descriptions(cx) + } + EnumMDF(ref this) => { + this.create_member_descriptions(cx) + } + UnionMDF(ref this) => { + this.create_member_descriptions(cx) + } + VariantMDF(ref this) => { + this.create_member_descriptions(cx) + } + } + } +} + +//=----------------------------------------------------------------------------- +// Structs +//=----------------------------------------------------------------------------- + +// Creates MemberDescriptions for the fields of a struct +struct StructMemberDescriptionFactory<'tcx> { + ty: Ty<'tcx>, + variant: &'tcx ty::VariantDef, + span: Span, +} + +impl<'tcx> StructMemberDescriptionFactory<'tcx> { + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + -> Vec> { + let layout = cx.layout_of(self.ty); + 
self.variant.fields.iter().enumerate().map(|(i, f)| { + let name = if self.variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + f.ident.to_string() + }; + let field = layout.field(cx, i); + let (size, align) = field.size_and_align(); + MemberDescription { + name, + type_metadata: type_metadata(cx, field.ty, self.span), + offset: layout.fields.offset(i), + size, + align, + flags: DIFlags::FlagZero, + } + }).collect() + } +} + + +fn prepare_struct_metadata( + cx: &CodegenCx<'ll, 'tcx>, + struct_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span, +) -> RecursiveTypeDescription<'ll, 'tcx> { + let struct_name = compute_debuginfo_type_name(cx, struct_type, false); + + let (struct_def_id, variant) = match struct_type.sty { + ty::TyAdt(def, _) => (def.did, def.non_enum_variant()), + _ => bug!("prepare_struct_metadata on a non-ADT") + }; + + let containing_scope = get_namespace_for_item(cx, struct_def_id); + + let struct_metadata_stub = create_struct_stub(cx, + struct_type, + &struct_name, + unique_type_id, + Some(containing_scope)); + + create_and_register_recursive_type_forward_declaration( + cx, + struct_type, + unique_type_id, + struct_metadata_stub, + StructMDF(StructMemberDescriptionFactory { + ty: struct_type, + variant, + span, + }) + ) +} + +//=----------------------------------------------------------------------------- +// Tuples +//=----------------------------------------------------------------------------- + +// Creates MemberDescriptions for the fields of a tuple +struct TupleMemberDescriptionFactory<'tcx> { + ty: Ty<'tcx>, + component_types: Vec>, + span: Span, +} + +impl<'tcx> TupleMemberDescriptionFactory<'tcx> { + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + -> Vec> { + let layout = cx.layout_of(self.ty); + self.component_types.iter().enumerate().map(|(i, &component_type)| { + let (size, align) = cx.size_and_align_of(component_type); + MemberDescription { + name: format!("__{}", i), + type_metadata: 
type_metadata(cx, component_type, self.span), + offset: layout.fields.offset(i), + size, + align, + flags: DIFlags::FlagZero, + } + }).collect() + } +} + +fn prepare_tuple_metadata( + cx: &CodegenCx<'ll, 'tcx>, + tuple_type: Ty<'tcx>, + component_types: &[Ty<'tcx>], + unique_type_id: UniqueTypeId, + span: Span, +) -> RecursiveTypeDescription<'ll, 'tcx> { + let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); + + create_and_register_recursive_type_forward_declaration( + cx, + tuple_type, + unique_type_id, + create_struct_stub(cx, + tuple_type, + &tuple_name[..], + unique_type_id, + NO_SCOPE_METADATA), + TupleMDF(TupleMemberDescriptionFactory { + ty: tuple_type, + component_types: component_types.to_vec(), + span, + }) + ) +} + +//=----------------------------------------------------------------------------- +// Unions +//=----------------------------------------------------------------------------- + +struct UnionMemberDescriptionFactory<'tcx> { + layout: TyLayout<'tcx>, + variant: &'tcx ty::VariantDef, + span: Span, +} + +impl<'tcx> UnionMemberDescriptionFactory<'tcx> { + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + -> Vec> { + self.variant.fields.iter().enumerate().map(|(i, f)| { + let field = self.layout.field(cx, i); + let (size, align) = field.size_and_align(); + MemberDescription { + name: f.ident.to_string(), + type_metadata: type_metadata(cx, field.ty, self.span), + offset: Size::ZERO, + size, + align, + flags: DIFlags::FlagZero, + } + }).collect() + } +} + +fn prepare_union_metadata( + cx: &CodegenCx<'ll, 'tcx>, + union_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span, +) -> RecursiveTypeDescription<'ll, 'tcx> { + let union_name = compute_debuginfo_type_name(cx, union_type, false); + + let (union_def_id, variant) = match union_type.sty { + ty::TyAdt(def, _) => (def.did, def.non_enum_variant()), + _ => bug!("prepare_union_metadata on a non-ADT") + }; + + let containing_scope = get_namespace_for_item(cx, 
union_def_id); + + let union_metadata_stub = create_union_stub(cx, + union_type, + &union_name, + unique_type_id, + containing_scope); + + create_and_register_recursive_type_forward_declaration( + cx, + union_type, + unique_type_id, + union_metadata_stub, + UnionMDF(UnionMemberDescriptionFactory { + layout: cx.layout_of(union_type), + variant, + span, + }) + ) +} + +//=----------------------------------------------------------------------------- +// Enums +//=----------------------------------------------------------------------------- + +// Describes the members of an enum value: An enum is described as a union of +// structs in DWARF. This MemberDescriptionFactory provides the description for +// the members of this union; so for every variant of the given enum, this +// factory will produce one MemberDescription (all with no name and a fixed +// offset of zero bytes). +struct EnumMemberDescriptionFactory<'ll, 'tcx> { + enum_type: Ty<'tcx>, + layout: TyLayout<'tcx>, + discriminant_type_metadata: Option<&'ll DIType>, + containing_scope: &'ll DIScope, + span: Span, +} + +impl EnumMemberDescriptionFactory<'ll, 'tcx> { + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + -> Vec> { + let adt = &self.enum_type.ty_adt_def().unwrap(); + match self.layout.variants { + layout::Variants::Single { .. 
} if adt.variants.is_empty() => vec![], + layout::Variants::Single { index } => { + let (variant_type_metadata, member_description_factory) = + describe_enum_variant(cx, + self.layout, + &adt.variants[index], + NoDiscriminant, + self.containing_scope, + self.span); + + let member_descriptions = + member_description_factory.create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + member_descriptions); + vec![ + MemberDescription { + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::ZERO, + size: self.layout.size, + align: self.layout.align, + flags: DIFlags::FlagZero + } + ] + } + layout::Variants::Tagged { ref variants, .. } => { + let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata + .expect("")); + (0..variants.len()).map(|i| { + let variant = self.layout.for_variant(cx, i); + let (variant_type_metadata, member_desc_factory) = + describe_enum_variant(cx, + variant, + &adt.variants[i], + discriminant_info, + self.containing_scope, + self.span); + + let member_descriptions = member_desc_factory + .create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + member_descriptions); + MemberDescription { + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::ZERO, + size: variant.size, + align: variant.align, + flags: DIFlags::FlagZero + } + }).collect() + } + layout::Variants::NicheFilling { dataful_variant, ref niche_variants, .. 
} => { + let variant = self.layout.for_variant(cx, dataful_variant); + // Create a description of the non-null variant + let (variant_type_metadata, member_description_factory) = + describe_enum_variant(cx, + variant, + &adt.variants[dataful_variant], + OptimizedDiscriminant, + self.containing_scope, + self.span); + + let variant_member_descriptions = + member_description_factory.create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + variant_member_descriptions); + + // Encode the information about the null variant in the union + // member's name. + let mut name = String::from("RUST$ENCODED$ENUM$"); + // HACK(eddyb) the debuggers should just handle offset+size + // of discriminant instead of us having to recover its path. + // Right now it's not even going to work for `niche_start > 0`, + // and for multiple niche variants it only supports the first. + fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + name: &mut String, + layout: TyLayout<'tcx>, + offset: Size, + size: Size) { + for i in 0..layout.fields.count() { + let field_offset = layout.fields.offset(i); + if field_offset > offset { + continue; + } + let inner_offset = offset - field_offset; + let field = layout.field(cx, i); + if inner_offset + size <= field.size { + write!(name, "{}$", i).unwrap(); + compute_field_path(cx, name, field, inner_offset, size); + } + } + } + compute_field_path(cx, &mut name, + self.layout, + self.layout.fields.offset(0), + self.layout.field(cx, 0).size); + name.push_str(&adt.variants[*niche_variants.start()].name.as_str()); + + // Create the (singleton) list of descriptions of union members. + vec![ + MemberDescription { + name, + type_metadata: variant_type_metadata, + offset: Size::ZERO, + size: variant.size, + align: variant.align, + flags: DIFlags::FlagZero + } + ] + } + } + } +} + +// Creates MemberDescriptions for the fields of a single enum variant. 
+struct VariantMemberDescriptionFactory<'ll, 'tcx> { + // Cloned from the layout::Struct describing the variant. + offsets: Vec, + args: Vec<(String, Ty<'tcx>)>, + discriminant_type_metadata: Option<&'ll DIType>, + span: Span, +} + +impl VariantMemberDescriptionFactory<'ll, 'tcx> { + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + -> Vec> { + self.args.iter().enumerate().map(|(i, &(ref name, ty))| { + let (size, align) = cx.size_and_align_of(ty); + MemberDescription { + name: name.to_string(), + type_metadata: match self.discriminant_type_metadata { + Some(metadata) if i == 0 => metadata, + _ => type_metadata(cx, ty, self.span) + }, + offset: self.offsets[i], + size, + align, + flags: DIFlags::FlagZero + } + }).collect() + } +} + +#[derive(Copy, Clone)] +enum EnumDiscriminantInfo<'ll> { + RegularDiscriminant(&'ll DIType), + OptimizedDiscriminant, + NoDiscriminant +} + +// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type +// of the variant, and (3) a MemberDescriptionFactory for producing the +// descriptions of the fields of the variant. This is a rudimentary version of a +// full RecursiveTypeDescription. +fn describe_enum_variant( + cx: &CodegenCx<'ll, 'tcx>, + layout: layout::TyLayout<'tcx>, + variant: &'tcx ty::VariantDef, + discriminant_info: EnumDiscriminantInfo<'ll>, + containing_scope: &'ll DIScope, + span: Span, +) -> (&'ll DICompositeType, MemberDescriptionFactory<'ll, 'tcx>) { + let variant_name = variant.name.as_str(); + let unique_type_id = debug_context(cx).type_map + .borrow_mut() + .get_unique_type_id_of_enum_variant( + cx, + layout.ty, + &variant_name); + + let metadata_stub = create_struct_stub(cx, + layout.ty, + &variant_name, + unique_type_id, + Some(containing_scope)); + + // If this is not a univariant enum, there is also the discriminant field. 
+ let (discr_offset, discr_arg) = match discriminant_info { + RegularDiscriminant(_) => { + let enum_layout = cx.layout_of(layout.ty); + (Some(enum_layout.fields.offset(0)), + Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty))) + } + _ => (None, None), + }; + let offsets = discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| { + layout.fields.offset(i) + })).collect(); + + // Build an array of (field name, field type) pairs to be captured in the factory closure. + let args = discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| { + let name = if variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + variant.fields[i].ident.to_string() + }; + (name, layout.field(cx, i).ty) + })).collect(); + + let member_description_factory = + VariantMDF(VariantMemberDescriptionFactory { + offsets, + args, + discriminant_type_metadata: match discriminant_info { + RegularDiscriminant(discriminant_type_metadata) => { + Some(discriminant_type_metadata) + } + _ => None + }, + span, + }); + + (metadata_stub, member_description_factory) +} + +fn prepare_enum_metadata( + cx: &CodegenCx<'ll, 'tcx>, + enum_type: Ty<'tcx>, + enum_def_id: DefId, + unique_type_id: UniqueTypeId, + span: Span, +) -> RecursiveTypeDescription<'ll, 'tcx> { + let enum_name = compute_debuginfo_type_name(cx, enum_type, false); + + let containing_scope = get_namespace_for_item(cx, enum_def_id); + // FIXME: This should emit actual file metadata for the enum, but we + // currently can't get the necessary information when it comes to types + // imported from other crates. 
Formerly we violated the ODR when performing + // LTO because we emitted debuginfo for the same type with varying file + // metadata, so as a workaround we pretend that the type comes from + // + let file_metadata = unknown_file_metadata(cx); + + let def = enum_type.ty_adt_def().unwrap(); + let enumerators_metadata: Vec<_> = def.discriminants(cx.tcx) + .zip(&def.variants) + .map(|(discr, v)| { + let name = SmallCStr::new(&v.name.as_str()); + unsafe { + Some(llvm::LLVMRustDIBuilderCreateEnumerator( + DIB(cx), + name.as_ptr(), + // FIXME: what if enumeration has i128 discriminant? + discr.val as u64)) + } + }) + .collect(); + + let discriminant_type_metadata = |discr: layout::Primitive| { + let disr_type_key = (enum_def_id, discr); + let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types + .borrow() + .get(&disr_type_key).cloned(); + match cached_discriminant_type_metadata { + Some(discriminant_type_metadata) => discriminant_type_metadata, + None => { + let (discriminant_size, discriminant_align) = + (discr.size(cx), discr.align(cx)); + let discriminant_base_type_metadata = + type_metadata(cx, discr.to_ty(cx.tcx), syntax_pos::DUMMY_SP); + let discriminant_name = get_enum_discriminant_name(cx, enum_def_id).as_str(); + + let name = SmallCStr::new(&discriminant_name); + let discriminant_type_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateEnumerationType( + DIB(cx), + containing_scope, + name.as_ptr(), + file_metadata, + UNKNOWN_LINE_NUMBER, + discriminant_size.bits(), + discriminant_align.abi_bits() as u32, + create_DIArray(DIB(cx), &enumerators_metadata), + discriminant_base_type_metadata) + }; + + debug_context(cx).created_enum_disr_types + .borrow_mut() + .insert(disr_type_key, discriminant_type_metadata); + + discriminant_type_metadata + } + } + }; + + let layout = cx.layout_of(enum_type); + + let discriminant_type_metadata = match layout.variants { + layout::Variants::Single { .. } | + layout::Variants::NicheFilling { .. 
} => None, + layout::Variants::Tagged { ref tag, .. } => { + Some(discriminant_type_metadata(tag.value)) + } + }; + + match (&layout.abi, discriminant_type_metadata) { + (&layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr), + _ => {} + } + + let (enum_type_size, enum_type_align) = layout.size_and_align(); + + let enum_name = SmallCStr::new(&enum_name); + let unique_type_id_str = SmallCStr::new( + debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) + ); + let enum_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateUnionType( + DIB(cx), + containing_scope, + enum_name.as_ptr(), + file_metadata, + UNKNOWN_LINE_NUMBER, + enum_type_size.bits(), + enum_type_align.abi_bits() as u32, + DIFlags::FlagZero, + None, + 0, // RuntimeLang + unique_type_id_str.as_ptr()) + }; + + return create_and_register_recursive_type_forward_declaration( + cx, + enum_type, + unique_type_id, + enum_metadata, + EnumMDF(EnumMemberDescriptionFactory { + enum_type, + layout, + discriminant_type_metadata, + containing_scope, + span, + }), + ); + + fn get_enum_discriminant_name(cx: &CodegenCx, + def_id: DefId) + -> InternedString { + cx.tcx.item_name(def_id) + } +} + +/// Creates debug information for a composite type, that is, anything that +/// results in a LLVM struct. +/// +/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. +fn composite_type_metadata( + cx: &CodegenCx<'ll, 'tcx>, + composite_type: Ty<'tcx>, + composite_type_name: &str, + composite_type_unique_id: UniqueTypeId, + member_descriptions: Vec>, + containing_scope: Option<&'ll DIScope>, + + // Ignore source location information as long as it + // can't be reconstructed for non-local crates. + _file_metadata: &'ll DIFile, + _definition_span: Span, +) -> &'ll DICompositeType { + // Create the (empty) struct metadata node ... 
+ let composite_type_metadata = create_struct_stub(cx, + composite_type, + composite_type_name, + composite_type_unique_id, + containing_scope); + // ... and immediately create and add the member descriptions. + set_members_of_composite_type(cx, + composite_type_metadata, + member_descriptions); + + return composite_type_metadata; +} + +fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, + composite_type_metadata: &'ll DICompositeType, + member_descriptions: Vec>) { + // In some rare cases LLVM metadata uniquing would lead to an existing type + // description being used instead of a new one created in + // create_struct_stub. This would cause a hard to trace assertion in + // DICompositeType::SetTypeArray(). The following check makes sure that we + // get a better error message if this should happen again due to some + // regression. + { + let mut composite_types_completed = + debug_context(cx).composite_types_completed.borrow_mut(); + if composite_types_completed.contains(&composite_type_metadata) { + bug!("debuginfo::set_members_of_composite_type() - \ + Already completed forward declaration re-encountered."); + } else { + composite_types_completed.insert(composite_type_metadata); + } + } + + let member_metadata: Vec<_> = member_descriptions + .into_iter() + .map(|member_description| { + let member_name = CString::new(member_description.name).unwrap(); + unsafe { + Some(llvm::LLVMRustDIBuilderCreateMemberType( + DIB(cx), + composite_type_metadata, + member_name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + member_description.size.bits(), + member_description.align.abi_bits() as u32, + member_description.offset.bits(), + member_description.flags, + member_description.type_metadata)) + } + }) + .collect(); + + unsafe { + let type_array = create_DIArray(DIB(cx), &member_metadata[..]); + llvm::LLVMRustDICompositeTypeSetTypeArray( + DIB(cx), composite_type_metadata, type_array); + } +} + +// A convenience wrapper around 
LLVMRustDIBuilderCreateStructType(). Does not do +// any caching, does not add any fields to the struct. This can be done later +// with set_members_of_composite_type(). +fn create_struct_stub( + cx: &CodegenCx<'ll, 'tcx>, + struct_type: Ty<'tcx>, + struct_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: Option<&'ll DIScope>, +) -> &'ll DICompositeType { + let (struct_size, struct_align) = cx.size_and_align_of(struct_type); + + let name = SmallCStr::new(struct_type_name); + let unique_type_id = SmallCStr::new( + debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) + ); + let metadata_stub = unsafe { + // LLVMRustDIBuilderCreateStructType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. + let empty_array = create_DIArray(DIB(cx), &[]); + + llvm::LLVMRustDIBuilderCreateStructType( + DIB(cx), + containing_scope, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + struct_size.bits(), + struct_align.abi_bits() as u32, + DIFlags::FlagZero, + None, + empty_array, + 0, + None, + unique_type_id.as_ptr()) + }; + + return metadata_stub; +} + +fn create_union_stub( + cx: &CodegenCx<'ll, 'tcx>, + union_type: Ty<'tcx>, + union_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: &'ll DIScope, +) -> &'ll DICompositeType { + let (union_size, union_align) = cx.size_and_align_of(union_type); + + let name = SmallCStr::new(union_type_name); + let unique_type_id = SmallCStr::new( + debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) + ); + let metadata_stub = unsafe { + // LLVMRustDIBuilderCreateUnionType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. 
+ let empty_array = create_DIArray(DIB(cx), &[]); + + llvm::LLVMRustDIBuilderCreateUnionType( + DIB(cx), + containing_scope, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + union_size.bits(), + union_align.abi_bits() as u32, + DIFlags::FlagZero, + Some(empty_array), + 0, // RuntimeLang + unique_type_id.as_ptr()) + }; + + return metadata_stub; +} + +/// Creates debug information for the given global variable. +/// +/// Adds the created metadata nodes directly to the crate's IR. +pub fn create_global_var_metadata( + cx: &CodegenCx<'ll, '_>, + def_id: DefId, + global: &'ll Value, +) { + if cx.dbg_cx.is_none() { + return; + } + + let tcx = cx.tcx; + let attrs = tcx.codegen_fn_attrs(def_id); + + if attrs.flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + return; + } + + let no_mangle = attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE); + // We may want to remove the namespace scope if we're in an extern block, see: + // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952 + let var_scope = get_namespace_for_item(cx, def_id); + let span = tcx.def_span(def_id); + + let (file_metadata, line_number) = if !span.is_dummy() { + let loc = span_start(cx, span); + (file_metadata(cx, &loc.file.name, LOCAL_CRATE), loc.line as c_uint) + } else { + (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER) + }; + + let is_local_to_unit = is_node_local_to_unit(cx, def_id); + let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx); + let type_metadata = type_metadata(cx, variable_type, span); + let var_name = SmallCStr::new(&tcx.item_name(def_id).as_str()); + let linkage_name = if no_mangle { + None + } else { + let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id)); + Some(SmallCStr::new(&linkage_name.as_str())) + }; + + let global_align = cx.align_of(variable_type); + + unsafe { + llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), + Some(var_scope), + var_name.as_ptr(), + // If null, linkage_name field is omitted, + // which 
is what we want for no_mangle statics + linkage_name.as_ref() + .map_or(ptr::null(), |name| name.as_ptr()), + file_metadata, + line_number, + type_metadata, + is_local_to_unit, + global, + None, + global_align.abi() as u32, + ); + } +} + +// Creates an "extension" of an existing DIScope into another file. +pub fn extend_scope_to_file( + cx: &CodegenCx<'ll, '_>, + scope_metadata: &'ll DIScope, + file: &syntax_pos::FileMap, + defining_crate: CrateNum, +) -> &'ll DILexicalBlock { + let file_metadata = file_metadata(cx, &file.name, defining_crate); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(cx), + scope_metadata, + file_metadata) + } +} + +/// Creates debug information for the given vtable, which is for the +/// given type. +/// +/// Adds the created metadata nodes directly to the crate's IR. +pub fn create_vtable_metadata( + cx: &CodegenCx<'ll, 'tcx>, + ty: ty::Ty<'tcx>, + vtable: &'ll Value, +) { + if cx.dbg_cx.is_none() { + return; + } + + let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP); + + unsafe { + // LLVMRustDIBuilderCreateStructType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. + let empty_array = create_DIArray(DIB(cx), &[]); + + let name = const_cstr!("vtable"); + + // Create a new one each time. We don't want metadata caching + // here, because each vtable will refer to a unique containing + // type. + let vtable_type = llvm::LLVMRustDIBuilderCreateStructType( + DIB(cx), + NO_SCOPE_METADATA, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + Size::ZERO.bits(), + cx.tcx.data_layout.pointer_align.abi_bits() as u32, + DIFlags::FlagArtificial, + None, + empty_array, + 0, + Some(type_metadata), + name.as_ptr() + ); + + llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), + NO_SCOPE_METADATA, + name.as_ptr(), + // LLVM 3.9 + // doesn't accept + // null here, so + // pass the name + // as the linkage + // name. 
+ name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + vtable_type, + true, + vtable, + None, + 0); + } +} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs new file mode 100644 index 000000000000..fcb8bc3fe2b2 --- /dev/null +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -0,0 +1,551 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// See doc.rs for documentation. +mod doc; + +use self::VariableAccess::*; +use self::VariableKind::*; + +use self::utils::{DIB, span_start, create_DIArray, is_node_local_to_unit}; +use self::namespace::mangled_name_of_instance; +use self::type_names::compute_debuginfo_type_name; +use self::metadata::{type_metadata, file_metadata, TypeMap}; +use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; + +use llvm; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags}; +use rustc::hir::CodegenFnAttrFlags; +use rustc::hir::def_id::{DefId, CrateNum}; +use rustc::ty::subst::{Substs, UnpackedKind}; + +use abi::Abi; +use common::CodegenCx; +use builder::Builder; +use monomorphize::Instance; +use rustc::ty::{self, ParamEnv, Ty, InstanceDef}; +use rustc::mir; +use rustc::session::config::{self, DebugInfo}; +use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; +use rustc_data_structures::small_c_str::SmallCStr; +use value::Value; + +use libc::c_uint; +use std::cell::{Cell, RefCell}; +use std::ffi::CString; + +use syntax_pos::{self, Span, Pos}; +use syntax::ast; +use syntax::symbol::{Symbol, InternedString}; +use rustc::ty::layout::{self, LayoutOf}; + +pub mod gdb; +mod utils; +mod namespace; +mod type_names; 
+pub mod metadata; +mod create_scope_map; +mod source_loc; + +pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; +pub use self::source_loc::start_emitting_source_locations; +pub use self::metadata::create_global_var_metadata; +pub use self::metadata::create_vtable_metadata; +pub use self::metadata::extend_scope_to_file; +pub use self::source_loc::set_source_location; + +#[allow(non_upper_case_globals)] +const DW_TAG_auto_variable: c_uint = 0x100; +#[allow(non_upper_case_globals)] +const DW_TAG_arg_variable: c_uint = 0x101; + +/// A context object for maintaining all state needed by the debuginfo module. +pub struct CrateDebugContext<'a, 'tcx> { + llcontext: &'a llvm::Context, + llmod: &'a llvm::Module, + builder: &'a mut DIBuilder<'a>, + created_files: RefCell>, + created_enum_disr_types: RefCell>, + + type_map: RefCell>, + namespace_map: RefCell>, + + // This collection is used to assert that composite types (structs, enums, + // ...) have their members only set once: + composite_types_completed: RefCell>, +} + +impl Drop for CrateDebugContext<'a, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _)); + } + } +} + +impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { + pub fn new(llmod: &'a llvm::Module) -> Self { + debug!("CrateDebugContext::new"); + let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) }; + // DIBuilder inherits context from the module, so we'd better use the same one + let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) }; + CrateDebugContext { + llcontext, + llmod, + builder, + created_files: RefCell::new(FxHashMap()), + created_enum_disr_types: RefCell::new(FxHashMap()), + type_map: RefCell::new(TypeMap::new()), + namespace_map: RefCell::new(DefIdMap()), + composite_types_completed: RefCell::new(FxHashSet()), + } + } +} + +pub enum FunctionDebugContext<'ll> { + RegularContext(FunctionDebugContextData<'ll>), + DebugInfoDisabled, + FunctionWithoutDebugInfo, +} + +impl 
FunctionDebugContext<'ll> { + pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<'ll> { + match *self { + FunctionDebugContext::RegularContext(ref data) => data, + FunctionDebugContext::DebugInfoDisabled => { + span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); + } + FunctionDebugContext::FunctionWithoutDebugInfo => { + span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); + } + } + } + + fn debuginfo_disabled_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" + } + + fn should_be_ignored_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext for function that should be \ + ignored by debug info!" + } +} + +pub struct FunctionDebugContextData<'ll> { + fn_metadata: &'ll DISubprogram, + source_locations_enabled: Cell, + pub defining_crate: CrateNum, +} + +pub enum VariableAccess<'a, 'll> { + // The llptr given is an alloca containing the variable's value + DirectVariable { alloca: &'ll Value }, + // The llptr given is an alloca containing the start of some pointer chain + // leading to the variable's content. + IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] } +} + +pub enum VariableKind { + ArgumentVariable(usize /*index*/), + LocalVariable, +} + +/// Create any deferred debug metadata nodes +pub fn finalize(cx: &CodegenCx) { + if cx.dbg_cx.is_none() { + return; + } + + debug!("finalize"); + + if gdb::needs_gdb_debug_scripts_section(cx) { + // Add a .debug_gdb_scripts section to this compile-unit. This will + // cause GDB to try and load the gdb_load_rust_pretty_printers.py file, + // which activates the Rust pretty printers for binary this section is + // contained in. 
+ gdb::get_or_insert_gdb_debug_scripts_section_global(cx); + } + + unsafe { + llvm::LLVMRustDIBuilderFinalize(DIB(cx)); + // Debuginfo generation in LLVM by default uses a higher + // version of dwarf than macOS currently understands. We can + // instruct LLVM to emit an older version of dwarf, however, + // for macOS to understand. For more info see #11352 + // This can be overridden using --llvm-opts -dwarf-version,N. + // Android has the same issue (#22398) + if cx.sess().target.target.options.is_like_osx || + cx.sess().target.target.options.is_like_android { + llvm::LLVMRustAddModuleFlag(cx.llmod, + "Dwarf Version\0".as_ptr() as *const _, + 2) + } + + // Indicate that we want CodeView debug information on MSVC + if cx.sess().target.target.options.is_like_msvc { + llvm::LLVMRustAddModuleFlag(cx.llmod, + "CodeView\0".as_ptr() as *const _, + 1) + } + + // Prevent bitcode readers from deleting the debug info. + let ptr = "Debug Info Version\0".as_ptr(); + llvm::LLVMRustAddModuleFlag(cx.llmod, ptr as *const _, + llvm::LLVMRustDebugMetadataVersion()); + }; +} + +/// Creates the function-specific debug context. +/// +/// Returns the FunctionDebugContext for the function which holds state needed +/// for debug info creation. The function may also return another variant of the +/// FunctionDebugContext enum which indicates why no debuginfo should be created +/// for the function. 
+pub fn create_function_debug_context( + cx: &CodegenCx<'ll, 'tcx>, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: &'ll Value, + mir: &mir::Mir, +) -> FunctionDebugContext<'ll> { + if cx.sess().opts.debuginfo == DebugInfo::None { + return FunctionDebugContext::DebugInfoDisabled; + } + + if let InstanceDef::Item(def_id) = instance.def { + if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + } + + let span = mir.span; + + // This can be the case for functions inlined from another crate + if span.is_dummy() { + // FIXME(simulacrum): Probably can't happen; remove. + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + + let def_id = instance.def_id(); + let containing_scope = get_containing_scope(cx, instance); + let loc = span_start(cx, span); + let file_metadata = file_metadata(cx, &loc.file.name, def_id.krate); + + let function_type_metadata = unsafe { + let fn_signature = get_function_signature(cx, sig); + llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) + }; + + // Find the enclosing function, in case this is a closure. + let def_key = cx.tcx.def_key(def_id); + let mut name = def_key.disambiguated_data.data.to_string(); + + let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id); + + // Get_template_parameters() will append a `<...>` clause to the function + // name if necessary. 
+ let generics = cx.tcx.generics_of(enclosing_fn_def_id); + let substs = instance.substs.truncate_to(cx.tcx, generics); + let template_parameters = get_template_parameters(cx, + &generics, + substs, + file_metadata, + &mut name); + + // Get the linkage_name, which is just the symbol name + let linkage_name = mangled_name_of_instance(cx, instance); + + let scope_line = span_start(cx, span).line; + let is_local_to_unit = is_node_local_to_unit(cx, def_id); + + let function_name = CString::new(name).unwrap(); + let linkage_name = SmallCStr::new(&linkage_name.as_str()); + + let mut flags = DIFlags::FlagPrototyped; + + let local_id = cx.tcx.hir.as_local_node_id(def_id); + match *cx.sess().entry_fn.borrow() { + Some((id, _, _)) => { + if local_id == Some(id) { + flags = flags | DIFlags::FlagMainSubprogram; + } + } + None => {} + }; + if cx.layout_of(sig.output()).abi == ty::layout::Abi::Uninhabited { + flags = flags | DIFlags::FlagNoReturn; + } + + let fn_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFunction( + DIB(cx), + containing_scope, + function_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + loc.line as c_uint, + function_type_metadata, + is_local_to_unit, + true, + scope_line as c_uint, + flags, + cx.sess().opts.optimize != config::OptLevel::No, + llfn, + template_parameters, + None) + }; + + // Initialize fn debug context (including scope map and namespace map) + let fn_debug_context = FunctionDebugContextData { + fn_metadata, + source_locations_enabled: Cell::new(false), + defining_crate: def_id.krate, + }; + + return FunctionDebugContext::RegularContext(fn_debug_context); + + fn get_function_signature( + cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + ) -> &'ll DIArray { + if cx.sess().opts.debuginfo == DebugInfo::Limited { + return create_DIArray(DIB(cx), &[]); + } + + let mut signature = Vec::with_capacity(sig.inputs().len() + 1); + + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(match sig.output().sty { + 
ty::TyTuple(ref tys) if tys.is_empty() => None, + _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) + }); + + let inputs = if sig.abi == Abi::RustCall { + &sig.inputs()[..sig.inputs().len() - 1] + } else { + sig.inputs() + }; + + // Arguments types + if cx.sess().target.target.options.is_like_msvc { + // FIXME(#42800): + // There is a bug in MSDIA that leads to a crash when it encounters + // a fixed-size array of `u8` or something zero-sized in a + // function-type (see #40477). + // As a workaround, we replace those fixed-size arrays with a + // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would + // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, + // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. + // This transformed type is wrong, but these function types are + // already inaccurate due to ABI adjustments (see #42800). + signature.extend(inputs.iter().map(|&t| { + let t = match t.sty { + ty::TyArray(ct, _) + if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { + cx.tcx.mk_imm_ptr(ct) + } + _ => t + }; + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } else { + signature.extend(inputs.iter().map(|t| { + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } + + if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { + if let ty::TyTuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { + signature.extend( + args.iter().map(|argument_type| { + Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) + }) + ); + } + } + + return create_DIArray(DIB(cx), &signature[..]); + } + + fn get_template_parameters( + cx: &CodegenCx<'ll, 'tcx>, + generics: &ty::Generics, + substs: &Substs<'tcx>, + file_metadata: &'ll DIFile, + name_to_append_suffix_to: &mut String, + ) -> &'ll DIArray { + if substs.types().next().is_none() { + return create_DIArray(DIB(cx), &[]); + } + + name_to_append_suffix_to.push('<'); + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + 
name_to_append_suffix_to.push_str(","); + } + + let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); + // Add actual type name to <...> clause of function name + let actual_type_name = compute_debuginfo_type_name(cx, + actual_type, + true); + name_to_append_suffix_to.push_str(&actual_type_name[..]); + } + name_to_append_suffix_to.push('>'); + + // Again, only create type information if full debuginfo is enabled + let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { + let names = get_parameter_names(cx, generics); + substs.iter().zip(names).filter_map(|(kind, name)| { + if let UnpackedKind::Type(ty) = kind.unpack() { + let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); + let actual_type_metadata = + type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = SmallCStr::new(&name.as_str()); + Some(unsafe { + Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + None, + name.as_ptr(), + actual_type_metadata, + file_metadata, + 0, + 0, + )) + }) + } else { + None + } + }).collect() + } else { + vec![] + }; + + return create_DIArray(DIB(cx), &template_params[..]); + } + + fn get_parameter_names(cx: &CodegenCx, + generics: &ty::Generics) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_parameter_names(cx, cx.tcx.generics_of(def_id)) + }); + names.extend(generics.params.iter().map(|param| param.name)); + names + } + + fn get_containing_scope( + cx: &CodegenCx<'ll, 'tcx>, + instance: Instance<'tcx>, + ) -> &'ll DIScope { + // First, let's see if this is a method within an inherent impl. Because + // if yes, we want to make the result subroutine DIE a child of the + // subroutine's self-type. 
+ let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { + // If the method does *not* belong to a trait, proceed + if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { + let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( + instance.substs, + ty::ParamEnv::reveal_all(), + &cx.tcx.type_of(impl_def_id), + ); + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::TyAdt(def, ..) if !def.is_box() => { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } + } else { + // For trait method impls we still use the "parallel namespace" + // strategy + None + } + }); + + self_type.unwrap_or_else(|| { + namespace::item_namespace(cx, DefId { + krate: instance.def_id().krate, + index: cx.tcx + .def_key(instance.def_id()) + .parent + .expect("get_containing_scope: missing parent?") + }) + }) + } +} + +pub fn declare_local( + bx: &Builder<'a, 'll, 'tcx>, + dbg_context: &FunctionDebugContext<'ll>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: &'ll DIScope, + variable_access: VariableAccess<'_, 'll>, + variable_kind: VariableKind, + span: Span, +) { + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + let cx = bx.cx; + + let file = span_start(cx, span).file; + let file_metadata = file_metadata(cx, + &file.name, + dbg_context.get_ref(span).defining_crate); + + let loc = span_start(cx, span); + let type_metadata = type_metadata(cx, variable_type, span); + + let (argument_index, dwarf_tag) = match variable_kind { + ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), + LocalVariable => (0, DW_TAG_auto_variable) + }; + let align = cx.align_of(variable_type); + + let name = SmallCStr::new(&variable_name.as_str()); + match (variable_access, &[][..]) { + (DirectVariable { alloca }, address_operations) | + (IndirectVariable {alloca, address_operations}, _) => { + let 
metadata = unsafe { + llvm::LLVMRustDIBuilderCreateVariable( + DIB(cx), + dwarf_tag, + scope_metadata, + name.as_ptr(), + file_metadata, + loc.line as c_uint, + type_metadata, + cx.sess().opts.optimize != config::OptLevel::No, + DIFlags::FlagZero, + argument_index, + align.abi() as u32, + ) + }; + source_loc::set_debug_location(bx, + InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); + unsafe { + let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder); + let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + alloca, + metadata, + address_operations.as_ptr(), + address_operations.len() as c_uint, + debug_loc, + bx.llbb()); + + llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr); + } + source_loc::set_debug_location(bx, UnknownLocation); + } + } +} diff --git a/src/librustc_codegen_llvm/debuginfo/namespace.rs b/src/librustc_codegen_llvm/debuginfo/namespace.rs new file mode 100644 index 000000000000..06f8a4b131b6 --- /dev/null +++ b/src/librustc_codegen_llvm/debuginfo/namespace.rs @@ -0,0 +1,65 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Namespace Handling. 
+ +use super::metadata::{unknown_file_metadata, UNKNOWN_LINE_NUMBER}; +use super::utils::{DIB, debug_context}; +use monomorphize::Instance; +use rustc::ty; + +use llvm; +use llvm::debuginfo::DIScope; +use rustc::hir::def_id::DefId; +use rustc::hir::map::DefPathData; +use common::CodegenCx; + +use rustc_data_structures::small_c_str::SmallCStr; + +pub fn mangled_name_of_instance<'a, 'tcx>( + cx: &CodegenCx<'a, 'tcx>, + instance: Instance<'tcx>, +) -> ty::SymbolName { + let tcx = cx.tcx; + tcx.symbol_name(instance) +} + +pub fn item_namespace(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { + if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) { + return scope; + } + + let def_key = cx.tcx.def_key(def_id); + let parent_scope = def_key.parent.map(|parent| { + item_namespace(cx, DefId { + krate: def_id.krate, + index: parent + }) + }); + + let namespace_name = match def_key.disambiguated_data.data { + DefPathData::CrateRoot => cx.tcx.crate_name(def_id.krate).as_str(), + data => data.as_interned_str().as_str() + }; + + let namespace_name = SmallCStr::new(&namespace_name); + + let scope = unsafe { + llvm::LLVMRustDIBuilderCreateNameSpace( + DIB(cx), + parent_scope, + namespace_name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER) + }; + + debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope); + scope +} diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs new file mode 100644 index 000000000000..55cf13939434 --- /dev/null +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -0,0 +1,115 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use self::InternalDebugLocation::*; + +use super::utils::{debug_context, span_start}; +use super::metadata::UNKNOWN_COLUMN_NUMBER; +use super::FunctionDebugContext; + +use llvm; +use llvm::debuginfo::DIScope; +use builder::Builder; + +use libc::c_uint; +use syntax_pos::{Span, Pos}; + +/// Sets the current debug location at the beginning of the span. +/// +/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). +pub fn set_source_location( + debug_context: &FunctionDebugContext<'ll>, + bx: &Builder<'_, 'll, '_>, + scope: Option<&'ll DIScope>, + span: Span, +) { + let function_debug_context = match *debug_context { + FunctionDebugContext::DebugInfoDisabled => return, + FunctionDebugContext::FunctionWithoutDebugInfo => { + set_debug_location(bx, UnknownLocation); + return; + } + FunctionDebugContext::RegularContext(ref data) => data + }; + + let dbg_loc = if function_debug_context.source_locations_enabled.get() { + debug!("set_source_location: {}", bx.sess().codemap().span_to_string(span)); + let loc = span_start(bx.cx, span); + InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) + } else { + UnknownLocation + }; + set_debug_location(bx, dbg_loc); +} + +/// Enables emitting source locations for the given functions. +/// +/// Since we don't want source locations to be emitted for the function prelude, +/// they are disabled when beginning to codegen a new function. This functions +/// switches source location emitting on and must therefore be called before the +/// first real statement/expression of the function is codegened. 
+pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) { + match *dbg_context { + FunctionDebugContext::RegularContext(ref data) => { + data.source_locations_enabled.set(true) + }, + _ => { /* safe to ignore */ } + } +} + + +#[derive(Copy, Clone, PartialEq)] +pub enum InternalDebugLocation<'ll> { + KnownLocation { scope: &'ll DIScope, line: usize, col: usize }, + UnknownLocation +} + +impl InternalDebugLocation<'ll> { + pub fn new(scope: &'ll DIScope, line: usize, col: usize) -> Self { + KnownLocation { + scope, + line, + col, + } + } +} + +pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDebugLocation<'ll>) { + let metadata_node = match debug_location { + KnownLocation { scope, line, col } => { + // For MSVC, set the column number to zero. + // Otherwise, emit it. This mimics clang behaviour. + // See discussion in https://github.com/rust-lang/rust/issues/42921 + let col_used = if bx.cx.sess().target.target.options.is_like_msvc { + UNKNOWN_COLUMN_NUMBER + } else { + col as c_uint + }; + debug!("setting debug location to {} {}", line, col); + + unsafe { + Some(llvm::LLVMRustDIBuilderCreateDebugLocation( + debug_context(bx.cx).llcontext, + line as c_uint, + col_used, + scope, + None)) + } + } + UnknownLocation => { + debug!("clearing debug location "); + None + } + }; + + unsafe { + llvm::LLVMSetCurrentDebugLocation(bx.llbuilder, metadata_node); + } +} diff --git a/src/librustc_trans/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs similarity index 93% rename from src/librustc_trans/debuginfo/type_names.rs rename to src/librustc_codegen_llvm/debuginfo/type_names.rs index 0aec92b0d66c..05a74db3a6ca 100644 --- a/src/librustc_trans/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -53,7 +53,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, push_item_name(cx, def.did, qualified, output); push_type_params(cx, substs, output); }, - 
ty::TyTuple(component_types, _) => { + ty::TyTuple(component_types) => { output.push('('); for &component_type in component_types { push_debuginfo_type_name(cx, component_type, true, output); @@ -80,7 +80,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, output.push('*'); } }, - ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => { + ty::TyRef(_, inner_type, mutbl) => { if !cpp_like_names { output.push('&'); } @@ -97,7 +97,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ty::TyArray(inner_type, len) => { output.push('['); push_debuginfo_type_name(cx, inner_type, true, output); - output.push_str(&format!("; {}", len.val.to_const_int().unwrap().to_u64().unwrap())); + output.push_str(&format!("; {}", len.unwrap_usize(cx.tcx))); output.push(']'); }, ty::TySlice(inner_type) => { @@ -117,8 +117,10 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, }, ty::TyDynamic(ref trait_data, ..) => { if let Some(principal) = trait_data.principal() { - let principal = cx.tcx.erase_late_bound_regions_and_normalize( - &principal); + let principal = cx.tcx.normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &principal, + ); push_item_name(cx, principal.def_id, false, output); push_type_params(cx, principal.substs, output); } @@ -138,7 +140,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, output.push_str("fn("); - let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig); + let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); if !sig.inputs().is_empty() { for ¶meter_type in sig.inputs() { push_debuginfo_type_name(cx, parameter_type, true, output); @@ -173,6 +175,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ty::TyInfer(_) | ty::TyProjection(..) | ty::TyAnon(..) | + ty::TyGeneratorWitness(..) 
| ty::TyParam(_) => { bug!("debuginfo: Trying to create type name for \ unexpected type: {:?}", t); @@ -187,10 +190,10 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, output.push_str(&cx.tcx.crate_name(def_id.krate).as_str()); for path_element in cx.tcx.def_path(def_id).data { output.push_str("::"); - output.push_str(&path_element.data.as_interned_str()); + output.push_str(&path_element.data.as_interned_str().as_str()); } } else { - output.push_str(&cx.tcx.item_name(def_id)); + output.push_str(&cx.tcx.item_name(def_id).as_str()); } } diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs new file mode 100644 index 000000000000..9f4a555082ad --- /dev/null +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -0,0 +1,67 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Utility Functions. + +use super::{CrateDebugContext}; +use super::namespace::item_namespace; + +use rustc::hir::def_id::DefId; +use rustc::ty::DefIdTree; + +use llvm; +use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; +use common::{CodegenCx}; + +use syntax_pos::{self, Span}; + +pub fn is_node_local_to_unit(cx: &CodegenCx, def_id: DefId) -> bool +{ + // The is_local_to_unit flag indicates whether a function is local to the + // current compilation unit (i.e. if it is *static* in the C-sense). The + // *reachable* set should provide a good approximation of this, as it + // contains everything that might leak out of the current crate (by being + // externally visible or by being inlined into something externally + // visible). 
It might better to use the `exported_items` set from + // `driver::CrateAnalysis` in the future, but (atm) this set is not + // available in the codegen pass. + !cx.tcx.is_reachable_non_generic(def_id) +} + +#[allow(non_snake_case)] +pub fn create_DIArray( + builder: &DIBuilder<'ll>, + arr: &[Option<&'ll DIDescriptor>], +) -> &'ll DIArray { + return unsafe { + llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) + }; +} + +/// Return syntax_pos::Loc corresponding to the beginning of the span +pub fn span_start(cx: &CodegenCx, span: Span) -> syntax_pos::Loc { + cx.sess().codemap().lookup_char_pos(span.lo()) +} + +#[inline] +pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx>) -> &'a CrateDebugContext<'ll, 'tcx> { + cx.dbg_cx.as_ref().unwrap() +} + +#[inline] +#[allow(non_snake_case)] +pub fn DIB(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> { + cx.dbg_cx.as_ref().unwrap().builder +} + +pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { + item_namespace(cx, cx.tcx.parent(def_id) + .expect("get_namespace_for_item: missing parent?")) +} diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs new file mode 100644 index 000000000000..5e743ac51bc6 --- /dev/null +++ b/src/librustc_codegen_llvm/declare.rs @@ -0,0 +1,230 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! Declare various LLVM values. +//! +//! Prefer using functions and methods from this module rather than calling LLVM +//! functions directly. These functions do some additional work to ensure we do +//! the right thing given the preconceptions of codegen. +//! +//! Some useful guidelines: +//! +//! 
* Use declare_* family of methods if you are declaring, but are not +//! interested in defining the Value they return. +//! * Use define_* family of methods when you might be defining the Value. +//! * When in doubt, define. + +use llvm; +use llvm::AttributePlace::Function; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, LayoutOf}; +use rustc::session::config::Sanitizer; +use rustc_data_structures::small_c_str::SmallCStr; +use rustc_target::spec::PanicStrategy; +use abi::{Abi, FnType, FnTypeExt}; +use attributes; +use context::CodegenCx; +use common; +use type_::Type; +use value::Value; + + +/// Declare a global value. +/// +/// If there’s a value with the same name already declared, the function will +/// return its Value instead. +pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value { + debug!("declare_global(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { + llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty) + } +} + + +/// Declare a function. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing Value instead. +fn declare_raw_fn( + cx: &CodegenCx<'ll, '_>, + name: &str, + callconv: llvm::CallConv, + ty: &'ll Type, +) -> &'ll Value { + debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); + let namebuf = SmallCStr::new(name); + let llfn = unsafe { + llvm::LLVMRustGetOrInsertFunction(cx.llmod, namebuf.as_ptr(), ty) + }; + + llvm::SetFunctionCallConv(llfn, callconv); + // Function addresses in Rust are never significant, allowing functions to + // be merged. 
+ llvm::SetUnnamedAddr(llfn, true); + + if cx.tcx.sess.opts.cg.no_redzone + .unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) { + llvm::Attribute::NoRedZone.apply_llfn(Function, llfn); + } + + if let Some(ref sanitizer) = cx.tcx.sess.opts.debugging_opts.sanitizer { + match *sanitizer { + Sanitizer::Address => { + llvm::Attribute::SanitizeAddress.apply_llfn(Function, llfn); + }, + Sanitizer::Memory => { + llvm::Attribute::SanitizeMemory.apply_llfn(Function, llfn); + }, + Sanitizer::Thread => { + llvm::Attribute::SanitizeThread.apply_llfn(Function, llfn); + }, + _ => {} + } + } + + match cx.tcx.sess.opts.cg.opt_level.as_ref().map(String::as_ref) { + Some("s") => { + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + }, + Some("z") => { + llvm::Attribute::MinSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + }, + _ => {}, + } + + if cx.tcx.sess.panic_strategy() != PanicStrategy::Unwind { + attributes::unwind(llfn, false); + } + + llfn +} + + +/// Declare a C ABI function. +/// +/// Only use this for foreign function ABIs and glue. For Rust functions use +/// `declare_fn` instead. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing Value instead. +pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> &'ll Value { + declare_raw_fn(cx, name, llvm::CCallConv, fn_type) +} + + +/// Declare a Rust function. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing Value instead. 
+pub fn declare_fn( + cx: &CodegenCx<'ll, 'tcx>, + name: &str, + fn_type: Ty<'tcx>, +) -> &'ll Value { + debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); + let sig = common::ty_fn_sig(cx, fn_type); + let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + debug!("declare_rust_fn (after region erasure) sig={:?}", sig); + + let fty = FnType::new(cx, sig, &[]); + let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx)); + + if cx.layout_of(sig.output()).abi == layout::Abi::Uninhabited { + llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + } + + if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { + attributes::unwind(llfn, false); + } + + fty.apply_attrs_llfn(llfn); + + llfn +} + + +/// Declare a global with an intention to define it. +/// +/// Use this function when you intend to define a global. This function will +/// return None if the name already has a definition associated with it. In that +/// case an error should be reported to the user, because it usually happens due +/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). +pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> { + if get_defined_value(cx, name).is_some() { + None + } else { + Some(declare_global(cx, name, ty)) + } +} + +/// Declare a private global +/// +/// Use this function when you intend to define a global without a name. +pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty) + } +} + +/// Declare a Rust function with an intention to define it. +/// +/// Use this function when you intend to define a function. This function will +/// return panic if the name already has a definition associated with it. This +/// can happen with #[no_mangle] or #[export_name], for example. 
+pub fn define_fn( + cx: &CodegenCx<'ll, 'tcx>, + name: &str, + fn_type: Ty<'tcx>, +) -> &'ll Value { + if get_defined_value(cx, name).is_some() { + cx.sess().fatal(&format!("symbol `{}` already defined", name)) + } else { + declare_fn(cx, name, fn_type) + } +} + +/// Declare a Rust function with an intention to define it. +/// +/// Use this function when you intend to define a function. This function will +/// return panic if the name already has a definition associated with it. This +/// can happen with #[no_mangle] or #[export_name], for example. +pub fn define_internal_fn( + cx: &CodegenCx<'ll, 'tcx>, + name: &str, + fn_type: Ty<'tcx>, +) -> &'ll Value { + let llfn = define_fn(cx, name, fn_type); + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; + llfn +} + + +/// Get declared value by name. +pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { + debug!("get_declared_value(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) } +} + +/// Get defined or externally defined (AvailableExternally linkage) value by +/// name. +pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { + get_declared_value(cx, name).and_then(|val|{ + let declaration = unsafe { + llvm::LLVMIsDeclaration(val) != 0 + }; + if !declaration { + Some(val) + } else { + None + } + }) +} diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs new file mode 100644 index 000000000000..94776f17c798 --- /dev/null +++ b/src/librustc_codegen_llvm/diagnostics.rs @@ -0,0 +1,50 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_snake_case)] + +register_long_diagnostics! { + +E0511: r##" +Invalid monomorphization of an intrinsic function was used. Erroneous code +example: + +```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) +#![feature(platform_intrinsics)] + +extern "platform-intrinsic" { + fn simd_add(a: T, b: T) -> T; +} + +fn main() { + unsafe { simd_add(0, 1); } + // error: invalid monomorphization of `simd_add` intrinsic +} +``` + +The generic type has to be a SIMD type. Example: + +``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + +#[repr(simd)] +#[derive(Copy, Clone)] +struct i32x2(i32, i32); + +extern "platform-intrinsic" { + fn simd_add(a: T, b: T) -> T; +} + +unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok! +``` +"##, + +} diff --git a/src/librustc_trans/glue.rs b/src/librustc_codegen_llvm/glue.rs similarity index 91% rename from src/librustc_trans/glue.rs rename to src/librustc_codegen_llvm/glue.rs index c7275d094018..37ce51da7782 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_codegen_llvm/glue.rs @@ -16,37 +16,36 @@ use std; use builder::Builder; use common::*; -use llvm::{ValueRef}; use llvm; use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; -pub fn size_and_align_of_dst<'a, 'tcx>(bx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef) - -> (ValueRef, ValueRef) { +pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Option<&'ll Value>) + -> (&'ll Value, &'ll Value) { debug!("calculate size of DST: {}; with lost info: {:?}", - t, Value(info)); + t, info); if bx.cx.type_is_sized(t) { let (size, align) = bx.cx.size_and_align_of(t); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", - t, Value(info), size, align); + t, info, size, align); let size = C_usize(bx.cx, size.bytes()); let align = C_usize(bx.cx, align.abi()); return (size, align); 
} - assert!(!info.is_null()); match t.sty { ty::TyDynamic(..) => { // load size/align from vtable - (meth::SIZE.get_usize(bx, info), meth::ALIGN.get_usize(bx, info)) + let vtable = info.unwrap(); + (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable)) } ty::TySlice(_) | ty::TyStr => { let unit = t.sequence_element_type(bx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. let (size, align) = bx.cx.size_and_align_of(unit); - (bx.mul(info, C_usize(bx.cx, size.bytes())), + (bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())), C_usize(bx.cx, align.abi())) } _ => { diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs new file mode 100644 index 000000000000..be3e0d9d4b1e --- /dev/null +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -0,0 +1,1809 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(non_upper_case_globals)] + +use attributes; +use intrinsics::{self, Intrinsic}; +use llvm::{self, TypeKind}; +use abi::{Abi, FnType, LlvmType, PassMode}; +use mir::place::PlaceRef; +use mir::operand::{OperandRef, OperandValue}; +use base::*; +use common::*; +use declare; +use glue; +use type_::Type; +use type_of::LayoutLlvmExt; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; +use rustc::hir; +use syntax::ast; +use syntax::symbol::Symbol; +use builder::Builder; +use value::Value; + +use rustc::session::Session; +use syntax_pos::Span; + +use std::cmp::Ordering; +use std::iter; + +fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { + let llvm_name = match name { + "sqrtf32" => "llvm.sqrt.f32", + "sqrtf64" => "llvm.sqrt.f64", + "powif32" => "llvm.powi.f32", + "powif64" => "llvm.powi.f64", + "sinf32" => "llvm.sin.f32", + "sinf64" => "llvm.sin.f64", + "cosf32" => "llvm.cos.f32", + "cosf64" => "llvm.cos.f64", + "powf32" => "llvm.pow.f32", + "powf64" => "llvm.pow.f64", + "expf32" => "llvm.exp.f32", + "expf64" => "llvm.exp.f64", + "exp2f32" => "llvm.exp2.f32", + "exp2f64" => "llvm.exp2.f64", + "logf32" => "llvm.log.f32", + "logf64" => "llvm.log.f64", + "log10f32" => "llvm.log10.f32", + "log10f64" => "llvm.log10.f64", + "log2f32" => "llvm.log2.f32", + "log2f64" => "llvm.log2.f64", + "fmaf32" => "llvm.fma.f32", + "fmaf64" => "llvm.fma.f64", + "fabsf32" => "llvm.fabs.f32", + "fabsf64" => "llvm.fabs.f64", + "copysignf32" => "llvm.copysign.f32", + "copysignf64" => "llvm.copysign.f64", + "floorf32" => "llvm.floor.f32", + "floorf64" => "llvm.floor.f64", + "ceilf32" => "llvm.ceil.f32", + "ceilf64" => "llvm.ceil.f64", + "truncf32" => "llvm.trunc.f32", + "truncf64" => "llvm.trunc.f64", + "rintf32" => "llvm.rint.f32", + "rintf64" => "llvm.rint.f64", + "nearbyintf32" => "llvm.nearbyint.f32", + "nearbyintf64" => "llvm.nearbyint.f64", + "roundf32" => "llvm.round.f32", + "roundf64" => "llvm.round.f64", + "assume" 
=> "llvm.assume", + "abort" => "llvm.trap", + _ => return None + }; + Some(cx.get_intrinsic(&llvm_name)) +} + +/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, +/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, +/// add them to librustc_codegen_llvm/context.rs +pub fn codegen_intrinsic_call( + bx: &Builder<'a, 'll, 'tcx>, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'ll, 'tcx>], + llresult: &'ll Value, + span: Span, +) { + let cx = bx.cx; + let tcx = cx.tcx; + + let (def_id, substs) = match callee_ty.sty { + ty::TyFnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty) + }; + + let sig = callee_ty.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = &*tcx.item_name(def_id).as_str(); + + let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); + let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); + + let simple = get_simple_intrinsic(cx, name); + let llval = match name { + _ if simple.is_some() => { + bx.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) + } + "unreachable" => { + return; + }, + "likely" => { + let expect = cx.get_intrinsic(&("llvm.expect.i1")); + bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None) + } + "unlikely" => { + let expect = cx.get_intrinsic(&("llvm.expect.i1")); + bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None) + } + "try" => { + try_intrinsic(bx, cx, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); + return; + } + "breakpoint" => { + let llfn = cx.get_intrinsic(&("llvm.debugtrap")); + bx.call(llfn, &[], None) + } + "size_of" => { + let tp_ty = substs.type_at(0); + C_usize(cx, cx.size_of(tp_ty).bytes()) + } + "size_of_val" => { + let tp_ty = substs.type_at(0); + if let 
OperandValue::Pair(_, meta) = args[0].val { + let (llsize, _) = + glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); + llsize + } else { + C_usize(cx, cx.size_of(tp_ty).bytes()) + } + } + "min_align_of" => { + let tp_ty = substs.type_at(0); + C_usize(cx, cx.align_of(tp_ty).abi()) + } + "min_align_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (_, llalign) = + glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); + llalign + } else { + C_usize(cx, cx.align_of(tp_ty).abi()) + } + } + "pref_align_of" => { + let tp_ty = substs.type_at(0); + C_usize(cx, cx.align_of(tp_ty).pref()) + } + "type_name" => { + let tp_ty = substs.type_at(0); + let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); + C_str_slice(cx, ty_name) + } + "type_id" => { + C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) + } + "init" => { + let ty = substs.type_at(0); + if !cx.layout_of(ty).is_zst() { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large data + // structures, and the generated code will be awful. (A telltale sign of this is + // large quantities of `mov [byte ptr foo],0` in the generated code.) 
+ memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1)); + } + return; + } + // Effectively no-ops + "uninit" => { + return; + } + "needs_drop" => { + let tp_ty = substs.type_at(0); + + C_bool(cx, bx.cx.type_needs_drop(tp_ty)) + } + "offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + bx.inbounds_gep(ptr, &[offset]) + } + "arith_offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + bx.gep(ptr, &[offset]) + } + + "copy_nonoverlapping" => { + copy_intrinsic(bx, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) + } + "copy" => { + copy_intrinsic(bx, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) + } + "write_bytes" => { + memset_intrinsic(bx, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + + "volatile_copy_nonoverlapping_memory" => { + copy_intrinsic(bx, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_copy_memory" => { + copy_intrinsic(bx, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_set_memory" => { + memset_intrinsic(bx, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_load" | "unaligned_volatile_load" => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_ty.ret.mode { + ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to()); + } + let load = bx.volatile_load(ptr); + let align = if name == "unaligned_volatile_load" { + 1 + } else { + cx.align_of(tp_ty).abi() as u32 + }; + unsafe { + llvm::LLVMSetAlignment(load, align); + } + to_immediate(bx, load, cx.layout_of(tp_ty)) + }, + "volatile_store" => { + let dst = args[0].deref(bx.cx); + args[1].val.volatile_store(bx, dst); + return; + }, + 
"unaligned_volatile_store" => { + let dst = args[0].deref(bx.cx); + args[1].val.unaligned_volatile_store(bx, dst); + return; + }, + "prefetch_read_data" | "prefetch_write_data" | + "prefetch_read_instruction" | "prefetch_write_instruction" => { + let expect = cx.get_intrinsic(&("llvm.prefetch")); + let (rw, cache_type) = match name { + "prefetch_read_data" => (0, 1), + "prefetch_write_data" => (1, 1), + "prefetch_read_instruction" => (0, 0), + "prefetch_write_instruction" => (1, 0), + _ => bug!() + }; + bx.call(expect, &[ + args[0].immediate(), + C_i32(cx, rw), + args[1].immediate(), + C_i32(cx, cache_type) + ], None) + }, + "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | + "bitreverse" | "add_with_overflow" | "sub_with_overflow" | + "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, cx) { + Some((width, signed)) => + match name { + "ctlz" | "cttz" => { + let y = C_bool(bx.cx, false); + let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); + bx.call(llfn, &[args[0].immediate(), y], None) + } + "ctlz_nonzero" | "cttz_nonzero" => { + let y = C_bool(bx.cx, true); + let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); + let llfn = cx.get_intrinsic(llvm_name); + bx.call(llfn, &[args[0].immediate(), y], None) + } + "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &[args[0].immediate()], None), + "bswap" => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } else { + bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), + &[args[0].immediate()], None) + } + } + "bitreverse" => { + bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), + &[args[0].immediate()], None) + } + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { + let intrinsic = 
format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + &name[..3], width); + let llfn = bx.cx.get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let pair = bx.call(llfn, &[ + args[0].immediate(), + args[1].immediate() + ], None); + let val = bx.extract_value(pair, 0); + let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx)); + + let dest = result.project_field(bx, 0); + bx.store(val, dest.llval, dest.align); + let dest = result.project_field(bx, 1); + bx.store(overflow, dest.llval, dest.align); + + return; + }, + "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()), + "exact_div" => + if signed { + bx.exactsdiv(args[0].immediate(), args[1].immediate()) + } else { + bx.exactudiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_div" => + if signed { + bx.sdiv(args[0].immediate(), args[1].immediate()) + } else { + bx.udiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_rem" => + if signed { + bx.srem(args[0].immediate(), args[1].immediate()) + } else { + bx.urem(args[0].immediate(), args[1].immediate()) + }, + "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()), + "unchecked_shr" => + if signed { + bx.ashr(args[0].immediate(), args[1].immediate()) + } else { + bx.lshr(args[0].immediate(), args[1].immediate()) + }, + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + return; + } + } + + }, + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let sty = &arg_tys[0].sty; + match float_type_width(sty) { + Some(_width) => + match name { + "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => 
bx.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()), + "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", name, sty)); + return; + } + } + + }, + + "discriminant_value" => { + args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty) + } + + name if name.starts_with("simd_") => { + match generic_simd_intrinsic(bx, name, + callee_ty, + args, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return + } + } + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_[_]", and no ordering means SeqCst + name if name.starts_with("atomic_") => { + use llvm::AtomicOrdering::*; + + let split: Vec<&str> = name.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => + (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => + (SequentiallyConsistent, Acquire), + _ => cx.sess().fatal("unknown ordering in atomic intrinsic") + }, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => + (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => + (AcquireRelease, Monotonic), + _ => cx.sess().fatal("unknown ordering in atomic intrinsic") + }, + _ => cx.sess().fatal("Atomic intrinsic not in correct format"), + }; + + let invalid_monomorphization = |ty| { + 
span_invalid_monomorphization_error(tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + }; + + match split[1] { + "cxchg" | "cxchgweak" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; + let pair = bx.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); + let val = bx.extract_value(pair, 0); + let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx)); + + let dest = result.project_field(bx, 0); + bx.store(val, dest.llval, dest.align); + let dest = result.project_field(bx, 1); + bx.store(success, dest.llval, dest.align); + return; + } else { + return invalid_monomorphization(ty); + } + } + + "load" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let align = cx.align_of(ty); + bx.atomic_load(args[0].immediate(), order, align) + } else { + return invalid_monomorphization(ty); + } + } + + "store" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let align = cx.align_of(ty); + bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align); + return; + } else { + return invalid_monomorphization(ty); + } + } + + "fence" => { + bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); + return; + } + + "singlethreadfence" => { + bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); + return; + } + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => llvm::AtomicXchg, + "xadd" => llvm::AtomicAdd, + "xsub" => llvm::AtomicSub, + "and" => llvm::AtomicAnd, + "nand" => llvm::AtomicNand, + "or" => llvm::AtomicOr, + "xor" => llvm::AtomicXor, + "max" => llvm::AtomicMax, + "min" => llvm::AtomicMin, + "umax" => llvm::AtomicUMax, + "umin" => llvm::AtomicUMin, + _ => cx.sess().fatal("unknown atomic 
operation") + }; + + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) + } else { + return invalid_monomorphization(ty); + } + } + } + } + + "nontemporal_store" => { + let dst = args[0].deref(bx.cx); + args[1].val.nontemporal_store(bx, dst); + return; + } + + _ => { + let intr = match Intrinsic::find(&name) { + Some(intr) => intr, + None => bug!("unknown intrinsic '{}'", name), + }; + fn one(x: Vec) -> T { + assert_eq!(x.len(), 1); + x.into_iter().next().unwrap() + } + fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> { + use intrinsics::Type::*; + match *t { + Void => vec![Type::void(cx)], + Integer(_signed, _width, llvm_width) => { + vec![Type::ix(cx, llvm_width as u64)] + } + Float(x) => { + match x { + 32 => vec![Type::f32(cx)], + 64 => vec![Type::f64(cx)], + _ => bug!() + } + } + Pointer(ref t, ref llvm_elem, _const) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![elem.ptr_to()] + } + Vector(ref t, ref llvm_elem, length) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![Type::vector(elem, length as u64)] + } + Aggregate(false, ref contents) => { + let elems = contents.iter() + .map(|t| one(ty_to_type(cx, t))) + .collect::>(); + vec![Type::struct_(cx, &elems, false)] + } + Aggregate(true, ref contents) => { + contents.iter() + .flat_map(|t| ty_to_type(cx, t)) + .collect() + } + } + } + + // This allows an argument list like `foo, (bar, baz), + // qux` to be converted into `foo, bar, baz, qux`, integer + // arguments to be truncated as needed and pointers to be + // cast. + fn modify_as_needed( + bx: &Builder<'a, 'll, 'tcx>, + t: &intrinsics::Type, + arg: &OperandRef<'ll, 'tcx>, + ) -> Vec<&'ll Value> { + match *t { + intrinsics::Type::Aggregate(true, ref contents) => { + // We found a tuple that needs squishing! 
So + // run over the tuple and load each field. + // + // This assumes the type is "simple", i.e. no + // destructors, and the contents are SIMD + // etc. + assert!(!bx.cx.type_needs_drop(arg.layout.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, None, align) => (ptr, align), + _ => bug!() + }; + let arg = PlaceRef::new_sized(ptr, arg.layout, align); + (0..contents.len()).map(|i| { + arg.project_field(bx, i).load(bx).immediate() + }).collect() + } + intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { + let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); + vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())] + } + intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { + let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); + vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))] + } + intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { + // the LLVM intrinsic uses a smaller integer + // size than the C intrinsic's signature, so + // we have to trim it down here. 
+ vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))] + } + _ => vec![arg.immediate()], + } + } + + + let inputs = intr.inputs.iter() + .flat_map(|t| ty_to_type(cx, t)) + .collect::>(); + + let outputs = one(ty_to_type(cx, &intr.output)); + + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(bx, t, arg) + }).collect(); + assert_eq!(inputs.len(), llargs.len()); + + let val = match intr.definition { + intrinsics::IntrinsicDef::Named(name) => { + let f = declare::declare_cfn(cx, + name, + Type::func(&inputs, outputs)); + bx.call(f, &llargs, None) + } + }; + + match *intr.output { + intrinsics::Type::Aggregate(flatten, ref elems) => { + // the output is a tuple so we need to munge it properly + assert!(!flatten); + + for i in 0..elems.len() { + let dest = result.project_field(bx, i); + let val = bx.extract_value(val, i as u64); + bx.store(val, dest.llval, dest.align); + } + return; + } + _ => val, + } + } + }; + + if !fn_ty.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_ty.ret.mode { + let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to()); + bx.store(llval, ptr, result.align); + } else { + OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) + .val.store(bx, result); + } + } +} + +fn copy_intrinsic( + bx: &Builder<'a, 'll, 'tcx>, + allow_overlap: bool, + volatile: bool, + ty: Ty<'tcx>, + dst: &'ll Value, + src: &'ll Value, + count: &'ll Value, +) -> &'ll Value { + let cx = bx.cx; + let (size, align) = cx.size_and_align_of(ty); + let size = C_usize(cx, size.bytes()); + let align = C_i32(cx, align.abi() as i32); + + let operation = if allow_overlap { + "memmove" + } else { + "memcpy" + }; + + let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, + cx.data_layout().pointer_size.bits()); + + let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); + let src_ptr = bx.pointercast(src, Type::i8p(cx)); + let llfn = cx.get_intrinsic(&name); + + bx.call(llfn, + &[dst_ptr, + src_ptr, + bx.mul(size, 
count), + align, + C_bool(cx, volatile)], + None) +} + +fn memset_intrinsic( + bx: &Builder<'a, 'll, 'tcx>, + volatile: bool, + ty: Ty<'tcx>, + dst: &'ll Value, + val: &'ll Value, + count: &'ll Value +) -> &'ll Value { + let cx = bx.cx; + let (size, align) = cx.size_and_align_of(ty); + let size = C_usize(cx, size.bytes()); + let align = C_i32(cx, align.abi() as i32); + let dst = bx.pointercast(dst, Type::i8p(cx)); + call_memset(bx, dst, val, bx.mul(size, count), align, volatile) +} + +fn try_intrinsic( + bx: &Builder<'a, 'll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx>, + func: &'ll Value, + data: &'ll Value, + local_ptr: &'ll Value, + dest: &'ll Value, +) { + if bx.sess().no_landing_pads() { + bx.call(func, &[data], None); + let ptr_align = bx.tcx().data_layout.pointer_align; + bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align); + } else if wants_msvc_seh(bx.sess()) { + codegen_msvc_try(bx, cx, func, data, local_ptr, dest); + } else { + codegen_gnu_try(bx, cx, func, data, local_ptr, dest); + } +} + +// MSVC's definition of the `rust_try` function. +// +// This implementation uses the new exception handling instructions in LLVM +// which have support in LLVM for SEH on MSVC targets. Although these +// instructions are meant to work for all targets, as of the time of this +// writing, however, LLVM does not recommend the usage of these new instructions +// as the old ones are still more optimized. 
+fn codegen_msvc_try( + bx: &Builder<'a, 'll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx>, + func: &'ll Value, + data: &'ll Value, + local_ptr: &'ll Value, + dest: &'ll Value, +) { + let llfn = get_rust_try_fn(cx, &mut |bx| { + let cx = bx.cx; + + bx.set_personality_fn(bx.cx.eh_personality()); + + let normal = bx.build_sibling_block("normal"); + let catchswitch = bx.build_sibling_block("catchswitch"); + let catchpad = bx.build_sibling_block("catchpad"); + let caught = bx.build_sibling_block("caught"); + + let func = llvm::get_param(bx.llfn(), 0); + let data = llvm::get_param(bx.llfn(), 1); + let local_ptr = llvm::get_param(bx.llfn(), 2); + + // We're generating an IR snippet that looks like: + // + // declare i32 @rust_try(%func, %data, %ptr) { + // %slot = alloca i64* + // invoke %func(%data) to label %normal unwind label %catchswitch + // + // normal: + // ret i32 0 + // + // catchswitch: + // %cs = catchswitch within none [%catchpad] unwind to caller + // + // catchpad: + // %tok = catchpad within %cs [%type_descriptor, 0, %slot] + // %ptr[0] = %slot[0] + // %ptr[1] = %slot[1] + // catchret from %tok to label %caught + // + // caught: + // ret i32 1 + // } + // + // This structure follows the basic usage of throw/try/catch in LLVM. + // For example, compile this C++ snippet to see what LLVM generates: + // + // #include + // + // int bar(void (*foo)(void), uint64_t *ret) { + // try { + // foo(); + // return 0; + // } catch(uint64_t a[2]) { + // ret[0] = a[0]; + // ret[1] = a[1]; + // return 1; + // } + // } + // + // More information can be found in libstd's seh.rs implementation. 
+ let i64p = Type::i64(cx).ptr_to(); + let ptr_align = bx.tcx().data_layout.pointer_align; + let slot = bx.alloca(i64p, "slot", ptr_align); + bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), + None); + + normal.ret(C_i32(cx, 0)); + + let cs = catchswitch.catch_switch(None, None, 1); + catchswitch.add_handler(cs, catchpad.llbb()); + + let tcx = cx.tcx; + let tydesc = match tcx.lang_items().msvc_try_filter() { + Some(did) => ::consts::get_static(cx, did), + None => bug!("msvc_try_filter not defined"), + }; + let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]); + let addr = catchpad.load(slot, ptr_align); + + let i64_align = bx.tcx().data_layout.i64_align; + let arg1 = catchpad.load(addr, i64_align); + let val1 = C_i32(cx, 1); + let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); + let local_ptr = catchpad.bitcast(local_ptr, i64p); + catchpad.store(arg1, local_ptr, i64_align); + catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); + catchpad.catch_ret(tok, caught.llbb()); + + caught.ret(C_i32(cx, 1)); + }); + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). + let ret = bx.call(llfn, &[func, data, local_ptr], None); + let i32_align = bx.tcx().data_layout.i32_align; + bx.store(ret, dest, i32_align); +} + +// Definition of the standard "try" function for Rust using the GNU-like model +// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke +// instructions). +// +// This codegen is a little surprising because we always call a shim +// function instead of inlining the call to `invoke` manually here. This is done +// because in LLVM we're only allowed to have one personality per function +// definition. The call to the `try` intrinsic is being inlined into the +// function calling it, and that function may already have other personality +// functions in play. 
By calling a shim we're guaranteed that our shim will have +// the right personality function. +fn codegen_gnu_try( + bx: &Builder<'a, 'll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx>, + func: &'ll Value, + data: &'ll Value, + local_ptr: &'ll Value, + dest: &'ll Value, +) { + let llfn = get_rust_try_fn(cx, &mut |bx| { + let cx = bx.cx; + + // Codegens the shims described above: + // + // bx: + // invoke %func(%args...) normal %normal unwind %catch + // + // normal: + // ret 0 + // + // catch: + // (ptr, _) = landingpad + // store ptr, %local_ptr + // ret 1 + // + // Note that the `local_ptr` data passed into the `try` intrinsic is + // expected to be `*mut *mut u8` for this to actually work, but that's + // managed by the standard library. + + let then = bx.build_sibling_block("then"); + let catch = bx.build_sibling_block("catch"); + + let func = llvm::get_param(bx.llfn(), 0); + let data = llvm::get_param(bx.llfn(), 1); + let local_ptr = llvm::get_param(bx.llfn(), 2); + bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); + then.ret(C_i32(cx, 0)); + + // Type indicator for the exception being thrown. + // + // The first value in this tuple is a pointer to the exception object + // being thrown. The second value is a "selector" indicating which of + // the landing pad clauses the exception's type had been matched to. + // rust_try ignores the selector. + let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], + false); + let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1); + catch.add_clause(vals, C_null(Type::i8p(cx))); + let ptr = catch.extract_value(vals, 0); + let ptr_align = bx.tcx().data_layout.pointer_align; + catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align); + catch.ret(C_i32(cx, 1)); + }); + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). 
+ let ret = bx.call(llfn, &[func, data, local_ptr], None); + let i32_align = bx.tcx().data_layout.i32_align; + bx.store(ret, dest, i32_align); +} + +// Helper function to give a Block to a closure to codegen a shim function. +// This is currently primarily used for the `try` intrinsic functions above. +fn gen_fn<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + name: &str, + inputs: Vec>, + output: Ty<'tcx>, + codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), +) -> &'ll Value { + let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig( + inputs.into_iter(), + output, + false, + hir::Unsafety::Unsafe, + Abi::Rust + ))); + let llfn = declare::define_internal_fn(cx, name, rust_fn_ty); + attributes::from_fn_attrs(cx, llfn, None); + let bx = Builder::new_block(cx, llfn, "entry-block"); + codegen(bx); + llfn +} + +// Helper function used to get a handle to the `__rust_try` function used to +// catch exceptions. +// +// This function is only generated once and is then cached. +fn get_rust_try_fn<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), +) -> &'ll Value { + if let Some(llfn) = cx.rust_try_fn.get() { + return llfn; + } + + // Define the type up front for the signature of the rust_try function. 
+ let tcx = cx.tcx; + let i8p = tcx.mk_mut_ptr(tcx.types.i8); + let fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig( + iter::once(i8p), + tcx.mk_nil(), + false, + hir::Unsafety::Unsafe, + Abi::Rust + ))); + let output = tcx.types.i32; + let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, codegen); + cx.rust_try_fn.set(Some(rust_try)); + return rust_try +} + +fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { + span_err!(a, b, E0511, "{}", c); +} + +fn generic_simd_intrinsic( + bx: &Builder<'a, 'll, 'tcx>, + name: &str, + callee_ty: Ty<'tcx>, + args: &[OperandRef<'ll, 'tcx>], + ret_ty: Ty<'tcx>, + llret_ty: &'ll Type, + span: Span +) -> Result<&'ll Value, ()> { + // macros for error handling: + macro_rules! emit_error { + ($msg: tt) => { + emit_error!($msg, ) + }; + ($msg: tt, $($fmt: tt)*) => { + span_invalid_monomorphization_error( + bx.sess(), span, + &format!(concat!("invalid monomorphization of `{}` intrinsic: ", + $msg), + name, $($fmt)*)); + } + } + macro_rules! return_error { + ($($fmt: tt)*) => { + { + emit_error!($($fmt)*); + return Err(()); + } + } + } + + macro_rules! require { + ($cond: expr, $($fmt: tt)*) => { + if !$cond { + return_error!($($fmt)*); + } + }; + } + macro_rules! 
require_simd { + ($ty: expr, $position: expr) => { + require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty) + } + } + + + + let tcx = bx.tcx(); + let sig = tcx.normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &callee_ty.fn_sig(tcx), + ); + let arg_tys = sig.inputs(); + + // every intrinsic takes a SIMD vector as its first argument + require_simd!(arg_tys[0], "input"); + let in_ty = arg_tys[0]; + let in_elem = arg_tys[0].simd_type(tcx); + let in_len = arg_tys[0].simd_size(tcx); + + let comparison = match name { + "simd_eq" => Some(hir::BinOpKind::Eq), + "simd_ne" => Some(hir::BinOpKind::Ne), + "simd_lt" => Some(hir::BinOpKind::Lt), + "simd_le" => Some(hir::BinOpKind::Le), + "simd_gt" => Some(hir::BinOpKind::Gt), + "simd_ge" => Some(hir::BinOpKind::Ge), + _ => None + }; + + if let Some(cmp_op) = comparison { + require_simd!(ret_ty, "return"); + + let out_len = ret_ty.simd_size(tcx); + require!(in_len == out_len, + "expected return type with length {} (same as input type `{}`), \ + found `{}` with length {}", + in_len, in_ty, + ret_ty, out_len); + require!(llret_ty.element_type().kind() == TypeKind::Integer, + "expected return type with integer elements, found `{}` with non-integer `{}`", + ret_ty, + ret_ty.simd_type(tcx)); + + return Ok(compare_simd_types(bx, + args[0].immediate(), + args[1].immediate(), + in_elem, + llret_ty, + cmp_op)) + } + + if name.starts_with("simd_shuffle") { + let n: usize = match name["simd_shuffle".len()..].parse() { + Ok(n) => n, + Err(_) => span_bug!(span, + "bad `simd_shuffle` instruction only caught in codegen?") + }; + + require_simd!(ret_ty, "return"); + + let out_len = ret_ty.simd_size(tcx); + require!(out_len == n, + "expected return type of length {}, found `{}` with length {}", + n, ret_ty, out_len); + require!(in_elem == ret_ty.simd_type(tcx), + "expected return element type `{}` (element of input `{}`), \ + found `{}` with element type `{}`", + in_elem, in_ty, + ret_ty, 
ret_ty.simd_type(tcx)); + + let total_len = in_len as u128 * 2; + + let vector = args[2].immediate(); + + let indices: Option> = (0..n) + .map(|i| { + let arg_idx = i; + let val = const_get_elt(vector, i as u64); + match const_to_opt_u128(val, true) { + None => { + emit_error!("shuffle index #{} is not a constant", arg_idx); + None + } + Some(idx) if idx >= total_len => { + emit_error!("shuffle index #{} is out of bounds (limit {})", + arg_idx, total_len); + None + } + Some(idx) => Some(C_i32(bx.cx, idx as i32)), + } + }) + .collect(); + let indices = match indices { + Some(i) => i, + None => return Ok(C_null(llret_ty)) + }; + + return Ok(bx.shuffle_vector(args[0].immediate(), + args[1].immediate(), + C_vector(&indices))) + } + + if name == "simd_insert" { + require!(in_elem == arg_tys[2], + "expected inserted type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, arg_tys[2]); + return Ok(bx.insert_element(args[0].immediate(), + args[2].immediate(), + args[1].immediate())) + } + if name == "simd_extract" { + require!(ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, ret_ty); + return Ok(bx.extract_element(args[0].immediate(), args[1].immediate())) + } + + if name == "simd_select" { + let m_elem_ty = in_elem; + let m_len = in_len; + let v_len = arg_tys[1].simd_size(tcx); + require!(m_len == v_len, + "mismatched lengths: mask length `{}` != other vector length `{}`", + m_len, v_len + ); + match m_elem_ty.sty { + ty::TyInt(_) => {}, + _ => { + return_error!("mask element type is `{}`, expected `i_`", m_elem_ty); + } + } + // truncate the mask to a vector of i1s + let i1 = Type::i1(bx.cx); + let i1xn = Type::vector(i1, m_len as u64); + let m_i1s = bx.trunc(args[0].immediate(), i1xn); + return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); + } + + fn simd_simple_float_intrinsic( + name: &str, + in_elem: &::rustc::ty::TyS, + in_ty: &::rustc::ty::TyS, + in_len: usize, + bx: &Builder<'a, 'll, 
'tcx>, + span: Span, + args: &[OperandRef<'ll, 'tcx>], + ) -> Result<&'ll Value, ()> { + macro_rules! emit_error { + ($msg: tt) => { + emit_error!($msg, ) + }; + ($msg: tt, $($fmt: tt)*) => { + span_invalid_monomorphization_error( + bx.sess(), span, + &format!(concat!("invalid monomorphization of `{}` intrinsic: ", + $msg), + name, $($fmt)*)); + } + } + macro_rules! return_error { + ($($fmt: tt)*) => { + { + emit_error!($($fmt)*); + return Err(()); + } + } + } + let ety = match in_elem.sty { + ty::TyFloat(f) if f.bit_width() == 32 => { + if in_len < 2 || in_len > 16 { + return_error!( + "unsupported floating-point vector `{}` with length `{}` \ + out-of-range [2, 16]", + in_ty, in_len); + } + "f32" + }, + ty::TyFloat(f) if f.bit_width() == 64 => { + if in_len < 2 || in_len > 8 { + return_error!("unsupported floating-point vector `{}` with length `{}` \ + out-of-range [2, 8]", + in_ty, in_len); + } + "f64" + }, + ty::TyFloat(f) => { + return_error!("unsupported element type `{}` of floating-point vector `{}`", + f, in_ty); + }, + _ => { + return_error!("`{}` is not a floating-point type", in_ty); + } + }; + + let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety); + let intrinsic = bx.cx.get_intrinsic(&llvm_name); + let c = bx.call(intrinsic, + &args.iter().map(|arg| arg.immediate()).collect::>(), + None); + unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) }; + return Ok(c); + } + + if name == "simd_fsqrt" { + return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fsin" { + return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fcos" { + return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fabs" { + return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_floor" { + return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args); 
+ } + + if name == "simd_ceil" { + return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fexp" { + return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fexp2" { + return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_flog10" { + return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_flog2" { + return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_flog" { + return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fpowi" { + return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fpow" { + return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args); + } + + if name == "simd_fma" { + return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args); + } + + // FIXME: use: + // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182 + // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81 + fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String { + let p0s: String = "p0".repeat(no_pointers); + match elem_ty.sty { + ty::TyInt(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), + ty::TyUint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), + ty::TyFloat(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()), + _ => unreachable!(), + } + } + + fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize, + mut no_pointers: usize) -> &'ll Type { + // FIXME: use cx.layout_of(ty).llvm_type() ? 
+ let mut elem_ty = match elem_ty.sty { + ty::TyInt(v) => Type::int_from_ty(cx, v), + ty::TyUint(v) => Type::uint_from_ty(cx, v), + ty::TyFloat(v) => Type::float_from_ty(cx, v), + _ => unreachable!(), + }; + while no_pointers > 0 { + elem_ty = elem_ty.ptr_to(); + no_pointers -= 1; + } + Type::vector(elem_ty, vec_len as u64) + } + + + if name == "simd_gather" { + // simd_gather(values: , pointers: , + // mask: ) -> + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + require_simd!(ret_ty, "return"); + + // Of the same length: + require!(in_len == arg_tys[1].simd_size(tcx), + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", "second", in_len, in_ty, arg_tys[1], + arg_tys[1].simd_size(tcx)); + require!(in_len == arg_tys[2].simd_size(tcx), + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", "third", in_len, in_ty, arg_tys[2], + arg_tys[2].simd_size(tcx)); + + // The return type must match the first argument type + require!(ret_ty == in_ty, + "expected return type `{}`, found `{}`", + in_ty, ret_ty); + + // This counts how many pointers + fn ptr_count(t: ty::Ty) -> usize { + match t.sty { + ty::TyRawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: ty::Ty) -> ty::Ty { + match t.sty { + ty::TyRawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty { + ty::TyRawPtr(p) if p.ty == in_elem => (ptr_count(arg_tys[1].simd_type(tcx)), + non_ptr(arg_tys[1].simd_type(tcx))), + _ => { + require!(false, 
"expected element type `{}` of second argument `{}` \ + to be a pointer to the element type `{}` of the first \ + argument `{}`, found `{}` != `*_ {}`", + arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty, + arg_tys[1].simd_type(tcx).sty, in_elem); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx))); + assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx))); + + // The element type of the third argument must be a signed integer type of any width: + match arg_tys[2].simd_type(tcx).sty { + ty::TyInt(_) => (), + _ => { + require!(false, "expected element type `{}` of third argument `{}` \ + to be a signed integer type", + arg_tys[2].simd_type(tcx).sty, arg_tys[2]); + } + } + + // Alignment of T, must be a constant integer value: + let alignment_ty = Type::i32(bx.cx); + let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = Type::i1(bx.cx); + let i1xn = Type::vector(i1, in_len as u64); + (bx.trunc(args[2].immediate(), i1xn), i1xn) + }; + + // Type of the vector of pointers: + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); + + // Type of the vector of elements: + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); + + let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", + llvm_elem_vec_str, llvm_pointer_vec_str); + let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, + Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty, + llvm_elem_vec_ty], llvm_elem_vec_ty)); + llvm::SetUnnamedAddr(f, false); + let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], + None); + return Ok(v); + } + + if name == "simd_scatter" { 
+ // simd_scatter(values: , pointers: , + // mask: ) -> () + // * N: number of elements in the input vectors + // * T: type of the element to load + // * M: any integer width is supported, will be truncated to i1 + + // All types must be simd vector types + require_simd!(in_ty, "first"); + require_simd!(arg_tys[1], "second"); + require_simd!(arg_tys[2], "third"); + + // Of the same length: + require!(in_len == arg_tys[1].simd_size(tcx), + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", "second", in_len, in_ty, arg_tys[1], + arg_tys[1].simd_size(tcx)); + require!(in_len == arg_tys[2].simd_size(tcx), + "expected {} argument with length {} (same as input type `{}`), \ + found `{}` with length {}", "third", in_len, in_ty, arg_tys[2], + arg_tys[2].simd_size(tcx)); + + // This counts how many pointers + fn ptr_count(t: ty::Ty) -> usize { + match t.sty { + ty::TyRawPtr(p) => 1 + ptr_count(p.ty), + _ => 0, + } + } + + // Non-ptr type + fn non_ptr(t: ty::Ty) -> ty::Ty { + match t.sty { + ty::TyRawPtr(p) => non_ptr(p.ty), + _ => t, + } + } + + // The second argument must be a simd vector with an element type that's a pointer + // to the element type of the first argument + let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).sty { + ty::TyRawPtr(p) if p.ty == in_elem && p.mutbl == hir::MutMutable + => (ptr_count(arg_tys[1].simd_type(tcx)), + non_ptr(arg_tys[1].simd_type(tcx))), + _ => { + require!(false, "expected element type `{}` of second argument `{}` \ + to be a pointer to the element type `{}` of the first \ + argument `{}`, found `{}` != `*mut {}`", + arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty, + arg_tys[1].simd_type(tcx).sty, in_elem); + unreachable!(); + } + }; + assert!(pointer_count > 0); + assert!(pointer_count - 1 == ptr_count(arg_tys[0].simd_type(tcx))); + assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx))); + + // The element type of the third argument must be a signed 
integer type of any width: + match arg_tys[2].simd_type(tcx).sty { + ty::TyInt(_) => (), + _ => { + require!(false, "expected element type `{}` of third argument `{}` \ + to be a signed integer type", + arg_tys[2].simd_type(tcx).sty, arg_tys[2]); + } + } + + // Alignment of T, must be a constant integer value: + let alignment_ty = Type::i32(bx.cx); + let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + + // Truncate the mask vector to a vector of i1s: + let (mask, mask_ty) = { + let i1 = Type::i1(bx.cx); + let i1xn = Type::vector(i1, in_len as u64); + (bx.trunc(args[2].immediate(), i1xn), i1xn) + }; + + let ret_t = Type::void(bx.cx); + + // Type of the vector of pointers: + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); + + // Type of the vector of elements: + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); + + let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", + llvm_elem_vec_str, llvm_pointer_vec_str); + let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, + Type::func(&[llvm_elem_vec_ty, + llvm_pointer_vec_ty, + alignment_ty, + mask_ty], ret_t)); + llvm::SetUnnamedAddr(f, false); + let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], + None); + return Ok(v); + } + + macro_rules! 
arith_red { + ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => { + if name == $name { + require!(ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, ret_ty); + return match in_elem.sty { + ty::TyInt(_) | ty::TyUint(_) => { + let r = bx.$integer_reduce(args[0].immediate()); + if $ordered { + // if overflow occurs, the result is the + // mathematical result modulo 2^n: + if name.contains("mul") { + Ok(bx.mul(args[1].immediate(), r)) + } else { + Ok(bx.add(args[1].immediate(), r)) + } + } else { + Ok(bx.$integer_reduce(args[0].immediate())) + } + }, + ty::TyFloat(f) => { + // ordered arithmetic reductions take an accumulator + let acc = if $ordered { + let acc = args[1].immediate(); + // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36734 + // * if the accumulator of the fadd isn't 0, incorrect + // code is generated + // * if the accumulator of the fmul isn't 1, incorrect + // code is generated + match const_get_real(acc) { + None => return_error!("accumulator of {} is not a constant", $name), + Some((v, loses_info)) => { + if $name.contains("mul") && v != 1.0_f64 { + return_error!("accumulator of {} is not 1.0", $name); + } else if $name.contains("add") && v != 0.0_f64 { + return_error!("accumulator of {} is not 0.0", $name); + } else if loses_info { + return_error!("accumulator of {} loses information", $name); + } + } + } + acc + } else { + // unordered arithmetic reductions do not: + match f.bit_width() { + 32 => C_undef(Type::f32(bx.cx)), + 64 => C_undef(Type::f64(bx.cx)), + v => { + return_error!(r#" +unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, + $name, in_ty, in_elem, v, ret_ty + ) + } + } + + }; + Ok(bx.$float_reduce(acc, args[0].immediate())) + } + _ => { + return_error!( + "unsupported {} from `{}` with element `{}` to `{}`", + $name, in_ty, in_elem, ret_ty + ) + }, + } + } + } + } + + arith_red!("simd_reduce_add_ordered": vector_reduce_add, 
vector_reduce_fadd_fast, true); + arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul_fast, true); + arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false); + arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false); + + macro_rules! minmax_red { + ($name:tt: $int_red:ident, $float_red:ident) => { + if name == $name { + require!(ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, ret_ty); + return match in_elem.sty { + ty::TyInt(_i) => { + Ok(bx.$int_red(args[0].immediate(), true)) + }, + ty::TyUint(_u) => { + Ok(bx.$int_red(args[0].immediate(), false)) + }, + ty::TyFloat(_f) => { + Ok(bx.$float_red(args[0].immediate())) + } + _ => { + return_error!("unsupported {} from `{}` with element `{}` to `{}`", + $name, in_ty, in_elem, ret_ty) + }, + } + } + + } + } + + minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin); + minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax); + + minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast); + minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast); + + macro_rules! 
bitwise_red { + ($name:tt : $red:ident, $boolean:expr) => { + if name == $name { + let input = if !$boolean { + require!(ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, ret_ty); + args[0].immediate() + } else { + match in_elem.sty { + ty::TyInt(_) | ty::TyUint(_) => {}, + _ => { + return_error!("unsupported {} from `{}` with element `{}` to `{}`", + $name, in_ty, in_elem, ret_ty) + } + } + + // boolean reductions operate on vectors of i1s: + let i1 = Type::i1(bx.cx); + let i1xn = Type::vector(i1, in_len as u64); + bx.trunc(args[0].immediate(), i1xn) + }; + return match in_elem.sty { + ty::TyInt(_) | ty::TyUint(_) => { + let r = bx.$red(input); + Ok( + if !$boolean { + r + } else { + bx.zext(r, Type::bool(bx.cx)) + } + ) + }, + _ => { + return_error!("unsupported {} from `{}` with element `{}` to `{}`", + $name, in_ty, in_elem, ret_ty) + }, + } + } + } + } + + bitwise_red!("simd_reduce_and": vector_reduce_and, false); + bitwise_red!("simd_reduce_or": vector_reduce_or, false); + bitwise_red!("simd_reduce_xor": vector_reduce_xor, false); + bitwise_red!("simd_reduce_all": vector_reduce_and, true); + bitwise_red!("simd_reduce_any": vector_reduce_or, true); + + if name == "simd_cast" { + require_simd!(ret_ty, "return"); + let out_len = ret_ty.simd_size(tcx); + require!(in_len == out_len, + "expected return type with length {} (same as input type `{}`), \ + found `{}` with length {}", + in_len, in_ty, + ret_ty, out_len); + // casting cares about nominal type, not just structural type + let out_elem = ret_ty.simd_type(tcx); + + if in_elem == out_elem { return Ok(args[0].immediate()); } + + enum Style { Float, Int(/* is signed? */ bool), Unsupported } + + let (in_style, in_width) = match in_elem.sty { + // vectors of pointer-sized integers should've been + // disallowed before here, so this unwrap is safe. 
+ ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()), + ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()), + ty::TyFloat(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0) + }; + let (out_style, out_width) = match out_elem.sty { + ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()), + ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()), + ty::TyFloat(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0) + }; + + match (in_style, out_style) { + (Style::Int(in_is_signed), Style::Int(_)) => { + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => if in_is_signed { + bx.sext(args[0].immediate(), llret_ty) + } else { + bx.zext(args[0].immediate(), llret_ty) + } + }) + } + (Style::Int(in_is_signed), Style::Float) => { + return Ok(if in_is_signed { + bx.sitofp(args[0].immediate(), llret_ty) + } else { + bx.uitofp(args[0].immediate(), llret_ty) + }) + } + (Style::Float, Style::Int(out_is_signed)) => { + return Ok(if out_is_signed { + bx.fptosi(args[0].immediate(), llret_ty) + } else { + bx.fptoui(args[0].immediate(), llret_ty) + }) + } + (Style::Float, Style::Float) => { + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => bx.fpext(args[0].immediate(), llret_ty) + }) + } + _ => {/* Unsupported. Fallthrough. */} + } + require!(false, + "unsupported cast from `{}` with element `{}` to `{}` with element `{}`", + in_ty, in_elem, + ret_ty, out_elem); + } + macro_rules! 
arith { + ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { + $(if name == stringify!($name) { + match in_elem.sty { + $($(ty::$p(_))|* => { + return Ok(bx.$call(args[0].immediate(), args[1].immediate())) + })* + _ => {}, + } + require!(false, + "unsupported operation on `{}` with element `{}`", + in_ty, + in_elem) + })* + } + } + arith! { + simd_add: TyUint, TyInt => add, TyFloat => fadd; + simd_sub: TyUint, TyInt => sub, TyFloat => fsub; + simd_mul: TyUint, TyInt => mul, TyFloat => fmul; + simd_div: TyUint => udiv, TyInt => sdiv, TyFloat => fdiv; + simd_rem: TyUint => urem, TyInt => srem, TyFloat => frem; + simd_shl: TyUint, TyInt => shl; + simd_shr: TyUint => lshr, TyInt => ashr; + simd_and: TyUint, TyInt => and; + simd_or: TyUint, TyInt => or; + simd_xor: TyUint, TyInt => xor; + simd_fmax: TyFloat => maxnum; + simd_fmin: TyFloat => minnum; + } + span_bug!(span, "unknown SIMD intrinsic"); +} + +// Returns the width of an int Ty, and if it's signed or not +// Returns None if the type is not an integer +// FIXME: there’s multiple of this functions, investigate using some of the already existing +// stuffs. 
+fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> { + match ty.sty { + ty::TyInt(t) => Some((match t { + ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64, + ast::IntTy::I8 => 8, + ast::IntTy::I16 => 16, + ast::IntTy::I32 => 32, + ast::IntTy::I64 => 64, + ast::IntTy::I128 => 128, + }, true)), + ty::TyUint(t) => Some((match t { + ast::UintTy::Usize => cx.tcx.sess.target.usize_ty.bit_width().unwrap() as u64, + ast::UintTy::U8 => 8, + ast::UintTy::U16 => 16, + ast::UintTy::U32 => 32, + ast::UintTy::U64 => 64, + ast::UintTy::U128 => 128, + }, false)), + _ => None, + } +} + +// Returns the width of a float TypeVariant +// Returns None if the type is not a float +fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>) -> Option { + match *sty { + ty::TyFloat(t) => Some(t.bit_width() as u64), + _ => None, + } +} diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs new file mode 100644 index 000000000000..a65511a24af7 --- /dev/null +++ b/src/librustc_codegen_llvm/lib.rs @@ -0,0 +1,418 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Rust compiler. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. 
+ +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(box_patterns)] +#![feature(box_syntax)] +#![feature(crate_visibility_modifier)] +#![feature(custom_attribute)] +#![feature(extern_types)] +#![feature(in_band_lifetimes)] +#![allow(unused_attributes)] +#![feature(libc)] +#![cfg_attr(not(stage0), feature(nll))] +#![feature(quote)] +#![feature(range_contains)] +#![feature(rustc_diagnostic_macros)] +#![feature(slice_sort_by_cached_key)] +#![feature(optin_builtin_traits)] +#![feature(concat_idents)] +#![feature(link_args)] +#![feature(static_nobundle)] + +use back::write::create_target_machine; +use rustc::dep_graph::WorkProduct; +use syntax_pos::symbol::Symbol; + +#[macro_use] extern crate bitflags; +extern crate flate2; +extern crate libc; +#[macro_use] extern crate rustc; +extern crate jobserver; +extern crate num_cpus; +extern crate rustc_mir; +extern crate rustc_allocator; +extern crate rustc_apfloat; +extern crate rustc_target; +#[macro_use] extern crate rustc_data_structures; +extern crate rustc_demangle; +extern crate rustc_incremental; +extern crate rustc_llvm; +extern crate rustc_platform_intrinsics as intrinsics; +extern crate rustc_codegen_utils; +extern crate rustc_fs_util; + +#[macro_use] extern crate log; +#[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_errors as errors; +extern crate serialize; +extern crate cc; // Used to locate MSVC +extern crate tempfile; + +use back::bytecode::RLIB_BYTECODE_EXTENSION; + +pub use llvm_util::target_features; + +use std::any::Any; +use std::path::PathBuf; +use std::sync::mpsc; +use rustc_data_structures::sync::Lrc; + +use rustc::dep_graph::DepGraph; +use rustc::hir::def_id::CrateNum; +use rustc::middle::cstore::MetadataLoader; +use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; +use 
rustc::middle::lang_items::LangItem; +use rustc::session::{Session, CompileIncomplete}; +use rustc::session::config::{OutputFilenames, OutputType, PrintRequest}; +use rustc::ty::{self, TyCtxt}; +use rustc::util::time_graph; +use rustc::util::nodemap::{FxHashSet, FxHashMap}; +use rustc::util::profiling::ProfileCategory; +use rustc_mir::monomorphize; +use rustc_codegen_utils::codegen_backend::CodegenBackend; + +mod diagnostics; + +mod back { + pub use rustc_codegen_utils::symbol_names; + mod archive; + pub mod bytecode; + mod command; + pub mod linker; + pub mod link; + mod lto; + pub mod symbol_export; + pub mod write; + mod rpath; + pub mod wasm; +} + +mod abi; +mod allocator; +mod asm; +mod attributes; +mod base; +mod builder; +mod callee; +mod common; +mod consts; +mod context; +mod debuginfo; +mod declare; +mod glue; +mod intrinsic; +pub mod llvm; +mod llvm_util; +mod metadata; +mod meth; +mod mir; +mod mono_item; +mod type_; +mod type_of; +mod value; + +pub struct LlvmCodegenBackend(()); + +impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis +impl !Sync for LlvmCodegenBackend {} + +impl LlvmCodegenBackend { + pub fn new() -> Box { + box LlvmCodegenBackend(()) + } +} + +impl CodegenBackend for LlvmCodegenBackend { + fn init(&self, sess: &Session) { + llvm_util::init(sess); // Make sure llvm is inited + } + + fn print(&self, req: PrintRequest, sess: &Session) { + match req { + PrintRequest::RelocationModels => { + println!("Available relocation models:"); + for &(name, _) in back::write::RELOC_MODEL_ARGS.iter() { + println!(" {}", name); + } + println!(""); + } + PrintRequest::CodeModels => { + println!("Available code models:"); + for &(name, _) in back::write::CODE_GEN_MODEL_ARGS.iter(){ + println!(" {}", name); + } + println!(""); + } + PrintRequest::TlsModels => { + println!("Available TLS models:"); + for &(name, _) in back::write::TLS_MODEL_ARGS.iter(){ + println!(" {}", name); + } + println!(""); + } + req => llvm_util::print(req, sess), 
+ } + } + + fn print_passes(&self) { + llvm_util::print_passes(); + } + + fn print_version(&self) { + llvm_util::print_version(); + } + + fn diagnostics(&self) -> &[(&'static str, &'static str)] { + &DIAGNOSTICS + } + + fn target_features(&self, sess: &Session) -> Vec { + target_features(sess) + } + + fn metadata_loader(&self) -> Box { + box metadata::LlvmMetadataLoader + } + + fn provide(&self, providers: &mut ty::query::Providers) { + back::symbol_names::provide(providers); + back::symbol_export::provide(providers); + base::provide(providers); + attributes::provide(providers); + } + + fn provide_extern(&self, providers: &mut ty::query::Providers) { + back::symbol_export::provide_extern(providers); + base::provide_extern(providers); + attributes::provide_extern(providers); + } + + fn codegen_crate<'a, 'tcx>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> + ) -> Box { + box base::codegen_crate(tcx, rx) + } + + fn join_codegen_and_link( + &self, + ongoing_codegen: Box, + sess: &Session, + dep_graph: &DepGraph, + outputs: &OutputFilenames, + ) -> Result<(), CompileIncomplete>{ + use rustc::util::common::time; + let (ongoing_codegen, work_products) = + ongoing_codegen.downcast::<::back::write::OngoingCodegen>() + .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box") + .join(sess); + if sess.opts.debugging_opts.incremental_info { + back::write::dump_incremental_data(&ongoing_codegen); + } + + time(sess, + "serialize work products", + move || rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)); + + sess.compile_status()?; + + if !sess.opts.output_types.keys().any(|&i| i == OutputType::Exe || + i == OutputType::Metadata) { + return Ok(()); + } + + // Run the linker on any artifacts that resulted from the LLVM run. + // This should produce either a finished executable or library. 
+ sess.profiler(|p| p.start_activity(ProfileCategory::Linking)); + time(sess, "linking", || { + back::link::link_binary(sess, &ongoing_codegen, + outputs, &ongoing_codegen.crate_name.as_str()); + }); + sess.profiler(|p| p.end_activity(ProfileCategory::Linking)); + + // Now that we won't touch anything in the incremental compilation directory + // any more, we can finalize it (which involves renaming it) + rustc_incremental::finalize_session_directory(sess, ongoing_codegen.link.crate_hash); + + Ok(()) + } +} + +/// This is the entrypoint for a hot plugged rustc_codegen_llvm +#[no_mangle] +pub fn __rustc_codegen_backend() -> Box { + LlvmCodegenBackend::new() +} + +struct ModuleCodegen { + /// The name of the module. When the crate may be saved between + /// compilations, incremental compilation requires that name be + /// unique amongst **all** crates. Therefore, it should contain + /// something unique to this crate (e.g., a module path) as well + /// as the crate name and disambiguator. + /// We currently generate these names via CodegenUnit::build_cgu_name(). 
+ name: String, + source: ModuleSource, + kind: ModuleKind, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +enum ModuleKind { + Regular, + Metadata, + Allocator, +} + +impl ModuleCodegen { + fn llvm(&self) -> Option<&ModuleLlvm> { + match self.source { + ModuleSource::Codegened(ref llvm) => Some(llvm), + ModuleSource::Preexisting(_) => None, + } + } + + fn into_compiled_module(self, + emit_obj: bool, + emit_bc: bool, + emit_bc_compressed: bool, + outputs: &OutputFilenames) -> CompiledModule { + let pre_existing = match self.source { + ModuleSource::Preexisting(_) => true, + ModuleSource::Codegened(_) => false, + }; + let object = if emit_obj { + Some(outputs.temp_path(OutputType::Object, Some(&self.name))) + } else { + None + }; + let bytecode = if emit_bc { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) + } else { + None + }; + let bytecode_compressed = if emit_bc_compressed { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) + .with_extension(RLIB_BYTECODE_EXTENSION)) + } else { + None + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + pre_existing, + object, + bytecode, + bytecode_compressed, + } + } +} + +#[derive(Debug)] +struct CompiledModule { + name: String, + kind: ModuleKind, + pre_existing: bool, + object: Option, + bytecode: Option, + bytecode_compressed: Option, +} + +enum ModuleSource { + /// Copy the `.o` files or whatever from the incr. comp. directory. + Preexisting(WorkProduct), + + /// Rebuild from this LLVM module. 
+ Codegened(ModuleLlvm), +} + +struct ModuleLlvm { + llcx: &'static mut llvm::Context, + llmod_raw: *const llvm::Module, + tm: &'static mut llvm::TargetMachine, +} + +unsafe impl Send for ModuleLlvm { } +unsafe impl Sync for ModuleLlvm { } + +impl ModuleLlvm { + fn new(sess: &Session, mod_name: &str) -> Self { + unsafe { + let llcx = llvm::LLVMRustContextCreate(sess.fewer_names()); + let llmod_raw = context::create_module(sess, llcx, mod_name) as *const _; + + ModuleLlvm { + llmod_raw, + llcx, + tm: create_target_machine(sess, false), + } + } + } + + fn llmod(&self) -> &llvm::Module { + unsafe { + &*self.llmod_raw + } + } +} + +impl Drop for ModuleLlvm { + fn drop(&mut self) { + unsafe { + llvm::LLVMContextDispose(&mut *(self.llcx as *mut _)); + llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _)); + } + } +} + +struct CodegenResults { + crate_name: Symbol, + modules: Vec, + allocator_module: Option, + metadata_module: CompiledModule, + link: rustc::middle::cstore::LinkMeta, + metadata: rustc::middle::cstore::EncodedMetadata, + windows_subsystem: Option, + linker_info: back::linker::LinkerInfo, + crate_info: CrateInfo, +} + +/// Misc info we load from metadata to persist beyond the tcx +struct CrateInfo { + panic_runtime: Option, + compiler_builtins: Option, + profiler_runtime: Option, + sanitizer_runtime: Option, + is_no_builtins: FxHashSet, + native_libraries: FxHashMap>>, + crate_name: FxHashMap, + used_libraries: Lrc>, + link_args: Lrc>, + used_crate_source: FxHashMap>, + used_crates_static: Vec<(CrateNum, LibSource)>, + used_crates_dynamic: Vec<(CrateNum, LibSource)>, + wasm_imports: FxHashMap, + lang_item_to_crate: FxHashMap, + missing_lang_items: FxHashMap>, +} + +__build_diagnostic_array! 
{ librustc_codegen_llvm, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/llvm/archive_ro.rs b/src/librustc_codegen_llvm/llvm/archive_ro.rs new file mode 100644 index 000000000000..4cbf0d92d7b9 --- /dev/null +++ b/src/librustc_codegen_llvm/llvm/archive_ro.rs @@ -0,0 +1,131 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A wrapper around LLVM's archive (.a) code + +use std::ffi::CString; +use std::path::Path; +use std::slice; +use std::str; + +pub struct ArchiveRO { + pub raw: &'static mut super::Archive, +} + +unsafe impl Send for ArchiveRO {} + +pub struct Iter<'a> { + raw: &'a mut super::ArchiveIterator<'a>, +} + +pub struct Child<'a> { + pub raw: &'a mut super::ArchiveChild<'a>, +} + +impl ArchiveRO { + /// Opens a static archive for read-only purposes. This is more optimized + /// than the `open` method because it uses LLVM's internal `Archive` class + /// rather than shelling out to `ar` for everything. + /// + /// If this archive is used with a mutable method, then an error will be + /// raised. 
+ pub fn open(dst: &Path) -> Result { + return unsafe { + let s = path2cstr(dst); + let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| { + super::last_error().unwrap_or("failed to open archive".to_string()) + })?; + Ok(ArchiveRO { raw: ar }) + }; + + #[cfg(unix)] + fn path2cstr(p: &Path) -> CString { + use std::os::unix::prelude::*; + use std::ffi::OsStr; + let p: &OsStr = p.as_ref(); + CString::new(p.as_bytes()).unwrap() + } + #[cfg(windows)] + fn path2cstr(p: &Path) -> CString { + CString::new(p.to_str().unwrap()).unwrap() + } + } + + pub fn iter(&self) -> Iter { + unsafe { + Iter { + raw: super::LLVMRustArchiveIteratorNew(self.raw), + } + } + } +} + +impl Drop for ArchiveRO { + fn drop(&mut self) { + unsafe { + super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _)); + } + } +} + +impl<'a> Iterator for Iter<'a> { + type Item = Result, String>; + + fn next(&mut self) -> Option, String>> { + unsafe { + match super::LLVMRustArchiveIteratorNext(self.raw) { + Some(raw) => Some(Ok(Child { raw })), + None => super::last_error().map(Err), + } + } + } +} + +impl<'a> Drop for Iter<'a> { + fn drop(&mut self) { + unsafe { + super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _)); + } + } +} + +impl<'a> Child<'a> { + pub fn name(&self) -> Option<&'a str> { + unsafe { + let mut name_len = 0; + let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len); + if name_ptr.is_null() { + None + } else { + let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize); + str::from_utf8(name).ok().map(|s| s.trim()) + } + } + } + + pub fn data(&self) -> &'a [u8] { + unsafe { + let mut data_len = 0; + let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len); + if data_ptr.is_null() { + panic!("failed to read data from archive child"); + } + slice::from_raw_parts(data_ptr as *const u8, data_len as usize) + } + } +} + +impl<'a> Drop for Child<'a> { + fn drop(&mut self) { + unsafe { + super::LLVMRustArchiveChildFree(&mut *(self.raw 
as *mut _)); + } + } +} diff --git a/src/librustc_codegen_llvm/llvm/diagnostic.rs b/src/librustc_codegen_llvm/llvm/diagnostic.rs new file mode 100644 index 000000000000..c41a5f74ae30 --- /dev/null +++ b/src/librustc_codegen_llvm/llvm/diagnostic.rs @@ -0,0 +1,179 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! LLVM diagnostic reports. + +pub use self::OptimizationDiagnosticKind::*; +pub use self::Diagnostic::*; + +use libc::c_uint; +use value::Value; + +use super::{DiagnosticInfo, Twine}; + +#[derive(Copy, Clone)] +pub enum OptimizationDiagnosticKind { + OptimizationRemark, + OptimizationMissed, + OptimizationAnalysis, + OptimizationAnalysisFPCommute, + OptimizationAnalysisAliasing, + OptimizationFailure, + OptimizationRemarkOther, +} + +impl OptimizationDiagnosticKind { + pub fn describe(self) -> &'static str { + match self { + OptimizationRemark | OptimizationRemarkOther => "remark", + OptimizationMissed => "missed", + OptimizationAnalysis => "analysis", + OptimizationAnalysisFPCommute => "floating-point", + OptimizationAnalysisAliasing => "aliasing", + OptimizationFailure => "failure", + } + } +} + +pub struct OptimizationDiagnostic<'ll> { + pub kind: OptimizationDiagnosticKind, + pub pass_name: String, + pub function: &'ll Value, + pub line: c_uint, + pub column: c_uint, + pub filename: String, + pub message: String, +} + +impl OptimizationDiagnostic<'ll> { + unsafe fn unpack( + kind: OptimizationDiagnosticKind, + di: &'ll DiagnosticInfo, + ) -> Self { + let mut function = None; + let mut line = 0; + let mut column = 0; + + let mut message = None; + let mut filename = None; + let pass_name = super::build_string(|pass_name| + message = 
super::build_string(|message| + filename = super::build_string(|filename| + super::LLVMRustUnpackOptimizationDiagnostic(di, + pass_name, + &mut function, + &mut line, + &mut column, + filename, + message) + ).ok() + ).ok() + ).ok(); + + let mut filename = filename.unwrap_or(String::new()); + if filename.is_empty() { + filename.push_str(""); + } + + OptimizationDiagnostic { + kind, + pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"), + function: function.unwrap(), + line, + column, + filename, + message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM") + } + } +} + +#[derive(Copy, Clone)] +pub struct InlineAsmDiagnostic<'ll> { + pub cookie: c_uint, + pub message: &'ll Twine, + pub instruction: &'ll Value, +} + +impl InlineAsmDiagnostic<'ll> { + unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self { + let mut cookie = 0; + let mut message = None; + let mut instruction = None; + + super::LLVMRustUnpackInlineAsmDiagnostic( + di, + &mut cookie, + &mut message, + &mut instruction, + ); + + InlineAsmDiagnostic { + cookie, + message: message.unwrap(), + instruction: instruction.unwrap(), + } + } +} + +pub enum Diagnostic<'ll> { + Optimization(OptimizationDiagnostic<'ll>), + InlineAsm(InlineAsmDiagnostic<'ll>), + PGO(&'ll DiagnosticInfo), + Linker(&'ll DiagnosticInfo), + + /// LLVM has other types that we do not wrap here. 
+ UnknownDiagnostic(&'ll DiagnosticInfo), +} + +impl Diagnostic<'ll> { + pub unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self { + use super::DiagnosticKind as Dk; + let kind = super::LLVMRustGetDiagInfoKind(di); + + match kind { + Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpack(di)), + + Dk::OptimizationRemark => { + Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di)) + } + Dk::OptimizationRemarkOther => { + Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di)) + } + Dk::OptimizationRemarkMissed => { + Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di)) + } + + Dk::OptimizationRemarkAnalysis => { + Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di)) + } + + Dk::OptimizationRemarkAnalysisFPCommute => { + Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di)) + } + + Dk::OptimizationRemarkAnalysisAliasing => { + Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di)) + } + + Dk::OptimizationFailure => { + Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di)) + } + + Dk::PGOProfile => { + PGO(di) + } + Dk::Linker => { + Linker(di) + } + + _ => UnknownDiagnostic(di), + } + } +} diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs new file mode 100644 index 000000000000..d3039a05b6db --- /dev/null +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -0,0 +1,1641 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// FIXME: Rename 'DIGlobalVariable' to 'DIGlobalVariableExpression' +// once support for LLVM 3.9 is dropped. 
+// +// This method was changed in this LLVM patch: +// https://reviews.llvm.org/D26769 + +use super::debuginfo::{ + DIBuilder, DIDescriptor, DIFile, DILexicalBlock, DISubprogram, DIType, + DIBasicType, DIDerivedType, DICompositeType, DIScope, DIVariable, + DIGlobalVariable, DIArray, DISubrange, DITemplateTypeParameter, DIEnumerator, + DINameSpace, DIFlags, +}; + +use libc::{c_uint, c_int, size_t, c_char}; +use libc::{c_ulonglong, c_void}; + +use std::marker::PhantomData; + +use super::RustString; + +pub type Bool = c_uint; + +pub const True: Bool = 1 as Bool; +pub const False: Bool = 0 as Bool; + +#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +#[allow(dead_code)] // Variants constructed by C++. +pub enum LLVMRustResult { + Success, + Failure, +} +// Consts for the LLVM CallConv type, pre-cast to usize. + +/// LLVM CallingConv::ID. Should we wrap this? +#[derive(Copy, Clone, PartialEq, Debug)] +#[repr(C)] +pub enum CallConv { + CCallConv = 0, + FastCallConv = 8, + ColdCallConv = 9, + X86StdcallCallConv = 64, + X86FastcallCallConv = 65, + ArmAapcsCallConv = 67, + Msp430Intr = 69, + X86_ThisCall = 70, + PtxKernel = 71, + X86_64_SysV = 78, + X86_64_Win64 = 79, + X86_VectorCall = 80, + X86_Intr = 83, + AmdGpuKernel = 91, +} + +/// LLVMRustLinkage +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[repr(C)] +pub enum Linkage { + ExternalLinkage = 0, + AvailableExternallyLinkage = 1, + LinkOnceAnyLinkage = 2, + LinkOnceODRLinkage = 3, + WeakAnyLinkage = 4, + WeakODRLinkage = 5, + AppendingLinkage = 6, + InternalLinkage = 7, + PrivateLinkage = 8, + ExternalWeakLinkage = 9, + CommonLinkage = 10, +} + +// LLVMRustVisibility +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[repr(C)] +pub enum Visibility { + Default = 0, + Hidden = 1, + Protected = 2, +} + +/// LLVMDLLStorageClass +#[derive(Copy, Clone)] +#[repr(C)] +pub enum DLLStorageClass { + #[allow(dead_code)] + Default = 0, + DllImport = 1, // Function to be imported from DLL. 
/// Matches LLVMRustAttribute in rustllvm.h
/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
/// though it is not ABI compatible (since it's a C++ enum)
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum Attribute {
    AlwaysInline = 0,
    ByVal = 1,
    Cold = 2,
    InlineHint = 3,
    MinSize = 4,
    Naked = 5,
    NoAlias = 6,
    NoCapture = 7,
    NoInline = 8,
    NonNull = 9,
    NoRedZone = 10,
    NoReturn = 11,
    NoUnwind = 12,
    OptimizeForSize = 13,
    ReadOnly = 14,
    SExt = 15,
    StructRet = 16,
    UWTable = 17,
    ZExt = 18,
    InReg = 19,
    SanitizeThread = 20,
    SanitizeAddress = 21,
    SanitizeMemory = 22,
}

/// LLVMIntPredicate — discriminants must match LLVM-C's values (32..41).
#[derive(Copy, Clone)]
#[repr(C)]
pub enum IntPredicate {
    IntEQ = 32,
    IntNE = 33,
    IntUGT = 34,
    IntUGE = 35,
    IntULT = 36,
    IntULE = 37,
    IntSGT = 38,
    IntSGE = 39,
    IntSLT = 40,
    IntSLE = 41,
}

/// LLVMRealPredicate
#[derive(Copy, Clone)]
#[repr(C)]
pub enum RealPredicate {
    RealPredicateFalse = 0,
    RealOEQ = 1,
    RealOGT = 2,
    RealOGE = 3,
    RealOLT = 4,
    RealOLE = 5,
    RealONE = 6,
    RealORD = 7,
    RealUNO = 8,
    RealUEQ = 9,
    RealUGT = 10,
    RealUGE = 11,
    RealULT = 12,
    RealULE = 13,
    RealUNE = 14,
    RealPredicateTrue = 15,
}

/// LLVMTypeKind
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum TypeKind {
    Void = 0,
    Half = 1,
    Float = 2,
    Double = 3,
    X86_FP80 = 4,
    FP128 = 5,
    PPC_FP128 = 6,
    Label = 7,
    Integer = 8,
    Function = 9,
    Struct = 10,
    Array = 11,
    Pointer = 12,
    Vector = 13,
    Metadata = 14,
    X86_MMX = 15,
    Token = 16,
}

/// LLVMAtomicRmwBinOp
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicRmwBinOp {
    AtomicXchg = 0,
    AtomicAdd = 1,
    AtomicSub = 2,
    AtomicAnd = 3,
    AtomicNand = 4,
    AtomicOr = 5,
    AtomicXor = 6,
    AtomicMax = 7,
    AtomicMin = 8,
    AtomicUMax = 9,
    AtomicUMin = 10,
}
/// LLVMAtomicOrdering
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicOrdering {
    #[allow(dead_code)]
    NotAtomic = 0,
    Unordered = 1,
    Monotonic = 2,
    // Consume = 3,  // Not specified yet.
    Acquire = 4,
    Release = 5,
    AcquireRelease = 6,
    SequentiallyConsistent = 7,
}

/// LLVMRustSynchronizationScope
#[derive(Copy, Clone)]
#[repr(C)]
pub enum SynchronizationScope {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    SingleThread,
    CrossThread,
}

/// LLVMRustFileType
#[derive(Copy, Clone)]
#[repr(C)]
pub enum FileType {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    AssemblyFile,
    ObjectFile,
}

/// LLVMMetadataType
#[derive(Copy, Clone)]
#[repr(C)]
pub enum MetadataType {
    MD_dbg = 0,
    MD_tbaa = 1,
    MD_prof = 2,
    MD_fpmath = 3,
    MD_range = 4,
    MD_tbaa_struct = 5,
    MD_invariant_load = 6,
    MD_alias_scope = 7,
    MD_noalias = 8,
    MD_nontemporal = 9,
    MD_mem_parallel_loop_access = 10,
    MD_nonnull = 11,
}

/// LLVMRustAsmDialect
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AsmDialect {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    Att,
    Intel,
}

/// LLVMRustCodeGenOptLevel
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptLevel {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    None,
    Less,
    Default,
    Aggressive,
}

/// LLVMRelocMode
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum RelocMode {
    Default,
    Static,
    PIC,
    DynamicNoPic,
    ROPI,
    RWPI,
    ROPI_RWPI,
}

/// LLVMRustCodeModel
#[derive(Copy, Clone)]
#[repr(C)]
pub enum CodeModel {
    // FIXME: figure out if this variant is needed at all.
    #[allow(dead_code)]
    Other,
    Small,
    Kernel,
    Medium,
    Large,
    None,
}
+pub enum DiagnosticKind { + Other, + InlineAsm, + StackSize, + DebugMetadataVersion, + SampleProfile, + OptimizationRemark, + OptimizationRemarkMissed, + OptimizationRemarkAnalysis, + OptimizationRemarkAnalysisFPCommute, + OptimizationRemarkAnalysisAliasing, + OptimizationRemarkOther, + OptimizationFailure, + PGOProfile, + Linker, +} + +/// LLVMRustArchiveKind +#[derive(Copy, Clone)] +#[repr(C)] +pub enum ArchiveKind { + // FIXME: figure out if this variant is needed at all. + #[allow(dead_code)] + Other, + K_GNU, + K_BSD, + K_COFF, +} + +/// LLVMRustPassKind +#[derive(Copy, Clone, PartialEq, Debug)] +#[repr(C)] +#[allow(dead_code)] // Variants constructed by C++. +pub enum PassKind { + Other, + Function, + Module, +} + +/// LLVMRustThinLTOData +extern { pub type ThinLTOData; } + +/// LLVMRustThinLTOBuffer +extern { pub type ThinLTOBuffer; } + +/// LLVMRustThinLTOModule +#[repr(C)] +pub struct ThinLTOModule { + pub identifier: *const c_char, + pub data: *const u8, + pub len: usize, +} + +/// LLVMThreadLocalMode +#[derive(Copy, Clone)] +#[repr(C)] +pub enum ThreadLocalMode { + NotThreadLocal, + GeneralDynamic, + LocalDynamic, + InitialExec, + LocalExec +} + +extern { type Opaque; } +#[repr(C)] +struct InvariantOpaque<'a> { + _marker: PhantomData<&'a mut &'a ()>, + _opaque: Opaque, +} + +// Opaque pointer types +extern { pub type Module; } +extern { pub type Context; } +extern { pub type Type; } +extern { pub type Value; } +extern { pub type Metadata; } +extern { pub type BasicBlock; } +#[repr(C)] +pub struct Builder<'a>(InvariantOpaque<'a>); +extern { pub type MemoryBuffer; } +#[repr(C)] +pub struct PassManager<'a>(InvariantOpaque<'a>); +extern { pub type PassManagerBuilder; } +extern { pub type ObjectFile; } +#[repr(C)] +pub struct SectionIterator<'a>(InvariantOpaque<'a>); +extern { pub type Pass; } +extern { pub type TargetMachine; } +extern { pub type Archive; } +#[repr(C)] +pub struct ArchiveIterator<'a>(InvariantOpaque<'a>); +#[repr(C)] +pub struct 
ArchiveChild<'a>(InvariantOpaque<'a>); +extern { pub type Twine; } +extern { pub type DiagnosticInfo; } +extern { pub type SMDiagnostic; } +#[repr(C)] +pub struct RustArchiveMember<'a>(InvariantOpaque<'a>); +#[repr(C)] +pub struct OperandBundleDef<'a>(InvariantOpaque<'a>); +#[repr(C)] +pub struct Linker<'a>(InvariantOpaque<'a>); + +pub type DiagnosticHandler = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void); +pub type InlineAsmDiagHandler = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint); + + +pub mod debuginfo { + use super::{InvariantOpaque, Metadata}; + + #[repr(C)] + pub struct DIBuilder<'a>(InvariantOpaque<'a>); + + pub type DIDescriptor = Metadata; + pub type DIScope = DIDescriptor; + pub type DIFile = DIScope; + pub type DILexicalBlock = DIScope; + pub type DISubprogram = DIScope; + pub type DINameSpace = DIScope; + pub type DIType = DIDescriptor; + pub type DIBasicType = DIType; + pub type DIDerivedType = DIType; + pub type DICompositeType = DIDerivedType; + pub type DIVariable = DIDescriptor; + pub type DIGlobalVariable = DIDescriptor; + pub type DIArray = DIDescriptor; + pub type DISubrange = DIDescriptor; + pub type DIEnumerator = DIDescriptor; + pub type DITemplateTypeParameter = DIDescriptor; + + // These values **must** match with LLVMRustDIFlags!! + bitflags! 
{ + #[repr(C)] + #[derive(Default)] + pub struct DIFlags: ::libc::uint32_t { + const FlagZero = 0; + const FlagPrivate = 1; + const FlagProtected = 2; + const FlagPublic = 3; + const FlagFwdDecl = (1 << 2); + const FlagAppleBlock = (1 << 3); + const FlagBlockByrefStruct = (1 << 4); + const FlagVirtual = (1 << 5); + const FlagArtificial = (1 << 6); + const FlagExplicit = (1 << 7); + const FlagPrototyped = (1 << 8); + const FlagObjcClassComplete = (1 << 9); + const FlagObjectPointer = (1 << 10); + const FlagVector = (1 << 11); + const FlagStaticMember = (1 << 12); + const FlagLValueReference = (1 << 13); + const FlagRValueReference = (1 << 14); + const FlagExternalTypeRef = (1 << 15); + const FlagIntroducedVirtual = (1 << 18); + const FlagBitField = (1 << 19); + const FlagNoReturn = (1 << 20); + const FlagMainSubprogram = (1 << 21); + } + } +} + +extern { pub type ModuleBuffer; } + +extern "C" { + // Create and destroy contexts. + pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context; + pub fn LLVMContextDispose(C: &'static mut Context); + pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint; + + // Create modules. + pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module; + pub fn LLVMGetModuleContext(M: &Module) -> &Context; + pub fn LLVMCloneModule(M: &Module) -> &Module; + + /// Data layout. See Module::getDataLayout. + pub fn LLVMGetDataLayout(M: &Module) -> *const c_char; + pub fn LLVMSetDataLayout(M: &Module, Triple: *const c_char); + + /// See Module::setModuleInlineAsm. + pub fn LLVMSetModuleInlineAsm(M: &Module, Asm: *const c_char); + pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char); + + /// See llvm::LLVMTypeKind::getTypeID. 
+ pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind; + + // Operations on integer types + pub fn LLVMInt1TypeInContext(C: &Context) -> &Type; + pub fn LLVMInt8TypeInContext(C: &Context) -> &Type; + pub fn LLVMInt16TypeInContext(C: &Context) -> &Type; + pub fn LLVMInt32TypeInContext(C: &Context) -> &Type; + pub fn LLVMInt64TypeInContext(C: &Context) -> &Type; + pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type; + + pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint; + + // Operations on real types + pub fn LLVMFloatTypeInContext(C: &Context) -> &Type; + pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type; + + // Operations on function types + pub fn LLVMFunctionType(ReturnType: &'a Type, + ParamTypes: *const &'a Type, + ParamCount: c_uint, + IsVarArg: Bool) + -> &'a Type; + pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint; + pub fn LLVMGetParamTypes(FunctionTy: &'a Type, Dest: *mut &'a Type); + + // Operations on struct types + pub fn LLVMStructTypeInContext(C: &'a Context, + ElementTypes: *const &'a Type, + ElementCount: c_uint, + Packed: Bool) + -> &'a Type; + + // Operations on array, pointer, and vector types (sequence types) + pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; + pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type; + pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; + + pub fn LLVMGetElementType(Ty: &Type) -> &Type; + pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint; + + // Operations on other types + pub fn LLVMVoidTypeInContext(C: &Context) -> &Type; + pub fn LLVMX86MMXTypeInContext(C: &Context) -> &Type; + pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type; + + // Operations on all values + pub fn LLVMTypeOf(Val: &Value) -> &Type; + pub fn LLVMGetValueName(Val: &Value) -> *const c_char; + pub fn LLVMSetValueName(Val: &Value, Name: *const c_char); + pub fn LLVMReplaceAllUsesWith(OldVal: &'a Value, NewVal: &'a Value); + pub fn 
LLVMSetMetadata(Val: &'a Value, KindID: c_uint, Node: &'a Value); + + // Operations on constants of any type + pub fn LLVMConstNull(Ty: &Type) -> &Value; + pub fn LLVMGetUndef(Ty: &Type) -> &Value; + + // Operations on metadata + pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value; + pub fn LLVMMDNodeInContext(C: &'a Context, Vals: *const &'a Value, Count: c_uint) -> &'a Value; + pub fn LLVMAddNamedMetadataOperand(M: &'a Module, Name: *const c_char, Val: &'a Value); + + // Operations on scalar constants + pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value; + pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value; + pub fn LLVMConstIntGetZExtValue(ConstantVal: &Value) -> c_ulonglong; + pub fn LLVMRustConstInt128Get(ConstantVal: &Value, SExt: bool, + high: &mut u64, low: &mut u64) -> bool; + pub fn LLVMConstRealGetDouble (ConstantVal: &Value, losesInfo: &mut Bool) -> f64; + + + // Operations on composite constants + pub fn LLVMConstStringInContext(C: &Context, + Str: *const c_char, + Length: c_uint, + DontNullTerminate: Bool) + -> &Value; + pub fn LLVMConstStructInContext(C: &'a Context, + ConstantVals: *const &'a Value, + Count: c_uint, + Packed: Bool) + -> &'a Value; + + pub fn LLVMConstArray(ElementTy: &'a Type, + ConstantVals: *const &'a Value, + Length: c_uint) + -> &'a Value; + pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value; + + // Constant expressions + pub fn LLVMConstInBoundsGEP( + ConstantVal: &'a Value, + ConstantIndices: *const &'a Value, + NumIndices: c_uint, + ) -> &'a Value; + pub fn LLVMConstZExt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; + pub fn LLVMConstPtrToInt(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; + pub fn LLVMConstIntToPtr(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; + pub fn LLVMConstBitCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; + pub fn 
LLVMConstPointerCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; + pub fn LLVMConstExtractValue(AggConstant: &Value, + IdxList: *const c_uint, + NumIdx: c_uint) + -> &Value; + + // Operations on global variables, functions, and aliases (globals) + pub fn LLVMIsDeclaration(Global: &Value) -> Bool; + pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage; + pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage); + pub fn LLVMSetSection(Global: &Value, Section: *const c_char); + pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility; + pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility); + pub fn LLVMGetAlignment(Global: &Value) -> c_uint; + pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint); + pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass); + + + // Operations on global variables + pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; + pub fn LLVMAddGlobal(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value; + pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>; + pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type) -> &'a Value; + pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type) -> &'a Value; + pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>; + pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>; + pub fn LLVMDeleteGlobal(GlobalVar: &Value); + pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>; + pub fn LLVMSetInitializer(GlobalVar: &'a Value, ConstantVal: &'a Value); + pub fn LLVMSetThreadLocal(GlobalVar: &Value, IsThreadLocal: Bool); + pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); + pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; + pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); + pub fn LLVMRustGetNamedValue(M: &Module, Name: *const c_char) -> Option<&Value>; + pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); + + // Operations 
on functions + pub fn LLVMRustGetOrInsertFunction(M: &'a Module, + Name: *const c_char, + FunctionTy: &'a Type) + -> &'a Value; + pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint); + pub fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32); + pub fn LLVMRustAddDereferenceableAttr(Fn: &Value, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64); + pub fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute); + pub fn LLVMRustAddFunctionAttrStringValue(Fn: &Value, + index: c_uint, + Name: *const c_char, + Value: *const c_char); + pub fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute); + + // Operations on parameters + pub fn LLVMCountParams(Fn: &Value) -> c_uint; + pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value; + + // Operations on basic blocks + pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value; + pub fn LLVMAppendBasicBlockInContext(C: &'a Context, + Fn: &'a Value, + Name: *const c_char) + -> &'a BasicBlock; + pub fn LLVMDeleteBasicBlock(BB: &BasicBlock); + + // Operations on instructions + pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock; + + // Operations on call sites + pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint); + pub fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute); + pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32); + pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: &Value, + index: c_uint, + bytes: u64); + + // Operations on load/store instructions (only) + pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool); + + // Operations on phi nodes + pub fn LLVMAddIncoming(PhiNode: &'a Value, + IncomingValues: *const &'a Value, + IncomingBlocks: *const &'a BasicBlock, + Count: c_uint); + + // Instruction builders + pub fn 
LLVMCreateBuilderInContext(C: &'a Context) -> &'a mut Builder<'a>; + pub fn LLVMPositionBuilderAtEnd(Builder: &Builder<'a>, Block: &'a BasicBlock); + pub fn LLVMGetInsertBlock(Builder: &Builder<'a>) -> &'a BasicBlock; + pub fn LLVMDisposeBuilder(Builder: &'a mut Builder<'a>); + + // Metadata + pub fn LLVMSetCurrentDebugLocation(Builder: &Builder<'a>, L: Option<&'a Value>); + pub fn LLVMGetCurrentDebugLocation(Builder: &Builder<'a>) -> &'a Value; + pub fn LLVMSetInstDebugLocation(Builder: &Builder<'a>, Inst: &'a Value); + + // Terminators + pub fn LLVMBuildRetVoid(B: &Builder<'a>) -> &'a Value; + pub fn LLVMBuildRet(B: &Builder<'a>, V: &'a Value) -> &'a Value; + pub fn LLVMBuildBr(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value; + pub fn LLVMBuildCondBr(B: &Builder<'a>, + If: &'a Value, + Then: &'a BasicBlock, + Else: &'a BasicBlock) + -> &'a Value; + pub fn LLVMBuildSwitch(B: &Builder<'a>, + V: &'a Value, + Else: &'a BasicBlock, + NumCases: c_uint) + -> &'a Value; + pub fn LLVMRustBuildInvoke(B: &Builder<'a>, + Fn: &'a Value, + Args: *const &'a Value, + NumArgs: c_uint, + Then: &'a BasicBlock, + Catch: &'a BasicBlock, + Bundle: Option<&OperandBundleDef<'a>>, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildLandingPad(B: &Builder<'a>, + Ty: &'a Type, + PersFn: &'a Value, + NumClauses: c_uint, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildResume(B: &Builder<'a>, Exn: &'a Value) -> &'a Value; + pub fn LLVMBuildUnreachable(B: &Builder<'a>) -> &'a Value; + + pub fn LLVMRustBuildCleanupPad(B: &Builder<'a>, + ParentPad: Option<&'a Value>, + ArgCnt: c_uint, + Args: *const &'a Value, + Name: *const c_char) + -> Option<&'a Value>; + pub fn LLVMRustBuildCleanupRet(B: &Builder<'a>, + CleanupPad: &'a Value, + UnwindBB: Option<&'a BasicBlock>) + -> Option<&'a Value>; + pub fn LLVMRustBuildCatchPad(B: &Builder<'a>, + ParentPad: &'a Value, + ArgCnt: c_uint, + Args: *const &'a Value, + Name: *const c_char) + -> Option<&'a Value>; + pub fn 
LLVMRustBuildCatchRet( + B: &Builder<'a>, + Pad: &'a Value, + BB: &'a BasicBlock, + ) -> Option<&'a Value>; + pub fn LLVMRustBuildCatchSwitch(Builder: &Builder<'a>, + ParentPad: Option<&'a Value>, + BB: Option<&'a BasicBlock>, + NumHandlers: c_uint, + Name: *const c_char) + -> Option<&'a Value>; + pub fn LLVMRustAddHandler(CatchSwitch: &'a Value, Handler: &'a BasicBlock); + pub fn LLVMSetPersonalityFn(Func: &'a Value, Pers: &'a Value); + + // Add a case to the switch instruction + pub fn LLVMAddCase(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock); + + // Add a clause to the landing pad instruction + pub fn LLVMAddClause(LandingPad: &'a Value, ClauseVal: &'a Value); + + // Set the cleanup on a landing pad instruction + pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool); + + // Arithmetic + pub fn LLVMBuildAdd(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFAdd(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildSub(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFSub(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildMul(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFMul(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildUDiv(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildExactUDiv(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildSDiv(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildExactSDiv(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFDiv(B: 
&Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildURem(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildSRem(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFRem(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildShl(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildLShr(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildAShr(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildAnd(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildOr(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildXor(B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; + pub fn LLVMBuildFNeg(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; + pub fn LLVMBuildNot(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; + pub fn LLVMRustSetHasUnsafeAlgebra(Instr: &Value); + + // Memory + pub fn LLVMBuildAlloca(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value; + pub fn LLVMBuildArrayAlloca(B: &Builder<'a>, + Ty: &'a Type, + Val: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value; + + pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value; + + pub fn LLVMBuildGEP(B: &Builder<'a>, + Pointer: &'a Value, + Indices: *const &'a Value, + NumIndices: c_uint, + Name: *const c_char) + -> &'a 
Value; + pub fn LLVMBuildInBoundsGEP(B: &Builder<'a>, + Pointer: &'a Value, + Indices: *const &'a Value, + NumIndices: c_uint, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildStructGEP(B: &Builder<'a>, + Pointer: &'a Value, + Idx: c_uint, + Name: *const c_char) + -> &'a Value; + + // Casts + pub fn LLVMBuildTrunc(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildZExt(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildSExt(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFPToUI(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFPToSI(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildUIToFP(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildSIToFP(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFPTrunc(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFPExt(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildPtrToInt(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildIntToPtr(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildBitCast(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildPointerCast(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMRustBuildIntCast(B: &Builder<'a>, + Val: &'a Value, + DestTy: &'a Type, + IsSized: bool) + -> &'a Value; + + // Comparisons + pub fn 
LLVMBuildICmp(B: &Builder<'a>, + Op: c_uint, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildFCmp(B: &Builder<'a>, + Op: c_uint, + LHS: &'a Value, + RHS: &'a Value, + Name: *const c_char) + -> &'a Value; + + // Miscellaneous instructions + pub fn LLVMBuildPhi(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value; + pub fn LLVMRustBuildCall(B: &Builder<'a>, + Fn: &'a Value, + Args: *const &'a Value, + NumArgs: c_uint, + Bundle: Option<&OperandBundleDef<'a>>, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildSelect(B: &Builder<'a>, + If: &'a Value, + Then: &'a Value, + Else: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildVAArg(B: &Builder<'a>, + list: &'a Value, + Ty: &'a Type, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildExtractElement(B: &Builder<'a>, + VecVal: &'a Value, + Index: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildInsertElement(B: &Builder<'a>, + VecVal: &'a Value, + EltVal: &'a Value, + Index: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildShuffleVector(B: &Builder<'a>, + V1: &'a Value, + V2: &'a Value, + Mask: &'a Value, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildExtractValue(B: &Builder<'a>, + AggVal: &'a Value, + Index: c_uint, + Name: *const c_char) + -> &'a Value; + pub fn LLVMBuildInsertValue(B: &Builder<'a>, + AggVal: &'a Value, + EltVal: &'a Value, + Index: c_uint, + Name: *const c_char) + -> &'a Value; + + pub fn LLVMRustBuildVectorReduceFAdd(B: &Builder<'a>, + Acc: &'a Value, + Src: &'a Value) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceFMul(B: &Builder<'a>, + Acc: &'a Value, + Src: &'a Value) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceAdd(B: &Builder<'a>, + Src: &'a Value) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceMul(B: &Builder<'a>, + Src: &'a Value) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceAnd(B: &Builder<'a>, + Src: &'a Value) 
+ -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceOr(B: &Builder<'a>, + Src: &'a Value) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceXor(B: &Builder<'a>, + Src: &'a Value) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceMin(B: &Builder<'a>, + Src: &'a Value, + IsSigned: bool) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceMax(B: &Builder<'a>, + Src: &'a Value, + IsSigned: bool) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceFMin(B: &Builder<'a>, + Src: &'a Value, + IsNaN: bool) + -> Option<&'a Value>; + pub fn LLVMRustBuildVectorReduceFMax(B: &Builder<'a>, + Src: &'a Value, + IsNaN: bool) + -> Option<&'a Value>; + + pub fn LLVMRustBuildMinNum( + B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + ) -> Option<&'a Value>; + pub fn LLVMRustBuildMaxNum( + B: &Builder<'a>, + LHS: &'a Value, + RHS: &'a Value, + ) -> Option<&'a Value>; + + // Atomic Operations + pub fn LLVMRustBuildAtomicLoad(B: &Builder<'a>, + PointerVal: &'a Value, + Name: *const c_char, + Order: AtomicOrdering) + -> &'a Value; + + pub fn LLVMRustBuildAtomicStore(B: &Builder<'a>, + Val: &'a Value, + Ptr: &'a Value, + Order: AtomicOrdering) + -> &'a Value; + + pub fn LLVMRustBuildAtomicCmpXchg(B: &Builder<'a>, + LHS: &'a Value, + CMP: &'a Value, + RHS: &'a Value, + Order: AtomicOrdering, + FailureOrder: AtomicOrdering, + Weak: Bool) + -> &'a Value; + + pub fn LLVMBuildAtomicRMW(B: &Builder<'a>, + Op: AtomicRmwBinOp, + LHS: &'a Value, + RHS: &'a Value, + Order: AtomicOrdering, + SingleThreaded: Bool) + -> &'a Value; + + pub fn LLVMRustBuildAtomicFence(B: &Builder, + Order: AtomicOrdering, + Scope: SynchronizationScope); + + /// Writes a module to the specified path. Returns 0 on success. + pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int; + + /// Creates a pass manager.
+ pub fn LLVMCreatePassManager() -> &'a mut PassManager<'a>; + + /// Creates a function-by-function pass manager + pub fn LLVMCreateFunctionPassManagerForModule(M: &'a Module) -> &'a mut PassManager<'a>; + + /// Disposes a pass manager. + pub fn LLVMDisposePassManager(PM: &'a mut PassManager<'a>); + + /// Runs a pass manager on a module. + pub fn LLVMRunPassManager(PM: &PassManager<'a>, M: &'a Module) -> Bool; + + pub fn LLVMInitializePasses(); + + pub fn LLVMPassManagerBuilderCreate() -> &'static mut PassManagerBuilder; + pub fn LLVMPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder); + pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: &PassManagerBuilder, Value: Bool); + pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: &PassManagerBuilder, Value: Bool); + pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB: &PassManagerBuilder, + threshold: c_uint); + pub fn LLVMPassManagerBuilderPopulateModulePassManager(PMB: &PassManagerBuilder, + PM: &PassManager); + + pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB: &PassManagerBuilder, + PM: &PassManager); + pub fn LLVMPassManagerBuilderPopulateLTOPassManager(PMB: &PassManagerBuilder, + PM: &PassManager, + Internalize: Bool, + RunInliner: Bool); + pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager( + PMB: &PassManagerBuilder, + PM: &PassManager) -> bool; + + // Stuff that's in rustllvm/ because it's not upstream yet. + + /// Opens an object file. + pub fn LLVMCreateObjectFile( + MemBuf: &'static mut MemoryBuffer, + ) -> Option<&'static mut ObjectFile>; + /// Closes an object file. + pub fn LLVMDisposeObjectFile(ObjFile: &'static mut ObjectFile); + + /// Enumerates the sections in an object file. + pub fn LLVMGetSections(ObjFile: &'a ObjectFile) -> &'a mut SectionIterator<'a>; + /// Destroys a section iterator. 
+ pub fn LLVMDisposeSectionIterator(SI: &'a mut SectionIterator<'a>); + /// Returns true if the section iterator is at the end of the section + /// list: + pub fn LLVMIsSectionIteratorAtEnd(ObjFile: &'a ObjectFile, SI: &SectionIterator<'a>) -> Bool; + /// Moves the section iterator to point to the next section. + pub fn LLVMMoveToNextSection(SI: &SectionIterator); + /// Returns the current section size. + pub fn LLVMGetSectionSize(SI: &SectionIterator) -> c_ulonglong; + /// Returns the current section contents as a string buffer. + pub fn LLVMGetSectionContents(SI: &SectionIterator) -> *const c_char; + + /// Reads the given file and returns it as a memory buffer. Use + /// LLVMDisposeMemoryBuffer() to get rid of it. + pub fn LLVMRustCreateMemoryBufferWithContentsOfFile( + Path: *const c_char, + ) -> Option<&'static mut MemoryBuffer>; + + pub fn LLVMStartMultithreaded() -> Bool; + + /// Returns a string describing the last error caused by an LLVMRust* call. + pub fn LLVMRustGetLastError() -> *const c_char; + + /// Print the pass timings since static dtors aren't picking them up. + pub fn LLVMRustPrintPassTimings(); + + pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type; + + pub fn LLVMStructSetBody(StructTy: &'a Type, + ElementTypes: *const &'a Type, + ElementCount: c_uint, + Packed: Bool); + + /// Prepares inline assembly. 
+ pub fn LLVMRustInlineAsm(Ty: &Type, + AsmString: *const c_char, + Constraints: *const c_char, + SideEffects: Bool, + AlignStack: Bool, + Dialect: AsmDialect) + -> &Value; + + pub fn LLVMRustDebugMetadataVersion() -> u32; + pub fn LLVMRustVersionMajor() -> u32; + pub fn LLVMRustVersionMinor() -> u32; + + pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32); + + pub fn LLVMRustMetadataAsValue(C: &'a Context, MD: &'a Metadata) -> &'a Value; + + pub fn LLVMRustDIBuilderCreate(M: &'a Module) -> &'a mut DIBuilder<'a>; + + pub fn LLVMRustDIBuilderDispose(Builder: &'a mut DIBuilder<'a>); + + pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder); + + pub fn LLVMRustDIBuilderCreateCompileUnit(Builder: &DIBuilder<'a>, + Lang: c_uint, + File: &'a DIFile, + Producer: *const c_char, + isOptimized: bool, + Flags: *const c_char, + RuntimeVer: c_uint, + SplitName: *const c_char) + -> &'a DIDescriptor; + + pub fn LLVMRustDIBuilderCreateFile(Builder: &DIBuilder<'a>, + Filename: *const c_char, + Directory: *const c_char) + -> &'a DIFile; + + pub fn LLVMRustDIBuilderCreateSubroutineType(Builder: &DIBuilder<'a>, + File: &'a DIFile, + ParameterTypes: &'a DIArray) + -> &'a DICompositeType; + + pub fn LLVMRustDIBuilderCreateFunction(Builder: &DIBuilder<'a>, + Scope: &'a DIDescriptor, + Name: *const c_char, + LinkageName: *const c_char, + File: &'a DIFile, + LineNo: c_uint, + Ty: &'a DIType, + isLocalToUnit: bool, + isDefinition: bool, + ScopeLine: c_uint, + Flags: DIFlags, + isOptimized: bool, + Fn: &'a Value, + TParam: &'a DIArray, + Decl: Option<&'a DIDescriptor>) + -> &'a DISubprogram; + + pub fn LLVMRustDIBuilderCreateBasicType(Builder: &DIBuilder<'a>, + Name: *const c_char, + SizeInBits: u64, + AlignInBits: u32, + Encoding: c_uint) + -> &'a DIBasicType; + + pub fn LLVMRustDIBuilderCreatePointerType(Builder: &DIBuilder<'a>, + PointeeTy: &'a DIType, + SizeInBits: u64, + AlignInBits: u32, + Name: *const c_char) + -> &'a DIDerivedType; + + pub fn 
LLVMRustDIBuilderCreateStructType(Builder: &DIBuilder<'a>, + Scope: Option<&'a DIDescriptor>, + Name: *const c_char, + File: &'a DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u32, + Flags: DIFlags, + DerivedFrom: Option<&'a DIType>, + Elements: &'a DIArray, + RunTimeLang: c_uint, + VTableHolder: Option<&'a DIType>, + UniqueId: *const c_char) + -> &'a DICompositeType; + + pub fn LLVMRustDIBuilderCreateMemberType(Builder: &DIBuilder<'a>, + Scope: &'a DIDescriptor, + Name: *const c_char, + File: &'a DIFile, + LineNo: c_uint, + SizeInBits: u64, + AlignInBits: u32, + OffsetInBits: u64, + Flags: DIFlags, + Ty: &'a DIType) + -> &'a DIDerivedType; + + pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: &DIBuilder<'a>, + Scope: &'a DIScope, + File: &'a DIFile, + Line: c_uint, + Col: c_uint) + -> &'a DILexicalBlock; + + pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: &DIBuilder<'a>, + Scope: &'a DIScope, + File: &'a DIFile) + -> &'a DILexicalBlock; + + pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: &DIBuilder<'a>, + Context: Option<&'a DIScope>, + Name: *const c_char, + LinkageName: *const c_char, + File: &'a DIFile, + LineNo: c_uint, + Ty: &'a DIType, + isLocalToUnit: bool, + Val: &'a Value, + Decl: Option<&'a DIDescriptor>, + AlignInBits: u32) + -> &'a DIGlobalVariable; + + pub fn LLVMRustDIBuilderCreateVariable(Builder: &DIBuilder<'a>, + Tag: c_uint, + Scope: &'a DIDescriptor, + Name: *const c_char, + File: &'a DIFile, + LineNo: c_uint, + Ty: &'a DIType, + AlwaysPreserve: bool, + Flags: DIFlags, + ArgNo: c_uint, + AlignInBits: u32) + -> &'a DIVariable; + + pub fn LLVMRustDIBuilderCreateArrayType(Builder: &DIBuilder<'a>, + Size: u64, + AlignInBits: u32, + Ty: &'a DIType, + Subscripts: &'a DIArray) + -> &'a DIType; + + pub fn LLVMRustDIBuilderGetOrCreateSubrange(Builder: &DIBuilder<'a>, + Lo: i64, + Count: i64) + -> &'a DISubrange; + + pub fn LLVMRustDIBuilderGetOrCreateArray(Builder: &DIBuilder<'a>, + Ptr: *const Option<&'a DIDescriptor>, + 
Count: c_uint) + -> &'a DIArray; + + pub fn LLVMRustDIBuilderInsertDeclareAtEnd(Builder: &DIBuilder<'a>, + Val: &'a Value, + VarInfo: &'a DIVariable, + AddrOps: *const i64, + AddrOpsCount: c_uint, + DL: &'a Value, + InsertAtEnd: &'a BasicBlock) + -> &'a Value; + + pub fn LLVMRustDIBuilderCreateEnumerator(Builder: &DIBuilder<'a>, + Name: *const c_char, + Val: u64) + -> &'a DIEnumerator; + + pub fn LLVMRustDIBuilderCreateEnumerationType(Builder: &DIBuilder<'a>, + Scope: &'a DIScope, + Name: *const c_char, + File: &'a DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u32, + Elements: &'a DIArray, + ClassType: &'a DIType) + -> &'a DIType; + + pub fn LLVMRustDIBuilderCreateUnionType(Builder: &DIBuilder<'a>, + Scope: &'a DIScope, + Name: *const c_char, + File: &'a DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u32, + Flags: DIFlags, + Elements: Option<&'a DIArray>, + RunTimeLang: c_uint, + UniqueId: *const c_char) + -> &'a DIType; + + pub fn LLVMSetUnnamedAddr(GlobalVar: &Value, UnnamedAddr: Bool); + + pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: &DIBuilder<'a>, + Scope: Option<&'a DIScope>, + Name: *const c_char, + Ty: &'a DIType, + File: &'a DIFile, + LineNo: c_uint, + ColumnNo: c_uint) + -> &'a DITemplateTypeParameter; + + + pub fn LLVMRustDIBuilderCreateNameSpace(Builder: &DIBuilder<'a>, + Scope: Option<&'a DIScope>, + Name: *const c_char, + File: &'a DIFile, + LineNo: c_uint) + -> &'a DINameSpace; + + pub fn LLVMRustDICompositeTypeSetTypeArray(Builder: &DIBuilder<'a>, + CompositeType: &'a DIType, + TypeArray: &'a DIArray); + + + pub fn LLVMRustDIBuilderCreateDebugLocation(Context: &'a Context, + Line: c_uint, + Column: c_uint, + Scope: &'a DIScope, + InlinedAt: Option<&'a Metadata>) + -> &'a Value; + pub fn LLVMRustDIBuilderCreateOpDeref() -> i64; + pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64; +} + +#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. 
+extern "C" { + pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString); + pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString); +} + +extern "C" { + pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&Value>; + pub fn LLVMIsAConstantFP(value_ref: &Value) -> Option<&Value>; + + pub fn LLVMRustPassKind(Pass: &Pass) -> PassKind; + pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>; + pub fn LLVMRustAddPass(PM: &PassManager, Pass: &'static mut Pass); + + pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool; + + pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine); + pub fn LLVMRustPrintTargetFeatures(T: &TargetMachine); + + pub fn LLVMRustCreateTargetMachine(Triple: *const c_char, + CPU: *const c_char, + Features: *const c_char, + Model: CodeModel, + Reloc: RelocMode, + Level: CodeGenOptLevel, + UseSoftFP: bool, + PositionIndependentExecutable: bool, + FunctionSections: bool, + DataSections: bool, + TrapUnreachable: bool, + Singlethread: bool, + AsmComments: bool) + -> Option<&'static mut TargetMachine>; + pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine); + pub fn LLVMRustAddAnalysisPasses(T: &'a TargetMachine, PM: &PassManager<'a>, M: &'a Module); + pub fn LLVMRustAddBuilderLibraryInfo(PMB: &'a PassManagerBuilder, + M: &'a Module, + DisableSimplifyLibCalls: bool); + pub fn LLVMRustConfigurePassManagerBuilder(PMB: &PassManagerBuilder, + OptLevel: CodeGenOptLevel, + MergeFunctions: bool, + SLPVectorize: bool, + LoopVectorize: bool, + PrepareForThinLTO: bool, + PGOGenPath: *const c_char, + PGOUsePath: *const c_char); + pub fn LLVMRustAddLibraryInfo(PM: &PassManager<'a>, + M: &'a Module, + DisableSimplifyLibCalls: bool); + pub fn LLVMRustRunFunctionPassManager(PM: &PassManager<'a>, M: &'a Module); + pub fn LLVMRustWriteOutputFile(T: &'a TargetMachine, + PM: &PassManager<'a>, + M: &'a Module, + Output: *const c_char, + FileType: FileType) + -> LLVMRustResult; + pub fn 
LLVMRustPrintModule(PM: &PassManager<'a>, + M: &'a Module, + Output: *const c_char, + Demangle: extern fn(*const c_char, + size_t, + *mut c_char, + size_t) -> size_t); + pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char); + pub fn LLVMRustPrintPasses(); + pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char); + pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool); + pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t); + pub fn LLVMRustMarkAllFunctionsNounwind(M: &Module); + + pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>; + pub fn LLVMRustArchiveIteratorNew(AR: &'a Archive) -> &'a mut ArchiveIterator<'a>; + pub fn LLVMRustArchiveIteratorNext( + AIR: &ArchiveIterator<'a>, + ) -> Option<&'a mut ArchiveChild<'a>>; + pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild, size: &mut size_t) -> *const c_char; + pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild, size: &mut size_t) -> *const c_char; + pub fn LLVMRustArchiveChildFree(ACR: &'a mut ArchiveChild<'a>); + pub fn LLVMRustArchiveIteratorFree(AIR: &'a mut ArchiveIterator<'a>); + pub fn LLVMRustDestroyArchive(AR: &'static mut Archive); + + pub fn LLVMRustGetSectionName(SI: &SectionIterator, data: &mut *const c_char) -> size_t; +} + +#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. +extern "C" { + pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString); +} + +extern "C" { + pub fn LLVMContextSetDiagnosticHandler(C: &Context, + Handler: DiagnosticHandler, + DiagnosticContext: *mut c_void); +} + +#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. 
+extern "C" { + pub fn LLVMRustUnpackOptimizationDiagnostic(DI: &'a DiagnosticInfo, + pass_name_out: &RustString, + function_out: &mut Option<&'a Value>, + loc_line_out: &mut c_uint, + loc_column_out: &mut c_uint, + loc_filename_out: &RustString, + message_out: &RustString); +} + +extern "C" { + pub fn LLVMRustUnpackInlineAsmDiagnostic(DI: &'a DiagnosticInfo, + cookie_out: &mut c_uint, + message_out: &mut Option<&'a Twine>, + instruction_out: &mut Option<&'a Value>); +} + +#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. +extern "C" { + pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString); +} + +extern "C" { + pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind; + + pub fn LLVMRustSetInlineAsmDiagnosticHandler(C: &Context, + H: InlineAsmDiagHandler, + CX: *mut c_void); +} + +#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. +extern "C" { + pub fn LLVMRustWriteSMDiagnosticToString(d: &SMDiagnostic, s: &RustString); +} + +extern "C" { + pub fn LLVMRustWriteArchive(Dst: *const c_char, + NumMembers: size_t, + Members: *const &RustArchiveMember, + WriteSymbtab: bool, + Kind: ArchiveKind) + -> LLVMRustResult; + pub fn LLVMRustArchiveMemberNew(Filename: *const c_char, + Name: *const c_char, + Child: Option<&ArchiveChild<'a>>) + -> &'a mut RustArchiveMember<'a>; + pub fn LLVMRustArchiveMemberFree(Member: &'a mut RustArchiveMember<'a>); + + pub fn LLVMRustSetDataLayoutFromTargetMachine(M: &'a Module, TM: &'a TargetMachine); + + pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char, + Inputs: *const &'a Value, + NumInputs: c_uint) + -> &'a mut OperandBundleDef<'a>; + pub fn LLVMRustFreeOperandBundleDef(Bundle: &'a mut OperandBundleDef<'a>); + + pub fn LLVMRustPositionBuilderAtStart(B: &Builder<'a>, BB: &'a BasicBlock); + + pub fn LLVMRustSetComdat(M: &'a Module, V: &'a Value, Name: *const c_char); + pub fn LLVMRustUnsetComdat(V: &Value); + pub fn LLVMRustSetModulePIELevel(M: &Module); + pub fn 
LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer; + pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8; + pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize; + pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer); + pub fn LLVMRustModuleCost(M: &Module) -> u64; + + pub fn LLVMRustThinLTOAvailable() -> bool; + pub fn LLVMRustPGOAvailable() -> bool; + pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> &'static mut ThinLTOBuffer; + pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer); + pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char; + pub fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t; + pub fn LLVMRustCreateThinLTOData( + Modules: *const ThinLTOModule, + NumModules: c_uint, + PreservedSymbols: *const *const c_char, + PreservedSymbolsLen: c_uint, + ) -> Option<&'static mut ThinLTOData>; + pub fn LLVMRustPrepareThinLTORename( + Data: &ThinLTOData, + Module: &Module, + ) -> bool; + pub fn LLVMRustPrepareThinLTOResolveWeak( + Data: &ThinLTOData, + Module: &Module, + ) -> bool; + pub fn LLVMRustPrepareThinLTOInternalize( + Data: &ThinLTOData, + Module: &Module, + ) -> bool; + pub fn LLVMRustPrepareThinLTOImport( + Data: &ThinLTOData, + Module: &Module, + ) -> bool; + pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData); + pub fn LLVMRustParseBitcodeForThinLTO( + Context: &Context, + Data: *const u8, + len: usize, + Identifier: *const c_char, + ) -> Option<&Module>; + pub fn LLVMRustThinLTOGetDICompileUnit(M: &Module, + CU1: &mut *mut c_void, + CU2: &mut *mut c_void); + pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void); + + pub fn LLVMRustLinkerNew(M: &'a Module) -> &'a mut Linker<'a>; + pub fn LLVMRustLinkerAdd(linker: &Linker, + bytecode: *const c_char, + bytecode_len: usize) -> bool; + pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>); +} diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs new file mode 100644 
index 000000000000..4343c8c184ec --- /dev/null +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -0,0 +1,282 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![deny(bare_trait_objects)] + +pub use self::IntPredicate::*; +pub use self::RealPredicate::*; +pub use self::AtomicRmwBinOp::*; +pub use self::MetadataType::*; +pub use self::CodeGenOptSize::*; +pub use self::CallConv::*; +pub use self::Linkage::*; + +use std::str::FromStr; +use std::string::FromUtf8Error; +use std::slice; +use std::ffi::CStr; +use std::cell::RefCell; +use libc::{self, c_uint, c_char, size_t}; +use rustc_data_structures::small_c_str::SmallCStr; + +pub mod archive_ro; +pub mod diagnostic; +mod ffi; + +pub use self::ffi::*; + +impl LLVMRustResult { + pub fn into_result(self) -> Result<(), ()> { + match self { + LLVMRustResult::Success => Ok(()), + LLVMRustResult::Failure => Err(()), + } + } +} + +pub fn AddFunctionAttrStringValue(llfn: &'a Value, + idx: AttributePlace, + attr: &CStr, + value: &CStr) { + unsafe { + LLVMRustAddFunctionAttrStringValue(llfn, + idx.as_uint(), + attr.as_ptr(), + value.as_ptr()) + } +} + +#[derive(Copy, Clone)] +pub enum AttributePlace { + ReturnValue, + Argument(u32), + Function, +} + +impl AttributePlace { + pub fn as_uint(self) -> c_uint { + match self { + AttributePlace::ReturnValue => 0, + AttributePlace::Argument(i) => 1 + i, + AttributePlace::Function => !0, + } + } +} + +#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +pub enum CodeGenOptSize { + CodeGenOptSizeNone = 0, + CodeGenOptSizeDefault = 1, + CodeGenOptSizeAggressive = 2, +} + +impl FromStr for 
ArchiveKind { + type Err = (); + + fn from_str(s: &str) -> Result<Self, ()> { + match s { + "gnu" => Ok(ArchiveKind::K_GNU), + "bsd" => Ok(ArchiveKind::K_BSD), + "coff" => Ok(ArchiveKind::K_COFF), + _ => Err(()), + } + } +} + +#[repr(C)] +pub struct RustString { + bytes: RefCell<Vec<u8>>, +} + +/// Appending to a Rust string -- used by RawRustStringOstream. +#[no_mangle] +pub unsafe extern "C" fn LLVMRustStringWriteImpl(sr: &RustString, + ptr: *const c_char, + size: size_t) { + let slice = slice::from_raw_parts(ptr as *const u8, size as usize); + + sr.bytes.borrow_mut().extend_from_slice(slice); +} + +pub fn SetInstructionCallConv(instr: &'a Value, cc: CallConv) { + unsafe { + LLVMSetInstructionCallConv(instr, cc as c_uint); + } +} +pub fn SetFunctionCallConv(fn_: &'a Value, cc: CallConv) { + unsafe { + LLVMSetFunctionCallConv(fn_, cc as c_uint); + } +} + +// Externally visible symbols that might appear in multiple codegen units need to appear in +// their own comdat section so that the duplicates can be discarded at link time. This can for +// example happen for generics when using multiple codegen units. This function simply uses the +// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the +// function. +// For more details on COMDAT sections see e.g.
http://www.airs.com/blog/archives/52 +pub fn SetUniqueComdat(llmod: &Module, val: &'a Value) { + unsafe { + LLVMRustSetComdat(llmod, val, LLVMGetValueName(val)); + } +} + +pub fn UnsetComdat(val: &'a Value) { + unsafe { + LLVMRustUnsetComdat(val); + } +} + +pub fn SetUnnamedAddr(global: &'a Value, unnamed: bool) { + unsafe { + LLVMSetUnnamedAddr(global, unnamed as Bool); + } +} + +pub fn set_thread_local(global: &'a Value, is_thread_local: bool) { + unsafe { + LLVMSetThreadLocal(global, is_thread_local as Bool); + } +} +pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) { + unsafe { + LLVMSetThreadLocalMode(global, mode); + } +} + +impl Attribute { + pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) { + unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) } + } + + pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) { + unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) } + } + + pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) { + unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) } + } + + pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) { + if set { + self.apply_llfn(idx, llfn); + } else { + self.unapply_llfn(idx, llfn); + } + } +} + +// Memory-managed interface to object files. + +pub struct ObjectFile { + pub llof: &'static mut ffi::ObjectFile, +} + +unsafe impl Send for ObjectFile {} + +impl ObjectFile { + // This will take ownership of llmb + pub fn new(llmb: &'static mut MemoryBuffer) -> Option { + unsafe { + let llof = LLVMCreateObjectFile(llmb)?; + Some(ObjectFile { llof: llof }) + } + } +} + +impl Drop for ObjectFile { + fn drop(&mut self) { + unsafe { + LLVMDisposeObjectFile(&mut *(self.llof as *mut _)); + } + } +} + +// Memory-managed interface to section iterators. 
+ +pub struct SectionIter<'a> { + pub llsi: &'a mut SectionIterator<'a>, +} + +impl Drop for SectionIter<'a> { + fn drop(&mut self) { + unsafe { + LLVMDisposeSectionIterator(&mut *(self.llsi as *mut _)); + } + } +} + +pub fn mk_section_iter(llof: &'a ffi::ObjectFile) -> SectionIter<'a> { + unsafe { SectionIter { llsi: LLVMGetSections(llof) } } +} + +/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun. +pub fn get_param(llfn: &'a Value, index: c_uint) -> &'a Value { + unsafe { + assert!(index < LLVMCountParams(llfn), + "out of bounds argument access: {} out of {} arguments", index, LLVMCountParams(llfn)); + LLVMGetParam(llfn, index) + } +} + +pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> { + let sr = RustString { + bytes: RefCell::new(Vec::new()), + }; + f(&sr); + String::from_utf8(sr.bytes.into_inner()) +} + +pub fn twine_to_string(tr: &Twine) -> String { + unsafe { + build_string(|s| LLVMRustWriteTwineToString(tr, s)) + .expect("got a non-UTF8 Twine from LLVM") + } +} + +pub fn last_error() -> Option<String> { + unsafe { + let cstr = LLVMRustGetLastError(); + if cstr.is_null() { + None + } else { + let err = CStr::from_ptr(cstr).to_bytes(); + let err = String::from_utf8_lossy(err).to_string(); + libc::free(cstr as *mut _); + Some(err) + } + } +} + +pub struct OperandBundleDef<'a> { + pub raw: &'a mut ffi::OperandBundleDef<'a>, +} + +impl OperandBundleDef<'a> { + pub fn new(name: &str, vals: &[&'a Value]) -> Self { + let name = SmallCStr::new(name); + let def = unsafe { + LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint) + }; + OperandBundleDef { raw: def } + } +} + +impl Drop for OperandBundleDef<'a> { + fn drop(&mut self) { + unsafe { + LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _)); + } + } +} diff --git a/src/librustc_codegen_llvm/llvm_util.rs b/src/librustc_codegen_llvm/llvm_util.rs new file mode 100644 index 000000000000..ff26e0f35f00 --- /dev/null +++ b/src/librustc_codegen_llvm/llvm_util.rs @@
-0,0 +1,264 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax_pos::symbol::Symbol; +use back::write::create_target_machine; +use llvm; +use rustc::session::Session; +use rustc::session::config::PrintRequest; +use libc::c_int; +use std::ffi::CString; +use syntax::feature_gate::UnstableFeatures; + +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Once; + +static POISONED: AtomicBool = AtomicBool::new(false); +static INIT: Once = Once::new(); + +pub(crate) fn init(sess: &Session) { + unsafe { + // Before we touch LLVM, make sure that multithreading is enabled. + INIT.call_once(|| { + if llvm::LLVMStartMultithreaded() != 1 { + // use an extra bool to make sure that all future usage of LLVM + // cannot proceed despite the Once not running more than once. 
+ POISONED.store(true, Ordering::SeqCst); + } + + configure_llvm(sess); + }); + + if POISONED.load(Ordering::SeqCst) { + bug!("couldn't enable multi-threaded LLVM"); + } + } +} + +fn require_inited() { + INIT.call_once(|| bug!("llvm is not initialized")); + if POISONED.load(Ordering::SeqCst) { + bug!("couldn't enable multi-threaded LLVM"); + } +} + +unsafe fn configure_llvm(sess: &Session) { + let mut llvm_c_strs = Vec::new(); + let mut llvm_args = Vec::new(); + + { + let mut add = |arg: &str| { + let s = CString::new(arg).unwrap(); + llvm_args.push(s.as_ptr()); + llvm_c_strs.push(s); + }; + add("rustc"); // fake program name + if sess.time_llvm_passes() { add("-time-passes"); } + if sess.print_llvm_passes() { add("-debug-pass=Structure"); } + if sess.opts.debugging_opts.disable_instrumentation_preinliner { + add("-disable-preinline"); + } + + for arg in &sess.opts.cg.llvm_args { + add(&(*arg)); + } + } + + llvm::LLVMInitializePasses(); + + ::rustc_llvm::initialize_available_targets(); + + llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, + llvm_args.as_ptr()); +} + +// WARNING: the features after applying `to_llvm_feature` must be known +// to LLVM or the feature detection code will walk past the end of the feature +// array, leading to crashes. 
+ +const ARM_WHITELIST: &[(&str, Option<&str>)] = &[ + ("mclass", Some("arm_target_feature")), + ("rclass", Some("arm_target_feature")), + ("dsp", Some("arm_target_feature")), + ("neon", Some("arm_target_feature")), + ("v7", Some("arm_target_feature")), + ("vfp2", Some("arm_target_feature")), + ("vfp3", Some("arm_target_feature")), + ("vfp4", Some("arm_target_feature")), +]; + +const AARCH64_WHITELIST: &[(&str, Option<&str>)] = &[ + ("fp", Some("aarch64_target_feature")), + ("neon", Some("aarch64_target_feature")), + ("sve", Some("aarch64_target_feature")), + ("crc", Some("aarch64_target_feature")), + ("crypto", Some("aarch64_target_feature")), + ("ras", Some("aarch64_target_feature")), + ("lse", Some("aarch64_target_feature")), + ("rdm", Some("aarch64_target_feature")), + ("fp16", Some("aarch64_target_feature")), + ("rcpc", Some("aarch64_target_feature")), + ("dotprod", Some("aarch64_target_feature")), + ("v8.1a", Some("aarch64_target_feature")), + ("v8.2a", Some("aarch64_target_feature")), + ("v8.3a", Some("aarch64_target_feature")), +]; + +const X86_WHITELIST: &[(&str, Option<&str>)] = &[ + ("aes", None), + ("avx", None), + ("avx2", None), + ("avx512bw", Some("avx512_target_feature")), + ("avx512cd", Some("avx512_target_feature")), + ("avx512dq", Some("avx512_target_feature")), + ("avx512er", Some("avx512_target_feature")), + ("avx512f", Some("avx512_target_feature")), + ("avx512ifma", Some("avx512_target_feature")), + ("avx512pf", Some("avx512_target_feature")), + ("avx512vbmi", Some("avx512_target_feature")), + ("avx512vl", Some("avx512_target_feature")), + ("avx512vpopcntdq", Some("avx512_target_feature")), + ("bmi1", None), + ("bmi2", None), + ("fma", None), + ("fxsr", None), + ("lzcnt", None), + ("mmx", Some("mmx_target_feature")), + ("pclmulqdq", None), + ("popcnt", None), + ("rdrand", None), + ("rdseed", None), + ("sha", None), + ("sse", None), + ("sse2", None), + ("sse3", None), + ("sse4.1", None), + ("sse4.2", None), + ("sse4a", 
Some("sse4a_target_feature")), + ("ssse3", None), + ("tbm", Some("tbm_target_feature")), + ("xsave", None), + ("xsavec", None), + ("xsaveopt", None), + ("xsaves", None), +]; + +const HEXAGON_WHITELIST: &[(&str, Option<&str>)] = &[ + ("hvx", Some("hexagon_target_feature")), + ("hvx-double", Some("hexagon_target_feature")), +]; + +const POWERPC_WHITELIST: &[(&str, Option<&str>)] = &[ + ("altivec", Some("powerpc_target_feature")), + ("power8-altivec", Some("powerpc_target_feature")), + ("power9-altivec", Some("powerpc_target_feature")), + ("power8-vector", Some("powerpc_target_feature")), + ("power9-vector", Some("powerpc_target_feature")), + ("vsx", Some("powerpc_target_feature")), +]; + +const MIPS_WHITELIST: &[(&str, Option<&str>)] = &[ + ("fp64", Some("mips_target_feature")), + ("msa", Some("mips_target_feature")), +]; + +const WASM_WHITELIST: &[(&str, Option<&str>)] = &[ + ("simd128", Some("wasm_target_feature")), +]; + +/// When rustdoc is running, provide a list of all known features so that all their respective +/// primtives may be documented. +/// +/// IMPORTANT: If you're adding another whitelist to the above lists, make sure to add it to this +/// iterator! 
+pub fn all_known_features() -> impl Iterator)> { + ARM_WHITELIST.iter().cloned() + .chain(AARCH64_WHITELIST.iter().cloned()) + .chain(X86_WHITELIST.iter().cloned()) + .chain(HEXAGON_WHITELIST.iter().cloned()) + .chain(POWERPC_WHITELIST.iter().cloned()) + .chain(MIPS_WHITELIST.iter().cloned()) + .chain(WASM_WHITELIST.iter().cloned()) +} + +pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str { + let arch = if sess.target.target.arch == "x86_64" { + "x86" + } else { + &*sess.target.target.arch + }; + match (arch, s) { + ("x86", "pclmulqdq") => "pclmul", + ("x86", "rdrand") => "rdrnd", + ("x86", "bmi1") => "bmi", + ("aarch64", "fp") => "fp-armv8", + ("aarch64", "fp16") => "fullfp16", + (_, s) => s, + } +} + +pub fn target_features(sess: &Session) -> Vec { + let target_machine = create_target_machine(sess, true); + target_feature_whitelist(sess) + .iter() + .filter_map(|&(feature, gate)| { + if UnstableFeatures::from_environment().is_nightly_build() || gate.is_none() { + Some(feature) + } else { + None + } + }) + .filter(|feature| { + let llvm_feature = to_llvm_feature(sess, feature); + let cstr = CString::new(llvm_feature).unwrap(); + unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } + }) + .map(|feature| Symbol::intern(feature)).collect() +} + +pub fn target_feature_whitelist(sess: &Session) + -> &'static [(&'static str, Option<&'static str>)] +{ + match &*sess.target.target.arch { + "arm" => ARM_WHITELIST, + "aarch64" => AARCH64_WHITELIST, + "x86" | "x86_64" => X86_WHITELIST, + "hexagon" => HEXAGON_WHITELIST, + "mips" | "mips64" => MIPS_WHITELIST, + "powerpc" | "powerpc64" => POWERPC_WHITELIST, + "wasm32" => WASM_WHITELIST, + _ => &[], + } +} + +pub fn print_version() { + // Can be called without initializing LLVM + unsafe { + println!("LLVM version: {}.{}", + llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor()); + } +} + +pub fn print_passes() { + // Can be called without initializing LLVM + unsafe { 
llvm::LLVMRustPrintPasses(); } +} + +pub(crate) fn print(req: PrintRequest, sess: &Session) { + require_inited(); + let tm = create_target_machine(sess, true); + unsafe { + match req { + PrintRequest::TargetCPUs => llvm::LLVMRustPrintTargetCPUs(tm), + PrintRequest::TargetFeatures => llvm::LLVMRustPrintTargetFeatures(tm), + _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req), + } + } +} diff --git a/src/librustc_codegen_llvm/metadata.rs b/src/librustc_codegen_llvm/metadata.rs new file mode 100644 index 000000000000..a4526a53769b --- /dev/null +++ b/src/librustc_codegen_llvm/metadata.rs @@ -0,0 +1,122 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::middle::cstore::MetadataLoader; +use rustc_target::spec::Target; +use llvm; +use llvm::{False, ObjectFile, mk_section_iter}; +use llvm::archive_ro::ArchiveRO; + +use rustc_data_structures::owning_ref::OwningRef; +use std::path::Path; +use std::ptr; +use std::slice; +use rustc_fs_util::path2cstr; + +pub use rustc_data_structures::sync::MetadataRef; + +pub const METADATA_FILENAME: &str = "rust.metadata.bin"; + +pub struct LlvmMetadataLoader; + +impl MetadataLoader for LlvmMetadataLoader { + fn get_rlib_metadata(&self, _: &Target, filename: &Path) -> Result { + // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap + // internally to read the file. We also avoid even using a memcpy by + // just keeping the archive along while the metadata is in use. 
+ let archive = ArchiveRO::open(filename) + .map(|ar| OwningRef::new(box ar)) + .map_err(|e| { + debug!("llvm didn't like `{}`: {}", filename.display(), e); + format!("failed to read rlib metadata in '{}': {}", filename.display(), e) + })?; + let buf: OwningRef<_, [u8]> = archive + .try_map(|ar| { + ar.iter() + .filter_map(|s| s.ok()) + .find(|sect| sect.name() == Some(METADATA_FILENAME)) + .map(|s| s.data()) + .ok_or_else(|| { + debug!("didn't find '{}' in the archive", METADATA_FILENAME); + format!("failed to read rlib metadata: '{}'", + filename.display()) + }) + })?; + Ok(rustc_erase_owner!(buf)) + } + + fn get_dylib_metadata(&self, + target: &Target, + filename: &Path) + -> Result { + unsafe { + let buf = path2cstr(filename); + let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr()) + .ok_or_else(|| format!("error reading library: '{}'", filename.display()))?; + let of = ObjectFile::new(mb) + .map(|of| OwningRef::new(box of)) + .ok_or_else(|| format!("provided path not an object file: '{}'", + filename.display()))?; + let buf = of.try_map(|of| search_meta_section(of, target, filename))?; + Ok(rustc_erase_owner!(buf)) + } + } +} + +fn search_meta_section<'a>(of: &'a ObjectFile, + target: &Target, + filename: &Path) + -> Result<&'a [u8], String> { + unsafe { + let si = mk_section_iter(of.llof); + while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { + let mut name_buf = ptr::null(); + let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); + let name = slice::from_raw_parts(name_buf as *const u8, name_len as usize).to_vec(); + let name = String::from_utf8(name).unwrap(); + debug!("get_metadata_section: name {}", name); + if read_metadata_section_name(target) == name { + let cbuf = llvm::LLVMGetSectionContents(si.llsi); + let csz = llvm::LLVMGetSectionSize(si.llsi) as usize; + // The buffer is valid while the object file is around + let buf: &'a [u8] = slice::from_raw_parts(cbuf as *const u8, csz); + return Ok(buf); + 
} + llvm::LLVMMoveToNextSection(si.llsi); + } + } + Err(format!("metadata not found: '{}'", filename.display())) +} + +pub fn metadata_section_name(target: &Target) -> &'static str { + // Historical note: + // + // When using link.exe it was seen that the section name `.note.rustc` + // was getting shortened to `.note.ru`, and according to the PE and COFF + // specification: + // + // > Executable images do not use a string table and do not support + // > section names longer than 8 characters + // + // https://msdn.microsoft.com/en-us/library/windows/hardware/gg463119.aspx + // + // As a result, we choose a slightly shorter name! As to why + // `.note.rustc` works on MinGW, that's another good question... + + if target.options.is_like_osx { + "__DATA,.rustc" + } else { + ".rustc" + } +} + +fn read_metadata_section_name(_target: &Target) -> &'static str { + ".rustc" +} diff --git a/src/librustc_trans/meth.rs b/src/librustc_codegen_llvm/meth.rs similarity index 83% rename from src/librustc_trans/meth.rs rename to src/librustc_codegen_llvm/meth.rs index 6b542ae2e936..8a1159bc4773 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_codegen_llvm/meth.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::ValueRef; -use abi::FnType; +use abi::{FnType, FnTypeExt}; use callee; use common::*; use builder::Builder; @@ -17,6 +16,7 @@ use consts; use monomorphize; use type_::Type; use value::Value; + use rustc::ty::{self, Ty}; use rustc::ty::layout::HasDataLayout; use debuginfo; @@ -33,11 +33,11 @@ impl<'a, 'tcx> VirtualIndex { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bx: &Builder<'a, 'tcx>, - llvtable: ValueRef, - fn_ty: &FnType<'tcx>) -> ValueRef { + pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>, + llvtable: &'ll Value, + fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value { // Load the data pointer from the object. 
- debug!("get_fn({:?}, {:?})", Value(llvtable), self); + debug!("get_fn({:?}, {:?})", llvtable, self); let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to()); let ptr_align = bx.tcx().data_layout.pointer_align; @@ -48,9 +48,9 @@ impl<'a, 'tcx> VirtualIndex { ptr } - pub fn get_usize(self, bx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef { + pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'ll Value { // Load the data pointer from the object. - debug!("get_int({:?}, {:?})", Value(llvtable), self); + debug!("get_int({:?}, {:?})", llvtable, self); let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to()); let usize_align = bx.tcx().data_layout.pointer_align; @@ -69,11 +69,11 @@ impl<'a, 'tcx> VirtualIndex { /// The `trait_ref` encodes the erased self type. Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. -pub fn get_vtable<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ty: Ty<'tcx>, - trait_ref: Option>) - -> ValueRef -{ +pub fn get_vtable( + cx: &CodegenCx<'ll, 'tcx>, + ty: Ty<'tcx>, + trait_ref: Option>, +) -> &'ll Value { let tcx = cx.tcx; debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); @@ -106,7 +106,7 @@ pub fn get_vtable<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let vtable_const = C_struct(cx, &components, false); let align = cx.data_layout().pointer_align; - let vtable = consts::addr_of(cx, vtable_const, align, "vtable"); + let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); debuginfo::create_vtable_metadata(cx, ty, vtable); diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_llvm/mir/analyze.rs new file mode 100644 index 000000000000..993138aee1ce --- /dev/null +++ b/src/librustc_codegen_llvm/mir/analyze.rs @@ -0,0 +1,361 @@ +// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An analysis to determine which locals require allocas and +//! which do not. + +use rustc_data_structures::bitvec::BitArray; +use rustc_data_structures::graph::dominators::Dominators; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc::mir::{self, Location, TerminatorKind}; +use rustc::mir::visit::{Visitor, PlaceContext}; +use rustc::mir::traversal; +use rustc::ty; +use rustc::ty::layout::LayoutOf; +use type_of::LayoutLlvmExt; +use super::FunctionCx; + +pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitArray { + let mir = fx.mir; + let mut analyzer = LocalAnalyzer::new(fx); + + analyzer.visit_mir(mir); + + for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { + let ty = fx.monomorphize(&ty); + debug!("local {} has type {:?}", index, ty); + let layout = fx.cx.layout_of(ty); + if layout.is_llvm_immediate() { + // These sorts of types are immediates that we can store + // in an Value without an alloca. + } else if layout.is_llvm_scalar_pair() { + // We allow pairs and uses of any of their 2 fields. + } else { + // These sorts of types require an alloca. Note that + // is_llvm_immediate() may *still* be true, particularly + // for newtypes, but we currently force some types + // (e.g. structs) into an alloca unconditionally, just so + // that we don't have to deal with having two pathways + // (gep vs extractvalue etc). 
+ analyzer.not_ssa(mir::Local::new(index)); + } + } + + analyzer.non_ssa_locals +} + +struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { + fx: &'mir FunctionCx<'a, 'll, 'tcx>, + dominators: Dominators, + non_ssa_locals: BitArray, + // The location of the first visited direct assignment to each + // local, or an invalid location (out of bounds `block` index). + first_assignment: IndexVec +} + +impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { + fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx>) -> Self { + let invalid_location = + mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); + let mut analyzer = LocalAnalyzer { + fx, + dominators: fx.mir.dominators(), + non_ssa_locals: BitArray::new(fx.mir.local_decls.len()), + first_assignment: IndexVec::from_elem(invalid_location, &fx.mir.local_decls) + }; + + // Arguments get assigned to by means of the function being called + for arg in fx.mir.args_iter() { + analyzer.first_assignment[arg] = mir::START_BLOCK.start_location(); + } + + analyzer + } + + fn first_assignment(&self, local: mir::Local) -> Option { + let location = self.first_assignment[local]; + if location.block.index() < self.fx.mir.basic_blocks().len() { + Some(location) + } else { + None + } + } + + fn not_ssa(&mut self, local: mir::Local) { + debug!("marking {:?} as non-SSA", local); + self.non_ssa_locals.insert(local); + } + + fn assign(&mut self, local: mir::Local, location: Location) { + if self.first_assignment(local).is_some() { + self.not_ssa(local); + } else { + self.first_assignment[local] = location; + } + } +} + +impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { + fn visit_assign(&mut self, + block: mir::BasicBlock, + place: &mir::Place<'tcx>, + rvalue: &mir::Rvalue<'tcx>, + location: Location) { + debug!("visit_assign(block={:?}, place={:?}, rvalue={:?})", block, place, rvalue); + + if let mir::Place::Local(index) = *place { + self.assign(index, location); + if !self.fx.rvalue_creates_operand(rvalue) { + self.not_ssa(index); + 
} + } else { + self.visit_place(place, PlaceContext::Store, location); + } + + self.visit_rvalue(rvalue, location); + } + + fn visit_terminator_kind(&mut self, + block: mir::BasicBlock, + kind: &mir::TerminatorKind<'tcx>, + location: Location) { + let check = match *kind { + mir::TerminatorKind::Call { + func: mir::Operand::Constant(ref c), + ref args, .. + } => match c.ty.sty { + ty::TyFnDef(did, _) => Some((did, args)), + _ => None, + }, + _ => None, + }; + if let Some((def_id, args)) = check { + if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() { + // box_free(x) shares with `drop x` the property that it + // is not guaranteed to be statically dominated by the + // definition of x, so x must always be in an alloca. + if let mir::Operand::Move(ref place) = args[0] { + self.visit_place(place, PlaceContext::Drop, location); + } + } + } + + self.super_terminator_kind(block, kind, location); + } + + fn visit_place(&mut self, + place: &mir::Place<'tcx>, + context: PlaceContext<'tcx>, + location: Location) { + debug!("visit_place(place={:?}, context={:?})", place, context); + let cx = self.fx.cx; + + if let mir::Place::Projection(ref proj) = *place { + // Allow uses of projections that are ZSTs or from scalar fields. + let is_consume = match context { + PlaceContext::Copy | PlaceContext::Move => true, + _ => false + }; + if is_consume { + let base_ty = proj.base.ty(self.fx.mir, cx.tcx); + let base_ty = self.fx.monomorphize(&base_ty); + + // ZSTs don't require any actual memory access. + let elem_ty = base_ty.projection_ty(cx.tcx, &proj.elem).to_ty(cx.tcx); + let elem_ty = self.fx.monomorphize(&elem_ty); + if cx.layout_of(elem_ty).is_zst() { + return; + } + + if let mir::ProjectionElem::Field(..) 
= proj.elem { + let layout = cx.layout_of(base_ty.to_ty(cx.tcx)); + if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { + // Recurse with the same context, instead of `Projection`, + // potentially stopping at non-operand projections, + // which would trigger `not_ssa` on locals. + self.visit_place(&proj.base, context, location); + return; + } + } + } + + // A deref projection only reads the pointer, never needs the place. + if let mir::ProjectionElem::Deref = proj.elem { + return self.visit_place(&proj.base, PlaceContext::Copy, location); + } + } + + self.super_place(place, context, location); + } + + fn visit_local(&mut self, + &local: &mir::Local, + context: PlaceContext<'tcx>, + location: Location) { + match context { + PlaceContext::Call => { + self.assign(local, location); + } + + PlaceContext::StorageLive | + PlaceContext::StorageDead | + PlaceContext::Validate => {} + + PlaceContext::Copy | + PlaceContext::Move => { + // Reads from uninitialized variables (e.g. in dead code, after + // optimizations) require locals to be in (uninitialized) memory. + // NB: there can be uninitialized reads of a local visited after + // an assignment to that local, if they happen on disjoint paths. + let ssa_read = match self.first_assignment(local) { + Some(assignment_location) => { + assignment_location.dominates(location, &self.dominators) + } + None => false + }; + if !ssa_read { + self.not_ssa(local); + } + } + + PlaceContext::Inspect | + PlaceContext::Store | + PlaceContext::AsmOutput | + PlaceContext::Borrow { .. } | + PlaceContext::Projection(..) => { + self.not_ssa(local); + } + + PlaceContext::Drop => { + let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx); + let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx)); + + // Only need the place if we're actually dropping it. 
+ if self.fx.cx.type_needs_drop(ty) { + self.not_ssa(local); + } + } + } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum CleanupKind { + NotCleanup, + Funclet, + Internal { funclet: mir::BasicBlock } +} + +impl CleanupKind { + pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option { + match self { + CleanupKind::NotCleanup => None, + CleanupKind::Funclet => Some(for_bb), + CleanupKind::Internal { funclet } => Some(funclet), + } + } +} + +pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { + fn discover_masters<'tcx>(result: &mut IndexVec, + mir: &mir::Mir<'tcx>) { + for (bb, data) in mir.basic_blocks().iter_enumerated() { + match data.terminator().kind { + TerminatorKind::Goto { .. } | + TerminatorKind::Resume | + TerminatorKind::Abort | + TerminatorKind::Return | + TerminatorKind::GeneratorDrop | + TerminatorKind::Unreachable | + TerminatorKind::SwitchInt { .. } | + TerminatorKind::Yield { .. } | + TerminatorKind::FalseEdges { .. } | + TerminatorKind::FalseUnwind { .. } => { + /* nothing to do */ + } + TerminatorKind::Call { cleanup: unwind, .. } | + TerminatorKind::Assert { cleanup: unwind, .. } | + TerminatorKind::DropAndReplace { unwind, .. } | + TerminatorKind::Drop { unwind, .. 
} => { + if let Some(unwind) = unwind { + debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet", + bb, data, unwind); + result[unwind] = CleanupKind::Funclet; + } + } + } + } + } + + fn propagate<'tcx>(result: &mut IndexVec, + mir: &mir::Mir<'tcx>) { + let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks()); + + let mut set_successor = |funclet: mir::BasicBlock, succ| { + match funclet_succs[funclet] { + ref mut s @ None => { + debug!("set_successor: updating successor of {:?} to {:?}", + funclet, succ); + *s = Some(succ); + }, + Some(s) => if s != succ { + span_bug!(mir.span, "funclet {:?} has 2 parents - {:?} and {:?}", + funclet, s, succ); + } + } + }; + + for (bb, data) in traversal::reverse_postorder(mir) { + let funclet = match result[bb] { + CleanupKind::NotCleanup => continue, + CleanupKind::Funclet => bb, + CleanupKind::Internal { funclet } => funclet, + }; + + debug!("cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}", + bb, data, result[bb], funclet); + + for &succ in data.terminator().successors() { + let kind = result[succ]; + debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", + funclet, succ, kind); + match kind { + CleanupKind::NotCleanup => { + result[succ] = CleanupKind::Internal { funclet: funclet }; + } + CleanupKind::Funclet => { + if funclet != succ { + set_successor(funclet, succ); + } + } + CleanupKind::Internal { funclet: succ_funclet } => { + if funclet != succ_funclet { + // `succ` has 2 different funclet going into it, so it must + // be a funclet by itself. 
+ + debug!("promoting {:?} to a funclet and updating {:?}", succ, + succ_funclet); + result[succ] = CleanupKind::Funclet; + set_successor(succ_funclet, succ); + set_successor(funclet, succ); + } + } + } + } + } + } + + let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks()); + + discover_masters(&mut result, mir); + propagate(&mut result, mir); + debug!("cleanup_kinds: result={:?}", result); + result +} diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_llvm/mir/block.rs new file mode 100644 index 000000000000..5f718ae456c4 --- /dev/null +++ b/src/librustc_codegen_llvm/mir/block.rs @@ -0,0 +1,963 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use llvm::{self, BasicBlock}; +use rustc::middle::lang_items; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::layout::{self, LayoutOf}; +use rustc::mir; +use rustc::mir::interpret::EvalErrorKind; +use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; +use base; +use callee; +use builder::{Builder, MemFlags}; +use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef}; +use consts; +use meth; +use monomorphize; +use type_of::LayoutLlvmExt; +use type_::Type; +use value::Value; + +use syntax::symbol::Symbol; +use syntax_pos::Pos; + +use super::{FunctionCx, LocalRef}; +use super::place::PlaceRef; +use super::operand::OperandRef; +use super::operand::OperandValue::{Pair, Ref, Immediate}; + +impl FunctionCx<'a, 'll, 'tcx> { + pub fn codegen_block(&mut self, bb: mir::BasicBlock) { + let mut bx = self.build_block(bb); + let data = &self.mir[bb]; + + debug!("codegen_block({:?}={:?})", bb, data); + + for statement in &data.statements { + bx = self.codegen_statement(bx, statement); + } + + self.codegen_terminator(bx, bb, data.terminator()); + } + + fn codegen_terminator(&mut self, + mut bx: Builder<'a, 'll, 'tcx>, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx>) + { + debug!("codegen_terminator: {:?}", terminator); + + // Create the cleanup bundle, if needed. 
+ let tcx = bx.tcx(); + let span = terminator.source_info.span; + let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); + let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref()); + + let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); + let cleanup_bundle = funclet.map(|l| l.bundle()); + + let lltarget = |this: &mut Self, target: mir::BasicBlock| { + let lltarget = this.blocks[target]; + let target_funclet = this.cleanup_kinds[target].funclet_bb(target); + match (funclet_bb, target_funclet) { + (None, None) => (lltarget, false), + (Some(f), Some(t_f)) + if f == t_f || !base::wants_msvc_seh(tcx.sess) + => (lltarget, false), + (None, Some(_)) => { + // jump *into* cleanup - need a landing pad if GNU + (this.landing_pad_to(target), false) + } + (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator), + (Some(_), Some(_)) => { + (this.landing_pad_to(target), true) + } + } + }; + + let llblock = |this: &mut Self, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // MSVC cross-funclet jump - need a trampoline + + debug!("llblock: creating cleanup trampoline for {:?}", target); + let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); + let trampoline = this.new_block(name); + trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + trampoline.llbb() + } else { + lltarget + } + }; + + let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. 
+ bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + }; + + let do_call = | + this: &mut Self, + bx: Builder<'a, 'll, 'tcx>, + fn_ty: FnType<'tcx, Ty<'tcx>>, + fn_ptr: &'ll Value, + llargs: &[&'ll Value], + destination: Option<(ReturnDest<'ll, 'tcx>, mir::BasicBlock)>, + cleanup: Option + | { + if let Some(cleanup) = cleanup { + let ret_bx = if let Some((_, target)) = destination { + this.blocks[target] + } else { + this.unreachable_block() + }; + let invokeret = bx.invoke(fn_ptr, + &llargs, + ret_bx, + llblock(this, cleanup), + cleanup_bundle); + fn_ty.apply_attrs_callsite(&bx, invokeret); + + if let Some((ret_dest, target)) = destination { + let ret_bx = this.build_block(target); + this.set_debug_loc(&ret_bx, terminator.source_info); + this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret); + } + } else { + let llret = bx.call(fn_ptr, &llargs, cleanup_bundle); + fn_ty.apply_attrs_callsite(&bx, llret); + if this.mir[bb].is_cleanup { + // Cleanup is always the cold path. Don't inline + // drop glue. Also, when there is a deeply-nested + // struct, there are "symmetry" issues that cause + // exponential inlining - see issue #41696. 
+ llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + } + + if let Some((ret_dest, target)) = destination { + this.store_return(&bx, ret_dest, &fn_ty.ret, llret); + funclet_br(this, bx, target); + } else { + bx.unreachable(); + } + } + }; + + self.set_debug_loc(&bx, terminator.source_info); + match terminator.kind { + mir::TerminatorKind::Resume => { + if let Some(cleanup_pad) = cleanup_pad { + bx.cleanup_ret(cleanup_pad, None); + } else { + let slot = self.get_personality_slot(&bx); + let lp0 = slot.project_field(&bx, 0).load(&bx).immediate(); + let lp1 = slot.project_field(&bx, 1).load(&bx).immediate(); + slot.storage_dead(&bx); + + if !bx.sess().target.target.options.custom_unwind_resume { + let mut lp = C_undef(self.landing_pad_type()); + lp = bx.insert_value(lp, lp0, 0); + lp = bx.insert_value(lp, lp1, 1); + bx.resume(lp); + } else { + bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle); + bx.unreachable(); + } + } + } + + mir::TerminatorKind::Abort => { + // Call core::intrinsics::abort() + let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + bx.call(fnname, &[], None); + bx.unreachable(); + } + + mir::TerminatorKind::Goto { target } => { + funclet_br(self, bx, target); + } + + mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { + let discr = self.codegen_operand(&bx, discr); + if targets.len() == 2 { + // If there are two targets, emit br instead of switch + let lltrue = llblock(self, targets[0]); + let llfalse = llblock(self, targets[1]); + if switch_ty == bx.tcx().types.bool { + // Don't generate trivial icmps when switching on bool + if let [0] = values[..] 
{ + bx.cond_br(discr.immediate(), llfalse, lltrue); + } else { + assert_eq!(&values[..], &[1]); + bx.cond_br(discr.immediate(), lltrue, llfalse); + } + } else { + let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + let llval = C_uint_big(switch_llty, values[0]); + let cmp = bx.icmp(llvm::IntEQ, discr.immediate(), llval); + bx.cond_br(cmp, lltrue, llfalse); + } + } else { + let (otherwise, targets) = targets.split_last().unwrap(); + let switch = bx.switch(discr.immediate(), + llblock(self, *otherwise), values.len()); + let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + for (&value, target) in values.iter().zip(targets) { + let llval = C_uint_big(switch_llty, value); + let llbb = llblock(self, *target); + bx.add_case(switch, llval, llbb) + } + } + } + + mir::TerminatorKind::Return => { + let llval = match self.fn_ty.ret.mode { + PassMode::Ignore | PassMode::Indirect(..) => { + bx.ret_void(); + return; + } + + PassMode::Direct(_) | PassMode::Pair(..) => { + let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE)); + if let Ref(llval, _, align) = op.val { + bx.load(llval, align) + } else { + op.immediate_or_packed_pair(&bx) + } + } + + PassMode::Cast(cast_ty) => { + let op = match self.locals[mir::RETURN_PLACE] { + LocalRef::Operand(Some(op)) => op, + LocalRef::Operand(None) => bug!("use of return before def"), + LocalRef::Place(cg_place) => { + OperandRef { + val: Ref(cg_place.llval, None, cg_place.align), + layout: cg_place.layout + } + } + LocalRef::UnsizedPlace(_) => bug!("return type must be sized"), + }; + let llslot = match op.val { + Immediate(_) | Pair(..) 
=> { + let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret"); + op.val.store(&bx, scratch); + scratch.llval + } + Ref(llval, _, align) => { + assert_eq!(align.abi(), op.layout.align.abi(), + "return place is unaligned!"); + llval + } + }; + bx.load( + bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()), + self.fn_ty.ret.layout.align) + } + }; + bx.ret(llval); + } + + mir::TerminatorKind::Unreachable => { + bx.unreachable(); + } + + mir::TerminatorKind::Drop { ref location, target, unwind } => { + let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); + let ty = self.monomorphize(&ty); + let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty); + + if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { + // we don't actually need to drop anything. + funclet_br(self, bx, target); + return + } + + let place = self.codegen_place(&bx, location); + let (args1, args2); + let mut args = if let Some(llextra) = place.llextra { + args2 = [place.llval, llextra]; + &args2[..] + } else { + args1 = [place.llval]; + &args1[..] + }; + let (drop_fn, fn_ty) = match ty.sty { + ty::TyDynamic(..) 
=> { + let fn_ty = drop_fn.ty(bx.cx.tcx); + let sig = common::ty_fn_sig(bx.cx, fn_ty); + let sig = bx.tcx().normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &sig, + ); + let fn_ty = FnType::new_vtable(bx.cx, sig, &[]); + let vtable = args[1]; + args = &args[..1]; + (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) + } + _ => { + (callee::get_fn(bx.cx, drop_fn), + FnType::of_instance(bx.cx, &drop_fn)) + } + }; + do_call(self, bx, fn_ty, drop_fn, args, + Some((ReturnDest::Nothing, target)), + unwind); + } + + mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { + let cond = self.codegen_operand(&bx, cond).immediate(); + let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); + + // This case can currently arise only from functions marked + // with #[rustc_inherit_overflow_checks] and inlined from + // another crate (mostly core::num generic/#[inline] fns), + // while the current crate doesn't use overflow checks. + // NOTE: Unlike binops, negation doesn't have its own + // checked operation, just a comparison with the minimum + // value, so we have to check for the assert message. + if !bx.cx.check_overflow { + if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { + const_cond = Some(expected); + } + } + + // Don't codegen the panic block if success if known. + if const_cond == Some(expected) { + funclet_br(self, bx, target); + return; + } + + // Pass the condition through llvm.expect for branch hinting. + let expect = bx.cx.get_intrinsic(&"llvm.expect.i1"); + let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None); + + // Create the failure block and the conditional branch to it. + let lltarget = llblock(self, target); + let panic_block = self.new_block("panic"); + if expected { + bx.cond_br(cond, lltarget, panic_block.llbb()); + } else { + bx.cond_br(cond, panic_block.llbb(), lltarget); + } + + // After this point, bx is the block for the call to panic. 
+ bx = panic_block; + self.set_debug_loc(&bx, terminator.source_info); + + // Get the location information. + let loc = bx.sess().codemap().lookup_char_pos(span.lo()); + let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); + let filename = C_str_slice(bx.cx, filename); + let line = C_u32(bx.cx, loc.line as u32); + let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let align = tcx.data_layout.aggregate_align + .max(tcx.data_layout.i32_align) + .max(tcx.data_layout.pointer_align); + + // Put together the arguments to the panic entry point. + let (lang_item, args) = match *msg { + EvalErrorKind::BoundsCheck { ref len, ref index } => { + let len = self.codegen_operand(&mut bx, len).immediate(); + let index = self.codegen_operand(&mut bx, index).immediate(); + + let file_line_col = C_struct(bx.cx, &[filename, line, col], false); + let file_line_col = consts::addr_of(bx.cx, + file_line_col, + align, + Some("panic_bounds_check_loc")); + (lang_items::PanicBoundsCheckFnLangItem, + vec![file_line_col, index, len]) + } + _ => { + let str = msg.description(); + let msg_str = Symbol::intern(str).as_str(); + let msg_str = C_str_slice(bx.cx, msg_str); + let msg_file_line_col = C_struct(bx.cx, + &[msg_str, filename, line, col], + false); + let msg_file_line_col = consts::addr_of(bx.cx, + msg_file_line_col, + align, + Some("panic_loc")); + (lang_items::PanicFnLangItem, + vec![msg_file_line_col]) + } + }; + + // Obtain the panic entry point. + let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); + let instance = ty::Instance::mono(bx.tcx(), def_id); + let fn_ty = FnType::of_instance(bx.cx, &instance); + let llfn = callee::get_fn(bx.cx, instance); + + // Codegen the actual panic invoke/call. + do_call(self, bx, fn_ty, llfn, &args, None, cleanup); + } + + mir::TerminatorKind::DropAndReplace { .. 
} => { + bug!("undesugared DropAndReplace in codegen: {:?}", terminator); + } + + mir::TerminatorKind::Call { ref func, ref args, ref destination, cleanup } => { + // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. + let callee = self.codegen_operand(&bx, func); + + let (instance, mut llfn) = match callee.layout.ty.sty { + ty::TyFnDef(def_id, substs) => { + (Some(ty::Instance::resolve(bx.cx.tcx, + ty::ParamEnv::reveal_all(), + def_id, + substs).unwrap()), + None) + } + ty::TyFnPtr(_) => { + (None, Some(callee.immediate())) + } + _ => bug!("{} is not callable", callee.layout.ty) + }; + let def = instance.map(|i| i.def); + let sig = callee.layout.ty.fn_sig(bx.tcx()); + let sig = bx.tcx().normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &sig, + ); + let abi = sig.abi; + + // Handle intrinsics old codegen wants Expr's for, ourselves. + let intrinsic = match def { + Some(ty::InstanceDef::Intrinsic(def_id)) + => Some(bx.tcx().item_name(def_id).as_str()), + _ => None + }; + let intrinsic = intrinsic.as_ref().map(|s| &s[..]); + + if intrinsic == Some("transmute") { + if let Some(destination_ref) = destination.as_ref() { + let &(ref dest, target) = destination_ref; + self.codegen_transmute(&bx, &args[0], dest); + funclet_br(self, bx, target); + } else { + // If we are trying to transmute to an uninhabited type, + // it is likely there is no allotted destination. In fact, + // transmuting to an uninhabited type is UB, which means + // we can do what we like. Here, we declare that transmuting + // into an uninhabited type is impossible, so anything following + // it must be unreachable. 
+ assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); + bx.unreachable(); + } + return; + } + + let extra_args = &args[sig.inputs().len()..]; + let extra_args = extra_args.iter().map(|op_arg| { + let op_ty = op_arg.ty(self.mir, bx.tcx()); + self.monomorphize(&op_ty) + }).collect::>(); + + let fn_ty = match def { + Some(ty::InstanceDef::Virtual(..)) => { + FnType::new_vtable(bx.cx, sig, &extra_args) + } + Some(ty::InstanceDef::DropGlue(_, None)) => { + // empty drop glue - a nop. + let &(_, target) = destination.as_ref().unwrap(); + funclet_br(self, bx, target); + return; + } + _ => FnType::new(bx.cx, sig, &extra_args) + }; + + // The arguments we'll be passing. Plus one to account for outptr, if used. + let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; + let mut llargs = Vec::with_capacity(arg_count); + + // Prepare the return value destination + let ret_dest = if let Some((ref dest, _)) = *destination { + let is_intrinsic = intrinsic.is_some(); + self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs, + is_intrinsic) + } else { + ReturnDest::Nothing + }; + + if intrinsic.is_some() && intrinsic != Some("drop_in_place") { + use intrinsic::codegen_intrinsic_call; + + let dest = match ret_dest { + _ if fn_ty.ret.is_indirect() => llargs[0], + ReturnDest::Nothing => { + C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) + } + ReturnDest::IndirectOperand(dst, _) | + ReturnDest::Store(dst) => dst.llval, + ReturnDest::DirectOperand(_) => + bug!("Cannot use direct operand with an intrinsic call") + }; + + let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| { + // The indices passed to simd_shuffle* in the + // third argument must be constant. This is + // checked by const-qualification, which also + // promotes any complex rvalues to constants. + if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") { + match *arg { + // The shuffle array argument is usually not an explicit constant, + // but specified directly in the code. 
This means it gets promoted + // and we can then extract the value by evaluating the promoted. + mir::Operand::Copy(mir::Place::Promoted(box(index, ty))) | + mir::Operand::Move(mir::Place::Promoted(box(index, ty))) => { + let param_env = ty::ParamEnv::reveal_all(); + let cid = mir::interpret::GlobalId { + instance: self.instance, + promoted: Some(index), + }; + let c = bx.tcx().const_eval(param_env.and(cid)); + let (llval, ty) = self.simd_shuffle_indices( + &bx, + terminator.source_info.span, + ty, + c, + ); + return OperandRef { + val: Immediate(llval), + layout: bx.cx.layout_of(ty), + }; + + }, + mir::Operand::Copy(_) | + mir::Operand::Move(_) => { + span_bug!(span, "shuffle indices must be constant"); + } + mir::Operand::Constant(ref constant) => { + let c = self.eval_mir_constant(&bx, constant); + let (llval, ty) = self.simd_shuffle_indices( + &bx, + constant.span, + constant.ty, + c, + ); + return OperandRef { + val: Immediate(llval), + layout: bx.cx.layout_of(ty) + }; + } + } + } + + self.codegen_operand(&bx, arg) + }).collect(); + + + let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx); + codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest, + terminator.source_info.span); + + if let ReturnDest::IndirectOperand(dst, _) = ret_dest { + self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval); + } + + if let Some((_, target)) = *destination { + funclet_br(self, bx, target); + } else { + bx.unreachable(); + } + + return; + } + + // Split the rust-call tupled arguments off. 
+ let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { + let (tup, args) = args.split_last().unwrap(); + (args, Some(tup)) + } else { + (&args[..], None) + }; + + for (i, arg) in first_args.iter().enumerate() { + let mut op = self.codegen_operand(&bx, arg); + if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { + if let Pair(data_ptr, meta) = op.val { + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&bx, meta, &fn_ty)); + llargs.push(data_ptr); + continue; + } + } + + // The callee needs to own the argument memory if we pass it + // by-ref, so make a local copy of non-immediate constants. + match (arg, op.val) { + (&mir::Operand::Copy(_), Ref(_, None, _)) | + (&mir::Operand::Constant(_), Ref(_, None, _)) => { + let tmp = PlaceRef::alloca(&bx, op.layout, "const"); + op.val.store(&bx, tmp); + op.val = Ref(tmp.llval, None, tmp.align); + } + _ => {} + } + + self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]); + } + if let Some(tup) = untuple { + self.codegen_arguments_untupled(&bx, tup, &mut llargs, + &fn_ty.args[first_args.len()..]) + } + + let fn_ptr = match (llfn, instance) { + (Some(llfn), _) => llfn, + (None, Some(instance)) => callee::get_fn(bx.cx, instance), + _ => span_bug!(span, "no llfn for call"), + }; + + do_call(self, bx, fn_ty, fn_ptr, &llargs, + destination.as_ref().map(|&(_, target)| (ret_dest, target)), + cleanup); + } + mir::TerminatorKind::GeneratorDrop | + mir::TerminatorKind::Yield { .. } => bug!("generator ops in codegen"), + mir::TerminatorKind::FalseEdges { .. } | + mir::TerminatorKind::FalseUnwind { .. } => bug!("borrowck false edges in codegen"), + } + } + + fn codegen_argument(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + op: OperandRef<'ll, 'tcx>, + llargs: &mut Vec<&'ll Value>, + arg: &ArgType<'tcx, Ty<'tcx>>) { + // Fill padding with undef value, where applicable. 
+ if let Some(ty) = arg.pad { + llargs.push(C_undef(ty.llvm_type(bx.cx))); + } + + if arg.is_ignore() { + return; + } + + if let PassMode::Pair(..) = arg.mode { + match op.val { + Pair(a, b) => { + llargs.push(a); + llargs.push(b); + return; + } + _ => bug!("codegen_argument: {:?} invalid for pair arugment", op) + } + } else if arg.is_unsized_indirect() { + match op.val { + Ref(a, Some(b), _) => { + llargs.push(a); + llargs.push(b); + return; + } + _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op) + } + } + + // Force by-ref if we have to load through a cast pointer. + let (mut llval, align, by_ref) = match op.val { + Immediate(_) | Pair(..) => { + match arg.mode { + PassMode::Indirect(..) | PassMode::Cast(_) => { + let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + op.val.store(bx, scratch); + (scratch.llval, scratch.align, true) + } + _ => { + (op.immediate_or_packed_pair(bx), arg.layout.align, false) + } + } + } + Ref(llval, _, align) => { + if arg.is_indirect() && align.abi() < arg.layout.align.abi() { + // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I + // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't + // have scary latent bugs around. + + let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + base::memcpy_ty(bx, scratch.llval, llval, op.layout, align, MemFlags::empty()); + (scratch.llval, scratch.align, true) + } else { + (llval, align, true) + } + } + }; + + if by_ref && !arg.is_indirect() { + // Have to load the argument, maybe while casting it. + if let PassMode::Cast(ty) = arg.mode { + llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()), + align.min(arg.layout.align)); + } else { + // We can't use `PlaceRef::load` here because the argument + // may have a type we don't treat as immediate, but the ABI + // used for this call is passing it by-value. 
In that case, + // the load would just produce `OperandValue::Ref` instead + // of the `OperandValue::Immediate` we need for the call. + llval = bx.load(llval, align); + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if scalar.is_bool() { + bx.range_metadata(llval, 0..2); + } + } + // We store bools as i8 so we need to truncate to i1. + llval = base::to_immediate(bx, llval, arg.layout); + } + } + + llargs.push(llval); + } + + fn codegen_arguments_untupled(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec<&'ll Value>, + args: &[ArgType<'tcx, Ty<'tcx>>]) { + let tuple = self.codegen_operand(bx, operand); + + // Handle both by-ref and immediate tuples. + if let Ref(llval, None, align) = tuple.val { + let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); + for i in 0..tuple.layout.fields.count() { + let field_ptr = tuple_ptr.project_field(bx, i); + self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]); + } + } else if let Ref(_, Some(_), _) = tuple.val { + bug!("closure arguments must be sized") + } else { + // If the tuple is immediate, the elements are as well. + for i in 0..tuple.layout.fields.count() { + let op = tuple.extract_field(bx, i); + self.codegen_argument(bx, op, llargs, &args[i]); + } + } + } + + fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + let cx = bx.cx; + if let Some(slot) = self.personality_slot { + slot + } else { + let layout = cx.layout_of(cx.tcx.intern_tup(&[ + cx.tcx.mk_mut_ptr(cx.tcx.types.u8), + cx.tcx.types.i32 + ])); + let slot = PlaceRef::alloca(bx, layout, "personalityslot"); + self.personality_slot = Some(slot); + slot + } + } + + /// Return the landingpad wrapper around the given basic block + /// + /// No-op in MSVC SEH scheme. 
+ fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock { + if let Some(block) = self.landing_pads[target_bb] { + return block; + } + + let block = self.blocks[target_bb]; + let landing_pad = self.landing_pad_uncached(block); + self.landing_pads[target_bb] = Some(landing_pad); + landing_pad + } + + fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBlock { + if base::wants_msvc_seh(self.cx.sess()) { + span_bug!(self.mir.span, "landing pad was not inserted?") + } + + let bx = self.new_block("cleanup"); + + let llpersonality = self.cx.eh_personality(); + let llretty = self.landing_pad_type(); + let lp = bx.landing_pad(llretty, llpersonality, 1); + bx.set_cleanup(lp); + + let slot = self.get_personality_slot(&bx); + slot.storage_live(&bx); + Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot); + + bx.br(target_bb); + bx.llbb() + } + + fn landing_pad_type(&self) -> &'ll Type { + let cx = self.cx; + Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false) + } + + fn unreachable_block(&mut self) -> &'ll BasicBlock { + self.unreachable_block.unwrap_or_else(|| { + let bl = self.new_block("unreachable"); + bl.unreachable(); + self.unreachable_block = Some(bl.llbb()); + bl.llbb() + }) + } + + pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx> { + Builder::new_block(self.cx, self.llfn, name) + } + + pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx> { + let bx = Builder::with_cx(self.cx); + bx.position_at_end(self.blocks[bb]); + bx + } + + fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, + dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, + llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) + -> ReturnDest<'ll, 'tcx> { + // If the return is ignored, we can just return a do-nothing ReturnDest + if fn_ret.is_ignore() { + return ReturnDest::Nothing; + } + let dest = if let mir::Place::Local(index) = *dest { + match self.locals[index] { + LocalRef::Place(dest) 
=> dest, + LocalRef::UnsizedPlace(_) => bug!("return type must be sized"), + LocalRef::Operand(None) => { + // Handle temporary places, specifically Operand ones, as + // they don't have allocas + return if fn_ret.is_indirect() { + // Odd, but possible, case, we have an operand temporary, + // but the calling convention has an indirect return. + let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + tmp.storage_live(bx); + llargs.push(tmp.llval); + ReturnDest::IndirectOperand(tmp, index) + } else if is_intrinsic { + // Currently, intrinsics always need a location to store + // the result. so we create a temporary alloca for the + // result + let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + tmp.storage_live(bx); + ReturnDest::IndirectOperand(tmp, index) + } else { + ReturnDest::DirectOperand(index) + }; + } + LocalRef::Operand(Some(_)) => { + bug!("place local already assigned to"); + } + } + } else { + self.codegen_place(bx, dest) + }; + if fn_ret.is_indirect() { + if dest.align.abi() < dest.layout.align.abi() { + // Currently, MIR code generation does not create calls + // that store directly to fields of packed structs (in + // fact, the calls it creates write only to temps), + // + // If someone changes that, please update this code path + // to create a temporary. 
+ span_bug!(self.mir.span, "can't directly store to unaligned value"); + } + llargs.push(dest.llval); + ReturnDest::Nothing + } else { + ReturnDest::Store(dest) + } + } + + fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>, + src: &mir::Operand<'tcx>, + dst: &mir::Place<'tcx>) { + if let mir::Place::Local(index) = *dst { + match self.locals[index] { + LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place), + LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"), + LocalRef::Operand(None) => { + let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst)); + assert!(!dst_layout.ty.has_erasable_regions()); + let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); + place.storage_live(bx); + self.codegen_transmute_into(bx, src, place); + let op = place.load(bx); + place.storage_dead(bx); + self.locals[index] = LocalRef::Operand(Some(op)); + } + LocalRef::Operand(Some(op)) => { + assert!(op.layout.is_zst(), + "assigning to initialized SSAtemp"); + } + } + } else { + let dst = self.codegen_place(bx, dst); + self.codegen_transmute_into(bx, src, dst); + } + } + + fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>, + src: &mir::Operand<'tcx>, + dst: PlaceRef<'ll, 'tcx>) { + let src = self.codegen_operand(bx, src); + let llty = src.layout.llvm_type(bx.cx); + let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); + let align = src.layout.align.min(dst.layout.align); + src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); + } + + + // Stores the return value of a function call into it's final location. 
+ fn store_return(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + dest: ReturnDest<'ll, 'tcx>, + ret_ty: &ArgType<'tcx, Ty<'tcx>>, + llval: &'ll Value) { + use self::ReturnDest::*; + + match dest { + Nothing => (), + Store(dst) => ret_ty.store(bx, llval, dst), + IndirectOperand(tmp, index) => { + let op = tmp.load(bx); + tmp.storage_dead(bx); + self.locals[index] = LocalRef::Operand(Some(op)); + } + DirectOperand(index) => { + // If there is a cast, we have to store and reload. + let op = if let PassMode::Cast(_) = ret_ty.mode { + let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); + tmp.storage_live(bx); + ret_ty.store(bx, llval, tmp); + let op = tmp.load(bx); + tmp.storage_dead(bx); + op + } else { + OperandRef::from_immediate_or_packed_pair(bx, llval, ret_ty.layout) + }; + self.locals[index] = LocalRef::Operand(Some(op)); + } + } + } +} + +enum ReturnDest<'ll, 'tcx> { + // Do nothing, the return value is indirect or ignored + Nothing, + // Store the return value to the pointer + Store(PlaceRef<'ll, 'tcx>), + // Stores an indirect return value to an operand local place + IndirectOperand(PlaceRef<'ll, 'tcx>, mir::Local), + // Stores a direct return value to an operand local place + DirectOperand(mir::Local) +} diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs new file mode 100644 index 000000000000..a6e14a99f3c8 --- /dev/null +++ b/src/librustc_codegen_llvm/mir/constant.rs @@ -0,0 +1,225 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use llvm; +use rustc::mir::interpret::ConstEvalErr; +use rustc_mir::interpret::{read_target_uint, const_val_field}; +use rustc::hir::def_id::DefId; +use rustc::mir; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::sync::Lrc; +use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; +use builder::Builder; +use common::{CodegenCx}; +use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize}; +use consts; +use type_of::LayoutLlvmExt; +use type_::Type; +use syntax::ast::Mutability; +use syntax::codemap::Span; +use value::Value; + +use super::super::callee; +use super::FunctionCx; + +pub fn scalar_to_llvm( + cx: &CodegenCx<'ll, '_>, + cv: Scalar, + layout: &layout::Scalar, + llty: &'ll Type, +) -> &'ll Value { + let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; + match cv { + Scalar::Bits { size: 0, .. } => { + assert_eq!(0, layout.value.size(cx).bytes()); + C_undef(Type::ix(cx, 0)) + }, + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.value.size(cx).bytes()); + let llval = C_uint_big(Type::ix(cx, bitsize), bits); + if layout.value == layout::Pointer { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + } else { + consts::bitcast(llval, llty) + } + }, + Scalar::Ptr(ptr) => { + let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id); + let base_addr = match alloc_type { + Some(AllocType::Memory(alloc)) => { + let init = const_alloc_to_llvm(cx, alloc); + if alloc.runtime_mutability == Mutability::Mutable { + consts::addr_of_mut(cx, init, alloc.align, None) + } else { + consts::addr_of(cx, init, alloc.align, None) + } + } + Some(AllocType::Function(fn_instance)) => { + callee::get_fn(cx, fn_instance) + } + Some(AllocType::Static(def_id)) => { + assert!(cx.tcx.is_static(def_id).is_some()); + consts::get_static(cx, def_id) + } + None => bug!("missing allocation {:?}", 
ptr.alloc_id), + }; + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + consts::bitcast(base_addr, Type::i8p(cx)), + &C_usize(cx, ptr.offset.bytes()), + 1, + ) }; + if layout.value != layout::Pointer { + unsafe { llvm::LLVMConstPtrToInt(llval, llty) } + } else { + consts::bitcast(llval, llty) + } + } + } +} + +pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { + let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); + let layout = cx.data_layout(); + let pointer_size = layout.pointer_size.bytes() as usize; + + let mut next_offset = 0; + for &(offset, alloc_id) in alloc.relocations.iter() { + let offset = offset.bytes(); + assert_eq!(offset as usize as u64, offset); + let offset = offset as usize; + if offset > next_offset { + llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset])); + } + let ptr_offset = read_target_uint( + layout.endian, + &alloc.bytes[offset..(offset + pointer_size)], + ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; + llvals.push(scalar_to_llvm( + cx, + Pointer { alloc_id, offset: Size::from_bytes(ptr_offset) }.into(), + &layout::Scalar { + value: layout::Primitive::Pointer, + valid_range: 0..=!0 + }, + Type::i8p(cx) + )); + next_offset = offset + pointer_size; + } + if alloc.bytes.len() >= next_offset { + llvals.push(C_bytes(cx, &alloc.bytes[next_offset ..])); + } + + C_struct(cx, &llvals, true) +} + +pub fn codegen_static_initializer( + cx: &CodegenCx<'ll, 'tcx>, + def_id: DefId, +) -> Result<(&'ll Value, &'tcx Allocation), Lrc>> { + let instance = ty::Instance::mono(cx.tcx, def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + let param_env = ty::ParamEnv::reveal_all(); + let static_ = cx.tcx.const_eval(param_env.and(cid))?; + + let alloc = match static_.val { + ConstValue::ByRef(alloc, n) if n.bytes() == 0 => alloc, + _ => bug!("static const eval returned {:#?}", static_), + }; + Ok((const_alloc_to_llvm(cx, alloc), alloc)) +} + +impl FunctionCx<'a, 
'll, 'tcx> { + fn fully_evaluate( + &mut self, + bx: &Builder<'a, 'll, 'tcx>, + constant: &'tcx ty::Const<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { + match constant.val { + ConstValue::Unevaluated(def_id, ref substs) => { + let tcx = bx.tcx(); + let param_env = ty::ParamEnv::reveal_all(); + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); + let cid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(param_env.and(cid)) + }, + _ => Ok(constant), + } + } + + pub fn eval_mir_constant( + &mut self, + bx: &Builder<'a, 'll, 'tcx>, + constant: &mir::Constant<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { + let c = self.monomorphize(&constant.literal); + self.fully_evaluate(bx, c) + } + + /// process constant containing SIMD shuffle indices + pub fn simd_shuffle_indices( + &mut self, + bx: &Builder<'a, 'll, 'tcx>, + span: Span, + ty: Ty<'tcx>, + constant: Result<&'tcx ty::Const<'tcx>, Lrc>>, + ) -> (&'ll Value, Ty<'tcx>) { + constant + .and_then(|c| { + let field_ty = c.ty.builtin_index().unwrap(); + let fields = match c.ty.sty { + ty::TyArray(_, n) => n.unwrap_usize(bx.tcx()), + ref other => bug!("invalid simd shuffle type: {}", other), + }; + let values: Result, Lrc<_>> = (0..fields).map(|field| { + let field = const_val_field( + bx.tcx(), + ty::ParamEnv::reveal_all(), + self.instance, + None, + mir::Field::new(field as usize), + c, + )?; + if let Some(prim) = field.val.try_to_scalar() { + let layout = bx.cx.layout_of(field_ty); + let scalar = match layout.abi { + layout::Abi::Scalar(ref x) => x, + _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) + }; + Ok(scalar_to_llvm( + bx.cx, prim, scalar, + layout.immediate_llvm_type(bx.cx), + )) + } else { + bug!("simd shuffle field {:?}", field) + } + }).collect(); + let llval = C_struct(bx.cx, &values?, false); + Ok((llval, c.ty)) + }) + .unwrap_or_else(|e| { + e.report_as_error( + bx.tcx().at(span), + "could not evaluate shuffle_indices at compile time", + ); 
+ // We've errored, so we don't have to produce working code. + let ty = self.monomorphize(&ty); + let llty = bx.cx.layout_of(ty).llvm_type(bx.cx); + (C_undef(llty), ty) + }) + } +} diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_llvm/mir/mod.rs new file mode 100644 index 000000000000..5991bb80c30e --- /dev/null +++ b/src/librustc_codegen_llvm/mir/mod.rs @@ -0,0 +1,679 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use common::{C_i32, C_null}; +use libc::c_uint; +use llvm::{self, BasicBlock}; +use llvm::debuginfo::DIScope; +use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; +use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc::mir::{self, Mir}; +use rustc::ty::subst::Substs; +use rustc::session::config::DebugInfo; +use base; +use builder::Builder; +use common::{CodegenCx, Funclet}; +use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; +use monomorphize::Instance; +use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; +use type_::Type; +use value::Value; + +use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; +use syntax::symbol::keywords; + +use std::iter; + +use rustc_data_structures::bitvec::BitArray; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; + +pub use self::constant::codegen_static_initializer; + +use self::analyze::CleanupKind; +use self::place::PlaceRef; +use rustc::mir::traversal; + +use self::operand::{OperandRef, OperandValue}; + +/// Master context for codegenning from MIR. 
+pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { + instance: Instance<'tcx>, + + mir: &'a mir::Mir<'tcx>, + + debug_context: FunctionDebugContext<'ll>, + + llfn: &'ll Value, + + cx: &'a CodegenCx<'ll, 'tcx>, + + fn_ty: FnType<'tcx, Ty<'tcx>>, + + /// When unwinding is initiated, we have to store this personality + /// value somewhere so that we can load it and re-use it in the + /// resume instruction. The personality is (afaik) some kind of + /// value used for C++ unwinding, which must filter by type: we + /// don't really care about it very much. Anyway, this value + /// contains an alloca into which the personality is stored and + /// then later loaded when generating the DIVERGE_BLOCK. + personality_slot: Option>, + + /// A `Block` for each MIR `BasicBlock` + blocks: IndexVec, + + /// The funclet status of each basic block + cleanup_kinds: IndexVec, + + /// When targeting MSVC, this stores the cleanup info for each funclet + /// BB. This is initialized as we compute the funclets' head block in RPO. + funclets: &'a IndexVec>>, + + /// This stores the landing-pad block for a given BB, computed lazily on GNU + /// and eagerly on MSVC. + landing_pads: IndexVec>, + + /// Cached unreachable block + unreachable_block: Option<&'ll BasicBlock>, + + /// The location where each MIR arg/var/tmp/ret is stored. This is + /// usually an `PlaceRef` representing an alloca, but not always: + /// sometimes we can skip the alloca and just store the value + /// directly using an `OperandRef`, which makes for tighter LLVM + /// IR. 
The conditions for using an `OperandRef` are as follows: + /// + /// - the type of the local must be judged "immediate" by `is_llvm_immediate` + /// - the operand must never be referenced indirectly + /// - we should not take its address using the `&` operator + /// - nor should it appear in a place path like `tmp.a` + /// - the operand must be defined by an rvalue that can generate immediate + /// values + /// + /// Avoiding allocs can also be important for certain intrinsics, + /// notably `expect`. + locals: IndexVec>, + + /// Debug information for MIR scopes. + scopes: IndexVec>, + + /// If this function is being monomorphized, this contains the type substitutions used. + param_substs: &'tcx Substs<'tcx>, +} + +impl FunctionCx<'a, 'll, 'tcx> { + pub fn monomorphize(&self, value: &T) -> T + where T: TypeFoldable<'tcx> + { + self.cx.tcx.subst_and_normalize_erasing_regions( + self.param_substs, + ty::ParamEnv::reveal_all(), + value, + ) + } + + pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_>, source_info: mir::SourceInfo) { + let (scope, span) = self.debug_loc(source_info); + debuginfo::set_source_location(&self.debug_context, bx, scope, span); + } + + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) { + // Bail out if debug info emission is not enabled. + match self.debug_context { + FunctionDebugContext::DebugInfoDisabled | + FunctionDebugContext::FunctionWithoutDebugInfo => { + return (self.scopes[source_info.scope].scope_metadata, source_info.span); + } + FunctionDebugContext::RegularContext(_) =>{} + } + + // In order to have a good line stepping behavior in debugger, we overwrite debug + // locations of macro expansions with that of the outermost expansion site + // (unless the crate is being compiled with `-Z debug-macros`). 
+ if source_info.span.ctxt() == NO_EXPANSION || + self.cx.sess().opts.debugging_opts.debug_macros { + let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo()); + (scope, source_info.span) + } else { + // Walk up the macro expansion chain until we reach a non-expanded span. + // We also stop at the function body level because no line stepping can occur + // at the level above that. + let mut span = source_info.span; + while span.ctxt() != NO_EXPANSION && span.ctxt() != self.mir.span.ctxt() { + if let Some(info) = span.ctxt().outer().expn_info() { + span = info.call_site; + } else { + break; + } + } + let scope = self.scope_metadata_for_loc(source_info.scope, span.lo()); + // Use span of the outermost expansion site, while keeping the original lexical scope. + (scope, span) + } + } + + // DILocations inherit source file name from the parent DIScope. Due to macro expansions + // it may so happen that the current span belongs to a different file than the DIScope + // corresponding to span's containing source scope. If so, we need to create a DIScope + // "extension" into that file. + fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos) + -> Option<&'ll DIScope> { + let scope_metadata = self.scopes[scope_id].scope_metadata; + if pos < self.scopes[scope_id].file_start_pos || + pos >= self.scopes[scope_id].file_end_pos { + let cm = self.cx.sess().codemap(); + let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate; + Some(debuginfo::extend_scope_to_file(self.cx, + scope_metadata.unwrap(), + &cm.lookup_char_pos(pos).file, + defining_crate)) + } else { + scope_metadata + } + } +} + +enum LocalRef<'ll, 'tcx> { + Place(PlaceRef<'ll, 'tcx>), + /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). + /// `*p` is the fat pointer that references the actual unsized place. + /// Every time it is initialized, we have to reallocate the place + /// and update the fat pointer. 
That's the reason why it is indirect. + UnsizedPlace(PlaceRef<'ll, 'tcx>), + Operand(Option>), +} + +impl LocalRef<'ll, 'tcx> { + fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'ll, 'tcx> { + if layout.is_zst() { + // Zero-size temporaries aren't always initialized, which + // doesn't matter because they don't contain data, but + // we need something in the operand. + LocalRef::Operand(Some(OperandRef::new_zst(cx, layout))) + } else { + LocalRef::Operand(None) + } + } +} + +/////////////////////////////////////////////////////////////////////////// + +pub fn codegen_mir( + cx: &'a CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + mir: &'a Mir<'tcx>, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, +) { + let fn_ty = FnType::new(cx, sig, &[]); + debug!("fn_ty: {:?}", fn_ty); + let debug_context = + debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir); + let bx = Builder::new_block(cx, llfn, "start"); + + if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { + bx.set_personality_fn(cx.eh_personality()); + } + + let cleanup_kinds = analyze::cleanup_kinds(&mir); + // Allocate a `Block` for every basic block, except + // the start block, if nothing loops back to it. + let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); + let block_bxs: IndexVec = + mir.basic_blocks().indices().map(|bb| { + if bb == mir::START_BLOCK && !reentrant_start_block { + bx.llbb() + } else { + bx.build_sibling_block(&format!("{:?}", bb)).llbb() + } + }).collect(); + + // Compute debuginfo scopes from MIR scopes. 
+ let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context); + let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs); + + let mut fx = FunctionCx { + instance, + mir, + llfn, + fn_ty, + cx, + personality_slot: None, + blocks: block_bxs, + unreachable_block: None, + cleanup_kinds, + landing_pads, + funclets: &funclets, + scopes, + locals: IndexVec::new(), + debug_context, + param_substs: { + assert!(!instance.substs.needs_infer()); + instance.substs + }, + }; + + let memory_locals = analyze::non_ssa_locals(&fx); + + // Allocate variable and temp allocas + fx.locals = { + let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals); + + let mut allocate_local = |local| { + let decl = &mir.local_decls[local]; + let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty)); + assert!(!layout.ty.has_erasable_regions()); + + if let Some(name) = decl.name { + // User variable + let debug_scope = fx.scopes[decl.visibility_scope]; + let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full; + + if !memory_locals.contains(local) && !dbg { + debug!("alloc: {:?} ({}) -> operand", local, name); + return LocalRef::new_operand(bx.cx, layout); + } + + debug!("alloc: {:?} ({}) -> place", local, name); + if layout.is_unsized() { + let indirect_place = + PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str()); + // FIXME: add an appropriate debuginfo + LocalRef::UnsizedPlace(indirect_place) + } else { + let place = PlaceRef::alloca(&bx, layout, &name.as_str()); + if dbg { + let (scope, span) = fx.debug_loc(mir::SourceInfo { + span: decl.source_info.span, + scope: decl.visibility_scope, + }); + declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(), + VariableAccess::DirectVariable { alloca: place.llval }, + VariableKind::LocalVariable, span); + } + LocalRef::Place(place) + } + } else { + // Temporary or return place + if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { + debug!("alloc: {:?} 
(return place) -> place", local); + let llretptr = llvm::get_param(llfn, 0); + LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) + } else if memory_locals.contains(local) { + debug!("alloc: {:?} -> place", local); + if layout.is_unsized() { + let indirect_place = + PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local)); + LocalRef::UnsizedPlace(indirect_place) + } else { + LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local))) + } + } else { + // If this is an immediate local, we do not create an + // alloca in advance. Instead we wait until we see the + // definition and update the operand there. + debug!("alloc: {:?} -> operand", local); + LocalRef::new_operand(bx.cx, layout) + } + } + }; + + let retptr = allocate_local(mir::RETURN_PLACE); + iter::once(retptr) + .chain(args.into_iter()) + .chain(mir.vars_and_temps_iter().map(allocate_local)) + .collect() + }; + + // Branch to the START block, if it's not the entry block. + if reentrant_start_block { + bx.br(fx.blocks[mir::START_BLOCK]); + } + + // Up until here, IR instructions for this function have explicitly not been annotated with + // source code location, so we don't step into call setup code. From here on, source location + // emitting should be enabled. + debuginfo::start_emitting_source_locations(&fx.debug_context); + + let rpo = traversal::reverse_postorder(&mir); + let mut visited = BitArray::new(mir.basic_blocks().len()); + + // Codegen the body of each block using reverse postorder + for (bb, _) in rpo { + visited.insert(bb.index()); + fx.codegen_block(bb); + } + + // Remove blocks that haven't been visited, or have no + // predecessors. 
+ for bb in mir.basic_blocks().indices() { + // Unreachable block + if !visited.contains(bb.index()) { + debug!("codegen_mir: block {:?} was not visited", bb); + unsafe { + llvm::LLVMDeleteBasicBlock(fx.blocks[bb]); + } + } + } +} + +fn create_funclets( + mir: &'a Mir<'tcx>, + bx: &Builder<'a, 'll, 'tcx>, + cleanup_kinds: &IndexVec, + block_bxs: &IndexVec) + -> (IndexVec>, + IndexVec>>) +{ + block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { + match *cleanup_kind { + CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {} + _ => return (None, None) + } + + let cleanup; + let ret_llbb; + match mir[bb].terminator.as_ref().map(|t| &t.kind) { + // This is a basic block that we're aborting the program for, + // notably in an `extern` function. These basic blocks are inserted + // so that we assert that `extern` functions do indeed not panic, + // and if they do we abort the process. + // + // On MSVC these are tricky though (where we're doing funclets). If + // we were to do a cleanuppad (like below) the normal functions like + // `longjmp` would trigger the abort logic, terminating the + // program. Instead we insert the equivalent of `catch(...)` for C++ + // which magically doesn't trigger when `longjmp` files over this + // frame. + // + // Lots more discussion can be found on #48251 but this codegen is + // modeled after clang's for: + // + // try { + // foo(); + // } catch (...) { + // bar(); + // } + Some(&mir::TerminatorKind::Abort) => { + let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); + let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); + ret_llbb = cs_bx.llbb(); + + let cs = cs_bx.catch_switch(None, None, 1); + cs_bx.add_handler(cs, cp_bx.llbb()); + + // The "null" here is actually a RTTI type descriptor for the + // C++ personality function, but `catch (...)` has no type so + // it's null. The 64 here is actually a bitfield which + // represents that this is a catch-all block. 
+ let null = C_null(Type::i8p(bx.cx)); + let sixty_four = C_i32(bx.cx, 64); + cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); + cp_bx.br(llbb); + } + _ => { + let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); + ret_llbb = cleanup_bx.llbb(); + cleanup = cleanup_bx.cleanup_pad(None, &[]); + cleanup_bx.br(llbb); + } + }; + + (Some(ret_llbb), Some(Funclet::new(cleanup))) + }).unzip() +} + +/// Produce, for each argument, a `Value` pointing at the +/// argument's value. As arguments are places, these are always +/// indirect. +fn arg_local_refs( + bx: &Builder<'a, 'll, 'tcx>, + fx: &FunctionCx<'a, 'll, 'tcx>, + scopes: &IndexVec>, + memory_locals: &BitArray, +) -> Vec> { + let mir = fx.mir; + let tcx = bx.tcx(); + let mut idx = 0; + let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize; + + // Get the argument scope, if it exists and if we need it. + let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; + let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { + arg_scope.scope_metadata + } else { + None + }; + + mir.args_iter().enumerate().map(|(arg_index, local)| { + let arg_decl = &mir.local_decls[local]; + + let name = if let Some(name) = arg_decl.name { + name.as_str().to_string() + } else { + format!("arg{}", arg_index) + }; + + if Some(local) == mir.spread_arg { + // This argument (e.g. the last argument in the "rust-call" ABI) + // is a tuple that was spread at the ABI level and now we have + // to reconstruct it into a tuple local variable, from multiple + // individual LLVM function arguments. 
+ + let arg_ty = fx.monomorphize(&arg_decl.ty); + let tupled_arg_tys = match arg_ty.sty { + ty::TyTuple(ref tys) => tys, + _ => bug!("spread argument isn't a tuple?!") + }; + + let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name); + for i in 0..tupled_arg_tys.len() { + let arg = &fx.fn_ty.args[idx]; + idx += 1; + if arg.pad.is_some() { + llarg_idx += 1; + } + arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i)); + } + + // Now that we have one alloca that contains the aggregate value, + // we can create one debuginfo entry for the argument. + arg_scope.map(|scope| { + let variable_access = VariableAccess::DirectVariable { + alloca: place.llval + }; + declare_local( + bx, + &fx.debug_context, + arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg_ty, scope, + variable_access, + VariableKind::ArgumentVariable(arg_index + 1), + DUMMY_SP + ); + }); + + return LocalRef::Place(place); + } + + let arg = &fx.fn_ty.args[idx]; + idx += 1; + if arg.pad.is_some() { + llarg_idx += 1; + } + + if arg_scope.is_none() && !memory_locals.contains(local) { + // We don't have to cast or keep the argument in the alloca. + // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead + // of putting everything in allocas just so we can use llvm.dbg.declare. + let local = |op| LocalRef::Operand(Some(op)); + match arg.mode { + PassMode::Ignore => { + return local(OperandRef::new_zst(bx.cx, arg.layout)); + } + PassMode::Direct(_) => { + let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + bx.set_value_name(llarg, &name); + llarg_idx += 1; + return local( + OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); + } + PassMode::Pair(..) 
=> { + let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + bx.set_value_name(a, &(name.clone() + ".0")); + llarg_idx += 1; + + let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + bx.set_value_name(b, &(name + ".1")); + llarg_idx += 1; + + return local(OperandRef { + val: OperandValue::Pair(a, b), + layout: arg.layout + }); + } + _ => {} + } + } + + let place = if arg.is_sized_indirect() { + // Don't copy an indirect argument to an alloca, the caller + // already put it in a temporary alloca and gave it up. + // FIXME: lifetimes + let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + bx.set_value_name(llarg, &name); + llarg_idx += 1; + PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) + } else if arg.is_unsized_indirect() { + // As the storage for the indirect argument lives during + // the whole function call, we just copy the fat pointer. + let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + llarg_idx += 1; + let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + llarg_idx += 1; + let indirect_operand = OperandValue::Pair(llarg, llextra); + + let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); + indirect_operand.store(&bx, tmp); + tmp + } else { + let tmp = PlaceRef::alloca(bx, arg.layout, &name); + arg.store_fn_arg(bx, &mut llarg_idx, tmp); + tmp + }; + arg_scope.map(|scope| { + // Is this a regular argument? + if arg_index > 0 || mir.upvar_decls.is_empty() { + // The Rust ABI passes indirect variables using a pointer and a manual copy, so we + // need to insert a deref here, but the C ABI uses a pointer and a copy using the + // byval attribute, for which LLVM always does the deref itself, + // so we must not add it. 
+ let variable_access = VariableAccess::DirectVariable { + alloca: place.llval + }; + + declare_local( + bx, + &fx.debug_context, + arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg.layout.ty, + scope, + variable_access, + VariableKind::ArgumentVariable(arg_index + 1), + DUMMY_SP + ); + return; + } + + // Or is it the closure environment? + let (closure_layout, env_ref) = match arg.layout.ty.sty { + ty::TyRawPtr(ty::TypeAndMut { ty, .. }) | + ty::TyRef(_, ty, _) => (bx.cx.layout_of(ty), true), + _ => (arg.layout, false) + }; + + let (def_id, upvar_substs) = match closure_layout.ty.sty { + ty::TyClosure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)), + ty::TyGenerator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)), + _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty) + }; + let upvar_tys = upvar_substs.upvar_tys(def_id, tcx); + + // Store the pointer to closure data in an alloca for debuginfo + // because that's what the llvm.dbg.declare intrinsic expects. + + // FIXME(eddyb) this shouldn't be necessary but SROA seems to + // mishandle DW_OP_plus not preceded by DW_OP_deref, i.e. it + // doesn't actually strip the offset when splitting the closure + // environment into its components so it ends up out of bounds. + // (cuviper) It seems to be fine without the alloca on LLVM 6 and later. 
+ let env_alloca = !env_ref && unsafe { llvm::LLVMRustVersionMajor() < 6 }; + let env_ptr = if env_alloca { + let scratch = PlaceRef::alloca(bx, + bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + "__debuginfo_env_ptr"); + bx.store(place.llval, scratch.llval, scratch.align); + scratch.llval + } else { + place.llval + }; + + for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { + let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); + + let ops = unsafe { + [llvm::LLVMRustDIBuilderCreateOpDeref(), + llvm::LLVMRustDIBuilderCreateOpPlusUconst(), + byte_offset_of_var_in_env as i64, + llvm::LLVMRustDIBuilderCreateOpDeref()] + }; + + // The environment and the capture can each be indirect. + + // FIXME(eddyb) see above why we sometimes have to keep + // a pointer in an alloca for debuginfo atm. + let mut ops = if env_ref || env_alloca { &ops[..] } else { &ops[1..] }; + + let ty = if let (true, &ty::TyRef(_, ty, _)) = (decl.by_ref, &ty.sty) { + ty + } else { + ops = &ops[..ops.len() - 1]; + ty + }; + + let variable_access = VariableAccess::IndirectVariable { + alloca: env_ptr, + address_operations: &ops + }; + declare_local( + bx, + &fx.debug_context, + decl.debug_name, + ty, + scope, + variable_access, + VariableKind::LocalVariable, + DUMMY_SP + ); + } + }); + if arg.is_unsized_indirect() { + LocalRef::UnsizedPlace(place) + } else { + LocalRef::Place(place) + } + }).collect() +} + +mod analyze; +mod block; +mod constant; +pub mod place; +pub mod operand; +mod rvalue; +mod statement; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_llvm/mir/operand.rs new file mode 100644 index 000000000000..9537379813d5 --- /dev/null +++ b/src/librustc_codegen_llvm/mir/operand.rs @@ -0,0 +1,451 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::mir::interpret::ConstEvalErr; +use rustc::mir; +use rustc::mir::interpret::{ConstValue, ScalarMaybeUndef}; +use rustc::ty; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::sync::Lrc; + +use base; +use common::{CodegenCx, C_undef, C_usize}; +use builder::{Builder, MemFlags}; +use value::Value; +use type_of::LayoutLlvmExt; +use type_::Type; +use glue; + +use std::fmt; + +use super::{FunctionCx, LocalRef}; +use super::constant::scalar_to_llvm; +use super::place::PlaceRef; + +/// The representation of a Rust value. The enum variant is in fact +/// uniquely determined by the value's type, but is kept as a +/// safety check. +#[derive(Copy, Clone, Debug)] +pub enum OperandValue<'ll> { + /// A reference to the actual operand. The data is guaranteed + /// to be valid for the operand's lifetime. + /// The second value, if any, is the extra data (vtable or length) + /// which indicates that it refers to an unsized rvalue. + Ref(&'ll Value, Option<&'ll Value>, Align), + /// A single LLVM value. + Immediate(&'ll Value), + /// A pair of immediate LLVM values. Used by fat pointers too. + Pair(&'ll Value, &'ll Value) +} + +/// An `OperandRef` is an "SSA" reference to a Rust value, along with +/// its type. +/// +/// NOTE: unless you know a value's type exactly, you should not +/// generate LLVM opcodes acting on it and instead act via methods, +/// to avoid nasty edge cases. In particular, using `Builder::store` +/// directly is sure to cause problems -- use `OperandRef::store` +/// instead. +#[derive(Copy, Clone)] +pub struct OperandRef<'ll, 'tcx> { + // The value. + pub val: OperandValue<'ll>, + + // The layout of value, based on its Rust type. 
+ pub layout: TyLayout<'tcx>, +} + +impl fmt::Debug for OperandRef<'ll, 'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) + } +} + +impl OperandRef<'ll, 'tcx> { + pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>, + layout: TyLayout<'tcx>) -> OperandRef<'ll, 'tcx> { + assert!(layout.is_zst()); + OperandRef { + val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))), + layout + } + } + + pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, + val: &'tcx ty::Const<'tcx>) + -> Result, Lrc>> { + let layout = bx.cx.layout_of(val.ty); + + if layout.is_zst() { + return Ok(OperandRef::new_zst(bx.cx, layout)); + } + + let val = match val.val { + ConstValue::Unevaluated(..) => bug!(), + ConstValue::Scalar(x) => { + let scalar = match layout.abi { + layout::Abi::Scalar(ref x) => x, + _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) + }; + let llval = scalar_to_llvm( + bx.cx, + x, + scalar, + layout.immediate_llvm_type(bx.cx), + ); + OperandValue::Immediate(llval) + }, + ConstValue::ScalarPair(a, b) => { + let (a_scalar, b_scalar) = match layout.abi { + layout::Abi::ScalarPair(ref a, ref b) => (a, b), + _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) + }; + let a_llval = scalar_to_llvm( + bx.cx, + a, + a_scalar, + layout.scalar_pair_element_llvm_type(bx.cx, 0, true), + ); + let b_layout = layout.scalar_pair_element_llvm_type(bx.cx, 1, true); + let b_llval = match b { + ScalarMaybeUndef::Scalar(b) => scalar_to_llvm( + bx.cx, + b, + b_scalar, + b_layout, + ), + ScalarMaybeUndef::Undef => C_undef(b_layout), + }; + OperandValue::Pair(a_llval, b_llval) + }, + ConstValue::ByRef(alloc, offset) => { + return Ok(PlaceRef::from_const_alloc(bx, layout, alloc, offset).load(bx)); + }, + }; + + Ok(OperandRef { + val, + layout + }) + } + + /// Asserts that this operand refers to a scalar and returns + /// a reference to its value. 
+ pub fn immediate(self) -> &'ll Value { + match self.val { + OperandValue::Immediate(s) => s, + _ => bug!("not immediate: {:?}", self) + } + } + + pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + let projected_ty = self.layout.ty.builtin_deref(true) + .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; + let (llptr, llextra) = match self.val { + OperandValue::Immediate(llptr) => (llptr, None), + OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)), + OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self) + }; + let layout = cx.layout_of(projected_ty); + PlaceRef { + llval: llptr, + llextra, + layout, + align: layout.align, + } + } + + /// If this operand is a `Pair`, we return an aggregate with the two values. + /// For other cases, see `immediate`. + pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value { + if let OperandValue::Pair(a, b) = self.val { + let llty = self.layout.llvm_type(bx.cx); + debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", + self, llty); + // Reconstruct the immediate aggregate. + let mut llpair = C_undef(llty); + llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); + llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); + llpair + } else { + self.immediate() + } + } + + /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. + pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, + llval: &'ll Value, + layout: TyLayout<'tcx>) + -> OperandRef<'ll, 'tcx> { + let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { + debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", + llval, layout); + + // Deconstruct the immediate aggregate. 
+ let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a); + let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b); + OperandValue::Pair(a_llval, b_llval) + } else { + OperandValue::Immediate(llval) + }; + OperandRef { val, layout } + } + + pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'ll, 'tcx> { + let field = self.layout.field(bx.cx, i); + let offset = self.layout.fields.offset(i); + + let mut val = match (self.val, &self.layout.abi) { + // If the field is ZST, it has no data. + _ if field.is_zst() => { + return OperandRef::new_zst(bx.cx, field); + } + + // Newtype of a scalar, scalar pair or vector. + (OperandValue::Immediate(_), _) | + (OperandValue::Pair(..), _) if field.size == self.layout.size => { + assert_eq!(offset.bytes(), 0); + self.val + } + + // Extract a scalar component from a pair. + (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { + if offset.bytes() == 0 { + assert_eq!(field.size, a.value.size(bx.cx)); + OperandValue::Immediate(a_llval) + } else { + assert_eq!(offset, a.value.size(bx.cx) + .abi_align(b.value.align(bx.cx))); + assert_eq!(field.size, b.value.size(bx.cx)); + OperandValue::Immediate(b_llval) + } + } + + // `#[repr(simd)]` types are also immediate. + (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => { + OperandValue::Immediate( + bx.extract_element(llval, C_usize(bx.cx, i as u64))) + } + + _ => bug!("OperandRef::extract_field({:?}): not applicable", self) + }; + + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + match val { + OperandValue::Immediate(ref mut llval) => { + *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx)); + } + OperandValue::Pair(ref mut a, ref mut b) => { + *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true)); + *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true)); + } + OperandValue::Ref(..) 
=> bug!() + } + + OperandRef { + val, + layout: field + } + } +} + +impl OperandValue<'ll> { + pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + self.store_with_flags(bx, dest, MemFlags::empty()); + } + + pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + self.store_with_flags(bx, dest, MemFlags::VOLATILE); + } + + pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); + } + + pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); + } + + fn store_with_flags( + self, + bx: &Builder<'a, 'll, 'tcx>, + dest: PlaceRef<'ll, 'tcx>, + flags: MemFlags, + ) { + debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); + // Avoid generating stores of zero-sized values, because the only way to have a zero-sized + // value is through `undef`, and store itself is useless. + if dest.layout.is_zst() { + return; + } + match self { + OperandValue::Ref(r, None, source_align) => { + base::memcpy_ty(bx, dest.llval, r, dest.layout, + source_align.min(dest.align), flags) + } + OperandValue::Ref(_, Some(_), _) => { + bug!("cannot directly store unsized values"); + } + OperandValue::Immediate(s) => { + let val = base::from_immediate(bx, s); + bx.store_with_flags(val, dest.llval, dest.align, flags); + } + OperandValue::Pair(a, b) => { + for (i, &x) in [a, b].iter().enumerate() { + let llptr = bx.struct_gep(dest.llval, i as u64); + let val = base::from_immediate(bx, x); + bx.store_with_flags(val, llptr, dest.align, flags); + } + } + } + } + + pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef<'ll, 'tcx>) { + debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); + let flags = MemFlags::empty(); + + // `indirect_dest` must have `*mut T` type. 
We extract `T` out of it. + let unsized_ty = indirect_dest.layout.ty.builtin_deref(true) + .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest)).ty; + + let (llptr, llextra) = + if let OperandValue::Ref(llptr, Some(llextra), _) = self { + (llptr, llextra) + } else { + bug!("store_unsized called with a sized value") + }; + + // FIXME: choose an appropriate alignment, or use dynamic align somehow + let max_align = Align::from_bits(128, 128).unwrap(); + let min_align = Align::from_bits(8, 8).unwrap(); + + // Allocate an appropriate region on the stack, and copy the value into it + let (llsize, _) = glue::size_and_align_of_dst(&bx, unsized_ty, Some(llextra)); + let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align); + base::call_memcpy(&bx, lldst, llptr, llsize, min_align, flags); + + // Store the allocated region and the extra to the indirect place. + let indirect_operand = OperandValue::Pair(lldst, llextra); + indirect_operand.store(&bx, indirect_dest); + } +} + +impl FunctionCx<'a, 'll, 'tcx> { + fn maybe_codegen_consume_direct(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + place: &mir::Place<'tcx>) + -> Option> + { + debug!("maybe_codegen_consume_direct(place={:?})", place); + + // watch out for locals that do not have an + // alloca; they are handled somewhat differently + if let mir::Place::Local(index) = *place { + match self.locals[index] { + LocalRef::Operand(Some(o)) => { + return Some(o); + } + LocalRef::Operand(None) => { + bug!("use of {:?} before def", place); + } + LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => { + // use path below + } + } + } + + // Moves out of scalar and scalar pair fields are trivial. 
+ if let &mir::Place::Projection(ref proj) = place { + if let Some(o) = self.maybe_codegen_consume_direct(bx, &proj.base) { + match proj.elem { + mir::ProjectionElem::Field(ref f, _) => { + return Some(o.extract_field(bx, f.index())); + } + mir::ProjectionElem::Index(_) | + mir::ProjectionElem::ConstantIndex { .. } => { + // ZSTs don't require any actual memory access. + // FIXME(eddyb) deduplicate this with the identical + // checks in `codegen_consume` and `extract_field`. + let elem = o.layout.field(bx.cx, 0); + if elem.is_zst() { + return Some(OperandRef::new_zst(bx.cx, elem)); + } + } + _ => {} + } + } + } + + None + } + + pub fn codegen_consume(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + place: &mir::Place<'tcx>) + -> OperandRef<'ll, 'tcx> + { + debug!("codegen_consume(place={:?})", place); + + let ty = self.monomorphized_place_ty(place); + let layout = bx.cx.layout_of(ty); + + // ZSTs don't require any actual memory access. + if layout.is_zst() { + return OperandRef::new_zst(bx.cx, layout); + } + + if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { + return o; + } + + // for most places, to consume them we just load them + // out from their home + self.codegen_place(bx, place).load(bx) + } + + pub fn codegen_operand(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + operand: &mir::Operand<'tcx>) + -> OperandRef<'ll, 'tcx> + { + debug!("codegen_operand(operand={:?})", operand); + + match *operand { + mir::Operand::Copy(ref place) | + mir::Operand::Move(ref place) => { + self.codegen_consume(bx, place) + } + + mir::Operand::Constant(ref constant) => { + let ty = self.monomorphize(&constant.ty); + self.eval_mir_constant(bx, constant) + .and_then(|c| OperandRef::from_const(bx, c)) + .unwrap_or_else(|err| { + err.report_as_error( + bx.tcx().at(constant.span), + "could not evaluate constant operand", + ); + // Allow RalfJ to sleep soundly knowing that even refactorings that remove + // the above error (or silence it under some conditions) will not cause UB + 
let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + bx.call(fnname, &[], None); + // We've errored, so we don't have to produce working code. + let layout = bx.cx.layout_of(ty); + PlaceRef::new_sized( + C_undef(layout.llvm_type(bx.cx).ptr_to()), + layout, + layout.align, + ).load(bx) + }) + } + } + } +} diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_llvm/mir/place.rs new file mode 100644 index 000000000000..89d41dcc8e99 --- /dev/null +++ b/src/librustc_codegen_llvm/mir/place.rs @@ -0,0 +1,551 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{self, LLVMConstInBoundsGEP}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size}; +use rustc::mir; +use rustc::mir::tcx::PlaceTy; +use rustc_data_structures::indexed_vec::Idx; +use base; +use builder::Builder; +use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big}; +use consts; +use type_of::LayoutLlvmExt; +use type_::Type; +use value::Value; +use glue; +use mir::constant::const_alloc_to_llvm; + +use super::{FunctionCx, LocalRef}; +use super::operand::{OperandRef, OperandValue}; + +#[derive(Copy, Clone, Debug)] +pub struct PlaceRef<'ll, 'tcx> { + /// Pointer to the contents of the place + pub llval: &'ll Value, + + /// This place's extra data if it is unsized, or null + pub llextra: Option<&'ll Value>, + + /// Monomorphized type of this place, including variant information + pub layout: TyLayout<'tcx>, + + /// What alignment we know for this place + pub align: Align, +} + +impl PlaceRef<'ll, 'tcx> { + pub fn new_sized( + llval: &'ll Value, + layout: TyLayout<'tcx>, + align: Align, + ) -> PlaceRef<'ll, 'tcx> { 
+ assert!(!layout.is_unsized()); + PlaceRef { + llval, + llextra: None, + layout, + align + } + } + + pub fn from_const_alloc( + bx: &Builder<'a, 'll, 'tcx>, + layout: TyLayout<'tcx>, + alloc: &mir::interpret::Allocation, + offset: Size, + ) -> PlaceRef<'ll, 'tcx> { + let init = const_alloc_to_llvm(bx.cx, alloc); + let base_addr = consts::addr_of(bx.cx, init, layout.align, None); + + let llval = unsafe { LLVMConstInBoundsGEP( + consts::bitcast(base_addr, Type::i8p(bx.cx)), + &C_usize(bx.cx, offset.bytes()), + 1, + )}; + let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to()); + PlaceRef::new_sized(llval, layout, alloc.align) + } + + pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) + -> PlaceRef<'ll, 'tcx> { + debug!("alloca({:?}: {:?})", name, layout); + assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); + let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align); + Self::new_sized(tmp, layout, layout.align) + } + + /// Returns a place for an indirect reference to an unsized place. + pub fn alloca_unsized_indirect(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) + -> PlaceRef<'ll, 'tcx> { + debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); + assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); + let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty); + let ptr_layout = bx.cx.layout_of(ptr_ty); + Self::alloca(bx, ptr_layout, name) + } + + pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value { + if let layout::FieldPlacement::Array { count, .. 
} = self.layout.fields { + if self.layout.is_unsized() { + assert_eq!(count, 0); + self.llextra.unwrap() + } else { + C_usize(cx, count) + } + } else { + bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) + } + } + + pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> { + debug!("PlaceRef::load: {:?}", self); + + assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); + + if self.layout.is_zst() { + return OperandRef::new_zst(bx.cx, self.layout); + } + + let scalar_load_metadata = |load, scalar: &layout::Scalar| { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) => { + let range = scalar.valid_range_exclusive(bx.cx); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + }; + + let val = if let Some(llextra) = self.llextra { + OperandValue::Ref(self.llval, Some(llextra), self.align) + } else if self.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = bx.load(self.llval, self.align); + if let layout::Abi::Scalar(ref scalar) = self.layout.abi { + scalar_load_metadata(load, scalar); + } + load + }); + OperandValue::Immediate(base::to_immediate(bx, llval, self.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + let load = |i, scalar: &layout::Scalar| { + let llptr = bx.struct_gep(self.llval, i as u64); + let load = bx.load(llptr, self.align); + scalar_load_metadata(load, scalar); + if scalar.is_bool() { + bx.trunc(load, Type::i1(bx.cx)) + } else { + load + } + }; + OperandValue::Pair(load(0, a), load(1, b)) + } else { + OperandValue::Ref(self.llval, None, self.align) + }; + + 
OperandRef { val, layout: self.layout } + } + + /// Access a field, at a point when the value's case is known. + pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> { + let cx = bx.cx; + let field = self.layout.field(cx, ix); + let offset = self.layout.fields.offset(ix); + let align = self.align.min(self.layout.align).min(field.align); + + let simple = || { + // Unions and newtypes only use an offset of 0. + let llval = if offset.bytes() == 0 { + self.llval + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + // Offsets have to match either first or second field. + assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx))); + bx.struct_gep(self.llval, 1) + } else { + bx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + }; + PlaceRef { + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()), + llextra: if cx.type_has_metadata(field.ty) { + self.llextra + } else { + None + }, + layout: field, + align, + } + }; + + // Simple cases, which don't need DST adjustment: + // * no metadata available - just log the case + // * known alignment - sized types, [T], str or a foreign type + // * packed struct - there is no alignment padding + match field.ty.sty { + _ if self.llextra.is_none() => { + debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", + ix, self.llval); + return simple(); + } + _ if !field.is_unsized() => return simple(), + ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(), + ty::TyAdt(def, _) => { + if def.repr.packed() { + // FIXME(eddyb) generalize the adjustment when we + // start supporting packing to larger alignments. + assert_eq!(self.layout.align.abi(), 1); + return simple(); + } + } + _ => {} + } + + // We need to get the pointer manually now. + // We do this by casting to a *i8, then offsetting it by the appropriate amount. 
+ // We do this instead of, say, simply adjusting the pointer from the result of a GEP + // because the field may have an arbitrary alignment in the LLVM representation + // anyway. + // + // To demonstrate: + // struct Foo { + // x: u16, + // y: T + // } + // + // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that + // the `y` field has 16-bit alignment. + + let meta = self.llextra; + + let unaligned_offset = C_usize(cx, offset.bytes()); + + // Get the alignment of the field + let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); + + // Bump the unaligned offset up to the appropriate alignment using the + // following expression: + // + // (unaligned offset + (align - 1)) & -align + + // Calculate offset + let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64)); + let offset = bx.and(bx.add(unaligned_offset, align_sub_1), + bx.neg(unsized_align)); + + debug!("struct_field_ptr: DST field offset: {:?}", offset); + + // Cast and adjust pointer + let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx)); + let byte_ptr = bx.gep(byte_ptr, &[offset]); + + // Finally, cast back to the type expected + let ll_fty = field.llvm_type(cx); + debug!("struct_field_ptr: Field type is {:?}", ll_fty); + + PlaceRef { + llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()), + llextra: self.llextra, + layout: field, + align, + } + } + + /// Obtain the actual discriminant of a value. + pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value { + let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); + if self.layout.abi == layout::Abi::Uninhabited { + return C_undef(cast_to); + } + match self.layout.variants { + layout::Variants::Single { index } => { + let discr_val = self.layout.ty.ty_adt_def().map_or( + index as u128, + |def| def.discriminant_for_variant(bx.cx.tcx, index).val); + return C_uint_big(cast_to, discr_val); + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. 
} => {}, + } + + let discr = self.project_field(bx, 0); + let lldiscr = discr.load(bx).immediate(); + match self.layout.variants { + layout::Variants::Single { .. } => bug!(), + layout::Variants::Tagged { ref tag, .. } => { + let signed = match tag.value { + // We use `i1` for bytes that are always `0` or `1`, + // e.g. `#[repr(i8)] enum E { A, B }`, but we can't + // let LLVM interpret the `i1` as signed, because + // then `i1 1` (i.e. E::B) is effectively `i8 -1`. + layout::Int(_, signed) => !tag.is_bool() && signed, + _ => false + }; + bx.intcast(lldiscr, cast_to, signed) + } + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { + let niche_llty = discr.layout.immediate_llvm_type(bx.cx); + if niche_variants.start() == niche_variants.end() { + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_start == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_start) + }; + bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval), + C_uint(cast_to, *niche_variants.start() as u64), + C_uint(cast_to, dataful_variant as u64)) + } else { + // Rebase from niche values to discriminant values. + let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); + let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta)); + let lldiscr_max = C_uint(niche_llty, *niche_variants.end() as u64); + bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max), + bx.intcast(lldiscr, cast_to, false), + C_uint(cast_to, dataful_variant as u64)) + } + } + } + } + + /// Set the discriminant for a new value of the given case of the given + /// representation. 
+ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) { + if self.layout.for_variant(bx.cx, variant_index).abi == layout::Abi::Uninhabited { + return; + } + match self.layout.variants { + layout::Variants::Single { index } => { + assert_eq!(index, variant_index); + } + layout::Variants::Tagged { .. } => { + let ptr = self.project_field(bx, 0); + let to = self.layout.ty.ty_adt_def().unwrap() + .discriminant_for_variant(bx.tcx(), variant_index) + .val; + bx.store( + C_uint_big(ptr.layout.llvm_type(bx.cx), to), + ptr.llval, + ptr.align); + } + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { + if variant_index != dataful_variant { + if bx.sess().target.target.arch == "arm" || + bx.sess().target.target.arch == "aarch64" { + // Issue #34427: As workaround for LLVM bug on ARM, + // use memset of 0 before assigning niche value. + let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to()); + let fill_byte = C_u8(bx.cx, 0); + let (size, align) = self.layout.size_and_align(); + let size = C_usize(bx.cx, size.bytes()); + let align = C_u32(bx.cx, align.abi() as u32); + base::call_memset(bx, llptr, fill_byte, size, align, false); + } + + let niche = self.project_field(bx, 0); + let niche_llty = niche.layout.immediate_llvm_type(bx.cx); + let niche_value = ((variant_index - *niche_variants.start()) as u128) + .wrapping_add(niche_start); + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_value == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. 
+ C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_value) + }; + OperandValue::Immediate(niche_llval).store(bx, niche); + } + } + } + } + + pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value) + -> PlaceRef<'ll, 'tcx> { + PlaceRef { + llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), + llextra: None, + layout: self.layout.field(bx.cx, 0), + align: self.align + } + } + + pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) + -> PlaceRef<'ll, 'tcx> { + let mut downcast = *self; + downcast.layout = self.layout.for_variant(bx.cx, variant_index); + + // Cast to the appropriate variant struct type. + let variant_ty = downcast.layout.llvm_type(bx.cx); + downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to()); + + downcast + } + + pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) { + bx.lifetime_start(self.llval, self.layout.size); + } + + pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) { + bx.lifetime_end(self.llval, self.layout.size); + } +} + +impl FunctionCx<'a, 'll, 'tcx> { + pub fn codegen_place(&mut self, + bx: &Builder<'a, 'll, 'tcx>, + place: &mir::Place<'tcx>) + -> PlaceRef<'ll, 'tcx> { + debug!("codegen_place(place={:?})", place); + + let cx = bx.cx; + let tcx = cx.tcx; + + if let mir::Place::Local(index) = *place { + match self.locals[index] { + LocalRef::Place(place) => { + return place; + } + LocalRef::UnsizedPlace(place) => { + return place.load(bx).deref(&cx); + } + LocalRef::Operand(..) 
=> { + bug!("using operand local {:?} as place", place); + } + } + } + + let result = match *place { + mir::Place::Local(_) => bug!(), // handled above + mir::Place::Promoted(box (index, ty)) => { + let param_env = ty::ParamEnv::reveal_all(); + let cid = mir::interpret::GlobalId { + instance: self.instance, + promoted: Some(index), + }; + let layout = cx.layout_of(self.monomorphize(&ty)); + match bx.tcx().const_eval(param_env.and(cid)) { + Ok(val) => match val.val { + mir::interpret::ConstValue::ByRef(alloc, offset) => { + PlaceRef::from_const_alloc(bx, layout, alloc, offset) + } + _ => bug!("promoteds should have an allocation: {:?}", val), + }, + Err(_) => { + // this is unreachable as long as runtime + // and compile-time agree on values + // With floats that won't always be true + // so we generate an abort + let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + bx.call(fnname, &[], None); + let llval = C_undef(layout.llvm_type(bx.cx).ptr_to()); + PlaceRef::new_sized(llval, layout, layout.align) + } + } + } + mir::Place::Static(box mir::Static { def_id, ty }) => { + let layout = cx.layout_of(self.monomorphize(&ty)); + PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align) + }, + mir::Place::Projection(box mir::Projection { + ref base, + elem: mir::ProjectionElem::Deref + }) => { + // Load the pointer from its location. 
+ self.codegen_consume(bx, base).deref(bx.cx) + } + mir::Place::Projection(ref projection) => { + let cg_base = self.codegen_place(bx, &projection.base); + + match projection.elem { + mir::ProjectionElem::Deref => bug!(), + mir::ProjectionElem::Field(ref field, _) => { + cg_base.project_field(bx, field.index()) + } + mir::ProjectionElem::Index(index) => { + let index = &mir::Operand::Copy(mir::Place::Local(index)); + let index = self.codegen_operand(bx, index); + let llindex = index.immediate(); + cg_base.project_index(bx, llindex) + } + mir::ProjectionElem::ConstantIndex { offset, + from_end: false, + min_length: _ } => { + let lloffset = C_usize(bx.cx, offset as u64); + cg_base.project_index(bx, lloffset) + } + mir::ProjectionElem::ConstantIndex { offset, + from_end: true, + min_length: _ } => { + let lloffset = C_usize(bx.cx, offset as u64); + let lllen = cg_base.len(bx.cx); + let llindex = bx.sub(lllen, lloffset); + cg_base.project_index(bx, llindex) + } + mir::ProjectionElem::Subslice { from, to } => { + let mut subslice = cg_base.project_index(bx, + C_usize(bx.cx, from as u64)); + let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } + .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); + subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty)); + + if subslice.layout.is_unsized() { + subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), + C_usize(bx.cx, (from as u64) + (to as u64)))); + } + + // Cast the place pointer type to the new + // array or slice type (*[%_; new_len]). 
+ subslice.llval = bx.pointercast(subslice.llval, + subslice.layout.llvm_type(bx.cx).ptr_to()); + + subslice + } + mir::ProjectionElem::Downcast(_, v) => { + cg_base.project_downcast(bx, v) + } + } + } + }; + debug!("codegen_place(place={:?}) => {:?}", place, result); + result + } + + pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { + let tcx = self.cx.tcx; + let place_ty = place.ty(self.mir, tcx); + self.monomorphize(&place_ty.to_ty(tcx)) + } +} diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_codegen_llvm/mir/rvalue.rs similarity index 81% rename from src/librustc_trans/mir/rvalue.rs rename to src/librustc_codegen_llvm/mir/rvalue.rs index d1bc4fe90014..84427d8b40f7 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_codegen_llvm/mir/rvalue.rs @@ -8,21 +8,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, ValueRef}; +use llvm; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::{self, LayoutOf}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; -use rustc_const_math::MAX_F32_PLUS_HALF_ULP; use std::{u128, i128}; use base; use builder::Builder; use callee; use common::{self, val_ty}; -use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_uint_big}; +use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big}; use consts; use monomorphize; use type_::Type; @@ -30,26 +29,25 @@ use type_of::LayoutLlvmExt; use value::Value; use super::{FunctionCx, LocalRef}; -use super::constant::const_scalar_checked_binop; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl<'a, 'tcx> FunctionCx<'a, 'tcx> { - pub fn trans_rvalue(&mut self, - bx: Builder<'a, 'tcx>, - dest: PlaceRef<'tcx>, +impl FunctionCx<'a, 'll, 'tcx> { + pub fn codegen_rvalue(&mut self, + bx: 
Builder<'a, 'll, 'tcx>, + dest: PlaceRef<'ll, 'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'tcx> + -> Builder<'a, 'll, 'tcx> { - debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", - Value(dest.llval), rvalue); + debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", + dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let tr_operand = self.trans_operand(&bx, operand); - // FIXME: consider not copying constants through stack. (fixable by translating + let cg_operand = self.codegen_operand(&bx, operand); + // FIXME: consider not copying constants through stack. (fixable by codegenning // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - tr_operand.val.store(&bx, dest); + cg_operand.val.store(&bx, dest); bx } @@ -59,16 +57,16 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { if dest.layout.is_llvm_scalar_pair() { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. - let (bx, temp) = self.trans_rvalue_operand(bx, rvalue); + let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); temp.val.store(&bx, dest); return bx; } // Unsize of a nontrivial struct. I would prefer for - // this to be eliminated by MIR translation, but + // this to be eliminated by MIR building, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. - let operand = self.trans_operand(&bx, source); + let operand = self.codegen_operand(&bx, source); match operand.val { OperandValue::Pair(..) | OperandValue::Immediate(_) => { @@ -78,23 +76,26 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { // `coerce_unsized_into` use extractvalue to // index into the struct, and this case isn't // important enough for it. 
- debug!("trans_rvalue: creating ugly alloca"); + debug!("codegen_rvalue: creating ugly alloca"); let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp"); scratch.storage_live(&bx); operand.val.store(&bx, scratch); base::coerce_unsized_into(&bx, scratch, dest); scratch.storage_dead(&bx); } - OperandValue::Ref(llref, align) => { + OperandValue::Ref(llref, None, align) => { let source = PlaceRef::new_sized(llref, operand.layout, align); base::coerce_unsized_into(&bx, source, dest); } + OperandValue::Ref(_, Some(_), _) => { + bug!("unsized coercion on an unsized rvalue") + } } bx } mir::Rvalue::Repeat(ref elem, count) => { - let tr_elem = self.trans_operand(&bx, elem); + let cg_elem = self.codegen_operand(&bx, elem); // Do not generate the loop for zero-sized elements or empty arrays. if dest.layout.is_zst() { @@ -103,7 +104,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval; - if let OperandValue::Immediate(v) = tr_elem.val { + if let OperandValue::Immediate(v) = cg_elem.val { let align = C_i32(bx.cx, dest.align.abi() as i32); let size = C_usize(bx.cx, dest.layout.size.bytes()); @@ -122,7 +123,6 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } } - let count = count.as_u64(); let count = C_usize(bx.cx, count); let end = dest.project_index(&bx, count).llval; @@ -136,8 +136,8 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { let keep_going = header_bx.icmp(llvm::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - tr_elem.val.store(&body_bx, - PlaceRef::new_sized(current, tr_elem.layout, dest.align)); + cg_elem.val.store(&body_bx, + PlaceRef::new_sized(current, cg_elem.layout, dest.align)); let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]); body_bx.br(header_bx.llbb()); @@ -149,7 +149,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, 
_, active_field_index) => { - dest.trans_set_discr(&bx, variant_index); + dest.codegen_set_discr(&bx, variant_index); if adt_def.is_enum() { (dest.project_downcast(&bx, variant_index), active_field_index) } else { @@ -159,7 +159,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { _ => (dest, None) }; for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(&bx, operand); + let op = self.codegen_operand(&bx, operand); // Do not generate stores and GEPis for zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); @@ -171,23 +171,43 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { _ => { assert!(self.rvalue_creates_operand(rvalue)); - let (bx, temp) = self.trans_rvalue_operand(bx, rvalue); + let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); temp.val.store(&bx, dest); bx } } } - pub fn trans_rvalue_operand(&mut self, - bx: Builder<'a, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 'tcx>, OperandRef<'tcx>) + pub fn codegen_rvalue_unsized(&mut self, + bx: Builder<'a, 'll, 'tcx>, + indirect_dest: PlaceRef<'ll, 'tcx>, + rvalue: &mir::Rvalue<'tcx>) + -> Builder<'a, 'll, 'tcx> { - assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); + debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", + indirect_dest.llval, rvalue); + + match *rvalue { + mir::Rvalue::Use(ref operand) => { + let cg_operand = self.codegen_operand(&bx, operand); + cg_operand.val.store_unsized(&bx, indirect_dest); + bx + } + + _ => bug!("unsized assignment other than Rvalue::Use"), + } + } + + pub fn codegen_rvalue_operand(&mut self, + bx: Builder<'a, 'll, 'tcx>, + rvalue: &mir::Rvalue<'tcx>) + -> (Builder<'a, 'll, 'tcx>, OperandRef<'ll, 'tcx>) + { + assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { - let operand = self.trans_operand(&bx, source); + let operand = self.codegen_operand(&bx, 
source); debug!("cast operand is {:?}", operand); let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty)); @@ -195,6 +215,10 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::TyFnDef(def_id, substs) => { + if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") { + bug!("reifying a fn ptr that requires \ + const arguments"); + } OperandValue::Immediate( callee::resolve_and_get_fn(bx.cx, def_id, substs)) } @@ -231,7 +255,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, - cast.scalar_pair_element_llvm_type(bx.cx, 0)); + cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -241,7 +265,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { OperandValue::Pair(lldata, llextra) } OperandValue::Ref(..) => { - bug!("by-ref operand {:?} in trans_rvalue_operand", + bug!("by-ref operand {:?} in codegen_rvalue_operand", operand); } } @@ -250,7 +274,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { if let OperandValue::Pair(data_ptr, meta) = operand.val { if cast.is_llvm_scalar_pair() { let data_cast = bx.pointercast(data_ptr, - cast.scalar_pair_element_llvm_type(bx.cx, 0)); + cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and @@ -265,19 +289,47 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } mir::CastKind::Misc => { assert!(cast.is_llvm_immediate()); + let ll_t_out = cast.immediate_llvm_type(bx.cx); + if operand.layout.abi == layout::Abi::Uninhabited { + return (bx, OperandRef { + val: OperandValue::Immediate(C_undef(ll_t_out)), + layout: cast, + }); + } let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); let ll_t_in = 
operand.layout.immediate_llvm_type(bx.cx); - let ll_t_out = cast.immediate_llvm_type(bx.cx); + match operand.layout.variants { + layout::Variants::Single { index } => { + if let Some(def) = operand.layout.ty.ty_adt_def() { + let discr_val = def + .discriminant_for_variant(bx.cx.tcx, index) + .val; + let discr = C_uint_big(ll_t_out, discr_val); + return (bx, OperandRef { + val: OperandValue::Immediate(discr), + layout: cast, + }); + } + } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => {}, + } let llval = operand.immediate(); let mut signed = false; if let layout::Abi::Scalar(ref scalar) = operand.layout.abi { if let layout::Int(_, s) = scalar.value { - signed = s; + // We use `i1` for bytes that are always `0` or `1`, + // e.g. `#[repr(i8)] enum E { A, B }`, but we can't + // let LLVM interpret the `i1` as signed, because + // then `i1 1` (i.e. E::B) is effectively `i8 -1`. + signed = !scalar.is_bool() && s; - if scalar.valid_range.end > scalar.valid_range.start { + let er = scalar.valid_range_exclusive(bx.cx); + if er.end != er.start && + scalar.valid_range.end() > scalar.valid_range.start() { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. @@ -285,7 +337,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { base::call_assume(&bx, bx.icmp( llvm::IntULE, llval, - C_uint_big(ll_t_in, scalar.valid_range.end) + C_uint_big(ll_t_in, *scalar.valid_range.end()) )); } } @@ -335,16 +387,16 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } mir::Rvalue::Ref(_, bk, ref place) => { - let tr_place = self.trans_place(&bx, place); + let cg_place = self.codegen_place(&bx, place); - let ty = tr_place.layout.ty; + let ty = cg_place.layout.ty; // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. 
let val = if !bx.cx.type_has_metadata(ty) { - OperandValue::Immediate(tr_place.llval) + OperandValue::Immediate(cg_place.llval) } else { - OperandValue::Pair(tr_place.llval, tr_place.llextra) + OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) }; (bx, OperandRef { val, @@ -365,12 +417,12 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.trans_operand(&bx, lhs); - let rhs = self.trans_operand(&bx, rhs); + let lhs = self.codegen_operand(&bx, lhs); + let rhs = self.codegen_operand(&bx, rhs); let llresult = match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.trans_fat_ptr_binop(&bx, op, + self.codegen_fat_ptr_binop(&bx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, lhs.layout.ty) @@ -378,7 +430,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => { - self.trans_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty) + self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty) } _ => bug!() @@ -391,13 +443,13 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { (bx, operand) } mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.trans_operand(&bx, lhs); - let rhs = self.trans_operand(&bx, rhs); - let result = self.trans_scalar_checked_binop(&bx, op, + let lhs = self.codegen_operand(&bx, lhs); + let rhs = self.codegen_operand(&bx, rhs); + let result = self.codegen_scalar_checked_binop(&bx, op, lhs.immediate(), rhs.immediate(), lhs.layout.ty); let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty); - let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool], false); + let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, layout: bx.cx.layout_of(operand_ty) @@ -407,7 +459,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = 
self.trans_operand(&bx, operand); + let operand = self.codegen_operand(&bx, operand); let lloperand = operand.immediate(); let is_float = operand.layout.ty.is_fp(); let llval = match op { @@ -426,8 +478,8 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { mir::Rvalue::Discriminant(ref place) => { let discr_ty = rvalue.ty(&*self.mir, bx.tcx()); - let discr = self.trans_place(&bx, place) - .trans_get_discr(&bx, discr_ty); + let discr = self.codegen_place(&bx, place) + .codegen_get_discr(&bx, discr_ty); (bx, OperandRef { val: OperandValue::Immediate(discr), layout: self.cx.layout_of(discr_ty) @@ -470,7 +522,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { (bx, operand) } mir::Rvalue::Use(ref operand) => { - let operand = self.trans_operand(&bx, operand); + let operand = self.codegen_operand(&bx, operand); (bx, operand) } mir::Rvalue::Repeat(..) | @@ -484,35 +536,37 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } } - fn evaluate_array_len(&mut self, - bx: &Builder<'a, 'tcx>, - place: &mir::Place<'tcx>) -> ValueRef - { + fn evaluate_array_len( + &mut self, + bx: &Builder<'a, 'll, 'tcx>, + place: &mir::Place<'tcx>, + ) -> &'ll Value { // ZST are passed as operands and require special handling - // because trans_place() panics if Local is operand. + // because codegen_place() panics if Local is operand. 
if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::TyArray(_, n) = op.layout.ty.sty { - let n = n.val.to_const_int().unwrap().to_u64().unwrap(); + let n = n.unwrap_usize(bx.cx.tcx); return common::C_usize(bx.cx, n); } } } // use common size calculation for non zero-sized types - let tr_value = self.trans_place(&bx, place); - return tr_value.len(bx.cx); + let cg_value = self.codegen_place(&bx, place); + return cg_value.len(bx.cx); } - pub fn trans_scalar_binop(&mut self, - bx: &Builder<'a, 'tcx>, - op: mir::BinOp, - lhs: ValueRef, - rhs: ValueRef, - input_ty: Ty<'tcx>) -> ValueRef { + pub fn codegen_scalar_binop( + &mut self, + bx: &Builder<'a, 'll, 'tcx>, + op: mir::BinOp, + lhs: &'ll Value, + rhs: &'ll Value, + input_ty: Ty<'tcx>, + ) -> &'ll Value { let is_float = input_ty.is_fp(); let is_signed = input_ty.is_signed(); let is_nil = input_ty.is_nil(); - let is_bool = input_ty.is_bool(); match op { mir::BinOp::Add => if is_float { bx.fadd(lhs, rhs) @@ -562,15 +616,6 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { lhs, rhs ) } else { - let (lhs, rhs) = if is_bool { - // FIXME(#36856) -- extend the bools into `i8` because - // LLVM's i1 comparisons are broken. 
- (bx.zext(lhs, Type::i8(bx.cx)), - bx.zext(rhs, Type::i8(bx.cx))) - } else { - (lhs, rhs) - }; - bx.icmp( base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs @@ -579,15 +624,16 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } } - pub fn trans_fat_ptr_binop(&mut self, - bx: &Builder<'a, 'tcx>, - op: mir::BinOp, - lhs_addr: ValueRef, - lhs_extra: ValueRef, - rhs_addr: ValueRef, - rhs_extra: ValueRef, - _input_ty: Ty<'tcx>) - -> ValueRef { + pub fn codegen_fat_ptr_binop( + &mut self, + bx: &Builder<'a, 'll, 'tcx>, + op: mir::BinOp, + lhs_addr: &'ll Value, + lhs_extra: &'ll Value, + rhs_addr: &'ll Value, + rhs_extra: &'ll Value, + _input_ty: Ty<'tcx>, + ) -> &'ll Value { match op { mir::BinOp::Eq => { bx.and( @@ -626,29 +672,21 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { } } - pub fn trans_scalar_checked_binop(&mut self, - bx: &Builder<'a, 'tcx>, + pub fn codegen_scalar_checked_binop(&mut self, + bx: &Builder<'a, 'll, 'tcx>, op: mir::BinOp, - lhs: ValueRef, - rhs: ValueRef, - input_ty: Ty<'tcx>) -> OperandValue { + lhs: &'ll Value, + rhs: &'ll Value, + input_ty: Ty<'tcx>) -> OperandValue<'ll> { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. if !bx.cx.check_overflow { - let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty); + let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); return OperandValue::Pair(val, C_bool(bx.cx, false)); } - // First try performing the operation on constants, which - // will only succeed if both operands are constant. - // This is necessary to determine when an overflow Assert - // will always panic at runtime, and produce a warning. 
- if let Some((val, of)) = const_scalar_checked_binop(bx.tcx(), op, lhs, rhs, input_ty) { - return OperandValue::Pair(val, C_bool(bx.cx, of)); - } - let (val, of) = match op { // These are checked using intrinsics mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => { @@ -671,7 +709,7 @@ impl<'a, 'tcx> FunctionCx<'a, 'tcx> { let outer_bits = bx.and(rhs, invert_mask); let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); - let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty); + let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) } @@ -712,7 +750,7 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef { +fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{TyInt, TyUint}; @@ -720,18 +758,8 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef { let tcx = bx.tcx(); let new_sty = match ty.sty { - TyInt(Isize) => match &tcx.sess.target.target.target_pointer_width[..] { - "16" => TyInt(I16), - "32" => TyInt(I32), - "64" => TyInt(I64), - _ => panic!("unsupported target word size") - }, - TyUint(Usize) => match &tcx.sess.target.target.target_pointer_width[..] 
{ - "16" => TyUint(U16), - "32" => TyUint(U32), - "64" => TyUint(U64), - _ => panic!("unsupported target word size") - }, + TyInt(Isize) => TyInt(tcx.sess.target.isize_ty), + TyUint(Usize) => TyUint(tcx.sess.target.usize_ty), ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(), _ => panic!("tried to get overflow intrinsic for op applied to non-int type") }; @@ -787,11 +815,11 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef { bx.cx.get_intrinsic(&name) } -fn cast_int_to_float(bx: &Builder, +fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, signed: bool, - x: ValueRef, - int_ty: Type, - float_ty: Type) -> ValueRef { + x: &'ll Value, + int_ty: &'ll Type, + float_ty: &'ll Type) -> &'ll Value { // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. @@ -799,6 +827,10 @@ fn cast_int_to_float(bx: &Builder, if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. 
+ use rustc_apfloat::ieee::Single; + use rustc_apfloat::Float; + const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) + << (Single::MAX_EXP - Single::PRECISION as i16); let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bx.icmp(llvm::IntUGE, x, max); let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); @@ -813,11 +845,11 @@ fn cast_int_to_float(bx: &Builder, } } -fn cast_float_to_int(bx: &Builder, +fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, signed: bool, - x: ValueRef, - float_ty: Type, - int_ty: Type) -> ValueRef { + x: &'ll Value, + float_ty: &'ll Type, + int_ty: &'ll Type) -> &'ll Value { let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { @@ -840,20 +872,20 @@ fn cast_float_to_int(bx: &Builder, // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits. // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two. // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly - // representable. Note that this only works if float_ty's exponent range is sufficently large. + // representable. Note that this only works if float_ty's exponent range is sufficiently large. // f16 or 256 bit integers would break this property. Right now the smallest float type is f32 // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127. // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. 
- fn compute_clamp_bounds(signed: bool, int_ty: Type) -> (u128, u128) { + fn compute_clamp_bounds(signed: bool, int_ty: &Type) -> (u128, u128) { let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero); assert_eq!(rounded_min.status, Status::OK); let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero); assert!(rounded_max.value.is_finite()); (rounded_min.value.to_bits(), rounded_max.value.to_bits()) } - fn int_max(signed: bool, int_ty: Type) -> u128 { + fn int_max(signed: bool, int_ty: &Type) -> u128 { let shift_amount = 128 - int_ty.int_width(); if signed { i128::MAX as u128 >> shift_amount @@ -861,7 +893,7 @@ fn cast_float_to_int(bx: &Builder, u128::MAX >> shift_amount } } - fn int_min(signed: bool, int_ty: Type) -> i128 { + fn int_min(signed: bool, int_ty: &Type) -> i128 { if signed { i128::MIN >> (128 - int_ty.int_width()) } else { diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_llvm/mir/statement.rs new file mode 100644 index 000000000000..dd62a12553ca --- /dev/null +++ b/src/librustc_codegen_llvm/mir/statement.rs @@ -0,0 +1,99 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::mir; + +use asm; +use builder::Builder; + +use super::FunctionCx; +use super::LocalRef; + +impl FunctionCx<'a, 'll, 'tcx> { + pub fn codegen_statement(&mut self, + bx: Builder<'a, 'll, 'tcx>, + statement: &mir::Statement<'tcx>) + -> Builder<'a, 'll, 'tcx> { + debug!("codegen_statement(statement={:?})", statement); + + self.set_debug_loc(&bx, statement.source_info); + match statement.kind { + mir::StatementKind::Assign(ref place, ref rvalue) => { + if let mir::Place::Local(index) = *place { + match self.locals[index] { + LocalRef::Place(cg_dest) => { + self.codegen_rvalue(bx, cg_dest, rvalue) + } + LocalRef::UnsizedPlace(cg_indirect_dest) => { + self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue) + } + LocalRef::Operand(None) => { + let (bx, operand) = self.codegen_rvalue_operand(bx, rvalue); + self.locals[index] = LocalRef::Operand(Some(operand)); + bx + } + LocalRef::Operand(Some(op)) => { + if !op.layout.is_zst() { + span_bug!(statement.source_info.span, + "operand {:?} already assigned", + rvalue); + } + + // If the type is zero-sized, it's already been set here, + // but we still need to make sure we codegen the operand + self.codegen_rvalue_operand(bx, rvalue).0 + } + } + } else { + let cg_dest = self.codegen_place(&bx, place); + self.codegen_rvalue(bx, cg_dest, rvalue) + } + } + mir::StatementKind::SetDiscriminant{ref place, variant_index} => { + self.codegen_place(&bx, place) + .codegen_set_discr(&bx, variant_index); + bx + } + mir::StatementKind::StorageLive(local) => { + if let LocalRef::Place(cg_place) = self.locals[local] { + cg_place.storage_live(&bx); + } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { + cg_indirect_place.storage_live(&bx); + } + bx + } + mir::StatementKind::StorageDead(local) => { + if let LocalRef::Place(cg_place) = self.locals[local] { + cg_place.storage_dead(&bx); + } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { + cg_indirect_place.storage_dead(&bx); + 
} + bx + } + mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { + let outputs = outputs.iter().map(|output| { + self.codegen_place(&bx, output) + }).collect(); + + let input_vals = inputs.iter().map(|input| { + self.codegen_operand(&bx, input).immediate() + }).collect(); + + asm::codegen_inline_asm(&bx, asm, outputs, input_vals); + bx + } + mir::StatementKind::ReadForMatch(_) | + mir::StatementKind::EndRegion(_) | + mir::StatementKind::Validate(..) | + mir::StatementKind::UserAssertTy(..) | + mir::StatementKind::Nop => bx, + } + } +} diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs new file mode 100644 index 000000000000..7f25911abec3 --- /dev/null +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -0,0 +1,188 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Walks the crate looking for items/impl-items/trait-items that have +//! either a `rustc_symbol_name` or `rustc_item_path` attribute and +//! generates an error giving, respectively, the symbol name or +//! item-path. This is used for unit testing the code that generates +//! paths etc in all kinds of annoying scenarios. 
+ +use asm; +use attributes; +use base; +use consts; +use context::CodegenCx; +use declare; +use llvm; +use monomorphize::Instance; +use type_of::LayoutLlvmExt; +use rustc::hir; +use rustc::hir::def::Def; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc::ty::TypeFoldable; +use rustc::ty::layout::LayoutOf; +use std::fmt; + +pub use rustc::mir::mono::MonoItem; + +pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; + +pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { + fn define(&self, cx: &CodegenCx<'a, 'tcx>) { + debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(cx.tcx), + self.to_raw_string(), + cx.codegen_unit.name()); + + match *self.as_mono_item() { + MonoItem::Static(def_id) => { + let tcx = cx.tcx; + let is_mutable = match tcx.describe_def(def_id) { + Some(Def::Static(_, is_mutable)) => is_mutable, + Some(other) => { + bug!("Expected Def::Static, found {:?}", other) + } + None => { + bug!("Expected Def::Static for {:?}, found nothing", def_id) + } + }; + consts::codegen_static(&cx, def_id, is_mutable); + } + MonoItem::GlobalAsm(node_id) => { + let item = cx.tcx.hir.expect_item(node_id); + if let hir::ItemKind::GlobalAsm(ref ga) = item.node { + asm::codegen_global_asm(cx, ga); + } else { + span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") + } + } + MonoItem::Fn(instance) => { + base::codegen_instance(&cx, instance); + } + } + + debug!("END IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(cx.tcx), + self.to_raw_string(), + cx.codegen_unit.name()); + } + + fn predefine(&self, + cx: &CodegenCx<'a, 'tcx>, + linkage: Linkage, + visibility: Visibility) { + debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", + self.to_string(cx.tcx), + self.to_raw_string(), + cx.codegen_unit.name()); + + let symbol_name = self.symbol_name(cx.tcx).as_str(); + + debug!("symbol {}", &symbol_name); + + match *self.as_mono_item() { + 
MonoItem::Static(def_id) => { + predefine_static(cx, def_id, linkage, visibility, &symbol_name); + } + MonoItem::Fn(instance) => { + predefine_fn(cx, instance, linkage, visibility, &symbol_name); + } + MonoItem::GlobalAsm(..) => {} + } + + debug!("END PREDEFINING '{} ({})' in cgu {}", + self.to_string(cx.tcx), + self.to_raw_string(), + cx.codegen_unit.name()); + } + + fn to_raw_string(&self) -> String { + match *self.as_mono_item() { + MonoItem::Fn(instance) => { + format!("Fn({:?}, {})", + instance.def, + instance.substs.as_ptr() as usize) + } + MonoItem::Static(id) => { + format!("Static({:?})", id) + } + MonoItem::GlobalAsm(id) => { + format!("GlobalAsm({:?})", id) + } + } + } +} + +impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} + +fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let instance = Instance::mono(cx.tcx, def_id); + let ty = instance.ty(cx.tcx); + let llty = cx.layout_of(ty).llvm_type(cx); + + let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| { + cx.sess().span_fatal(cx.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); + + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); + } + + cx.instances.borrow_mut().insert(instance, g); +} + +fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let mono_ty = instance.ty(cx.tcx); + let attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); + let lldecl = declare::declare_fn(cx, symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == 
Linkage::WeakODR { + llvm::SetUniqueComdat(cx.llmod, lldecl); + } + + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal && linkage != Linkage::Private && + cx.tcx.is_compiler_builtins(LOCAL_CRATE) { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + } + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); + } + } + + debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); + if instance.def.is_inline(cx.tcx) { + attributes::inline(lldecl, attributes::InlineAttr::Hint); + } + attributes::from_fn_attrs(cx, lldecl, Some(instance.def.def_id())); + + cx.instances.borrow_mut().insert(instance, lldecl); +} diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs new file mode 100644 index 000000000000..51a233d79162 --- /dev/null +++ b/src/librustc_codegen_llvm/type_.rs @@ -0,0 +1,316 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(non_upper_case_globals)] + +pub use llvm::Type; + +use llvm; +use llvm::{Bool, False, True, TypeKind}; + +use context::CodegenCx; + +use syntax::ast; +use rustc::ty::layout::{self, Align, Size}; +use rustc_data_structures::small_c_str::SmallCStr; + +use std::fmt; + +use libc::c_uint; + +impl PartialEq for Type { + fn eq(&self, other: &Self) -> bool { + self as *const _ == other as *const _ + } +} + +impl fmt::Debug for Type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&llvm::build_string(|s| unsafe { + llvm::LLVMRustWriteTypeToString(self, s); + }).expect("non-UTF8 type description from LLVM")) + } +} + +impl Type { + pub fn void(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMVoidTypeInContext(cx.llcx) + } + } + + pub fn metadata(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMRustMetadataTypeInContext(cx.llcx) + } + } + + pub fn i1(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMInt1TypeInContext(cx.llcx) + } + } + + pub fn i8(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMInt8TypeInContext(cx.llcx) + } + } + + pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + unsafe { + llvm::LLVMInt8TypeInContext(llcx) + } + } + + pub fn i16(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMInt16TypeInContext(cx.llcx) + } + } + + pub fn i32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMInt32TypeInContext(cx.llcx) + } + } + + pub fn i64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMInt64TypeInContext(cx.llcx) + } + } + + pub fn i128(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMIntTypeInContext(cx.llcx, 128) + } + } + + // Creates an integer type with the given number of bits, e.g. i24 + pub fn ix(cx: &CodegenCx<'ll, '_>, num_bits: u64) -> &'ll Type { + unsafe { + llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) + } + } + + // Creates an integer type with the given number of bits, e.g. 
i24 + pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { + unsafe { + llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + } + } + + pub fn f32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMFloatTypeInContext(cx.llcx) + } + } + + pub fn f64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMDoubleTypeInContext(cx.llcx) + } + } + + pub fn bool(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + Type::i8(cx) + } + + pub fn char(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + Type::i32(cx) + } + + pub fn i8p(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + Type::i8(cx).ptr_to() + } + + pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { + Type::i8_llcx(llcx).ptr_to() + } + + pub fn isize(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + cx.isize_ty + } + + pub fn c_int(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + match &cx.tcx.sess.target.target.target_c_int_width[..] { + "16" => Type::i16(cx), + "32" => Type::i32(cx), + "64" => Type::i64(cx), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + + pub fn int_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::IntTy) -> &'ll Type { + match t { + ast::IntTy::Isize => cx.isize_ty, + ast::IntTy::I8 => Type::i8(cx), + ast::IntTy::I16 => Type::i16(cx), + ast::IntTy::I32 => Type::i32(cx), + ast::IntTy::I64 => Type::i64(cx), + ast::IntTy::I128 => Type::i128(cx), + } + } + + pub fn uint_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::UintTy) -> &'ll Type { + match t { + ast::UintTy::Usize => cx.isize_ty, + ast::UintTy::U8 => Type::i8(cx), + ast::UintTy::U16 => Type::i16(cx), + ast::UintTy::U32 => Type::i32(cx), + ast::UintTy::U64 => Type::i64(cx), + ast::UintTy::U128 => Type::i128(cx), + } + } + + pub fn float_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::FloatTy) -> &'ll Type { + match t { + ast::FloatTy::F32 => Type::f32(cx), + ast::FloatTy::F64 => Type::f64(cx), + } + } + + pub fn func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + unsafe { + llvm::LLVMFunctionType(ret, args.as_ptr(), + args.len() as c_uint, False) 
+ } + } + + pub fn variadic_func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + unsafe { + llvm::LLVMFunctionType(ret, args.as_ptr(), + args.len() as c_uint, True) + } + } + + pub fn struct_(cx: &CodegenCx<'ll, '_>, els: &[&'ll Type], packed: bool) -> &'ll Type { + unsafe { + llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), + els.len() as c_uint, + packed as Bool) + } + } + + pub fn named_struct(cx: &CodegenCx<'ll, '_>, name: &str) -> &'ll Type { + let name = SmallCStr::new(name); + unsafe { + llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()) + } + } + + + pub fn array(ty: &Type, len: u64) -> &Type { + unsafe { + llvm::LLVMRustArrayType(ty, len) + } + } + + pub fn vector(ty: &Type, len: u64) -> &Type { + unsafe { + llvm::LLVMVectorType(ty, len as c_uint) + } + } + + pub fn kind(&self) -> TypeKind { + unsafe { + llvm::LLVMRustGetTypeKind(self) + } + } + + pub fn set_struct_body(&'ll self, els: &[&'ll Type], packed: bool) { + unsafe { + llvm::LLVMStructSetBody(self, els.as_ptr(), + els.len() as c_uint, packed as Bool) + } + } + + pub fn ptr_to(&self) -> &Type { + unsafe { + llvm::LLVMPointerType(self, 0) + } + } + + pub fn element_type(&self) -> &Type { + unsafe { + llvm::LLVMGetElementType(self) + } + } + + /// Return the number of elements in `self` if it is a LLVM vector type. + pub fn vector_length(&self) -> usize { + unsafe { + llvm::LLVMGetVectorSize(self) as usize + } + } + + pub fn func_params(&self) -> Vec<&Type> { + unsafe { + let n_args = llvm::LLVMCountParamTypes(self) as usize; + let mut args = Vec::with_capacity(n_args); + llvm::LLVMGetParamTypes(self, args.as_mut_ptr()); + args.set_len(n_args); + args + } + } + + pub fn float_width(&self) -> usize { + match self.kind() { + TypeKind::Float => 32, + TypeKind::Double => 64, + TypeKind::X86_FP80 => 80, + TypeKind::FP128 | TypeKind::PPC_FP128 => 128, + _ => bug!("llvm_float_width called on a non-float type") + } + } + + /// Retrieve the bit width of the integer type `self`. 
+ pub fn int_width(&self) -> u64 { + unsafe { + llvm::LLVMGetIntTypeWidth(self) as u64 + } + } + + pub fn from_integer(cx: &CodegenCx<'ll, '_>, i: layout::Integer) -> &'ll Type { + use rustc::ty::layout::Integer::*; + match i { + I8 => Type::i8(cx), + I16 => Type::i16(cx), + I32 => Type::i32(cx), + I64 => Type::i64(cx), + I128 => Type::i128(cx), + } + } + + /// Return a LLVM type that has at most the required alignment, + /// as a conservative approximation for unknown pointee types. + pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_>, align: Align) -> &'ll Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. + let ity = layout::Integer::approximate_abi_align(cx, align); + Type::from_integer(cx, ity) + } + + /// Return a LLVM type that has at most the required alignment, + /// and exactly the required size, as a best-effort padding array. + pub fn padding_filler(cx: &CodegenCx<'ll, '_>, size: Size, align: Align) -> &'ll Type { + let unit = layout::Integer::approximate_abi_align(cx, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + Type::array(Type::from_integer(cx, unit), size / unit_size) + } + + pub fn x86_mmx(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + unsafe { + llvm::LLVMX86MMXTypeInContext(cx.llcx) + } + } +} diff --git a/src/librustc_trans/type_of.rs b/src/librustc_codegen_llvm/type_of.rs similarity index 85% rename from src/librustc_trans/type_of.rs rename to src/librustc_codegen_llvm/type_of.rs index b1533cfad19f..69d91b327283 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::FnType; +use abi::{FnType, FnTypeExt}; use common::*; +use llvm; use rustc::hir; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; -use rustc_back::PanicStrategy; -use trans_item::DefPathBasedNames; +use rustc_target::spec::PanicStrategy; +use rustc_target::abi::FloatTy; +use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; use std::fmt::Write; fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, layout: TyLayout<'tcx>, - defer: &mut Option<(Type, TyLayout<'tcx>)>) - -> Type { + defer: &mut Option<(&'a Type, TyLayout<'tcx>)>) + -> &'a Type { match layout.abi { layout::Abi::Scalar(_) => bug!("handled elsewhere"), layout::Abi::Vector { ref element, count } => { @@ -39,14 +41,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, if use_x86_mmx { return Type::x86_mmx(cx) } else { - let element = layout.scalar_llvm_type_at(cx, element, Size::from_bytes(0)); - return Type::vector(&element, count); + let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); + return Type::vector(element, count); } } layout::Abi::ScalarPair(..) => { return Type::struct_(cx, &[ - layout.scalar_pair_element_llvm_type(cx, 0), - layout.scalar_pair_element_llvm_type(cx, 1), + layout.scalar_pair_element_llvm_type(cx, 0, false), + layout.scalar_pair_element_llvm_type(cx, 1, false), ], false); } layout::Abi::Uninhabited | @@ -57,7 +59,9 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) | - ty::TyDynamic(..) | + // FIXME(eddyb) producing readable type names for trait objects can result + // in problematically distinct types due to HRTB and subtyping (see #47638). + // ty::TyDynamic(..) | ty::TyForeign(..) 
| ty::TyStr => { let mut name = String::with_capacity(32); @@ -85,14 +89,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, Type::struct_(cx, &[fill], packed) } Some(ref name) => { - let mut llty = Type::named_struct(cx, name); + let llty = Type::named_struct(cx, name); llty.set_struct_body(&[fill], packed); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(&layout.field(cx, 0).llvm_type(cx), count) + Type::array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { @@ -112,14 +116,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, layout: TyLayout<'tcx>) - -> (Vec, bool) { + -> (Vec<&'a Type>, bool) { debug!("struct_llfields: {:#?}", layout); let field_count = layout.fields.count(); let mut packed = false; - let mut offset = Size::from_bytes(0); + let mut offset = Size::ZERO; let mut prev_align = layout.align; - let mut result: Vec = Vec::with_capacity(1 + field_count * 2); + let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); for i in layout.fields.index_by_increasing_offset() { let field = layout.field(cx, i); packed |= layout.align.abi() < field.align.abi(); @@ -197,12 +201,12 @@ pub struct PointeeInfo { pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; fn is_llvm_scalar_pair<'a>(&self) -> bool; - fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type; - fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type; + fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; + fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, - scalar: &layout::Scalar, offset: Size) -> Type; + scalar: &layout::Scalar, offset: Size) -> &'a Type; fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, - index: usize) -> Type; + index: usize, immediate: bool) -> &'a Type; fn 
llvm_field_index(&self, index: usize) -> u64; fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option; @@ -211,10 +215,10 @@ pub trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.abi { - layout::Abi::Uninhabited | layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, layout::Abi::ScalarPair(..) => false, + layout::Abi::Uninhabited | layout::Abi::Aggregate { .. } => self.is_zst() } } @@ -240,7 +244,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. - fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type { + fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). @@ -248,7 +252,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { return llty; } let llty = match self.ty.sty { - ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyRef(_, ty, _) | ty::TyRawPtr(ty::TypeAndMut { ty, .. 
}) => { cx.layout_of(ty).llvm_type(cx).ptr_to() } @@ -256,10 +260,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to() } ty::TyFnPtr(sig) => { - let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig); + let sig = cx.tcx.normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &sig, + ); FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() } - _ => self.scalar_llvm_type_at(cx, scalar, Size::from_bytes(0)) + _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; cx.scalar_lltypes.borrow_mut().insert(self.ty, llty); return llty; @@ -297,7 +304,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { cx.lltypes.borrow_mut().insert((self.ty, variant_index), llty); - if let Some((mut llty, layout)) = defer { + if let Some((llty, layout)) = defer { let (llfields, packed) = struct_llfields(cx, layout); llty.set_struct_body(&llfields, packed) } @@ -305,7 +312,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { llty } - fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type { + fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { return Type::i1(cx); @@ -315,11 +322,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, - scalar: &layout::Scalar, offset: Size) -> Type { + scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { layout::Int(i, _) => Type::from_integer(cx, i), - layout::F32 => Type::f32(cx), - layout::F64 => Type::f64(cx), + layout::Float(FloatTy::F32) => Type::f32(cx), + layout::Float(FloatTy::F64) => Type::f64(cx), layout::Pointer => { // If we know the alignment, pick something better than i8. 
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { @@ -333,7 +340,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, - index: usize) -> Type { + index: usize, immediate: bool) -> &'a Type { // HACK(eddyb) special-case fat pointers until LLVM removes // pointee types, to avoid bitcasting every `OperandRef::deref`. match self.ty.sty { @@ -343,7 +350,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } ty::TyAdt(def, _) if def.is_box() => { let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty()); - return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index); + return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate); } _ => {} } @@ -354,19 +361,18 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { }; let scalar = [a, b][index]; - // Make sure to return the same type `immediate_llvm_type` would, - // to avoid dealing with two types and the associated conversions. - // This means that `(bool, bool)` is represented as `{i1, i1}`, - // both in memory and as an immediate, while `bool` is typically - // `i8` in memory and only `i1` when immediate. While we need to - // load/store `bool` as `i8` to avoid crippling LLVM optimizations, - // `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`. - if scalar.is_bool() { + // Make sure to return the same type `immediate_llvm_type` would when + // dealing with an immediate pair. This means that `(bool, bool)` is + // effectively represented as `{i8, i8}` in memory and two `i1`s as an + // immediate, just like `bool` is typically `i8` in memory and only `i1` + // when immediate. We need to load/store `bool` as `i8` to avoid + // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. 
+ if immediate && scalar.is_bool() { return Type::i1(cx); } let offset = if index == 0 { - Size::from_bytes(0) + Size::ZERO } else { a.value.size(cx).abi_align(b.value.align(cx)) }; @@ -413,18 +419,23 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { }); } - ty::TyRef(_, mt) if offset.bytes() == 0 => { - let (size, align) = cx.size_and_align_of(mt.ty); + ty::TyRef(_, ty, mt) if offset.bytes() == 0 => { + let (size, align) = cx.size_and_align_of(ty); - let kind = match mt.mutbl { - hir::MutImmutable => if cx.type_is_freeze(mt.ty) { + let kind = match mt { + hir::MutImmutable => if cx.type_is_freeze(ty) { PointerKind::Frozen } else { PointerKind::Shared }, hir::MutMutable => { - if cx.tcx.sess.opts.debugging_opts.mutable_noalias || - cx.tcx.sess.panic_strategy() == PanicStrategy::Abort { + // Only emit noalias annotations for LLVM >= 6 or in panic=abort + // mode, as prior versions had many bugs in conjunction with + // unwinding. See also issue #31681. + let mutable_noalias = cx.tcx.sess.opts.debugging_opts.mutable_noalias + .unwrap_or(unsafe { llvm::LLVMRustVersionMajor() >= 6 } + || cx.tcx.sess.panic_strategy() == PanicStrategy::Abort); + if mutable_noalias { PointerKind::UniqueBorrowed } else { PointerKind::Shared diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs new file mode 100644 index 000000000000..3328948c2951 --- /dev/null +++ b/src/librustc_codegen_llvm/value.rs @@ -0,0 +1,39 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub use llvm::Value; + +use llvm; + +use std::fmt; +use std::hash::{Hash, Hasher}; + +impl PartialEq for Value { + fn eq(&self, other: &Self) -> bool { + self as *const _ == other as *const _ + } +} + +impl Eq for Value {} + +impl Hash for Value { + fn hash(&self, hasher: &mut H) { + (self as *const Self).hash(hasher); + } +} + + +impl fmt::Debug for Value { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&llvm::build_string(|s| unsafe { + llvm::LLVMRustWriteValueToString(self, s); + }).expect("nun-UTF8 value description from LLVM")) + } +} diff --git a/src/librustc_codegen_utils/Cargo.toml b/src/librustc_codegen_utils/Cargo.toml new file mode 100644 index 000000000000..a1f4a323f849 --- /dev/null +++ b/src/librustc_codegen_utils/Cargo.toml @@ -0,0 +1,23 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_codegen_utils" +version = "0.0.0" + +[lib] +name = "rustc_codegen_utils" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +flate2 = "1.0" +log = "0.4" + +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +rustc = { path = "../librustc" } +rustc_target = { path = "../librustc_target" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_mir = { path = "../librustc_mir" } +rustc_incremental = { path = "../librustc_incremental" } +rustc_metadata_utils = { path = "../librustc_metadata_utils" } diff --git a/src/librustc_codegen_utils/codegen_backend.rs b/src/librustc_codegen_utils/codegen_backend.rs new file mode 100644 index 000000000000..ae8f65303a7f --- /dev/null +++ b/src/librustc_codegen_utils/codegen_backend.rs @@ -0,0 +1,230 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Rust compiler. +//! +//! # Note +//! +//! This API is completely unstable and subject to change. + +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![deny(warnings)] + +#![feature(box_syntax)] + +use std::any::Any; +use std::io::{self, Write}; +use std::fs::File; +use std::path::Path; +use std::sync::{mpsc, Arc}; + +use rustc_data_structures::owning_ref::OwningRef; +use rustc_data_structures::sync::Lrc; +use flate2::Compression; +use flate2::write::DeflateEncoder; + +use syntax::symbol::Symbol; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::session::{Session, CompileIncomplete}; +use rustc::session::config::{CrateType, OutputFilenames, PrintRequest}; +use rustc::ty::TyCtxt; +use rustc::ty::query::Providers; +use rustc::middle::cstore::EncodedMetadata; +use rustc::middle::cstore::MetadataLoader; +use rustc::dep_graph::DepGraph; +use rustc_target::spec::Target; +use rustc_data_structures::fx::FxHashMap; +use rustc_mir::monomorphize::collector; +use link::{build_link_meta, out_filename}; + +pub use rustc_data_structures::sync::MetadataRef; + +pub trait CodegenBackend { + fn init(&self, _sess: &Session) {} + fn print(&self, _req: PrintRequest, _sess: &Session) {} + fn target_features(&self, _sess: &Session) -> Vec { vec![] } + fn print_passes(&self) {} + fn print_version(&self) {} + fn diagnostics(&self) -> &[(&'static str, &'static str)] { &[] } + + fn metadata_loader(&self) -> Box; + fn provide(&self, _providers: &mut Providers); + fn provide_extern(&self, _providers: &mut Providers); + fn codegen_crate<'a, 'tcx>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> + ) -> Box; + + /// This is called on the returned `Box` from `codegen_backend` + /// + /// # Panics + /// + /// Panics when the 
passed `Box` was not returned by `codegen_backend`. + fn join_codegen_and_link( + &self, + ongoing_codegen: Box, + sess: &Session, + dep_graph: &DepGraph, + outputs: &OutputFilenames, + ) -> Result<(), CompileIncomplete>; +} + +pub struct NoLlvmMetadataLoader; + +impl MetadataLoader for NoLlvmMetadataLoader { + fn get_rlib_metadata(&self, _: &Target, filename: &Path) -> Result { + let mut file = File::open(filename) + .map_err(|e| format!("metadata file open err: {:?}", e))?; + + let mut buf = Vec::new(); + io::copy(&mut file, &mut buf).unwrap(); + let buf: OwningRef, [u8]> = OwningRef::new(buf).into(); + return Ok(rustc_erase_owner!(buf.map_owner_box())); + } + + fn get_dylib_metadata(&self, target: &Target, filename: &Path) -> Result { + self.get_rlib_metadata(target, filename) + } +} + +pub struct MetadataOnlyCodegenBackend(()); +pub struct OngoingCodegen { + metadata: EncodedMetadata, + metadata_version: Vec, + crate_name: Symbol, +} + +impl MetadataOnlyCodegenBackend { + pub fn new() -> Box { + box MetadataOnlyCodegenBackend(()) + } +} + +impl CodegenBackend for MetadataOnlyCodegenBackend { + fn init(&self, sess: &Session) { + for cty in sess.opts.crate_types.iter() { + match *cty { + CrateType::Rlib | CrateType::Dylib | CrateType::Executable => {}, + _ => { + sess.diagnostic().warn( + &format!("LLVM unsupported, so output type {} is not supported", cty) + ); + }, + } + } + } + + fn metadata_loader(&self) -> Box { + box NoLlvmMetadataLoader + } + + fn provide(&self, providers: &mut Providers) { + ::symbol_names::provide(providers); + + providers.target_features_whitelist = |_tcx, _cnum| { + Lrc::new(FxHashMap()) // Just a dummy + }; + providers.is_reachable_non_generic = |_tcx, _defid| true; + providers.exported_symbols = |_tcx, _crate| Arc::new(Vec::new()); + } + fn provide_extern(&self, providers: &mut Providers) { + providers.is_reachable_non_generic = |_tcx, _defid| true; + } + + fn codegen_crate<'a, 'tcx>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + _rx: 
mpsc::Receiver> + ) -> Box { + use rustc_mir::monomorphize::item::MonoItem; + + ::check_for_rustc_errors_attr(tcx); + ::symbol_names_test::report_symbol_names(tcx); + ::rustc_incremental::assert_dep_graph(tcx); + ::rustc_incremental::assert_module_sources::assert_module_sources(tcx); + ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, + collector::collect_crate_mono_items( + tcx, + collector::MonoItemCollectionMode::Eager + ).0.iter() + ); + // FIXME: Fix this + // ::rustc::middle::dependency_format::calculate(tcx); + let _ = tcx.link_args(LOCAL_CRATE); + let _ = tcx.native_libraries(LOCAL_CRATE); + for mono_item in + collector::collect_crate_mono_items( + tcx, + collector::MonoItemCollectionMode::Eager + ).0 { + match mono_item { + MonoItem::Fn(inst) => { + let def_id = inst.def_id(); + if def_id.is_local() { + let _ = inst.def.is_inline(tcx); + let _ = tcx.codegen_fn_attrs(def_id); + } + } + _ => {} + } + } + tcx.sess.abort_if_errors(); + + let link_meta = build_link_meta(tcx.crate_hash(LOCAL_CRATE)); + let metadata = tcx.encode_metadata(&link_meta); + + box OngoingCodegen { + metadata: metadata, + metadata_version: tcx.metadata_encoding_version().to_vec(), + crate_name: tcx.crate_name(LOCAL_CRATE), + } + } + + fn join_codegen_and_link( + &self, + ongoing_codegen: Box, + sess: &Session, + _dep_graph: &DepGraph, + outputs: &OutputFilenames, + ) -> Result<(), CompileIncomplete> { + let ongoing_codegen = ongoing_codegen.downcast::() + .expect("Expected MetadataOnlyCodegenBackend's OngoingCodegen, found Box"); + for &crate_type in sess.opts.crate_types.iter() { + if crate_type != CrateType::Rlib && + crate_type != CrateType::Dylib { + continue; + } + let output_name = + out_filename(sess, crate_type, &outputs, &ongoing_codegen.crate_name.as_str()); + let mut compressed = ongoing_codegen.metadata_version.clone(); + let metadata = if crate_type == CrateType::Dylib { + DeflateEncoder::new(&mut compressed, Compression::fast()) + 
.write_all(&ongoing_codegen.metadata.raw_data) + .unwrap(); + &compressed + } else { + &ongoing_codegen.metadata.raw_data + }; + let mut file = File::create(&output_name).unwrap(); + file.write_all(metadata).unwrap(); + } + + sess.abort_if_errors(); + if !sess.opts.crate_types.contains(&CrateType::Rlib) + && !sess.opts.crate_types.contains(&CrateType::Dylib) + { + sess.fatal("Executables are not supported by the metadata-only backend."); + } + Ok(()) + } +} diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs new file mode 100644 index 000000000000..635819e94e86 --- /dev/null +++ b/src/librustc_codegen_utils/lib.rs @@ -0,0 +1,64 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Note +//! +//! This API is completely unstable and subject to change. 
+ +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(box_patterns)] +#![feature(box_syntax)] +#![feature(custom_attribute)] +#![cfg_attr(not(stage0), feature(nll))] +#![allow(unused_attributes)] +#![feature(quote)] +#![feature(rustc_diagnostic_macros)] + +#![recursion_limit="256"] + +extern crate flate2; +#[macro_use] +extern crate log; + +#[macro_use] +extern crate rustc; +extern crate rustc_target; +extern crate rustc_mir; +extern crate rustc_incremental; +extern crate syntax; +extern crate syntax_pos; +#[macro_use] extern crate rustc_data_structures; +extern crate rustc_metadata_utils; + +use rustc::ty::TyCtxt; + +pub mod link; +pub mod codegen_backend; +pub mod symbol_names; +pub mod symbol_names_test; + +/// check for the #[rustc_error] annotation, which forces an +/// error in codegen. This is used to write compile-fail tests +/// that actually test that compilation succeeds without +/// reporting an error. +pub fn check_for_rustc_errors_attr(tcx: TyCtxt) { + if let Some((id, span, _)) = *tcx.sess.entry_fn.borrow() { + let main_def_id = tcx.hir.local_def_id(id); + + if tcx.has_attr(main_def_id, "rustc_error") { + tcx.sess.span_fatal(span, "compilation successful"); + } + } +} + +__build_diagnostic_array! { librustc_codegen_utils, DIAGNOSTICS } diff --git a/src/librustc_codegen_utils/link.rs b/src/librustc_codegen_utils/link.rs new file mode 100644 index 000000000000..a0d88ccae0f1 --- /dev/null +++ b/src/librustc_codegen_utils/link.rs @@ -0,0 +1,192 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::session::config::{self, OutputFilenames, Input, OutputType}; +use rustc::session::Session; +use rustc::middle::cstore::LinkMeta; +use rustc_data_structures::svh::Svh; +use std::path::{Path, PathBuf}; +use syntax::{ast, attr}; +use syntax_pos::Span; +use rustc_metadata_utils::validate_crate_name; + +pub fn out_filename(sess: &Session, + crate_type: config::CrateType, + outputs: &OutputFilenames, + crate_name: &str) + -> PathBuf { + let default_filename = filename_for_input(sess, crate_type, crate_name, outputs); + let out_filename = outputs.outputs.get(&OutputType::Exe) + .and_then(|s| s.to_owned()) + .or_else(|| outputs.single_output_file.clone()) + .unwrap_or(default_filename); + + check_file_is_writeable(&out_filename, sess); + + out_filename +} + +// Make sure files are writeable. Mac, FreeBSD, and Windows system linkers +// check this already -- however, the Linux linker will happily overwrite a +// read-only file. We should be consistent. +pub fn check_file_is_writeable(file: &Path, sess: &Session) { + if !is_writeable(file) { + sess.fatal(&format!("output file {} is not writeable -- check its \ + permissions", file.display())); + } +} + +fn is_writeable(p: &Path) -> bool { + match p.metadata() { + Err(..) => true, + Ok(m) => !m.permissions().readonly() + } +} + +pub fn build_link_meta(crate_hash: Svh) -> LinkMeta { + let r = LinkMeta { + crate_hash, + }; + info!("{:?}", r); + return r; +} + +pub fn find_crate_name(sess: Option<&Session>, + attrs: &[ast::Attribute], + input: &Input) -> String { + let validate = |s: String, span: Option| { + validate_crate_name(sess, &s, span); + s + }; + + // Look in attributes 100% of the time to make sure the attribute is marked + // as used. After doing this, however, we still prioritize a crate name from + // the command line over one found in the #[crate_name] attribute. 
If we + // find both we ensure that they're the same later on as well. + let attr_crate_name = attr::find_by_name(attrs, "crate_name") + .and_then(|at| at.value_str().map(|s| (at, s))); + + if let Some(sess) = sess { + if let Some(ref s) = sess.opts.crate_name { + if let Some((attr, name)) = attr_crate_name { + if name != &**s { + let msg = format!("--crate-name and #[crate_name] are \ + required to match, but `{}` != `{}`", + s, name); + sess.span_err(attr.span, &msg); + } + } + return validate(s.clone(), None); + } + } + + if let Some((attr, s)) = attr_crate_name { + return validate(s.to_string(), Some(attr.span)); + } + if let Input::File(ref path) = *input { + if let Some(s) = path.file_stem().and_then(|s| s.to_str()) { + if s.starts_with("-") { + let msg = format!("crate names cannot start with a `-`, but \ + `{}` has a leading hyphen", s); + if let Some(sess) = sess { + sess.err(&msg); + } + } else { + return validate(s.replace("-", "_"), None); + } + } + } + + "rust_out".to_string() +} + +pub fn filename_for_input(sess: &Session, + crate_type: config::CrateType, + crate_name: &str, + outputs: &OutputFilenames) -> PathBuf { + let libname = format!("{}{}", crate_name, sess.opts.cg.extra_filename); + + match crate_type { + config::CrateType::Rlib => { + outputs.out_directory.join(&format!("lib{}.rlib", libname)) + } + config::CrateType::Cdylib | + config::CrateType::ProcMacro | + config::CrateType::Dylib => { + let (prefix, suffix) = (&sess.target.target.options.dll_prefix, + &sess.target.target.options.dll_suffix); + outputs.out_directory.join(&format!("{}{}{}", prefix, libname, + suffix)) + } + config::CrateType::Staticlib => { + let (prefix, suffix) = (&sess.target.target.options.staticlib_prefix, + &sess.target.target.options.staticlib_suffix); + outputs.out_directory.join(&format!("{}{}{}", prefix, libname, + suffix)) + } + config::CrateType::Executable => { + let suffix = &sess.target.target.options.exe_suffix; + let out_filename = 
outputs.path(OutputType::Exe); + if suffix.is_empty() { + out_filename.to_path_buf() + } else { + out_filename.with_extension(&suffix[1..]) + } + } + } +} + +/// Returns default crate type for target +/// +/// Default crate type is used when crate type isn't provided neither +/// through cmd line arguments nor through crate attributes +/// +/// It is CrateType::Executable for all platforms but iOS as there is no +/// way to run iOS binaries anyway without jailbreaking and +/// interaction with Rust code through static library is the only +/// option for now +pub fn default_output_for_target(sess: &Session) -> config::CrateType { + if !sess.target.target.options.executables { + config::CrateType::Staticlib + } else { + config::CrateType::Executable + } +} + +/// Checks if target supports crate_type as output +pub fn invalid_output_for_target(sess: &Session, + crate_type: config::CrateType) -> bool { + match crate_type { + config::CrateType::Cdylib | + config::CrateType::Dylib | + config::CrateType::ProcMacro => { + if !sess.target.target.options.dynamic_linking { + return true + } + if sess.crt_static() && !sess.target.target.options.crt_static_allows_dylibs { + return true + } + } + _ => {} + } + if sess.target.target.options.only_cdylib { + match crate_type { + config::CrateType::ProcMacro | config::CrateType::Dylib => return true, + _ => {} + } + } + if !sess.target.target.options.executables { + if crate_type == config::CrateType::Executable { + return true + } + } + + false +} diff --git a/src/librustc_codegen_utils/symbol_names.rs b/src/librustc_codegen_utils/symbol_names.rs new file mode 100644 index 000000000000..d834a6502b28 --- /dev/null +++ b/src/librustc_codegen_utils/symbol_names.rs @@ -0,0 +1,445 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Rust Linkage Model and Symbol Names +//! ======================================= +//! +//! The semantic model of Rust linkage is, broadly, that "there's no global +//! namespace" between crates. Our aim is to preserve the illusion of this +//! model despite the fact that it's not *quite* possible to implement on +//! modern linkers. We initially didn't use system linkers at all, but have +//! been convinced of their utility. +//! +//! There are a few issues to handle: +//! +//! - Linkers operate on a flat namespace, so we have to flatten names. +//! We do this using the C++ namespace-mangling technique. Foo::bar +//! symbols and such. +//! +//! - Symbols for distinct items with the same *name* need to get different +//! linkage-names. Examples of this are monomorphizations of functions or +//! items within anonymous scopes that end up having the same path. +//! +//! - Symbols in different crates but with same names "within" the crate need +//! to get different linkage-names. +//! +//! - Symbol names should be deterministic: Two consecutive runs of the +//! compiler over the same code base should produce the same symbol names for +//! the same items. +//! +//! - Symbol names should not depend on any global properties of the code base, +//! so that small modifications to the code base do not result in all symbols +//! changing. In previous versions of the compiler, symbol names incorporated +//! the SVH (Stable Version Hash) of the crate. This scheme turned out to be +//! infeasible when used in conjunction with incremental compilation because +//! small code changes would invalidate all symbols generated previously. +//! +//! - Even symbols from different versions of the same crate should be able to +//! live next to each other without conflict. +//! +//! 
In order to fulfill the above requirements the following scheme is used by +//! the compiler: +//! +//! The main tool for avoiding naming conflicts is the incorporation of a 64-bit +//! hash value into every exported symbol name. Anything that makes a difference +//! to the symbol being named, but does not show up in the regular path needs to +//! be fed into this hash: +//! +//! - Different monomorphizations of the same item have the same path but differ +//! in their concrete type parameters, so these parameters are part of the +//! data being digested for the symbol hash. +//! +//! - Rust allows items to be defined in anonymous scopes, such as in +//! `fn foo() { { fn bar() {} } { fn bar() {} } }`. Both `bar` functions have +//! the path `foo::bar`, since the anonymous scopes do not contribute to the +//! path of an item. The compiler already handles this case via so-called +//! disambiguating `DefPaths` which use indices to distinguish items with the +//! same name. The DefPaths of the functions above are thus `foo[0]::bar[0]` +//! and `foo[0]::bar[1]`. In order to incorporate this disambiguation +//! information into the symbol name too, these indices are fed into the +//! symbol hash, so that the above two symbols would end up with different +//! hash values. +//! +//! The two measures described above suffice to avoid intra-crate conflicts. In +//! order to also avoid inter-crate conflicts two more measures are taken: +//! +//! - The name of the crate containing the symbol is prepended to the symbol +//! name, i.e. symbols are "crate qualified". For example, a function `foo` in +//! module `bar` in crate `baz` would get a symbol name like +//! `baz::bar::foo::{hash}` instead of just `bar::foo::{hash}`. This avoids +//! simple conflicts between functions from different crates. +//! +//! - In order to be able to also use symbols from two versions of the same +//! crate (which naturally also have the same name), a stronger measure is +//! 
required: The compiler accepts an arbitrary "disambiguator" value via the +//! `-C metadata` commandline argument. This disambiguator is then fed into +//! the symbol hash of every exported item. Consequently, the symbols in two +//! identical crates but with different disambiguators are not in conflict +//! with each other. This facility is mainly intended to be used by build +//! tools like Cargo. +//! +//! A note on symbol name stability +//! ------------------------------- +//! Previous versions of the compiler resorted to feeding NodeIds into the +//! symbol hash in order to disambiguate between items with the same path. The +//! current version of the name generation algorithm takes great care not to do +//! that, since NodeIds are notoriously unstable: A small change to the +//! code base will offset all NodeIds after the change and thus, much as using +//! the SVH in the hash, invalidate an unbounded number of symbol names. This +//! makes re-using previously compiled code for incremental compilation +//! virtually impossible. Thus, symbol hash generation exclusively relies on +//! DefPaths which are much more robust in the face of changes to the code base. 
+ +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::hir::map as hir_map; +use rustc::hir::map::definitions::DefPathData; +use rustc::ich::NodeIdHashingMode; +use rustc::middle::weak_lang_items; +use rustc::ty::item_path::{self, ItemPathBuffer, RootMode}; +use rustc::ty::query::Providers; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::util::common::record_time; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; +use rustc_mir::monomorphize::item::{InstantiationMode, MonoItem, MonoItemExt}; +use rustc_mir::monomorphize::Instance; + +use syntax::attr; +use syntax_pos::symbol::Symbol; + +use std::fmt::Write; + +pub fn provide(providers: &mut Providers) { + *providers = Providers { + def_symbol_name, + symbol_name, + + ..*providers + }; +} + +fn get_symbol_hash<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + + // the DefId of the item this name is for + def_id: DefId, + + // instance this name will be for + instance: Instance<'tcx>, + + // type of the item, without any generic + // parameters substituted; this is + // included in the hash as a kind of + // safeguard. + item_type: Ty<'tcx>, + + // values for generic type parameters, + // if any. + substs: &'tcx Substs<'tcx>, +) -> u64 { + debug!( + "get_symbol_hash(def_id={:?}, parameters={:?})", + def_id, substs + ); + + let mut hasher = StableHasher::::new(); + let mut hcx = tcx.create_stable_hashing_context(); + + record_time(&tcx.sess.perf_stats.symbol_hash_time, || { + // the main symbol name is not necessarily unique; hash in the + // compiler's internal def-path, guaranteeing each symbol has a + // truly unique path + tcx.def_path_hash(def_id).hash_stable(&mut hcx, &mut hasher); + + // Include the main item-type. Note that, in this case, the + // assertions about `needs_subst` may not hold, but this item-type + // ought to be the same for every reference anyway. 
+ assert!(!item_type.has_erasable_regions()); + hcx.while_hashing_spans(false, |hcx| { + hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { + item_type.hash_stable(hcx, &mut hasher); + }); + }); + + // If this is a function, we hash the signature as well. + // This is not *strictly* needed, but it may help in some + // situations, see the `run-make/a-b-a-linker-guard` test. + if let ty::TyFnDef(..) = item_type.sty { + item_type.fn_sig(tcx).hash_stable(&mut hcx, &mut hasher); + } + + // also include any type parameters (for generic items) + assert!(!substs.has_erasable_regions()); + assert!(!substs.needs_subst()); + substs.hash_stable(&mut hcx, &mut hasher); + + let is_generic = substs.types().next().is_some(); + let avoid_cross_crate_conflicts = + // If this is an instance of a generic function, we also hash in + // the ID of the instantiating crate. This avoids symbol conflicts + // in case the same instances is emitted in two crates of the same + // project. + is_generic || + + // If we're dealing with an instance of a function that's inlined from + // another crate but we're marking it as globally shared to our + // compliation (aka we're not making an internal copy in each of our + // codegen units) then this symbol may become an exported (but hidden + // visibility) symbol. This means that multiple crates may do the same + // and we want to be sure to avoid any symbol conflicts here. + match MonoItem::Fn(instance).instantiation_mode(tcx) { + InstantiationMode::GloballyShared { may_conflict: true } => true, + _ => false, + }; + + if avoid_cross_crate_conflicts { + let instantiating_crate = if is_generic { + if !def_id.is_local() && tcx.sess.opts.share_generics() { + // If we are re-using a monomorphization from another crate, + // we have to compute the symbol hash accordingly. 
+ let upstream_monomorphizations = tcx.upstream_monomorphizations_for(def_id); + + upstream_monomorphizations + .and_then(|monos| monos.get(&substs).cloned()) + .unwrap_or(LOCAL_CRATE) + } else { + LOCAL_CRATE + } + } else { + LOCAL_CRATE + }; + + (&tcx.original_crate_name(instantiating_crate).as_str()[..]) + .hash_stable(&mut hcx, &mut hasher); + (&tcx.crate_disambiguator(instantiating_crate)).hash_stable(&mut hcx, &mut hasher); + } + }); + + // 64 bits should be enough to avoid collisions. + hasher.finish() +} + +fn def_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::SymbolName { + let mut buffer = SymbolPathBuffer::new(); + item_path::with_forced_absolute_paths(|| { + tcx.push_item_path(&mut buffer, def_id); + }); + buffer.into_interned() +} + +fn symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>) -> ty::SymbolName { + ty::SymbolName { + name: Symbol::intern(&compute_symbol_name(tcx, instance)).as_interned_str(), + } +} + +fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>) -> String { + let def_id = instance.def_id(); + let substs = instance.substs; + + debug!("symbol_name(def_id={:?}, substs={:?})", def_id, substs); + + let node_id = tcx.hir.as_local_node_id(def_id); + + if let Some(id) = node_id { + if *tcx.sess.plugin_registrar_fn.get() == Some(id) { + let disambiguator = tcx.sess.local_crate_disambiguator(); + return tcx.sess.generate_plugin_registrar_symbol(disambiguator); + } + if *tcx.sess.derive_registrar_fn.get() == Some(id) { + let disambiguator = tcx.sess.local_crate_disambiguator(); + return tcx.sess.generate_derive_registrar_symbol(disambiguator); + } + } + + // FIXME(eddyb) Precompute a custom symbol name based on attributes. 
+ let attrs = tcx.get_attrs(def_id); + let is_foreign = if let Some(id) = node_id { + match tcx.hir.get(id) { + hir_map::NodeForeignItem(_) => true, + _ => false, + } + } else { + tcx.is_foreign_item(def_id) + }; + + if let Some(name) = weak_lang_items::link_name(&attrs) { + return name.to_string(); + } + + if is_foreign { + if let Some(name) = attr::first_attr_value_str_by_name(&attrs, "link_name") { + return name.to_string(); + } + // Don't mangle foreign items. + return tcx.item_name(def_id).to_string(); + } + + if let Some(name) = tcx.codegen_fn_attrs(def_id).export_name { + // Use provided name + return name.to_string(); + } + + if attr::contains_name(&attrs, "no_mangle") { + // Don't mangle + return tcx.item_name(def_id).to_string(); + } + + // We want to compute the "type" of this item. Unfortunately, some + // kinds of items (e.g., closures) don't have an entry in the + // item-type array. So walk back up the find the closest parent + // that DOES have an entry. + let mut ty_def_id = def_id; + let instance_ty; + loop { + let key = tcx.def_key(ty_def_id); + match key.disambiguated_data.data { + DefPathData::TypeNs(_) | DefPathData::ValueNs(_) => { + instance_ty = tcx.type_of(ty_def_id); + break; + } + _ => { + // if we're making a symbol for something, there ought + // to be a value or type-def or something in there + // *somewhere* + ty_def_id.index = key.parent.unwrap_or_else(|| { + bug!( + "finding type for {:?}, encountered def-id {:?} with no \ + parent", + def_id, + ty_def_id + ); + }); + } + } + } + + // Erase regions because they may not be deterministic when hashed + // and should not matter anyhow. + let instance_ty = tcx.erase_regions(&instance_ty); + + let hash = get_symbol_hash(tcx, def_id, instance, instance_ty, substs); + + SymbolPathBuffer::from_interned(tcx.def_symbol_name(def_id)).finish(hash) +} + +// Follow C++ namespace-mangling style, see +// http://en.wikipedia.org/wiki/Name_mangling for more info. 
+// +// It turns out that on macOS you can actually have arbitrary symbols in +// function names (at least when given to LLVM), but this is not possible +// when using unix's linker. Perhaps one day when we just use a linker from LLVM +// we won't need to do this name mangling. The problem with name mangling is +// that it seriously limits the available characters. For example we can't +// have things like &T in symbol names when one would theoretically +// want them for things like impls of traits on that type. +// +// To be able to work on all platforms and get *some* reasonable output, we +// use C++ name-mangling. +struct SymbolPathBuffer { + result: String, + temp_buf: String, +} + +impl SymbolPathBuffer { + fn new() -> Self { + let mut result = SymbolPathBuffer { + result: String::with_capacity(64), + temp_buf: String::with_capacity(16), + }; + result.result.push_str("_ZN"); // _Z == Begin name-sequence, N == nested + result + } + + fn from_interned(symbol: ty::SymbolName) -> Self { + let mut result = SymbolPathBuffer { + result: String::with_capacity(64), + temp_buf: String::with_capacity(16), + }; + result.result.push_str(&symbol.as_str()); + result + } + + fn into_interned(self) -> ty::SymbolName { + ty::SymbolName { + name: Symbol::intern(&self.result).as_interned_str(), + } + } + + fn finish(mut self, hash: u64) -> String { + // E = end name-sequence + let _ = write!(self.result, "17h{:016x}E", hash); + self.result + } +} + +impl ItemPathBuffer for SymbolPathBuffer { + fn root_mode(&self) -> &RootMode { + const ABSOLUTE: &'static RootMode = &RootMode::Absolute; + ABSOLUTE + } + + fn push(&mut self, text: &str) { + self.temp_buf.clear(); + let need_underscore = sanitize(&mut self.temp_buf, text); + let _ = write!( + self.result, + "{}", + self.temp_buf.len() + (need_underscore as usize) + ); + if need_underscore { + self.result.push('_'); + } + self.result.push_str(&self.temp_buf); + } +} + +// Name sanitation. 
LLVM will happily accept identifiers with weird names, but +// gas doesn't! +// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $ +// +// returns true if an underscore must be added at the start +pub fn sanitize(result: &mut String, s: &str) -> bool { + for c in s.chars() { + match c { + // Escape these with $ sequences + '@' => result.push_str("$SP$"), + '*' => result.push_str("$BP$"), + '&' => result.push_str("$RF$"), + '<' => result.push_str("$LT$"), + '>' => result.push_str("$GT$"), + '(' => result.push_str("$LP$"), + ')' => result.push_str("$RP$"), + ',' => result.push_str("$C$"), + + // '.' doesn't occur in types and functions, so reuse it + // for ':' and '-' + '-' | ':' => result.push('.'), + + // These are legal symbols + 'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '$' => result.push(c), + + _ => { + result.push('$'); + for c in c.escape_unicode().skip(1) { + match c { + '{' => {} + '}' => result.push('$'), + c => result.push(c), + } + } + } + } + } + + // Underscore-qualify anything that didn't start as an ident. + !result.is_empty() && result.as_bytes()[0] != '_' as u8 + && !(result.as_bytes()[0] as char).is_xid_start() +} diff --git a/src/librustc_trans/symbol_names_test.rs b/src/librustc_codegen_utils/symbol_names_test.rs similarity index 75% rename from src/librustc_trans/symbol_names_test.rs rename to src/librustc_codegen_utils/symbol_names_test.rs index 15c142cf947c..47bbd67fb5c7 100644 --- a/src/librustc_trans/symbol_names_test.rs +++ b/src/librustc_codegen_utils/symbol_names_test.rs @@ -15,11 +15,10 @@ //! paths etc in all kinds of annoying scenarios. 
use rustc::hir; -use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::ty::TyCtxt; use syntax::ast; -use monomorphize::Instance; +use rustc_mir::monomorphize::Instance; const SYMBOL_NAME: &'static str = "rustc_symbol_name"; const ITEM_PATH: &'static str = "rustc_item_path"; @@ -28,14 +27,13 @@ pub fn report_symbol_names<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // if the `rustc_attrs` feature is not enabled, then the // attributes we are interested in cannot be present anyway, so // skip the walk. - if !tcx.sess.features.borrow().rustc_attrs { + if !tcx.features().rustc_attrs { return; } tcx.dep_graph.with_ignore(|| { let mut visitor = SymbolNamesTest { tcx: tcx }; - // FIXME(#37712) could use ItemLikeVisitor if trait items were item-like - tcx.hir.krate().visit_all_item_likes(&mut visitor.as_deep_visitor()); + tcx.hir.krate().visit_all_item_likes(&mut visitor); }) } @@ -66,23 +64,16 @@ impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> { } } -impl<'a, 'tcx> Visitor<'tcx> for SymbolNamesTest<'a, 'tcx> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { - NestedVisitorMap::None - } - +impl<'a, 'tcx> hir::itemlikevisit::ItemLikeVisitor<'tcx> for SymbolNamesTest<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { self.process_attrs(item.id); - intravisit::walk_item(self, item); } - fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { - self.process_attrs(ti.id); - intravisit::walk_trait_item(self, ti) + fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { + self.process_attrs(trait_item.id); } - fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { - self.process_attrs(ii.id); - intravisit::walk_impl_item(self, ii) + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { + self.process_attrs(impl_item.id); } } diff --git a/src/librustc_const_eval/Cargo.toml b/src/librustc_const_eval/Cargo.toml deleted file mode 100644 index 53b8402ab2ad..000000000000 --- 
a/src/librustc_const_eval/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "rustc_const_eval" -version = "0.0.0" - -[lib] -name = "rustc_const_eval" -path = "lib.rs" -crate-type = ["dylib"] - -[dependencies] -arena = { path = "../libarena" } -log = "0.4" -rustc = { path = "../librustc" } -rustc_const_math = { path = "../librustc_const_math" } -rustc_data_structures = { path = "../librustc_data_structures" } -rustc_errors = { path = "../librustc_errors" } -syntax = { path = "../libsyntax" } -syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_const_eval/diagnostics.rs b/src/librustc_const_eval/diagnostics.rs deleted file mode 100644 index d01b3c45f7fd..000000000000 --- a/src/librustc_const_eval/diagnostics.rs +++ /dev/null @@ -1,571 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(non_snake_case)] - -// Error messages for EXXXX errors. -// Each message should start and end with a new line, and be wrapped to 80 characters. -// In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable. -register_long_diagnostics! { - -E0001: r##" -#### Note: this error code is no longer emitted by the compiler. - -This error suggests that the expression arm corresponding to the noted pattern -will never be reached as for all possible values of the expression being -matched, one of the preceding patterns will match. - -This means that perhaps some of the preceding patterns are too general, this -one is too specific or the ordering is incorrect. - -For example, the following `match` block has too many arms: - -``` -match Some(0) { - Some(bar) => {/* ... 
*/} - x => {/* ... */} // This handles the `None` case - _ => {/* ... */} // All possible cases have already been handled -} -``` - -`match` blocks have their patterns matched in order, so, for example, putting -a wildcard arm above a more specific arm will make the latter arm irrelevant. - -Ensure the ordering of the match arm is correct and remove any superfluous -arms. -"##, - -E0002: r##" -#### Note: this error code is no longer emitted by the compiler. - -This error indicates that an empty match expression is invalid because the type -it is matching on is non-empty (there exist values of this type). In safe code -it is impossible to create an instance of an empty type, so empty match -expressions are almost never desired. This error is typically fixed by adding -one or more cases to the match expression. - -An example of an empty type is `enum Empty { }`. So, the following will work: - -``` -enum Empty {} - -fn foo(x: Empty) { - match x { - // empty - } -} -``` - -However, this won't: - -```compile_fail -fn foo(x: Option) { - match x { - // empty - } -} -``` -"##, - -E0003: r##" -#### Note: this error code is no longer emitted by the compiler. - -Not-a-Number (NaN) values cannot be compared for equality and hence can never -match the input to a match expression. So, the following will not compile: - -```compile_fail -const NAN: f32 = 0.0 / 0.0; - -let number = 0.1f32; - -match number { - NAN => { /* ... */ }, - _ => {} -} -``` - -To match against NaN values, you should instead use the `is_nan()` method in a -guard, like so: - -``` -let number = 0.1f32; - -match number { - x if x.is_nan() => { /* ... */ } - _ => {} -} -``` -"##, - -E0004: r##" -This error indicates that the compiler cannot guarantee a matching pattern for -one or more possible inputs to a match expression. Guaranteed matches are -required in order to assign values to match expressions, or alternatively, -determine the flow of execution. 
Erroneous code example: - -```compile_fail,E0004 -enum Terminator { - HastaLaVistaBaby, - TalkToMyHand, -} - -let x = Terminator::HastaLaVistaBaby; - -match x { // error: non-exhaustive patterns: `HastaLaVistaBaby` not covered - Terminator::TalkToMyHand => {} -} -``` - -If you encounter this error you must alter your patterns so that every possible -value of the input type is matched. For types with a small number of variants -(like enums) you should probably cover all cases explicitly. Alternatively, the -underscore `_` wildcard pattern can be added after all other patterns to match -"anything else". Example: - -``` -enum Terminator { - HastaLaVistaBaby, - TalkToMyHand, -} - -let x = Terminator::HastaLaVistaBaby; - -match x { - Terminator::TalkToMyHand => {} - Terminator::HastaLaVistaBaby => {} -} - -// or: - -match x { - Terminator::TalkToMyHand => {} - _ => {} -} -``` -"##, - -E0005: r##" -Patterns used to bind names must be irrefutable, that is, they must guarantee -that a name will be extracted in all cases. Erroneous code example: - -```compile_fail,E0005 -let x = Some(1); -let Some(y) = x; -// error: refutable pattern in local binding: `None` not covered -``` - -If you encounter this error you probably need to use a `match` or `if let` to -deal with the possibility of failure. Example: - -``` -let x = Some(1); - -match x { - Some(y) => { - // do something - }, - None => {} -} - -// or: - -if let Some(y) = x { - // do something -} -``` -"##, - -E0007: r##" -This error indicates that the bindings in a match arm would require a value to -be moved into more than one location, thus violating unique ownership. Code -like the following is invalid as it requires the entire `Option` to be -moved into a variable called `op_string` while simultaneously requiring the -inner `String` to be moved into a variable called `s`. 
- -```compile_fail,E0007 -let x = Some("s".to_string()); - -match x { - op_string @ Some(s) => {}, // error: cannot bind by-move with sub-bindings - None => {}, -} -``` - -See also the error E0303. -"##, - -E0008: r##" -Names bound in match arms retain their type in pattern guards. As such, if a -name is bound by move in a pattern, it should also be moved to wherever it is -referenced in the pattern guard code. Doing so however would prevent the name -from being available in the body of the match arm. Consider the following: - -```compile_fail,E0008 -match Some("hi".to_string()) { - Some(s) if s.len() == 0 => {}, // use s. - _ => {}, -} -``` - -The variable `s` has type `String`, and its use in the guard is as a variable of -type `String`. The guard code effectively executes in a separate scope to the -body of the arm, so the value would be moved into this anonymous scope and -therefore becomes unavailable in the body of the arm. - -The problem above can be solved by using the `ref` keyword. - -``` -match Some("hi".to_string()) { - Some(ref s) if s.len() == 0 => {}, - _ => {}, -} -``` - -Though this example seems innocuous and easy to solve, the problem becomes clear -when it encounters functions which consume the value: - -```compile_fail,E0008 -struct A{} - -impl A { - fn consume(self) -> usize { - 0 - } -} - -fn main() { - let a = Some(A{}); - match a { - Some(y) if y.consume() > 0 => {} - _ => {} - } -} -``` - -In this situation, even the `ref` keyword cannot solve it, since borrowed -content cannot be moved. This problem cannot be solved generally. If the value -can be cloned, here is a not-so-specific solution: - -``` -#[derive(Clone)] -struct A{} - -impl A { - fn consume(self) -> usize { - 0 - } -} - -fn main() { - let a = Some(A{}); - match a{ - Some(ref y) if y.clone().consume() > 0 => {} - _ => {} - } -} -``` - -If the value will be consumed in the pattern guard, using its clone will not -move its ownership, so the code works. 
-"##, - -E0009: r##" -In a pattern, all values that don't implement the `Copy` trait have to be bound -the same way. The goal here is to avoid binding simultaneously by-move and -by-ref. - -This limitation may be removed in a future version of Rust. - -Erroneous code example: - -```compile_fail,E0009 -struct X { x: (), } - -let x = Some((X { x: () }, X { x: () })); -match x { - Some((y, ref z)) => {}, // error: cannot bind by-move and by-ref in the - // same pattern - None => panic!() -} -``` - -You have two solutions: - -Solution #1: Bind the pattern's values the same way. - -``` -struct X { x: (), } - -let x = Some((X { x: () }, X { x: () })); -match x { - Some((ref y, ref z)) => {}, - // or Some((y, z)) => {} - None => panic!() -} -``` - -Solution #2: Implement the `Copy` trait for the `X` structure. - -However, please keep in mind that the first solution should be preferred. - -``` -#[derive(Clone, Copy)] -struct X { x: (), } - -let x = Some((X { x: () }, X { x: () })); -match x { - Some((y, ref z)) => {}, - None => panic!() -} -``` -"##, - -E0158: r##" -`const` and `static` mean different things. A `const` is a compile-time -constant, an alias for a literal value. This property means you can match it -directly within a pattern. - -The `static` keyword, on the other hand, guarantees a fixed location in memory. -This does not always mean that the value is constant. For example, a global -mutex can be declared `static` as well. - -If you want to match against a `static`, consider using a guard instead: - -``` -static FORTY_TWO: i32 = 42; - -match Some(42) { - Some(x) if x == FORTY_TWO => {} - _ => {} -} -``` -"##, - -E0162: r##" -An if-let pattern attempts to match the pattern, and enters the body if the -match was successful. If the match is irrefutable (when it cannot fail to -match), use a regular `let`-binding instead. 
For instance: - -```compile_fail,E0162 -struct Irrefutable(i32); -let irr = Irrefutable(0); - -// This fails to compile because the match is irrefutable. -if let Irrefutable(x) = irr { - // This body will always be executed. - // ... -} -``` - -Try this instead: - -``` -struct Irrefutable(i32); -let irr = Irrefutable(0); - -let Irrefutable(x) = irr; -println!("{}", x); -``` -"##, - -E0165: r##" -A while-let pattern attempts to match the pattern, and enters the body if the -match was successful. If the match is irrefutable (when it cannot fail to -match), use a regular `let`-binding inside a `loop` instead. For instance: - -```compile_fail,E0165 -struct Irrefutable(i32); -let irr = Irrefutable(0); - -// This fails to compile because the match is irrefutable. -while let Irrefutable(x) = irr { - // ... -} -``` - -Try this instead: - -```no_run -struct Irrefutable(i32); -let irr = Irrefutable(0); - -loop { - let Irrefutable(x) = irr; - // ... -} -``` -"##, - -E0170: r##" -Enum variants are qualified by default. For example, given this type: - -``` -enum Method { - GET, - POST, -} -``` - -You would match it using: - -``` -enum Method { - GET, - POST, -} - -let m = Method::GET; - -match m { - Method::GET => {}, - Method::POST => {}, -} -``` - -If you don't qualify the names, the code will bind new variables named "GET" and -"POST" instead. This behavior is likely not what you want, so `rustc` warns when -that happens. - -Qualified names are good practice, and most code works well with them. But if -you prefer them unqualified, you can import the variants into scope: - -``` -use Method::*; -enum Method { GET, POST } -# fn main() {} -``` - -If you want others to be able to import variants from your module directly, use -`pub use`: - -``` -pub use Method::*; -pub enum Method { GET, POST } -# fn main() {} -``` -"##, - - -E0297: r##" -#### Note: this error code is no longer emitted by the compiler. - -Patterns used to bind names must be irrefutable. 
That is, they must guarantee -that a name will be extracted in all cases. Instead of pattern matching the -loop variable, consider using a `match` or `if let` inside the loop body. For -instance: - -```compile_fail,E0005 -let xs : Vec> = vec![Some(1), None]; - -// This fails because `None` is not covered. -for Some(x) in xs { - // ... -} -``` - -Match inside the loop instead: - -``` -let xs : Vec> = vec![Some(1), None]; - -for item in xs { - match item { - Some(x) => {}, - None => {}, - } -} -``` - -Or use `if let`: - -``` -let xs : Vec> = vec![Some(1), None]; - -for item in xs { - if let Some(x) = item { - // ... - } -} -``` -"##, - -E0301: r##" -Mutable borrows are not allowed in pattern guards, because matching cannot have -side effects. Side effects could alter the matched object or the environment -on which the match depends in such a way, that the match would not be -exhaustive. For instance, the following would not match any arm if mutable -borrows were allowed: - -```compile_fail,E0301 -match Some(()) { - None => { }, - option if option.take().is_none() => { - /* impossible, option is `Some` */ - }, - Some(_) => { } // When the previous match failed, the option became `None`. -} -``` -"##, - -E0302: r##" -Assignments are not allowed in pattern guards, because matching cannot have -side effects. Side effects could alter the matched object or the environment -on which the match depends in such a way, that the match would not be -exhaustive. For instance, the following would not match any arm if assignments -were allowed: - -```compile_fail,E0302 -match Some(()) { - None => { }, - option if { option = None; false } => { }, - Some(_) => { } // When the previous match failed, the option became `None`. -} -``` -"##, - -E0303: r##" -In certain cases it is possible for sub-bindings to violate memory safety. -Updates to the borrow checker in a future version of Rust may remove this -restriction, but for now patterns must be rewritten without sub-bindings. 
- -Before: - -```compile_fail,E0303 -match Some("hi".to_string()) { - ref op_string_ref @ Some(s) => {}, - None => {}, -} -``` - -After: - -``` -match Some("hi".to_string()) { - Some(ref s) => { - let op_string_ref = &Some(s); - // ... - }, - None => {}, -} -``` - -The `op_string_ref` binding has type `&Option<&String>` in both cases. - -See also https://github.com/rust-lang/rust/issues/14587 -"##, - -} - - -register_diagnostics! { -// E0298, // cannot compare constants -// E0299, // mismatched types between arms -// E0471, // constant evaluation error (in pattern) -} diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs deleted file mode 100644 index 418bd4b5effc..000000000000 --- a/src/librustc_const_eval/eval.rs +++ /dev/null @@ -1,684 +0,0 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::middle::const_val::ConstVal::*; -use rustc::middle::const_val::ConstAggregate::*; -use rustc::middle::const_val::ErrKind::*; -use rustc::middle::const_val::{ByteArray, ConstVal, ConstEvalErr, EvalResult, ErrKind}; - -use rustc::hir::map::blocks::FnLikeNode; -use rustc::hir::def::{Def, CtorKind}; -use rustc::hir::def_id::DefId; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::LayoutOf; -use rustc::ty::util::IntTypeExt; -use rustc::ty::subst::{Substs, Subst}; -use rustc::util::common::ErrorReported; -use rustc::util::nodemap::NodeMap; - -use syntax::abi::Abi; -use syntax::ast; -use syntax::attr; -use rustc::hir::{self, Expr}; -use syntax_pos::Span; - -use std::cmp::Ordering; - -use rustc_const_math::*; -macro_rules! 
signal { - ($e:expr, $exn:expr) => { - return Err(ConstEvalErr { span: $e.span, kind: $exn }) - } -} - -macro_rules! math { - ($e:expr, $op:expr) => { - match $op { - Ok(val) => val, - Err(e) => signal!($e, ErrKind::from(e)), - } - } -} - -/// * `DefId` is the id of the constant. -/// * `Substs` is the monomorphized substitutions for the expression. -pub fn lookup_const_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - key: ty::ParamEnvAnd<'tcx, (DefId, &'tcx Substs<'tcx>)>) - -> Option<(DefId, &'tcx Substs<'tcx>)> { - ty::Instance::resolve( - tcx, - key.param_env, - key.value.0, - key.value.1, - ).map(|instance| (instance.def_id(), instance.substs)) -} - -pub struct ConstContext<'a, 'tcx: 'a> { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - tables: &'a ty::TypeckTables<'tcx>, - param_env: ty::ParamEnv<'tcx>, - substs: &'tcx Substs<'tcx>, - fn_args: Option>> -} - -impl<'a, 'tcx> ConstContext<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env_and_substs: ty::ParamEnvAnd<'tcx, &'tcx Substs<'tcx>>, - tables: &'a ty::TypeckTables<'tcx>) - -> Self { - ConstContext { - tcx, - param_env: param_env_and_substs.param_env, - tables, - substs: param_env_and_substs.value, - fn_args: None - } - } - - /// Evaluate a constant expression in a context where the expression isn't - /// guaranteed to be evaluable. 
- pub fn eval(&self, e: &'tcx Expr) -> EvalResult<'tcx> { - if self.tables.tainted_by_errors { - signal!(e, TypeckError); - } - eval_const_expr_partial(self, e) - } -} - -type CastResult<'tcx> = Result, ErrKind<'tcx>>; - -fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, - e: &'tcx Expr) -> EvalResult<'tcx> { - trace!("eval_const_expr_partial: {:?}", e); - let tcx = cx.tcx; - let ty = cx.tables.expr_ty(e).subst(tcx, cx.substs); - let mk_const = |val| tcx.mk_const(ty::Const { val, ty }); - - let result = match e.node { - hir::ExprUnary(hir::UnNeg, ref inner) => { - // unary neg literals already got their sign during creation - if let hir::ExprLit(ref lit) = inner.node { - use syntax::ast::*; - use syntax::ast::LitIntType::*; - const I8_OVERFLOW: u128 = i8::min_value() as u8 as u128; - const I16_OVERFLOW: u128 = i16::min_value() as u16 as u128; - const I32_OVERFLOW: u128 = i32::min_value() as u32 as u128; - const I64_OVERFLOW: u128 = i64::min_value() as u64 as u128; - const I128_OVERFLOW: u128 = i128::min_value() as u128; - let negated = match (&lit.node, &ty.sty) { - (&LitKind::Int(I8_OVERFLOW, _), &ty::TyInt(IntTy::I8)) | - (&LitKind::Int(I8_OVERFLOW, Signed(IntTy::I8)), _) => { - Some(I8(i8::min_value())) - }, - (&LitKind::Int(I16_OVERFLOW, _), &ty::TyInt(IntTy::I16)) | - (&LitKind::Int(I16_OVERFLOW, Signed(IntTy::I16)), _) => { - Some(I16(i16::min_value())) - }, - (&LitKind::Int(I32_OVERFLOW, _), &ty::TyInt(IntTy::I32)) | - (&LitKind::Int(I32_OVERFLOW, Signed(IntTy::I32)), _) => { - Some(I32(i32::min_value())) - }, - (&LitKind::Int(I64_OVERFLOW, _), &ty::TyInt(IntTy::I64)) | - (&LitKind::Int(I64_OVERFLOW, Signed(IntTy::I64)), _) => { - Some(I64(i64::min_value())) - }, - (&LitKind::Int(I128_OVERFLOW, _), &ty::TyInt(IntTy::I128)) | - (&LitKind::Int(I128_OVERFLOW, Signed(IntTy::I128)), _) => { - Some(I128(i128::min_value())) - }, - (&LitKind::Int(n, _), &ty::TyInt(IntTy::Isize)) | - (&LitKind::Int(n, Signed(IntTy::Isize)), _) => { - match 
tcx.sess.target.isize_ty { - IntTy::I16 => if n == I16_OVERFLOW { - Some(Isize(Is16(i16::min_value()))) - } else { - None - }, - IntTy::I32 => if n == I32_OVERFLOW { - Some(Isize(Is32(i32::min_value()))) - } else { - None - }, - IntTy::I64 => if n == I64_OVERFLOW { - Some(Isize(Is64(i64::min_value()))) - } else { - None - }, - _ => span_bug!(e.span, "typeck error") - } - }, - _ => None - }; - if let Some(i) = negated { - return Ok(mk_const(Integral(i))); - } - } - mk_const(match cx.eval(inner)?.val { - Float(f) => Float(-f), - Integral(i) => Integral(math!(e, -i)), - _ => signal!(e, TypeckError) - }) - } - hir::ExprUnary(hir::UnNot, ref inner) => { - mk_const(match cx.eval(inner)?.val { - Integral(i) => Integral(math!(e, !i)), - Bool(b) => Bool(!b), - _ => signal!(e, TypeckError) - }) - } - hir::ExprUnary(hir::UnDeref, _) => signal!(e, UnimplementedConstVal("deref operation")), - hir::ExprBinary(op, ref a, ref b) => { - // technically, if we don't have type hints, but integral eval - // gives us a type through a type-suffix, cast or const def type - // we need to re-eval the other value of the BinOp if it was - // not inferred - mk_const(match (cx.eval(a)?.val, cx.eval(b)?.val) { - (Float(a), Float(b)) => { - use std::cmp::Ordering::*; - match op.node { - hir::BiAdd => Float(math!(e, a + b)), - hir::BiSub => Float(math!(e, a - b)), - hir::BiMul => Float(math!(e, a * b)), - hir::BiDiv => Float(math!(e, a / b)), - hir::BiRem => Float(math!(e, a % b)), - hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal), - hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less), - hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater), - hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal), - hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less), - hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater), - _ => span_bug!(e.span, "typeck error"), - } - } - (Integral(a), Integral(b)) => { - use std::cmp::Ordering::*; - match op.node { - hir::BiAdd => Integral(math!(e, a + b)), - hir::BiSub => 
Integral(math!(e, a - b)), - hir::BiMul => Integral(math!(e, a * b)), - hir::BiDiv => Integral(math!(e, a / b)), - hir::BiRem => Integral(math!(e, a % b)), - hir::BiBitAnd => Integral(math!(e, a & b)), - hir::BiBitOr => Integral(math!(e, a | b)), - hir::BiBitXor => Integral(math!(e, a ^ b)), - hir::BiShl => Integral(math!(e, a << b)), - hir::BiShr => Integral(math!(e, a >> b)), - hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal), - hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less), - hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater), - hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal), - hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less), - hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater), - _ => span_bug!(e.span, "typeck error"), - } - } - (Bool(a), Bool(b)) => { - Bool(match op.node { - hir::BiAnd => a && b, - hir::BiOr => a || b, - hir::BiBitXor => a ^ b, - hir::BiBitAnd => a & b, - hir::BiBitOr => a | b, - hir::BiEq => a == b, - hir::BiNe => a != b, - hir::BiLt => a < b, - hir::BiLe => a <= b, - hir::BiGe => a >= b, - hir::BiGt => a > b, - _ => span_bug!(e.span, "typeck error"), - }) - } - (Char(a), Char(b)) => { - Bool(match op.node { - hir::BiEq => a == b, - hir::BiNe => a != b, - hir::BiLt => a < b, - hir::BiLe => a <= b, - hir::BiGe => a >= b, - hir::BiGt => a > b, - _ => span_bug!(e.span, "typeck error"), - }) - } - - _ => signal!(e, MiscBinaryOp), - }) - } - hir::ExprCast(ref base, _) => { - let base_val = cx.eval(base)?; - let base_ty = cx.tables.expr_ty(base).subst(tcx, cx.substs); - if ty == base_ty { - base_val - } else { - match cast_const(tcx, base_val.val, ty) { - Ok(val) => mk_const(val), - Err(kind) => signal!(e, kind), - } - } - } - hir::ExprPath(ref qpath) => { - let substs = cx.tables.node_substs(e.hir_id).subst(tcx, cx.substs); - match cx.tables.qpath_def(qpath, e.hir_id) { - Def::Const(def_id) | - Def::AssociatedConst(def_id) => { - let substs = tcx.normalize_associated_type_in_env(&substs, cx.param_env); - match 
tcx.at(e.span).const_eval(cx.param_env.and((def_id, substs))) { - Ok(val) => val, - Err(ConstEvalErr { kind: TypeckError, .. }) => { - signal!(e, TypeckError); - } - Err(err) => { - debug!("bad reference: {:?}, {:?}", err.description(), err.span); - signal!(e, ErroneousReferencedConstant(box err)) - }, - } - }, - Def::VariantCtor(variant_def, CtorKind::Const) => { - mk_const(Variant(variant_def)) - } - Def::VariantCtor(_, CtorKind::Fn) => { - signal!(e, UnimplementedConstVal("enum variants")); - } - Def::StructCtor(_, CtorKind::Const) => { - mk_const(Aggregate(Struct(&[]))) - } - Def::StructCtor(_, CtorKind::Fn) => { - signal!(e, UnimplementedConstVal("tuple struct constructors")) - } - Def::Local(id) => { - debug!("Def::Local({:?}): {:?}", id, cx.fn_args); - if let Some(&val) = cx.fn_args.as_ref().and_then(|args| args.get(&id)) { - val - } else { - signal!(e, NonConstPath); - } - }, - Def::Method(id) | Def::Fn(id) => mk_const(Function(id, substs)), - Def::Err => span_bug!(e.span, "typeck error"), - _ => signal!(e, NonConstPath), - } - } - hir::ExprCall(ref callee, ref args) => { - let (def_id, substs) = match cx.eval(callee)?.val { - Function(def_id, substs) => (def_id, substs), - _ => signal!(e, TypeckError), - }; - - if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic { - let layout_of = |ty: Ty<'tcx>| { - let ty = tcx.erase_regions(&ty); - (tcx.at(e.span), cx.param_env).layout_of(ty).map_err(|err| { - ConstEvalErr { span: e.span, kind: LayoutError(err) } - }) - }; - match &tcx.item_name(def_id)[..] 
{ - "size_of" => { - let size = layout_of(substs.type_at(0))?.size.bytes(); - return Ok(mk_const(Integral(Usize(ConstUsize::new(size, - tcx.sess.target.usize_ty).unwrap())))); - } - "min_align_of" => { - let align = layout_of(substs.type_at(0))?.align.abi(); - return Ok(mk_const(Integral(Usize(ConstUsize::new(align, - tcx.sess.target.usize_ty).unwrap())))); - } - _ => signal!(e, TypeckError) - } - } - - let body = if let Some(node_id) = tcx.hir.as_local_node_id(def_id) { - if let Some(fn_like) = FnLikeNode::from_node(tcx.hir.get(node_id)) { - if fn_like.constness() == hir::Constness::Const { - tcx.hir.body(fn_like.body()) - } else { - signal!(e, TypeckError) - } - } else { - signal!(e, TypeckError) - } - } else { - if tcx.is_const_fn(def_id) { - tcx.extern_const_body(def_id).body - } else { - signal!(e, TypeckError) - } - }; - - let arg_ids = body.arguments.iter().map(|arg| match arg.pat.node { - hir::PatKind::Binding(_, canonical_id, _, _) => Some(canonical_id), - _ => None - }).collect::>(); - assert_eq!(arg_ids.len(), args.len()); - - let mut call_args = NodeMap(); - for (arg, arg_expr) in arg_ids.into_iter().zip(args.iter()) { - let arg_val = cx.eval(arg_expr)?; - debug!("const call arg: {:?}", arg); - if let Some(id) = arg { - assert!(call_args.insert(id, arg_val).is_none()); - } - } - debug!("const call({:?})", call_args); - let callee_cx = ConstContext { - tcx, - param_env: cx.param_env, - tables: tcx.typeck_tables_of(def_id), - substs, - fn_args: Some(call_args) - }; - callee_cx.eval(&body.value)? 
- }, - hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ty) { - Ok(val) => mk_const(val), - Err(err) => signal!(e, err), - }, - hir::ExprBlock(ref block) => { - match block.expr { - Some(ref expr) => cx.eval(expr)?, - None => mk_const(Aggregate(Tuple(&[]))), - } - } - hir::ExprType(ref e, _) => cx.eval(e)?, - hir::ExprTup(ref fields) => { - let values = fields.iter().map(|e| cx.eval(e)).collect::, _>>()?; - mk_const(Aggregate(Tuple(tcx.alloc_const_slice(&values)))) - } - hir::ExprStruct(_, ref fields, _) => { - mk_const(Aggregate(Struct(tcx.alloc_name_const_slice(&fields.iter().map(|f| { - cx.eval(&f.expr).map(|v| (f.name.node, v)) - }).collect::, _>>()?)))) - } - hir::ExprIndex(ref arr, ref idx) => { - if !tcx.sess.features.borrow().const_indexing { - signal!(e, IndexOpFeatureGated); - } - let arr = cx.eval(arr)?; - let idx = match cx.eval(idx)?.val { - Integral(Usize(i)) => i.as_u64(), - _ => signal!(idx, IndexNotUsize), - }; - assert_eq!(idx as usize as u64, idx); - match arr.val { - Aggregate(Array(v)) => { - if let Some(&elem) = v.get(idx as usize) { - elem - } else { - let n = v.len() as u64; - signal!(e, IndexOutOfBounds { len: n, index: idx }) - } - } - - Aggregate(Repeat(.., n)) if idx >= n => { - signal!(e, IndexOutOfBounds { len: n, index: idx }) - } - Aggregate(Repeat(elem, _)) => elem, - - ByteStr(b) if idx >= b.data.len() as u64 => { - signal!(e, IndexOutOfBounds { len: b.data.len() as u64, index: idx }) - } - ByteStr(b) => { - mk_const(Integral(U8(b.data[idx as usize]))) - }, - - _ => signal!(e, IndexedNonVec), - } - } - hir::ExprArray(ref v) => { - let values = v.iter().map(|e| cx.eval(e)).collect::, _>>()?; - mk_const(Aggregate(Array(tcx.alloc_const_slice(&values)))) - } - hir::ExprRepeat(ref elem, _) => { - let n = match ty.sty { - ty::TyArray(_, n) => n.val.to_const_int().unwrap().to_u64().unwrap(), - _ => span_bug!(e.span, "typeck error") - }; - mk_const(Aggregate(Repeat(cx.eval(elem)?, n))) - }, - hir::ExprTupField(ref base, index) 
=> { - if let Aggregate(Tuple(fields)) = cx.eval(base)?.val { - fields[index.node] - } else { - signal!(base, ExpectedConstTuple); - } - } - hir::ExprField(ref base, field_name) => { - if let Aggregate(Struct(fields)) = cx.eval(base)?.val { - if let Some(&(_, f)) = fields.iter().find(|&&(name, _)| name == field_name.node) { - f - } else { - signal!(e, MissingStructField); - } - } else { - signal!(base, ExpectedConstStruct); - } - } - hir::ExprAddrOf(..) => signal!(e, UnimplementedConstVal("address operator")), - _ => signal!(e, MiscCatchAll) - }; - - Ok(result) -} - -fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - val: ConstInt, - ty: Ty<'tcx>) - -> CastResult<'tcx> { - let v = val.to_u128_unchecked(); - match ty.sty { - ty::TyBool if v == 0 => Ok(Bool(false)), - ty::TyBool if v == 1 => Ok(Bool(true)), - ty::TyInt(ast::IntTy::I8) => Ok(Integral(I8(v as i128 as i8))), - ty::TyInt(ast::IntTy::I16) => Ok(Integral(I16(v as i128 as i16))), - ty::TyInt(ast::IntTy::I32) => Ok(Integral(I32(v as i128 as i32))), - ty::TyInt(ast::IntTy::I64) => Ok(Integral(I64(v as i128 as i64))), - ty::TyInt(ast::IntTy::I128) => Ok(Integral(I128(v as i128))), - ty::TyInt(ast::IntTy::Isize) => { - Ok(Integral(Isize(ConstIsize::new_truncating(v as i128, tcx.sess.target.isize_ty)))) - }, - ty::TyUint(ast::UintTy::U8) => Ok(Integral(U8(v as u8))), - ty::TyUint(ast::UintTy::U16) => Ok(Integral(U16(v as u16))), - ty::TyUint(ast::UintTy::U32) => Ok(Integral(U32(v as u32))), - ty::TyUint(ast::UintTy::U64) => Ok(Integral(U64(v as u64))), - ty::TyUint(ast::UintTy::U128) => Ok(Integral(U128(v as u128))), - ty::TyUint(ast::UintTy::Usize) => { - Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.usize_ty)))) - }, - ty::TyFloat(fty) => { - if let Some(i) = val.to_u128() { - Ok(Float(ConstFloat::from_u128(i, fty))) - } else { - // The value must be negative, go through signed integers. 
- let i = val.to_u128_unchecked() as i128; - Ok(Float(ConstFloat::from_i128(i, fty))) - } - } - ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting an address to a raw ptr")), - ty::TyChar => match val { - U8(u) => Ok(Char(u as char)), - _ => bug!(), - }, - _ => Err(CannotCast), - } -} - -fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - val: ConstFloat, - ty: Ty<'tcx>) -> CastResult<'tcx> { - let int_width = |ty| { - ty::layout::Integer::from_attr(tcx, ty).size().bits() as usize - }; - match ty.sty { - ty::TyInt(ity) => { - if let Some(i) = val.to_i128(int_width(attr::SignedInt(ity))) { - cast_const_int(tcx, I128(i), ty) - } else { - Err(CannotCast) - } - } - ty::TyUint(uty) => { - if let Some(i) = val.to_u128(int_width(attr::UnsignedInt(uty))) { - cast_const_int(tcx, U128(i), ty) - } else { - Err(CannotCast) - } - } - ty::TyFloat(fty) => Ok(Float(val.convert(fty))), - _ => Err(CannotCast), - } -} - -fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - val: ConstVal<'tcx>, - ty: Ty<'tcx>) - -> CastResult<'tcx> { - match val { - Integral(i) => cast_const_int(tcx, i, ty), - Bool(b) => cast_const_int(tcx, U8(b as u8), ty), - Float(f) => cast_const_float(tcx, f, ty), - Char(c) => cast_const_int(tcx, U32(c as u32), ty), - Variant(v) => { - let adt = tcx.adt_def(tcx.parent_def_id(v).unwrap()); - let idx = adt.variant_index_with_id(v); - cast_const_int(tcx, adt.discriminant_for_variant(tcx, idx), ty) - } - Function(..) 
=> Err(UnimplementedConstVal("casting fn pointers")), - ByteStr(b) => match ty.sty { - ty::TyRawPtr(_) => { - Err(ErrKind::UnimplementedConstVal("casting a bytestr to a raw ptr")) - }, - ty::TyRef(_, ty::TypeAndMut { ref ty, mutbl: hir::MutImmutable }) => match ty.sty { - ty::TyArray(ty, n) => { - let n = n.val.to_const_int().unwrap().to_u64().unwrap(); - if ty == tcx.types.u8 && n == b.data.len() as u64 { - Ok(val) - } else { - Err(CannotCast) - } - } - ty::TySlice(_) => { - Err(ErrKind::UnimplementedConstVal("casting a bytestr to slice")) - }, - _ => Err(CannotCast), - }, - _ => Err(CannotCast), - }, - Str(s) => match ty.sty { - ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting a str to a raw ptr")), - ty::TyRef(_, ty::TypeAndMut { ref ty, mutbl: hir::MutImmutable }) => match ty.sty { - ty::TyStr => Ok(Str(s)), - _ => Err(CannotCast), - }, - _ => Err(CannotCast), - }, - _ => Err(CannotCast), - } -} - -fn lit_to_const<'a, 'tcx>(lit: &'tcx ast::LitKind, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - mut ty: Ty<'tcx>) - -> Result, ErrKind<'tcx>> { - use syntax::ast::*; - use syntax::ast::LitIntType::*; - - if let ty::TyAdt(adt, _) = ty.sty { - if adt.is_enum() { - ty = adt.repr.discr_type().to_ty(tcx) - } - } - - match *lit { - LitKind::Str(ref s, _) => Ok(Str(s.as_str())), - LitKind::ByteStr(ref data) => Ok(ByteStr(ByteArray { data })), - LitKind::Byte(n) => Ok(Integral(U8(n))), - LitKind::Int(n, hint) => { - match (&ty.sty, hint) { - (&ty::TyInt(ity), _) | - (_, Signed(ity)) => { - Ok(Integral(ConstInt::new_signed_truncating(n as i128, - ity, tcx.sess.target.isize_ty))) - } - (&ty::TyUint(uty), _) | - (_, Unsigned(uty)) => { - Ok(Integral(ConstInt::new_unsigned_truncating(n as u128, - uty, tcx.sess.target.usize_ty))) - } - _ => bug!() - } - } - LitKind::Float(n, fty) => { - parse_float(&n.as_str(), fty).map(Float) - } - LitKind::FloatUnsuffixed(n) => { - let fty = match ty.sty { - ty::TyFloat(fty) => fty, - _ => bug!() - }; - parse_float(&n.as_str(), 
fty).map(Float) - } - LitKind::Bool(b) => Ok(Bool(b)), - LitKind::Char(c) => Ok(Char(c)), - } -} - -fn parse_float<'tcx>(num: &str, fty: ast::FloatTy) - -> Result> { - ConstFloat::from_str(num, fty).map_err(|_| { - // FIXME(#31407) this is only necessary because float parsing is buggy - UnimplementedConstVal("could not evaluate float literal (see issue #31407)") - }) -} - -pub fn compare_const_vals(tcx: TyCtxt, span: Span, a: &ConstVal, b: &ConstVal) - -> Result -{ - let result = match (a, b) { - (&Integral(a), &Integral(b)) => a.try_cmp(b).ok(), - (&Float(a), &Float(b)) => a.try_cmp(b).ok(), - (&Str(ref a), &Str(ref b)) => Some(a.cmp(b)), - (&Bool(a), &Bool(b)) => Some(a.cmp(&b)), - (&ByteStr(a), &ByteStr(b)) => Some(a.data.cmp(b.data)), - (&Char(a), &Char(b)) => Some(a.cmp(&b)), - _ => None, - }; - - match result { - Some(result) => Ok(result), - None => { - // FIXME: can this ever be reached? - tcx.sess.delay_span_bug(span, - &format!("type mismatch comparing {:?} and {:?}", a, b)); - Err(ErrorReported) - } - } -} - -impl<'a, 'tcx> ConstContext<'a, 'tcx> { - pub fn compare_lit_exprs(&self, - span: Span, - a: &'tcx Expr, - b: &'tcx Expr) -> Result { - let tcx = self.tcx; - let a = match self.eval(a) { - Ok(a) => a, - Err(e) => { - e.report(tcx, a.span, "expression"); - return Err(ErrorReported); - } - }; - let b = match self.eval(b) { - Ok(b) => b, - Err(e) => { - e.report(tcx, b.span, "expression"); - return Err(ErrorReported); - } - }; - compare_const_vals(tcx, span, &a.val, &b.val) - } -} diff --git a/src/librustc_const_eval/lib.rs b/src/librustc_const_eval/lib.rs deleted file mode 100644 index b4563f6cf2e7..000000000000 --- a/src/librustc_const_eval/lib.rs +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! constant evaluation on the HIR and code to validate patterns/matches -//! -//! # Note -//! -//! This API is completely unstable and subject to change. - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] - -#![feature(rustc_diagnostic_macros)] -#![feature(slice_patterns)] -#![feature(box_patterns)] -#![feature(box_syntax)] -#![feature(i128_type)] -#![feature(from_ref)] - -extern crate arena; -#[macro_use] extern crate syntax; -#[macro_use] extern crate log; -#[macro_use] extern crate rustc; -extern crate rustc_const_math; -extern crate rustc_data_structures; -extern crate rustc_errors; -extern crate syntax_pos; - -// NB: This module needs to be declared first so diagnostics are -// registered before they are used. -mod diagnostics; - -mod eval; -mod _match; -pub mod check_match; -pub mod pattern; - -pub use eval::*; - -use rustc::ty::maps::Providers; - -pub fn provide(providers: &mut Providers) { - *providers = Providers { - check_match: check_match::check_match, - ..*providers - }; -} - -// Build the diagnostics array at the end so that the metadata includes error use sites. -#[cfg(not(stage0))] // remove after the next snapshot -__build_diagnostic_array! { librustc_const_eval, DIAGNOSTICS } diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs deleted file mode 100644 index 3cfa1d6797d1..000000000000 --- a/src/librustc_const_eval/pattern.rs +++ /dev/null @@ -1,966 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use eval; - -use rustc::middle::const_val::{ConstEvalErr, ConstVal}; -use rustc::mir::{Field, BorrowKind, Mutability}; -use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region}; -use rustc::ty::subst::{Substs, Kind}; -use rustc::hir::{self, PatKind, RangeEnd}; -use rustc::hir::def::{Def, CtorKind}; -use rustc::hir::pat_util::EnumerateAndAdjustIterator; - -use rustc_data_structures::indexed_vec::Idx; - -use std::fmt; -use syntax::ast; -use syntax::ptr::P; -use syntax_pos::Span; - -#[derive(Clone, Debug)] -pub enum PatternError<'tcx> { - StaticInPattern(Span), - ConstEval(ConstEvalErr<'tcx>), -} - -#[derive(Copy, Clone, Debug)] -pub enum BindingMode<'tcx> { - ByValue, - ByRef(Region<'tcx>, BorrowKind), -} - -#[derive(Clone, Debug)] -pub struct FieldPattern<'tcx> { - pub field: Field, - pub pattern: Pattern<'tcx>, -} - -#[derive(Clone, Debug)] -pub struct Pattern<'tcx> { - pub ty: Ty<'tcx>, - pub span: Span, - pub kind: Box>, -} - -#[derive(Clone, Debug)] -pub enum PatternKind<'tcx> { - Wild, - - /// x, ref x, x @ P, etc - Binding { - mutability: Mutability, - name: ast::Name, - mode: BindingMode<'tcx>, - var: ast::NodeId, - ty: Ty<'tcx>, - subpattern: Option>, - }, - - /// Foo(...) 
or Foo{...} or Foo, where `Foo` is a variant name from an adt with >1 variants - Variant { - adt_def: &'tcx AdtDef, - substs: &'tcx Substs<'tcx>, - variant_index: usize, - subpatterns: Vec>, - }, - - /// (...), Foo(...), Foo{...}, or Foo, where `Foo` is a variant name from an adt with 1 variant - Leaf { - subpatterns: Vec>, - }, - - /// box P, &P, &mut P, etc - Deref { - subpattern: Pattern<'tcx>, - }, - - Constant { - value: &'tcx ty::Const<'tcx>, - }, - - Range { - lo: &'tcx ty::Const<'tcx>, - hi: &'tcx ty::Const<'tcx>, - end: RangeEnd, - }, - - /// matches against a slice, checking the length and extracting elements - Slice { - prefix: Vec>, - slice: Option>, - suffix: Vec>, - }, - - /// fixed match against an array, irrefutable - Array { - prefix: Vec>, - slice: Option>, - suffix: Vec>, - }, -} - -fn print_const_val(value: &ConstVal, f: &mut fmt::Formatter) -> fmt::Result { - match *value { - ConstVal::Float(ref x) => write!(f, "{}", x), - ConstVal::Integral(ref i) => write!(f, "{}", i), - ConstVal::Str(ref s) => write!(f, "{:?}", &s[..]), - ConstVal::ByteStr(b) => write!(f, "{:?}", b.data), - ConstVal::Bool(b) => write!(f, "{:?}", b), - ConstVal::Char(c) => write!(f, "{:?}", c), - ConstVal::Variant(_) | - ConstVal::Function(..) | - ConstVal::Aggregate(_) | - ConstVal::Unevaluated(..) => bug!("{:?} not printable in a pattern", value) - } -} - -impl<'tcx> fmt::Display for Pattern<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self.kind { - PatternKind::Wild => write!(f, "_"), - PatternKind::Binding { mutability, name, mode, ref subpattern, .. } => { - let is_mut = match mode { - BindingMode::ByValue => mutability == Mutability::Mut, - BindingMode::ByRef(_, bk) => { - write!(f, "ref ")?; - bk == BorrowKind::Mut - } - }; - if is_mut { - write!(f, "mut ")?; - } - write!(f, "{}", name)?; - if let Some(ref subpattern) = *subpattern { - write!(f, " @ {}", subpattern)?; - } - Ok(()) - } - PatternKind::Variant { ref subpatterns, .. 
} | - PatternKind::Leaf { ref subpatterns } => { - let variant = match *self.kind { - PatternKind::Variant { adt_def, variant_index, .. } => { - Some(&adt_def.variants[variant_index]) - } - _ => if let ty::TyAdt(adt, _) = self.ty.sty { - if !adt.is_enum() { - Some(&adt.variants[0]) - } else { - None - } - } else { - None - } - }; - - let mut first = true; - let mut start_or_continue = || if first { first = false; "" } else { ", " }; - - if let Some(variant) = variant { - write!(f, "{}", variant.name)?; - - // Only for TyAdt we can have `S {...}`, - // which we handle separately here. - if variant.ctor_kind == CtorKind::Fictive { - write!(f, " {{ ")?; - - let mut printed = 0; - for p in subpatterns { - if let PatternKind::Wild = *p.pattern.kind { - continue; - } - let name = variant.fields[p.field.index()].name; - write!(f, "{}{}: {}", start_or_continue(), name, p.pattern)?; - printed += 1; - } - - if printed < variant.fields.len() { - write!(f, "{}..", start_or_continue())?; - } - - return write!(f, " }}"); - } - } - - let num_fields = variant.map_or(subpatterns.len(), |v| v.fields.len()); - if num_fields != 0 || variant.is_none() { - write!(f, "(")?; - for i in 0..num_fields { - write!(f, "{}", start_or_continue())?; - - // Common case: the field is where we expect it. - if let Some(p) = subpatterns.get(i) { - if p.field.index() == i { - write!(f, "{}", p.pattern)?; - continue; - } - } - - // Otherwise, we have to go looking for it. 
- if let Some(p) = subpatterns.iter().find(|p| p.field.index() == i) { - write!(f, "{}", p.pattern)?; - } else { - write!(f, "_")?; - } - } - write!(f, ")")?; - } - - Ok(()) - } - PatternKind::Deref { ref subpattern } => { - match self.ty.sty { - ty::TyAdt(def, _) if def.is_box() => write!(f, "box ")?, - ty::TyRef(_, mt) => { - write!(f, "&")?; - if mt.mutbl == hir::MutMutable { - write!(f, "mut ")?; - } - } - _ => bug!("{} is a bad Deref pattern type", self.ty) - } - write!(f, "{}", subpattern) - } - PatternKind::Constant { value } => { - print_const_val(&value.val, f) - } - PatternKind::Range { lo, hi, end } => { - print_const_val(&lo.val, f)?; - match end { - RangeEnd::Included => write!(f, "...")?, - RangeEnd::Excluded => write!(f, "..")?, - } - print_const_val(&hi.val, f) - } - PatternKind::Slice { ref prefix, ref slice, ref suffix } | - PatternKind::Array { ref prefix, ref slice, ref suffix } => { - let mut first = true; - let mut start_or_continue = || if first { first = false; "" } else { ", " }; - write!(f, "[")?; - for p in prefix { - write!(f, "{}{}", start_or_continue(), p)?; - } - if let Some(ref slice) = *slice { - write!(f, "{}", start_or_continue())?; - match *slice.kind { - PatternKind::Wild => {} - _ => write!(f, "{}", slice)? 
- } - write!(f, "..")?; - } - for p in suffix { - write!(f, "{}{}", start_or_continue(), p)?; - } - write!(f, "]") - } - } - } -} - -pub struct PatternContext<'a, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub param_env: ty::ParamEnv<'tcx>, - pub tables: &'a ty::TypeckTables<'tcx>, - pub substs: &'tcx Substs<'tcx>, - pub errors: Vec>, -} - -impl<'a, 'tcx> Pattern<'tcx> { - pub fn from_hir(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env_and_substs: ty::ParamEnvAnd<'tcx, &'tcx Substs<'tcx>>, - tables: &'a ty::TypeckTables<'tcx>, - pat: &'tcx hir::Pat) -> Self { - let mut pcx = PatternContext::new(tcx, param_env_and_substs, tables); - let result = pcx.lower_pattern(pat); - if !pcx.errors.is_empty() { - let msg = format!("encountered errors lowering pattern: {:?}", pcx.errors); - tcx.sess.delay_span_bug(pat.span, &msg); - } - debug!("Pattern::from_hir({:?}) = {:?}", pat, result); - result - } -} - -impl<'a, 'tcx> PatternContext<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env_and_substs: ty::ParamEnvAnd<'tcx, &'tcx Substs<'tcx>>, - tables: &'a ty::TypeckTables<'tcx>) -> Self { - PatternContext { - tcx, - param_env: param_env_and_substs.param_env, - tables, - substs: param_env_and_substs.value, - errors: vec![] - } - } - - pub fn lower_pattern(&mut self, pat: &'tcx hir::Pat) -> Pattern<'tcx> { - // When implicit dereferences have been inserted in this pattern, the unadjusted lowered - // pattern has the type that results *after* dereferencing. For example, in this code: - // - // ``` - // match &&Some(0i32) { - // Some(n) => { ... }, - // _ => { ... }, - // } - // ``` - // - // the type assigned to `Some(n)` in `unadjusted_pat` would be `Option` (this is - // determined in rustc_typeck::check::match). The adjustments would be - // - // `vec![&&Option, &Option]`. - // - // Applying the adjustments, we want to instead output `&&Some(n)` (as a HAIR pattern). 
So - // we wrap the unadjusted pattern in `PatternKind::Deref` repeatedly, consuming the - // adjustments in *reverse order* (last-in-first-out, so that the last `Deref` inserted - // gets the least-dereferenced type). - let unadjusted_pat = self.lower_pattern_unadjusted(pat); - self.tables - .pat_adjustments() - .get(pat.hir_id) - .unwrap_or(&vec![]) - .iter() - .rev() - .fold(unadjusted_pat, |pat, ref_ty| { - debug!("{:?}: wrapping pattern with type {:?}", pat, ref_ty); - Pattern { - span: pat.span, - ty: ref_ty, - kind: Box::new(PatternKind::Deref { subpattern: pat }), - } - }, - ) - } - - fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat) -> Pattern<'tcx> { - let mut ty = self.tables.node_id_to_type(pat.hir_id); - - let kind = match pat.node { - PatKind::Wild => PatternKind::Wild, - - PatKind::Lit(ref value) => self.lower_lit(value), - - PatKind::Range(ref lo, ref hi, end) => { - match (self.lower_lit(lo), self.lower_lit(hi)) { - (PatternKind::Constant { value: lo }, - PatternKind::Constant { value: hi }) => { - PatternKind::Range { lo, hi, end } - } - _ => PatternKind::Wild - } - } - - PatKind::Path(ref qpath) => { - return self.lower_path(qpath, pat.hir_id, pat.id, pat.span); - } - - PatKind::Ref(ref subpattern, _) | - PatKind::Box(ref subpattern) => { - PatternKind::Deref { subpattern: self.lower_pattern(subpattern) } - } - - PatKind::Slice(ref prefix, ref slice, ref suffix) => { - let ty = self.tables.node_id_to_type(pat.hir_id); - match ty.sty { - ty::TyRef(_, mt) => - PatternKind::Deref { - subpattern: Pattern { - ty: mt.ty, - span: pat.span, - kind: Box::new(self.slice_or_array_pattern( - pat.span, mt.ty, prefix, slice, suffix)) - }, - }, - - ty::TySlice(..) | - ty::TyArray(..) 
=> - self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix), - - ref sty => - span_bug!( - pat.span, - "unexpanded type for vector pattern: {:?}", - sty), - } - } - - PatKind::Tuple(ref subpatterns, ddpos) => { - let ty = self.tables.node_id_to_type(pat.hir_id); - match ty.sty { - ty::TyTuple(ref tys, _) => { - let subpatterns = - subpatterns.iter() - .enumerate_and_adjust(tys.len(), ddpos) - .map(|(i, subpattern)| FieldPattern { - field: Field::new(i), - pattern: self.lower_pattern(subpattern) - }) - .collect(); - - PatternKind::Leaf { subpatterns: subpatterns } - } - - ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty), - } - } - - PatKind::Binding(_, id, ref ident, ref sub) => { - let var_ty = self.tables.node_id_to_type(pat.hir_id); - let region = match var_ty.sty { - ty::TyRef(r, _) => Some(r), - _ => None, - }; - let bm = *self.tables.pat_binding_modes().get(pat.hir_id) - .expect("missing binding mode"); - let (mutability, mode) = match bm { - ty::BindByValue(hir::MutMutable) => - (Mutability::Mut, BindingMode::ByValue), - ty::BindByValue(hir::MutImmutable) => - (Mutability::Not, BindingMode::ByValue), - ty::BindByReference(hir::MutMutable) => - (Mutability::Not, BindingMode::ByRef( - region.unwrap(), BorrowKind::Mut)), - ty::BindByReference(hir::MutImmutable) => - (Mutability::Not, BindingMode::ByRef( - region.unwrap(), BorrowKind::Shared)), - }; - - // A ref x pattern is the same node used for x, and as such it has - // x's type, which is &T, where we want T (the type being matched). 
- if let ty::BindByReference(_) = bm { - if let ty::TyRef(_, mt) = ty.sty { - ty = mt.ty; - } else { - bug!("`ref {}` has wrong type {}", ident.node, ty); - } - } - - PatternKind::Binding { - mutability, - mode, - name: ident.node, - var: id, - ty: var_ty, - subpattern: self.lower_opt_pattern(sub), - } - } - - PatKind::TupleStruct(ref qpath, ref subpatterns, ddpos) => { - let def = self.tables.qpath_def(qpath, pat.hir_id); - let adt_def = match ty.sty { - ty::TyAdt(adt_def, _) => adt_def, - _ => span_bug!(pat.span, "tuple struct pattern not applied to an ADT"), - }; - let variant_def = adt_def.variant_of_def(def); - - let subpatterns = - subpatterns.iter() - .enumerate_and_adjust(variant_def.fields.len(), ddpos) - .map(|(i, field)| FieldPattern { - field: Field::new(i), - pattern: self.lower_pattern(field), - }) - .collect(); - self.lower_variant_or_leaf(def, ty, subpatterns) - } - - PatKind::Struct(ref qpath, ref fields, _) => { - let def = self.tables.qpath_def(qpath, pat.hir_id); - let adt_def = match ty.sty { - ty::TyAdt(adt_def, _) => adt_def, - _ => { - span_bug!( - pat.span, - "struct pattern not applied to an ADT"); - } - }; - let variant_def = adt_def.variant_of_def(def); - - let subpatterns = - fields.iter() - .map(|field| { - let index = variant_def.index_of_field_named(field.node.name); - let index = index.unwrap_or_else(|| { - span_bug!( - pat.span, - "no field with name {:?}", - field.node.name); - }); - FieldPattern { - field: Field::new(index), - pattern: self.lower_pattern(&field.node.pat), - } - }) - .collect(); - - self.lower_variant_or_leaf(def, ty, subpatterns) - } - }; - - Pattern { - span: pat.span, - ty, - kind: Box::new(kind), - } - } - - fn lower_patterns(&mut self, pats: &'tcx [P]) -> Vec> { - pats.iter().map(|p| self.lower_pattern(p)).collect() - } - - fn lower_opt_pattern(&mut self, pat: &'tcx Option>) -> Option> - { - pat.as_ref().map(|p| self.lower_pattern(p)) - } - - fn flatten_nested_slice_patterns( - &mut self, - prefix: Vec>, - 
slice: Option>, - suffix: Vec>) - -> (Vec>, Option>, Vec>) - { - let orig_slice = match slice { - Some(orig_slice) => orig_slice, - None => return (prefix, slice, suffix) - }; - let orig_prefix = prefix; - let orig_suffix = suffix; - - // dance because of intentional borrow-checker stupidity. - let kind = *orig_slice.kind; - match kind { - PatternKind::Slice { prefix, slice, mut suffix } | - PatternKind::Array { prefix, slice, mut suffix } => { - let mut orig_prefix = orig_prefix; - - orig_prefix.extend(prefix); - suffix.extend(orig_suffix); - - (orig_prefix, slice, suffix) - } - _ => { - (orig_prefix, Some(Pattern { - kind: box kind, ..orig_slice - }), orig_suffix) - } - } - } - - fn slice_or_array_pattern( - &mut self, - span: Span, - ty: Ty<'tcx>, - prefix: &'tcx [P], - slice: &'tcx Option>, - suffix: &'tcx [P]) - -> PatternKind<'tcx> - { - let prefix = self.lower_patterns(prefix); - let slice = self.lower_opt_pattern(slice); - let suffix = self.lower_patterns(suffix); - let (prefix, slice, suffix) = - self.flatten_nested_slice_patterns(prefix, slice, suffix); - - match ty.sty { - ty::TySlice(..) => { - // matching a slice or fixed-length array - PatternKind::Slice { prefix: prefix, slice: slice, suffix: suffix } - } - - ty::TyArray(_, len) => { - // fixed-length array - let len = len.val.to_const_int().unwrap().to_u64().unwrap(); - assert!(len >= prefix.len() as u64 + suffix.len() as u64); - PatternKind::Array { prefix: prefix, slice: slice, suffix: suffix } - } - - _ => { - span_bug!(span, "bad slice pattern type {:?}", ty); - } - } - } - - fn lower_variant_or_leaf( - &mut self, - def: Def, - ty: Ty<'tcx>, - subpatterns: Vec>) - -> PatternKind<'tcx> - { - match def { - Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) 
=> { - let enum_id = self.tcx.parent_def_id(variant_id).unwrap(); - let adt_def = self.tcx.adt_def(enum_id); - if adt_def.is_enum() { - let substs = match ty.sty { - ty::TyAdt(_, substs) | - ty::TyFnDef(_, substs) => substs, - _ => bug!("inappropriate type for def: {:?}", ty.sty), - }; - PatternKind::Variant { - adt_def, - substs, - variant_index: adt_def.variant_index_with_id(variant_id), - subpatterns, - } - } else { - PatternKind::Leaf { subpatterns: subpatterns } - } - } - - Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | - Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) => { - PatternKind::Leaf { subpatterns: subpatterns } - } - - _ => bug!() - } - } - - fn lower_path(&mut self, - qpath: &hir::QPath, - id: hir::HirId, - pat_id: ast::NodeId, - span: Span) - -> Pattern<'tcx> { - let ty = self.tables.node_id_to_type(id); - let def = self.tables.qpath_def(qpath, id); - let kind = match def { - Def::Const(def_id) | Def::AssociatedConst(def_id) => { - let substs = self.tables.node_substs(id); - match eval::lookup_const_by_id(self.tcx, self.param_env.and((def_id, substs))) { - Some((def_id, substs)) => { - // Enter the inlined constant's tables&substs temporarily. 
- let old_tables = self.tables; - let old_substs = self.substs; - self.tables = self.tcx.typeck_tables_of(def_id); - self.substs = substs; - let body = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { - self.tcx.hir.body(self.tcx.hir.body_owned_by(id)) - } else { - self.tcx.extern_const_body(def_id).body - }; - let pat = self.lower_const_expr(&body.value, pat_id, span); - self.tables = old_tables; - self.substs = old_substs; - return pat; - } - None => { - self.errors.push(PatternError::StaticInPattern(span)); - PatternKind::Wild - } - } - } - _ => self.lower_variant_or_leaf(def, ty, vec![]), - }; - - Pattern { - span, - ty, - kind: Box::new(kind), - } - } - - fn lower_lit(&mut self, expr: &'tcx hir::Expr) -> PatternKind<'tcx> { - let const_cx = eval::ConstContext::new(self.tcx, - self.param_env.and(self.substs), - self.tables); - match const_cx.eval(expr) { - Ok(value) => { - if let ConstVal::Variant(def_id) = value.val { - let ty = self.tables.expr_ty(expr); - self.lower_variant_or_leaf(Def::Variant(def_id), ty, vec![]) - } else { - PatternKind::Constant { value } - } - } - Err(e) => { - self.errors.push(PatternError::ConstEval(e)); - PatternKind::Wild - } - } - } - - fn lower_const_expr(&mut self, - expr: &'tcx hir::Expr, - pat_id: ast::NodeId, - span: Span) - -> Pattern<'tcx> { - let pat_ty = self.tables.expr_ty(expr); - debug!("expr={:?} pat_ty={:?} pat_id={}", expr, pat_ty, pat_id); - match pat_ty.sty { - ty::TyFloat(_) => { - self.tcx.sess.span_err(span, "floating point constants cannot be used in patterns"); - } - ty::TyAdt(adt_def, _) if adt_def.is_union() => { - // Matching on union fields is unsafe, we can't hide it in constants - self.tcx.sess.span_err(span, "cannot use unions in constant patterns"); - } - ty::TyAdt(adt_def, _) => { - if !self.tcx.has_attr(adt_def.did, "structural_match") { - let msg = format!("to use a constant of type `{}` in a pattern, \ - `{}` must be annotated with `#[derive(PartialEq, Eq)]`", - 
self.tcx.item_path_str(adt_def.did), - self.tcx.item_path_str(adt_def.did)); - self.tcx.sess.span_err(span, &msg); - } - } - _ => { } - } - let kind = match expr.node { - hir::ExprTup(ref exprs) => { - PatternKind::Leaf { - subpatterns: exprs.iter().enumerate().map(|(i, expr)| { - FieldPattern { - field: Field::new(i), - pattern: self.lower_const_expr(expr, pat_id, span) - } - }).collect() - } - } - - hir::ExprCall(ref callee, ref args) => { - let qpath = match callee.node { - hir::ExprPath(ref qpath) => qpath, - _ => bug!() - }; - let ty = self.tables.node_id_to_type(callee.hir_id); - let def = self.tables.qpath_def(qpath, callee.hir_id); - match def { - Def::Fn(..) | Def::Method(..) => self.lower_lit(expr), - _ => { - let subpatterns = args.iter().enumerate().map(|(i, expr)| { - FieldPattern { - field: Field::new(i), - pattern: self.lower_const_expr(expr, pat_id, span) - } - }).collect(); - self.lower_variant_or_leaf(def, ty, subpatterns) - } - } - } - - hir::ExprStruct(ref qpath, ref fields, None) => { - let def = self.tables.qpath_def(qpath, expr.hir_id); - let adt_def = match pat_ty.sty { - ty::TyAdt(adt_def, _) => adt_def, - _ => { - span_bug!( - expr.span, - "struct expr without ADT type"); - } - }; - let variant_def = adt_def.variant_of_def(def); - - let subpatterns = - fields.iter() - .map(|field| { - let index = variant_def.index_of_field_named(field.name.node); - let index = index.unwrap_or_else(|| { - span_bug!( - expr.span, - "no field with name {:?}", - field.name); - }); - FieldPattern { - field: Field::new(index), - pattern: self.lower_const_expr(&field.expr, pat_id, span), - } - }) - .collect(); - - self.lower_variant_or_leaf(def, pat_ty, subpatterns) - } - - hir::ExprArray(ref exprs) => { - let pats = exprs.iter() - .map(|expr| self.lower_const_expr(expr, pat_id, span)) - .collect(); - PatternKind::Array { - prefix: pats, - slice: None, - suffix: vec![] - } - } - - hir::ExprPath(ref qpath) => { - return self.lower_path(qpath, expr.hir_id, pat_id, 
span); - } - - _ => self.lower_lit(expr) - }; - - Pattern { - span, - ty: pat_ty, - kind: Box::new(kind), - } - } -} - -pub trait PatternFoldable<'tcx> : Sized { - fn fold_with>(&self, folder: &mut F) -> Self { - self.super_fold_with(folder) - } - - fn super_fold_with>(&self, folder: &mut F) -> Self; -} - -pub trait PatternFolder<'tcx> : Sized { - fn fold_pattern(&mut self, pattern: &Pattern<'tcx>) -> Pattern<'tcx> { - pattern.super_fold_with(self) - } - - fn fold_pattern_kind(&mut self, kind: &PatternKind<'tcx>) -> PatternKind<'tcx> { - kind.super_fold_with(self) - } -} - - -impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Box { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let content: T = (**self).fold_with(folder); - box content - } -} - -impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Vec { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.iter().map(|t| t.fold_with(folder)).collect() - } -} - -impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Option { - fn super_fold_with>(&self, folder: &mut F) -> Self{ - self.as_ref().map(|t| t.fold_with(folder)) - } -} - -macro_rules! 
CloneImpls { - (<$lt_tcx:tt> $($ty:ty),+) => { - $( - impl<$lt_tcx> PatternFoldable<$lt_tcx> for $ty { - fn super_fold_with>(&self, _: &mut F) -> Self { - Clone::clone(self) - } - } - )+ - } -} - -CloneImpls!{ <'tcx> - Span, Field, Mutability, ast::Name, ast::NodeId, usize, &'tcx ty::Const<'tcx>, - Region<'tcx>, Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef, - &'tcx Substs<'tcx>, &'tcx Kind<'tcx> -} - -impl<'tcx> PatternFoldable<'tcx> for FieldPattern<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - FieldPattern { - field: self.field.fold_with(folder), - pattern: self.pattern.fold_with(folder) - } - } -} - -impl<'tcx> PatternFoldable<'tcx> for Pattern<'tcx> { - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_pattern(self) - } - - fn super_fold_with>(&self, folder: &mut F) -> Self { - Pattern { - ty: self.ty.fold_with(folder), - span: self.span.fold_with(folder), - kind: self.kind.fold_with(folder) - } - } -} - -impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_pattern_kind(self) - } - - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - PatternKind::Wild => PatternKind::Wild, - PatternKind::Binding { - mutability, - name, - mode, - var, - ty, - ref subpattern, - } => PatternKind::Binding { - mutability: mutability.fold_with(folder), - name: name.fold_with(folder), - mode: mode.fold_with(folder), - var: var.fold_with(folder), - ty: ty.fold_with(folder), - subpattern: subpattern.fold_with(folder), - }, - PatternKind::Variant { - adt_def, - substs, - variant_index, - ref subpatterns, - } => PatternKind::Variant { - adt_def: adt_def.fold_with(folder), - substs: substs.fold_with(folder), - variant_index: variant_index.fold_with(folder), - subpatterns: subpatterns.fold_with(folder) - }, - PatternKind::Leaf { - ref subpatterns, - } => PatternKind::Leaf { - subpatterns: subpatterns.fold_with(folder), - }, - PatternKind::Deref { - ref subpattern, - } => 
PatternKind::Deref { - subpattern: subpattern.fold_with(folder), - }, - PatternKind::Constant { - value - } => PatternKind::Constant { - value: value.fold_with(folder) - }, - PatternKind::Range { - lo, - hi, - end, - } => PatternKind::Range { - lo: lo.fold_with(folder), - hi: hi.fold_with(folder), - end, - }, - PatternKind::Slice { - ref prefix, - ref slice, - ref suffix, - } => PatternKind::Slice { - prefix: prefix.fold_with(folder), - slice: slice.fold_with(folder), - suffix: suffix.fold_with(folder) - }, - PatternKind::Array { - ref prefix, - ref slice, - ref suffix - } => PatternKind::Array { - prefix: prefix.fold_with(folder), - slice: slice.fold_with(folder), - suffix: suffix.fold_with(folder) - }, - } - } -} diff --git a/src/librustc_const_math/Cargo.toml b/src/librustc_const_math/Cargo.toml deleted file mode 100644 index 41310ede3e08..000000000000 --- a/src/librustc_const_math/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "rustc_const_math" -version = "0.0.0" - -[lib] -name = "rustc_const_math" -path = "lib.rs" -crate-type = ["dylib"] - -[dependencies] -rustc_apfloat = { path = "../librustc_apfloat" } -serialize = { path = "../libserialize" } -syntax = { path = "../libsyntax" } diff --git a/src/librustc_const_math/err.rs b/src/librustc_const_math/err.rs deleted file mode 100644 index bd0a332436e6..000000000000 --- a/src/librustc_const_math/err.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use syntax::ast; - -#[derive(Debug, PartialEq, Eq, Clone, RustcEncodable, RustcDecodable)] -pub enum ConstMathErr { - NotInRange, - CmpBetweenUnequalTypes, - UnequalTypes(Op), - Overflow(Op), - ShiftNegative, - DivisionByZero, - RemainderByZero, - UnsignedNegation, - ULitOutOfRange(ast::UintTy), - LitOutOfRange(ast::IntTy), -} -pub use self::ConstMathErr::*; - -#[derive(Debug, PartialEq, Eq, Clone, RustcEncodable, RustcDecodable)] -pub enum Op { - Add, - Sub, - Mul, - Div, - Rem, - Shr, - Shl, - Neg, - BitAnd, - BitOr, - BitXor, -} - -impl ConstMathErr { - pub fn description(&self) -> &'static str { - use self::Op::*; - match *self { - NotInRange => "inferred value out of range", - CmpBetweenUnequalTypes => "compared two values of different types", - UnequalTypes(Add) => "tried to add two values of different types", - UnequalTypes(Sub) => "tried to subtract two values of different types", - UnequalTypes(Mul) => "tried to multiply two values of different types", - UnequalTypes(Div) => "tried to divide two values of different types", - UnequalTypes(Rem) => { - "tried to calculate the remainder of two values of different types" - }, - UnequalTypes(BitAnd) => "tried to bitand two values of different types", - UnequalTypes(BitOr) => "tried to bitor two values of different types", - UnequalTypes(BitXor) => "tried to xor two values of different types", - UnequalTypes(_) => unreachable!(), - Overflow(Add) => "attempt to add with overflow", - Overflow(Sub) => "attempt to subtract with overflow", - Overflow(Mul) => "attempt to multiply with overflow", - Overflow(Div) => "attempt to divide with overflow", - Overflow(Rem) => "attempt to calculate the remainder with overflow", - Overflow(Neg) => "attempt to negate with overflow", - Overflow(Shr) => "attempt to shift right with overflow", - Overflow(Shl) => "attempt to shift left with overflow", - Overflow(_) => unreachable!(), - ShiftNegative => "attempt to shift by a negative amount", - DivisionByZero => "attempt to divide 
by zero", - RemainderByZero => "attempt to calculate the remainder with a divisor of zero", - UnsignedNegation => "unary negation of unsigned integer", - ULitOutOfRange(ast::UintTy::U8) => "literal out of range for u8", - ULitOutOfRange(ast::UintTy::U16) => "literal out of range for u16", - ULitOutOfRange(ast::UintTy::U32) => "literal out of range for u32", - ULitOutOfRange(ast::UintTy::U64) => "literal out of range for u64", - ULitOutOfRange(ast::UintTy::U128) => "literal out of range for u128", - ULitOutOfRange(ast::UintTy::Usize) => "literal out of range for usize", - LitOutOfRange(ast::IntTy::I8) => "literal out of range for i8", - LitOutOfRange(ast::IntTy::I16) => "literal out of range for i16", - LitOutOfRange(ast::IntTy::I32) => "literal out of range for i32", - LitOutOfRange(ast::IntTy::I64) => "literal out of range for i64", - LitOutOfRange(ast::IntTy::I128) => "literal out of range for i128", - LitOutOfRange(ast::IntTy::Isize) => "literal out of range for isize", - } - } -} diff --git a/src/librustc_const_math/float.rs b/src/librustc_const_math/float.rs deleted file mode 100644 index 9d820ea8cbed..000000000000 --- a/src/librustc_const_math/float.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::cmp::Ordering; -use std::num::ParseFloatError; - -use syntax::ast; - -use rustc_apfloat::{Float, FloatConvert, Status}; -use rustc_apfloat::ieee::{Single, Double}; - -use super::err::*; - -// Note that equality for `ConstFloat` means that the it is the same -// constant, not that the rust values are equal. 
In particular, `NaN -// == NaN` (at least if it's the same NaN; distinct encodings for NaN -// are considering unequal). -#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct ConstFloat { - pub ty: ast::FloatTy, - - // This is a bit inefficient but it makes conversions below more - // ergonomic, and all of this will go away once `miri` is merged. - pub bits: u128, -} - -impl ConstFloat { - /// Description of the type, not the value - pub fn description(&self) -> &'static str { - self.ty.ty_to_string() - } - - /// Compares the values if they are of the same type - pub fn try_cmp(self, rhs: Self) -> Result { - match (self.ty, rhs.ty) { - (ast::FloatTy::F64, ast::FloatTy::F64) => { - let a = Double::from_bits(self.bits); - let b = Double::from_bits(rhs.bits); - // This is pretty bad but it is the existing behavior. - Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater)) - } - - (ast::FloatTy::F32, ast::FloatTy::F32) => { - let a = Single::from_bits(self.bits); - let b = Single::from_bits(rhs.bits); - Ok(a.partial_cmp(&b).unwrap_or(Ordering::Greater)) - } - - _ => Err(CmpBetweenUnequalTypes), - } - } - - pub fn from_i128(input: i128, ty: ast::FloatTy) -> Self { - let bits = match ty { - ast::FloatTy::F32 => Single::from_i128(input).value.to_bits(), - ast::FloatTy::F64 => Double::from_i128(input).value.to_bits() - }; - ConstFloat { bits, ty } - } - - pub fn from_u128(input: u128, ty: ast::FloatTy) -> Self { - let bits = match ty { - ast::FloatTy::F32 => Single::from_u128(input).value.to_bits(), - ast::FloatTy::F64 => Double::from_u128(input).value.to_bits() - }; - ConstFloat { bits, ty } - } - - pub fn from_str(num: &str, ty: ast::FloatTy) -> Result { - let bits = match ty { - ast::FloatTy::F32 => { - let rust_bits = num.parse::()?.to_bits() as u128; - let apfloat = num.parse::().unwrap_or_else(|e| { - panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e); - }); - let apfloat_bits = apfloat.to_bits(); - assert!(rust_bits == 
apfloat_bits, - "apfloat::ieee::Single gave different result for `{}`: \ - {}({:#x}) vs Rust's {}({:#x})", - num, apfloat, apfloat_bits, - Single::from_bits(rust_bits), rust_bits); - apfloat_bits - } - ast::FloatTy::F64 => { - let rust_bits = num.parse::()?.to_bits() as u128; - let apfloat = num.parse::().unwrap_or_else(|e| { - panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e); - }); - let apfloat_bits = apfloat.to_bits(); - assert!(rust_bits == apfloat_bits, - "apfloat::ieee::Double gave different result for `{}`: \ - {}({:#x}) vs Rust's {}({:#x})", - num, apfloat, apfloat_bits, - Double::from_bits(rust_bits), rust_bits); - apfloat_bits - } - }; - Ok(ConstFloat { bits, ty }) - } - - pub fn to_i128(self, width: usize) -> Option { - assert!(width <= 128); - let r = match self.ty { - ast::FloatTy::F32 => Single::from_bits(self.bits).to_i128(width), - ast::FloatTy::F64 => Double::from_bits(self.bits).to_i128(width) - }; - if r.status.intersects(Status::INVALID_OP) { - None - } else { - Some(r.value) - } - } - - pub fn to_u128(self, width: usize) -> Option { - assert!(width <= 128); - let r = match self.ty { - ast::FloatTy::F32 => Single::from_bits(self.bits).to_u128(width), - ast::FloatTy::F64 => Double::from_bits(self.bits).to_u128(width) - }; - if r.status.intersects(Status::INVALID_OP) { - None - } else { - Some(r.value) - } - } - - pub fn convert(self, to: ast::FloatTy) -> Self { - let bits = match (self.ty, to) { - (ast::FloatTy::F32, ast::FloatTy::F32) | - (ast::FloatTy::F64, ast::FloatTy::F64) => return self, - - (ast::FloatTy::F32, ast::FloatTy::F64) => { - Double::to_bits(Single::from_bits(self.bits).convert(&mut false).value) - } - (ast::FloatTy::F64, ast::FloatTy::F32) => { - Single::to_bits(Double::from_bits(self.bits).convert(&mut false).value) - } - }; - ConstFloat { bits, ty: to } - } -} - -impl ::std::fmt::Display for ConstFloat { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - match self.ty { - 
ast::FloatTy::F32 => write!(fmt, "{:#}", Single::from_bits(self.bits))?, - ast::FloatTy::F64 => write!(fmt, "{:#}", Double::from_bits(self.bits))?, - } - write!(fmt, "{}", self.ty) - } -} - -impl ::std::fmt::Debug for ConstFloat { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - ::std::fmt::Display::fmt(self, fmt) - } -} - -macro_rules! derive_binop { - ($op:ident, $func:ident) => { - impl ::std::ops::$op for ConstFloat { - type Output = Result; - fn $func(self, rhs: Self) -> Result { - let bits = match (self.ty, rhs.ty) { - (ast::FloatTy::F32, ast::FloatTy::F32) =>{ - let a = Single::from_bits(self.bits); - let b = Single::from_bits(rhs.bits); - a.$func(b).value.to_bits() - } - (ast::FloatTy::F64, ast::FloatTy::F64) => { - let a = Double::from_bits(self.bits); - let b = Double::from_bits(rhs.bits); - a.$func(b).value.to_bits() - } - _ => return Err(UnequalTypes(Op::$op)), - }; - Ok(ConstFloat { bits, ty: self.ty }) - } - } - } -} - -derive_binop!(Add, add); -derive_binop!(Sub, sub); -derive_binop!(Mul, mul); -derive_binop!(Div, div); -derive_binop!(Rem, rem); - -impl ::std::ops::Neg for ConstFloat { - type Output = Self; - fn neg(self) -> Self { - let bits = match self.ty { - ast::FloatTy::F32 => (-Single::from_bits(self.bits)).to_bits(), - ast::FloatTy::F64 => (-Double::from_bits(self.bits)).to_bits(), - }; - ConstFloat { bits, ty: self.ty } - } -} - -/// This is `f32::MAX + (0.5 ULP)` as an integer. Numbers greater or equal to this -/// are rounded to infinity when converted to `f32`. -/// -/// NB: Computed as maximum significand with an extra 1 bit added (for the half ULP) -/// shifted by the maximum exponent (accounting for normalization). 
-pub const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) - << (Single::MAX_EXP - Single::PRECISION as i16); diff --git a/src/librustc_const_math/int.rs b/src/librustc_const_math/int.rs deleted file mode 100644 index 4ec27d7ade56..000000000000 --- a/src/librustc_const_math/int.rs +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::cmp::Ordering; -use syntax::attr::IntType; -use syntax::ast::{IntTy, UintTy}; - -use super::isize::*; -use super::usize::*; -use super::err::*; - -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)] -pub enum ConstInt { - I8(i8), - I16(i16), - I32(i32), - I64(i64), - I128(i128), - Isize(ConstIsize), - U8(u8), - U16(u16), - U32(u32), - U64(u64), - U128(u128), - Usize(ConstUsize), -} -pub use self::ConstInt::*; - - -macro_rules! 
bounds { - ($ct: ty, $($t:ident $min:ident $max:ident)*) => { - $( - pub const $min: $ct = $t::min_value() as $ct; - pub const $max: $ct = $t::max_value() as $ct; - )* - }; - ($ct: ty: $min_val: expr, $($t:ident $min:ident $max:ident)*) => { - $( - pub const $min: $ct = $min_val; - pub const $max: $ct = $t::max_value() as $ct; - )* - } -} - -mod ubounds { - #![allow(dead_code)] - bounds!{u128: 0, - i8 I8MIN I8MAX i16 I16MIN I16MAX i32 I32MIN I32MAX i64 I64MIN I64MAX i128 I128MIN I128MAX - u8 U8MIN U8MAX u16 U16MIN U16MAX u32 U32MIN U32MAX u64 U64MIN U64MAX u128 U128MIN U128MAX - // do not add constants for isize/usize, because these are guaranteed to be wrong for - // arbitrary host/target combinations - } -} - -mod ibounds { - #![allow(dead_code)] - bounds!(i128, u64 U64MIN U64MAX); - - pub const U128MIN: i128 = 0; - pub const U128MAX: i128 = i128::max_value(); - - bounds!{i128, - i8 I8MIN I8MAX i16 I16MIN I16MAX i32 I32MIN I32MAX i64 I64MIN I64MAX i128 I128MIN I128MAX - u8 U8MIN U8MAX u16 U16MIN U16MAX u32 U32MIN U32MAX - // do not add constants for isize/usize, because these are guaranteed to be wrong for - // arbitrary host/target combinations - } -} - -impl ConstInt { - /// Creates a new unsigned ConstInt with matching type while also checking that overflow does - /// not happen. - pub fn new_unsigned(val: u128, ty: UintTy, usize_ty: UintTy) -> Option { - match ty { - UintTy::U8 if val <= ubounds::U8MAX => Some(U8(val as u8)), - UintTy::U16 if val <= ubounds::U16MAX => Some(U16(val as u16)), - UintTy::U32 if val <= ubounds::U32MAX => Some(U32(val as u32)), - UintTy::U64 if val <= ubounds::U64MAX => Some(U64(val as u64)), - UintTy::Usize if val <= ubounds::U64MAX => ConstUsize::new(val as u64, usize_ty).ok() - .map(Usize), - UintTy::U128 => Some(U128(val)), - _ => None - } - } - - /// Creates a new signed ConstInt with matching type while also checking that overflow does - /// not happen. 
- pub fn new_signed(val: i128, ty: IntTy, isize_ty: IntTy) -> Option { - match ty { - IntTy::I8 if val <= ibounds::I8MAX => Some(I8(val as i8)), - IntTy::I16 if val <= ibounds::I16MAX => Some(I16(val as i16)), - IntTy::I32 if val <= ibounds::I32MAX => Some(I32(val as i32)), - IntTy::I64 if val <= ibounds::I64MAX => Some(I64(val as i64)), - IntTy::Isize if val <= ibounds::I64MAX => ConstIsize::new(val as i64, isize_ty).ok() - .map(Isize), - IntTy::I128 => Some(I128(val)), - _ => None - } - } - - /// Creates a new unsigned ConstInt with matching type. - pub fn new_unsigned_truncating(val: u128, ty: UintTy, usize_ty: UintTy) -> ConstInt { - match ty { - UintTy::U8 => U8(val as u8), - UintTy::U16 => U16(val as u16), - UintTy::U32 => U32(val as u32), - UintTy::U64 => U64(val as u64), - UintTy::Usize => Usize(ConstUsize::new_truncating(val, usize_ty)), - UintTy::U128 => U128(val) - } - } - - /// Creates a new signed ConstInt with matching type. - pub fn new_signed_truncating(val: i128, ty: IntTy, isize_ty: IntTy) -> ConstInt { - match ty { - IntTy::I8 => I8(val as i8), - IntTy::I16 => I16(val as i16), - IntTy::I32 => I32(val as i32), - IntTy::I64 => I64(val as i64), - IntTy::Isize => Isize(ConstIsize::new_truncating(val, isize_ty)), - IntTy::I128 => I128(val) - } - } - - /// Description of the type, not the value - pub fn description(&self) -> &'static str { - match *self { - I8(_) => "i8", - I16(_) => "i16", - I32(_) => "i32", - I64(_) => "i64", - I128(_) => "i128", - Isize(_) => "isize", - U8(_) => "u8", - U16(_) => "u16", - U32(_) => "u32", - U64(_) => "u64", - U128(_) => "u128", - Usize(_) => "usize", - } - } - - /// Erases the type and returns a u128. 
- /// This is not the same as `-5i8 as u128` but as `-5i8 as i128 as u128` - pub fn to_u128_unchecked(self) -> u128 { - match self { - I8(i) => i as i128 as u128, - I16(i) => i as i128 as u128, - I32(i) => i as i128 as u128, - I64(i) => i as i128 as u128, - I128(i) => i as i128 as u128, - Isize(Is16(i)) => i as i128 as u128, - Isize(Is32(i)) => i as i128 as u128, - Isize(Is64(i)) => i as i128 as u128, - U8(i) => i as u128, - U16(i) => i as u128, - U32(i) => i as u128, - U64(i) => i as u128, - U128(i) => i as u128, - Usize(Us16(i)) => i as u128, - Usize(Us32(i)) => i as u128, - Usize(Us64(i)) => i as u128, - } - } - - /// Converts the value to a `u32` if it's in the range 0...std::u32::MAX - pub fn to_u32(&self) -> Option { - self.to_u128().and_then(|v| if v <= u32::max_value() as u128 { - Some(v as u32) - } else { - None - }) - } - - /// Converts the value to a `u64` if it's in the range 0...std::u64::MAX - pub fn to_u64(&self) -> Option { - self.to_u128().and_then(|v| if v <= u64::max_value() as u128 { - Some(v as u64) - } else { - None - }) - } - - /// Converts the value to a `u128` if it's in the range 0...std::u128::MAX - pub fn to_u128(&self) -> Option { - match *self { - I8(v) if v >= 0 => Some(v as u128), - I16(v) if v >= 0 => Some(v as u128), - I32(v) if v >= 0 => Some(v as u128), - I64(v) if v >= 0 => Some(v as u128), - I128(v) if v >= 0 => Some(v as u128), - Isize(Is16(v)) if v >= 0 => Some(v as u128), - Isize(Is32(v)) if v >= 0 => Some(v as u128), - Isize(Is64(v)) if v >= 0 => Some(v as u128), - U8(v) => Some(v as u128), - U16(v) => Some(v as u128), - U32(v) => Some(v as u128), - U64(v) => Some(v as u128), - U128(v) => Some(v as u128), - Usize(Us16(v)) => Some(v as u128), - Usize(Us32(v)) => Some(v as u128), - Usize(Us64(v)) => Some(v as u128), - _ => None, - } - } - - pub fn is_negative(&self) -> bool { - match *self { - I8(v) => v < 0, - I16(v) => v < 0, - I32(v) => v < 0, - I64(v) => v < 0, - I128(v) => v < 0, - Isize(Is16(v)) => v < 0, - 
Isize(Is32(v)) => v < 0, - Isize(Is64(v)) => v < 0, - _ => false, - } - } - - /// Compares the values if they are of the same type - pub fn try_cmp(self, rhs: Self) -> Result<::std::cmp::Ordering, ConstMathErr> { - match (self, rhs) { - (I8(a), I8(b)) => Ok(a.cmp(&b)), - (I16(a), I16(b)) => Ok(a.cmp(&b)), - (I32(a), I32(b)) => Ok(a.cmp(&b)), - (I64(a), I64(b)) => Ok(a.cmp(&b)), - (I128(a), I128(b)) => Ok(a.cmp(&b)), - (Isize(Is16(a)), Isize(Is16(b))) => Ok(a.cmp(&b)), - (Isize(Is32(a)), Isize(Is32(b))) => Ok(a.cmp(&b)), - (Isize(Is64(a)), Isize(Is64(b))) => Ok(a.cmp(&b)), - (U8(a), U8(b)) => Ok(a.cmp(&b)), - (U16(a), U16(b)) => Ok(a.cmp(&b)), - (U32(a), U32(b)) => Ok(a.cmp(&b)), - (U64(a), U64(b)) => Ok(a.cmp(&b)), - (U128(a), U128(b)) => Ok(a.cmp(&b)), - (Usize(Us16(a)), Usize(Us16(b))) => Ok(a.cmp(&b)), - (Usize(Us32(a)), Usize(Us32(b))) => Ok(a.cmp(&b)), - (Usize(Us64(a)), Usize(Us64(b))) => Ok(a.cmp(&b)), - _ => Err(CmpBetweenUnequalTypes), - } - } - - /// Adds 1 to the value and wraps around if the maximum for the type is reached - pub fn wrap_incr(self) -> Self { - macro_rules! 
add1 { - ($e:expr) => { ($e).wrapping_add(1) } - } - match self { - ConstInt::I8(i) => ConstInt::I8(add1!(i)), - ConstInt::I16(i) => ConstInt::I16(add1!(i)), - ConstInt::I32(i) => ConstInt::I32(add1!(i)), - ConstInt::I64(i) => ConstInt::I64(add1!(i)), - ConstInt::I128(i) => ConstInt::I128(add1!(i)), - ConstInt::Isize(ConstIsize::Is16(i)) => ConstInt::Isize(ConstIsize::Is16(add1!(i))), - ConstInt::Isize(ConstIsize::Is32(i)) => ConstInt::Isize(ConstIsize::Is32(add1!(i))), - ConstInt::Isize(ConstIsize::Is64(i)) => ConstInt::Isize(ConstIsize::Is64(add1!(i))), - ConstInt::U8(i) => ConstInt::U8(add1!(i)), - ConstInt::U16(i) => ConstInt::U16(add1!(i)), - ConstInt::U32(i) => ConstInt::U32(add1!(i)), - ConstInt::U64(i) => ConstInt::U64(add1!(i)), - ConstInt::U128(i) => ConstInt::U128(add1!(i)), - ConstInt::Usize(ConstUsize::Us16(i)) => ConstInt::Usize(ConstUsize::Us16(add1!(i))), - ConstInt::Usize(ConstUsize::Us32(i)) => ConstInt::Usize(ConstUsize::Us32(add1!(i))), - ConstInt::Usize(ConstUsize::Us64(i)) => ConstInt::Usize(ConstUsize::Us64(add1!(i))), - } - } - - pub fn int_type(self) -> IntType { - match self { - ConstInt::I8(_) => IntType::SignedInt(IntTy::I8), - ConstInt::I16(_) => IntType::SignedInt(IntTy::I16), - ConstInt::I32(_) => IntType::SignedInt(IntTy::I32), - ConstInt::I64(_) => IntType::SignedInt(IntTy::I64), - ConstInt::I128(_) => IntType::SignedInt(IntTy::I128), - ConstInt::Isize(_) => IntType::SignedInt(IntTy::Isize), - ConstInt::U8(_) => IntType::UnsignedInt(UintTy::U8), - ConstInt::U16(_) => IntType::UnsignedInt(UintTy::U16), - ConstInt::U32(_) => IntType::UnsignedInt(UintTy::U32), - ConstInt::U64(_) => IntType::UnsignedInt(UintTy::U64), - ConstInt::U128(_) => IntType::UnsignedInt(UintTy::U128), - ConstInt::Usize(_) => IntType::UnsignedInt(UintTy::Usize), - } - } -} - -impl ::std::cmp::PartialOrd for ConstInt { - fn partial_cmp(&self, other: &Self) -> Option { - self.try_cmp(*other).ok() - } -} - -impl ::std::cmp::Ord for ConstInt { - fn cmp(&self, other: 
&Self) -> Ordering { - self.try_cmp(*other).unwrap() - } -} - -impl ::std::fmt::Display for ConstInt { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - match *self { - I8(i) => write!(fmt, "{}i8", i), - I16(i) => write!(fmt, "{}i16", i), - I32(i) => write!(fmt, "{}i32", i), - I64(i) => write!(fmt, "{}i64", i), - I128(i) => write!(fmt, "{}i128", i), - Isize(i) => write!(fmt, "{}isize", i), - U8(i) => write!(fmt, "{}u8", i), - U16(i) => write!(fmt, "{}u16", i), - U32(i) => write!(fmt, "{}u32", i), - U64(i) => write!(fmt, "{}u64", i), - U128(i) => write!(fmt, "{}u128", i), - Usize(i) => write!(fmt, "{}usize", i), - } - } -} - -macro_rules! overflowing { - ($e:expr, $err:expr) => {{ - if $e.1 { - return Err(Overflow($err)); - } else { - $e.0 - } - }} -} - -macro_rules! impl_binop { - ($op:ident, $func:ident, $checked_func:ident) => { - impl ::std::ops::$op for ConstInt { - type Output = Result; - fn $func(self, rhs: Self) -> Result { - match (self, rhs) { - (I8(a), I8(b)) => a.$checked_func(b).map(I8), - (I16(a), I16(b)) => a.$checked_func(b).map(I16), - (I32(a), I32(b)) => a.$checked_func(b).map(I32), - (I64(a), I64(b)) => a.$checked_func(b).map(I64), - (I128(a), I128(b)) => a.$checked_func(b).map(I128), - (Isize(Is16(a)), Isize(Is16(b))) => a.$checked_func(b).map(Is16).map(Isize), - (Isize(Is32(a)), Isize(Is32(b))) => a.$checked_func(b).map(Is32).map(Isize), - (Isize(Is64(a)), Isize(Is64(b))) => a.$checked_func(b).map(Is64).map(Isize), - (U8(a), U8(b)) => a.$checked_func(b).map(U8), - (U16(a), U16(b)) => a.$checked_func(b).map(U16), - (U32(a), U32(b)) => a.$checked_func(b).map(U32), - (U64(a), U64(b)) => a.$checked_func(b).map(U64), - (U128(a), U128(b)) => a.$checked_func(b).map(U128), - (Usize(Us16(a)), Usize(Us16(b))) => a.$checked_func(b).map(Us16).map(Usize), - (Usize(Us32(a)), Usize(Us32(b))) => a.$checked_func(b).map(Us32).map(Usize), - (Usize(Us64(a)), Usize(Us64(b))) => a.$checked_func(b).map(Us64).map(Usize), - _ => 
return Err(UnequalTypes(Op::$op)), - }.ok_or(Overflow(Op::$op)) - } - } - } -} - -macro_rules! derive_binop { - ($op:ident, $func:ident) => { - impl ::std::ops::$op for ConstInt { - type Output = Result; - fn $func(self, rhs: Self) -> Result { - match (self, rhs) { - (I8(a), I8(b)) => Ok(I8(a.$func(b))), - (I16(a), I16(b)) => Ok(I16(a.$func(b))), - (I32(a), I32(b)) => Ok(I32(a.$func(b))), - (I64(a), I64(b)) => Ok(I64(a.$func(b))), - (I128(a), I128(b)) => Ok(I128(a.$func(b))), - (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a.$func(b)))), - (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a.$func(b)))), - (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a.$func(b)))), - (U8(a), U8(b)) => Ok(U8(a.$func(b))), - (U16(a), U16(b)) => Ok(U16(a.$func(b))), - (U32(a), U32(b)) => Ok(U32(a.$func(b))), - (U64(a), U64(b)) => Ok(U64(a.$func(b))), - (U128(a), U128(b)) => Ok(U128(a.$func(b))), - (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a.$func(b)))), - (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a.$func(b)))), - (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a.$func(b)))), - _ => Err(UnequalTypes(Op::$op)), - } - } - } - } -} - -impl_binop!(Add, add, checked_add); -impl_binop!(Sub, sub, checked_sub); -impl_binop!(Mul, mul, checked_mul); -derive_binop!(BitAnd, bitand); -derive_binop!(BitOr, bitor); -derive_binop!(BitXor, bitxor); - -const I128_MIN: i128 = ::std::i128::MIN; - -fn check_division( - lhs: ConstInt, - rhs: ConstInt, - op: Op, - zerr: ConstMathErr, -) -> Result<(), ConstMathErr> { - match (lhs, rhs) { - (I8(_), I8(0)) => Err(zerr), - (I16(_), I16(0)) => Err(zerr), - (I32(_), I32(0)) => Err(zerr), - (I64(_), I64(0)) => Err(zerr), - (I128(_), I128(0)) => Err(zerr), - (Isize(_), Isize(Is16(0))) => Err(zerr), - (Isize(_), Isize(Is32(0))) => Err(zerr), - (Isize(_), Isize(Is64(0))) => Err(zerr), - - (U8(_), U8(0)) => Err(zerr), - (U16(_), U16(0)) => Err(zerr), - (U32(_), U32(0)) => Err(zerr), - (U64(_), U64(0)) => Err(zerr), - (U128(_), U128(0)) => 
Err(zerr), - (Usize(_), Usize(Us16(0))) => Err(zerr), - (Usize(_), Usize(Us32(0))) => Err(zerr), - (Usize(_), Usize(Us64(0))) => Err(zerr), - - (I8(::std::i8::MIN), I8(-1)) => Err(Overflow(op)), - (I16(::std::i16::MIN), I16(-1)) => Err(Overflow(op)), - (I32(::std::i32::MIN), I32(-1)) => Err(Overflow(op)), - (I64(::std::i64::MIN), I64(-1)) => Err(Overflow(op)), - (I128(I128_MIN), I128(-1)) => Err(Overflow(op)), - (Isize(Is16(::std::i16::MIN)), Isize(Is16(-1))) => Err(Overflow(op)), - (Isize(Is32(::std::i32::MIN)), Isize(Is32(-1))) => Err(Overflow(op)), - (Isize(Is64(::std::i64::MIN)), Isize(Is64(-1))) => Err(Overflow(op)), - - _ => Ok(()), - } -} - -impl ::std::ops::Div for ConstInt { - type Output = Result; - fn div(self, rhs: Self) -> Result { - let (lhs, rhs) = (self, rhs); - check_division(lhs, rhs, Op::Div, DivisionByZero)?; - match (lhs, rhs) { - (I8(a), I8(b)) => Ok(I8(a/b)), - (I16(a), I16(b)) => Ok(I16(a/b)), - (I32(a), I32(b)) => Ok(I32(a/b)), - (I64(a), I64(b)) => Ok(I64(a/b)), - (I128(a), I128(b)) => Ok(I128(a/b)), - (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a/b))), - (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a/b))), - (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a/b))), - - (U8(a), U8(b)) => Ok(U8(a/b)), - (U16(a), U16(b)) => Ok(U16(a/b)), - (U32(a), U32(b)) => Ok(U32(a/b)), - (U64(a), U64(b)) => Ok(U64(a/b)), - (U128(a), U128(b)) => Ok(U128(a/b)), - (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a/b))), - (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a/b))), - (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a/b))), - - _ => Err(UnequalTypes(Op::Div)), - } - } -} - -impl ::std::ops::Rem for ConstInt { - type Output = Result; - fn rem(self, rhs: Self) -> Result { - let (lhs, rhs) = (self, rhs); - // should INT_MIN%-1 be zero or an error? 
- check_division(lhs, rhs, Op::Rem, RemainderByZero)?; - match (lhs, rhs) { - (I8(a), I8(b)) => Ok(I8(a%b)), - (I16(a), I16(b)) => Ok(I16(a%b)), - (I32(a), I32(b)) => Ok(I32(a%b)), - (I64(a), I64(b)) => Ok(I64(a%b)), - (I128(a), I128(b)) => Ok(I128(a%b)), - (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a%b))), - (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a%b))), - (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a%b))), - - (U8(a), U8(b)) => Ok(U8(a%b)), - (U16(a), U16(b)) => Ok(U16(a%b)), - (U32(a), U32(b)) => Ok(U32(a%b)), - (U64(a), U64(b)) => Ok(U64(a%b)), - (U128(a), U128(b)) => Ok(U128(a%b)), - (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a%b))), - (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a%b))), - (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a%b))), - - _ => Err(UnequalTypes(Op::Rem)), - } - } -} - -impl ::std::ops::Shl for ConstInt { - type Output = Result; - fn shl(self, rhs: Self) -> Result { - let b = rhs.to_u32().ok_or(ShiftNegative)?; - match self { - I8(a) => Ok(I8(overflowing!(a.overflowing_shl(b), Op::Shl))), - I16(a) => Ok(I16(overflowing!(a.overflowing_shl(b), Op::Shl))), - I32(a) => Ok(I32(overflowing!(a.overflowing_shl(b), Op::Shl))), - I64(a) => Ok(I64(overflowing!(a.overflowing_shl(b), Op::Shl))), - I128(a) => Ok(I128(overflowing!(a.overflowing_shl(b), Op::Shl))), - Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_shl(b), Op::Shl)))), - Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shl(b), Op::Shl)))), - Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shl(b), Op::Shl)))), - U8(a) => Ok(U8(overflowing!(a.overflowing_shl(b), Op::Shl))), - U16(a) => Ok(U16(overflowing!(a.overflowing_shl(b), Op::Shl))), - U32(a) => Ok(U32(overflowing!(a.overflowing_shl(b), Op::Shl))), - U64(a) => Ok(U64(overflowing!(a.overflowing_shl(b), Op::Shl))), - U128(a) => Ok(U128(overflowing!(a.overflowing_shl(b), Op::Shl))), - Usize(Us16(a)) => Ok(Usize(Us16(overflowing!(a.overflowing_shl(b), Op::Shl)))), - 
Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shl(b), Op::Shl)))), - Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shl(b), Op::Shl)))), - } - } -} - -impl ::std::ops::Shr for ConstInt { - type Output = Result; - fn shr(self, rhs: Self) -> Result { - let b = rhs.to_u32().ok_or(ShiftNegative)?; - match self { - I8(a) => Ok(I8(overflowing!(a.overflowing_shr(b), Op::Shr))), - I16(a) => Ok(I16(overflowing!(a.overflowing_shr(b), Op::Shr))), - I32(a) => Ok(I32(overflowing!(a.overflowing_shr(b), Op::Shr))), - I64(a) => Ok(I64(overflowing!(a.overflowing_shr(b), Op::Shr))), - I128(a) => Ok(I128(overflowing!(a.overflowing_shr(b), Op::Shr))), - Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_shr(b), Op::Shr)))), - Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shr(b), Op::Shr)))), - Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shr(b), Op::Shr)))), - U8(a) => Ok(U8(overflowing!(a.overflowing_shr(b), Op::Shr))), - U16(a) => Ok(U16(overflowing!(a.overflowing_shr(b), Op::Shr))), - U32(a) => Ok(U32(overflowing!(a.overflowing_shr(b), Op::Shr))), - U64(a) => Ok(U64(overflowing!(a.overflowing_shr(b), Op::Shr))), - U128(a) => Ok(U128(overflowing!(a.overflowing_shr(b), Op::Shr))), - Usize(Us16(a)) => Ok(Usize(Us16(overflowing!(a.overflowing_shr(b), Op::Shr)))), - Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shr(b), Op::Shr)))), - Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shr(b), Op::Shr)))), - } - } -} - -impl ::std::ops::Neg for ConstInt { - type Output = Result; - fn neg(self) -> Result { - match self { - I8(a) => Ok(I8(overflowing!(a.overflowing_neg(), Op::Neg))), - I16(a) => Ok(I16(overflowing!(a.overflowing_neg(), Op::Neg))), - I32(a) => Ok(I32(overflowing!(a.overflowing_neg(), Op::Neg))), - I64(a) => Ok(I64(overflowing!(a.overflowing_neg(), Op::Neg))), - I128(a) => Ok(I128(overflowing!(a.overflowing_neg(), Op::Neg))), - Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_neg(), 
Op::Neg)))), - Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_neg(), Op::Neg)))), - Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_neg(), Op::Neg)))), - a@U8(0) | a@U16(0) | a@U32(0) | a@U64(0) | a@U128(0) | - a@Usize(Us16(0)) | a@Usize(Us32(0)) | a@Usize(Us64(0)) => Ok(a), - U8(_) | U16(_) | U32(_) | U64(_) | U128(_) | Usize(_) => Err(UnsignedNegation), - } - } -} - -impl ::std::ops::Not for ConstInt { - type Output = Result; - fn not(self) -> Result { - match self { - I8(a) => Ok(I8(!a)), - I16(a) => Ok(I16(!a)), - I32(a) => Ok(I32(!a)), - I64(a) => Ok(I64(!a)), - I128(a) => Ok(I128(!a)), - Isize(Is16(a)) => Ok(Isize(Is16(!a))), - Isize(Is32(a)) => Ok(Isize(Is32(!a))), - Isize(Is64(a)) => Ok(Isize(Is64(!a))), - U8(a) => Ok(U8(!a)), - U16(a) => Ok(U16(!a)), - U32(a) => Ok(U32(!a)), - U64(a) => Ok(U64(!a)), - U128(a) => Ok(U128(!a)), - Usize(Us16(a)) => Ok(Usize(Us16(!a))), - Usize(Us32(a)) => Ok(Usize(Us32(!a))), - Usize(Us64(a)) => Ok(Usize(Us64(!a))), - } - } -} diff --git a/src/librustc_const_math/isize.rs b/src/librustc_const_math/isize.rs deleted file mode 100644 index 18acc782775d..000000000000 --- a/src/librustc_const_math/isize.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use syntax::ast; -use super::err::*; - -/// Depending on the target only one variant is ever used in a compilation. -/// Anything else is an error. 
This invariant is checked at several locations -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)] -pub enum ConstIsize { - Is16(i16), - Is32(i32), - Is64(i64), -} -pub use self::ConstIsize::*; - -impl ::std::fmt::Display for ConstIsize { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - write!(fmt, "{}", self.as_i64()) - } -} - -impl ConstIsize { - pub fn as_i64(self) -> i64 { - match self { - Is16(i) => i as i64, - Is32(i) => i as i64, - Is64(i) => i, - } - } - pub fn new(i: i64, isize_ty: ast::IntTy) -> Result { - match isize_ty { - ast::IntTy::I16 if i as i16 as i64 == i => Ok(Is16(i as i16)), - ast::IntTy::I16 => Err(LitOutOfRange(ast::IntTy::Isize)), - ast::IntTy::I32 if i as i32 as i64 == i => Ok(Is32(i as i32)), - ast::IntTy::I32 => Err(LitOutOfRange(ast::IntTy::Isize)), - ast::IntTy::I64 => Ok(Is64(i)), - _ => unreachable!(), - } - } - pub fn new_truncating(i: i128, isize_ty: ast::IntTy) -> Self { - match isize_ty { - ast::IntTy::I16 => Is16(i as i16), - ast::IntTy::I32 => Is32(i as i32), - ast::IntTy::I64 => Is64(i as i64), - _ => unreachable!(), - } - } -} diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs deleted file mode 100644 index 2d98bc48d281..000000000000 --- a/src/librustc_const_math/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Rusty Mathematics -//! -//! # Note -//! -//! This API is completely unstable and subject to change. 
- -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] - -#![feature(i128)] -#![feature(i128_type)] - -extern crate rustc_apfloat; - -extern crate syntax; - -extern crate serialize as rustc_serialize; // used by deriving - -mod float; -mod int; -mod usize; -mod isize; -mod err; - -pub use float::*; -pub use int::*; -pub use usize::*; -pub use isize::*; -pub use err::{ConstMathErr, Op}; diff --git a/src/librustc_const_math/usize.rs b/src/librustc_const_math/usize.rs deleted file mode 100644 index 56995f08f05b..000000000000 --- a/src/librustc_const_math/usize.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use syntax::ast; -use super::err::*; - -/// Depending on the target only one variant is ever used in a compilation. -/// Anything else is an error. 
This invariant is checked at several locations -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)] -pub enum ConstUsize { - Us16(u16), - Us32(u32), - Us64(u64), -} -pub use self::ConstUsize::*; - -impl ::std::fmt::Display for ConstUsize { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - write!(fmt, "{}", self.as_u64()) - } -} - -impl ConstUsize { - pub fn as_u64(self) -> u64 { - match self { - Us16(i) => i as u64, - Us32(i) => i as u64, - Us64(i) => i, - } - } - pub fn new(i: u64, usize_ty: ast::UintTy) -> Result { - match usize_ty { - ast::UintTy::U16 if i as u16 as u64 == i => Ok(Us16(i as u16)), - ast::UintTy::U16 => Err(ULitOutOfRange(ast::UintTy::Usize)), - ast::UintTy::U32 if i as u32 as u64 == i => Ok(Us32(i as u32)), - ast::UintTy::U32 => Err(ULitOutOfRange(ast::UintTy::Usize)), - ast::UintTy::U64 => Ok(Us64(i)), - _ => unreachable!(), - } - } - pub fn new_truncating(i: u128, usize_ty: ast::UintTy) -> Self { - match usize_ty { - ast::UintTy::U16 => Us16(i as u16), - ast::UintTy::U32 => Us32(i as u32), - ast::UintTy::U64 => Us64(i as u64), - _ => unreachable!(), - } - } -} diff --git a/src/librustc_cratesio_shim/Cargo.toml b/src/librustc_cratesio_shim/Cargo.toml index 143f88e8f4b8..342c7d1b6786 100644 --- a/src/librustc_cratesio_shim/Cargo.toml +++ b/src/librustc_cratesio_shim/Cargo.toml @@ -21,3 +21,4 @@ crate-type = ["dylib"] [dependencies] bitflags = "1.0" +log = "0.4" diff --git a/src/librustc_cratesio_shim/src/lib.rs b/src/librustc_cratesio_shim/src/lib.rs index 769b4f57206e..39087c5f7461 100644 --- a/src/librustc_cratesio_shim/src/lib.rs +++ b/src/librustc_cratesio_shim/src/lib.rs @@ -11,4 +11,7 @@ // See Cargo.toml for a comment explaining this crate. 
#![allow(unused_extern_crates)] +#![cfg_attr(not(stage0), feature(nll))] + extern crate bitflags; +extern crate log; diff --git a/src/librustc_data_structures/Cargo.toml b/src/librustc_data_structures/Cargo.toml index 23e42f6a672c..fc5fe91c977d 100644 --- a/src/librustc_data_structures/Cargo.toml +++ b/src/librustc_data_structures/Cargo.toml @@ -9,11 +9,16 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] +ena = "0.9.3" log = "0.4" +rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } serialize = { path = "../libserialize" } cfg-if = "0.1.2" stable_deref_trait = "1.0.0" parking_lot_core = "0.2.8" +rustc-rayon = "0.1.1" +rustc-rayon-core = "0.1.1" +rustc-hash = "1.0.1" [dependencies.parking_lot] version = "0.5" diff --git a/src/librustc_data_structures/accumulate_vec.rs b/src/librustc_data_structures/accumulate_vec.rs index 52306de74cb8..9423e6b3256c 100644 --- a/src/librustc_data_structures/accumulate_vec.rs +++ b/src/librustc_data_structures/accumulate_vec.rs @@ -15,11 +15,10 @@ //! //! The N above is determined by Array's implementor, by way of an associated constant. -use std::ops::{Deref, DerefMut}; +use std::ops::{Deref, DerefMut, RangeBounds}; use std::iter::{self, IntoIterator, FromIterator}; use std::slice; use std::vec; -use std::collections::range::RangeArgument; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; @@ -47,6 +46,13 @@ impl AccumulateVec { AccumulateVec::Array(ArrayVec::new()) } + pub fn is_array(&self) -> bool { + match self { + AccumulateVec::Array(..) => true, + AccumulateVec::Heap(..) 
=> false, + } + } + pub fn one(el: A::Element) -> Self { iter::once(el).collect() } @@ -74,7 +80,7 @@ impl AccumulateVec { } pub fn drain(&mut self, range: R) -> Drain - where R: RangeArgument + where R: RangeBounds { match *self { AccumulateVec::Array(ref mut v) => { @@ -218,7 +224,7 @@ impl Encodable for AccumulateVec fn encode(&self, s: &mut S) -> Result<(), S::Error> { s.emit_seq(self.len(), |s| { for (i, e) in self.iter().enumerate() { - try!(s.emit_seq_elt(i, |s| e.encode(s))); + s.emit_seq_elt(i, |s| e.encode(s))?; } Ok(()) }) @@ -230,8 +236,7 @@ impl Decodable for AccumulateVec A::Element: Decodable { fn decode(d: &mut D) -> Result, D::Error> { d.read_seq(|d, len| { - Ok(try!((0..len).map(|i| d.read_seq_elt(i, |d| Decodable::decode(d))).collect())) + (0..len).map(|i| d.read_seq_elt(i, |d| Decodable::decode(d))).collect() }) } } - diff --git a/src/librustc_data_structures/array_vec.rs b/src/librustc_data_structures/array_vec.rs index 57fc78ef5311..56bb96132421 100644 --- a/src/librustc_data_structures/array_vec.rs +++ b/src/librustc_data_structures/array_vec.rs @@ -12,15 +12,15 @@ use std::marker::Unsize; use std::iter::Extend; -use std::ptr::{self, drop_in_place, Shared}; +use std::ptr::{self, drop_in_place, NonNull}; use std::ops::{Deref, DerefMut, Range}; use std::hash::{Hash, Hasher}; use std::slice; use std::fmt; use std::mem; -use std::collections::range::RangeArgument; -use std::collections::Bound::{Excluded, Included, Unbounded}; use std::mem::ManuallyDrop; +use std::ops::Bound::{Excluded, Included, Unbounded}; +use std::ops::RangeBounds; pub unsafe trait Array { type Element; @@ -106,7 +106,7 @@ impl ArrayVec { } pub fn drain(&mut self, range: R) -> Drain - where R: RangeArgument + where R: RangeBounds { // Memory safety // @@ -119,12 +119,12 @@ impl ArrayVec { // the hole, and the vector length is restored to the new length. 
// let len = self.len(); - let start = match range.start() { + let start = match range.start_bound() { Included(&n) => n, Excluded(&n) => n + 1, Unbounded => 0, }; - let end = match range.end() { + let end = match range.end_bound() { Included(&n) => n + 1, Excluded(&n) => n, Unbounded => len, @@ -146,7 +146,7 @@ impl ArrayVec { tail_start: end, tail_len: len - end, iter: range_slice.iter(), - array_vec: Shared::from(self), + array_vec: NonNull::from(self), } } } @@ -207,7 +207,7 @@ pub struct Iter { impl Drop for Iter { fn drop(&mut self) { - for _ in self {} + self.for_each(drop); } } @@ -232,7 +232,7 @@ pub struct Drain<'a, A: Array> tail_start: usize, tail_len: usize, iter: slice::Iter<'a, ManuallyDrop>, - array_vec: Shared>, + array_vec: NonNull>, } impl<'a, A: Array> Iterator for Drain<'a, A> { @@ -251,7 +251,7 @@ impl<'a, A: Array> Iterator for Drain<'a, A> { impl<'a, A: Array> Drop for Drain<'a, A> { fn drop(&mut self) { // exhaust self first - while let Some(_) = self.next() {} + self.for_each(drop); if self.tail_len > 0 { unsafe { diff --git a/src/librustc_data_structures/base_n.rs b/src/librustc_data_structures/base_n.rs index d333b6393b9c..d3b47daa5b4b 100644 --- a/src/librustc_data_structures/base_n.rs +++ b/src/librustc_data_structures/base_n.rs @@ -17,7 +17,7 @@ pub const MAX_BASE: usize = 64; pub const ALPHANUMERIC_ONLY: usize = 62; pub const CASE_INSENSITIVE: usize = 36; -const BASE_64: &'static [u8; MAX_BASE as usize] = +const BASE_64: &[u8; MAX_BASE as usize] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@$"; #[inline] @@ -37,7 +37,8 @@ pub fn push_str(mut n: u128, base: usize, output: &mut String) { break; } } - &mut s[0..index].reverse(); + s[0..index].reverse(); + output.push_str(str::from_utf8(&s[0..index]).unwrap()); } diff --git a/src/librustc_data_structures/bitslice.rs b/src/librustc_data_structures/bitslice.rs index 7665bfd5b111..a63033c43652 100644 --- a/src/librustc_data_structures/bitslice.rs +++ 
b/src/librustc_data_structures/bitslice.rs @@ -24,12 +24,13 @@ pub trait BitSlice { impl BitSlice for [Word] { /// Clears bit at `idx` to 0; returns true iff this changed `self.` + #[inline] fn clear_bit(&mut self, idx: usize) -> bool { let words = self; debug!("clear_bit: words={} idx={}", - bits_to_string(words, words.len() * mem::size_of::()), bit_str(idx)); + bits_to_string(words, words.len() * mem::size_of::() * 8), idx); let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx); - debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask); + debug!("word={} bit_in_word={} bit_mask=0x{:x}", word, bit_in_word, bit_mask); let oldv = words[word]; let newv = oldv & !bit_mask; words[word] = newv; @@ -37,10 +38,11 @@ impl BitSlice for [Word] { } /// Sets bit at `idx` to 1; returns true iff this changed `self.` + #[inline] fn set_bit(&mut self, idx: usize) -> bool { let words = self; debug!("set_bit: words={} idx={}", - bits_to_string(words, words.len() * mem::size_of::()), bit_str(idx)); + bits_to_string(words, words.len() * mem::size_of::() * 8), idx); let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx); debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask); let oldv = words[word]; @@ -50,6 +52,7 @@ impl BitSlice for [Word] { } /// Extracts value of bit at `idx` in `self`. + #[inline] fn get_bit(&self, idx: usize) -> bool { let words = self; let BitLookup { word, bit_mask, .. 
} = bit_lookup(idx); @@ -72,14 +75,7 @@ fn bit_lookup(bit: usize) -> BitLookup { let word = bit / word_bits; let bit_in_word = bit % word_bits; let bit_mask = 1 << bit_in_word; - BitLookup { word: word, bit_in_word: bit_in_word, bit_mask: bit_mask } -} - - -fn bit_str(bit: Word) -> String { - let byte = bit >> 3; - let lobits = 1 << (bit & 0b111); - format!("[{}:{}-{:02x}]", bit, byte, lobits) + BitLookup { word, bit_in_word, bit_mask } } pub fn bits_to_string(words: &[Word], bits: usize) -> String { @@ -92,29 +88,30 @@ pub fn bits_to_string(words: &[Word], bits: usize) -> String { let mut i = 0; for &word in words.iter() { let mut v = word; - loop { // for each byte in `v`: + for _ in 0..mem::size_of::() { // for each byte in `v`: let remain = bits - i; // If less than a byte remains, then mask just that many bits. let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF }; assert!(mask <= 0xFF); let byte = v & mask; - result.push(sep); - result.push_str(&format!("{:02x}", byte)); + result.push_str(&format!("{}{:02x}", sep, byte)); if remain <= 8 { break; } v >>= 8; i += 8; sep = '-'; } + sep = '|'; } result.push(']'); - return result + + result } #[inline] -pub fn bitwise(out_vec: &mut [usize], - in_vec: &[usize], +pub fn bitwise(out_vec: &mut [Word], + in_vec: &[Word], op: &Op) -> bool { assert_eq!(out_vec.len(), in_vec.len()); let mut changed = false; @@ -129,21 +126,21 @@ pub fn bitwise(out_vec: &mut [usize], pub trait BitwiseOperator { /// Applies some bit-operation pointwise to each of the bits in the two inputs. 
- fn join(&self, pred1: usize, pred2: usize) -> usize; + fn join(&self, pred1: Word, pred2: Word) -> Word; } pub struct Intersect; impl BitwiseOperator for Intersect { #[inline] - fn join(&self, a: usize, b: usize) -> usize { a & b } + fn join(&self, a: Word, b: Word) -> Word { a & b } } pub struct Union; impl BitwiseOperator for Union { #[inline] - fn join(&self, a: usize, b: usize) -> usize { a | b } + fn join(&self, a: Word, b: Word) -> Word { a | b } } pub struct Subtract; impl BitwiseOperator for Subtract { #[inline] - fn join(&self, a: usize, b: usize) -> usize { a & !b } + fn join(&self, a: Word, b: Word) -> Word { a & !b } } diff --git a/src/librustc_data_structures/bitvec.rs b/src/librustc_data_structures/bitvec.rs index 94edaa746f91..49ab3e58812d 100644 --- a/src/librustc_data_structures/bitvec.rs +++ b/src/librustc_data_structures/bitvec.rs @@ -8,19 +8,78 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::iter::FromIterator; +use indexed_vec::{Idx, IndexVec}; +use std::marker::PhantomData; -/// A very simple BitVector type. +type Word = u128; +const WORD_BITS: usize = 128; + +/// A very simple BitArray type. +/// +/// It does not support resizing after creation; use `BitVector` for that. #[derive(Clone, Debug, PartialEq)] -pub struct BitVector { - data: Vec, +pub struct BitArray { + data: Vec, + marker: PhantomData, } -impl BitVector { +#[derive(Clone, Debug, PartialEq)] +pub struct BitVector { + data: BitArray, +} + +impl BitVector { + pub fn grow(&mut self, num_bits: C) { + self.data.grow(num_bits) + } + + pub fn new() -> BitVector { + BitVector { + data: BitArray::new(0), + } + } + + pub fn with_capacity(bits: usize) -> BitVector { + BitVector { + data: BitArray::new(bits), + } + } + + /// Returns true if the bit has changed. 
#[inline] - pub fn new(num_bits: usize) -> BitVector { - let num_words = u64s(num_bits); - BitVector { data: vec![0; num_words] } + pub fn insert(&mut self, bit: C) -> bool { + self.grow(bit); + self.data.insert(bit) + } + + #[inline] + pub fn contains(&self, bit: C) -> bool { + let (word, mask) = word_mask(bit); + if let Some(word) = self.data.data.get(word) { + (word & mask) != 0 + } else { + false + } + } +} + +impl BitArray { + // Do not make this method public, instead switch your use case to BitVector. + #[inline] + fn grow(&mut self, num_bits: C) { + let num_words = words(num_bits); + if self.data.len() <= num_words { + self.data.resize(num_words + 1, 0) + } + } + + #[inline] + pub fn new(num_bits: usize) -> BitArray { + let num_words = words(num_bits); + BitArray { + data: vec![0; num_words], + marker: PhantomData, + } } #[inline] @@ -34,15 +93,30 @@ impl BitVector { self.data.iter().map(|e| e.count_ones() as usize).sum() } + /// True if `self` contains the bit `bit`. #[inline] - pub fn contains(&self, bit: usize) -> bool { + pub fn contains(&self, bit: C) -> bool { let (word, mask) = word_mask(bit); (self.data[word] & mask) != 0 } + /// True if `self` contains all the bits in `other`. + /// + /// The two vectors must have the same length. + #[inline] + pub fn contains_all(&self, other: &BitArray) -> bool { + assert_eq!(self.data.len(), other.data.len()); + self.data.iter().zip(&other.data).all(|(a, b)| (a & b) == *b) + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.data.iter().all(|a| *a == 0) + } + /// Returns true if the bit has changed. #[inline] - pub fn insert(&mut self, bit: usize) -> bool { + pub fn insert(&mut self, bit: C) -> bool { let (word, mask) = word_mask(bit); let data = &mut self.data[word]; let value = *data; @@ -51,8 +125,26 @@ impl BitVector { new_value != value } + /// Sets all bits to true. 
+ pub fn insert_all(&mut self) { + for data in &mut self.data { + *data = u128::max_value(); + } + } + + /// Returns true if the bit has changed. #[inline] - pub fn insert_all(&mut self, all: &BitVector) -> bool { + pub fn remove(&mut self, bit: C) -> bool { + let (word, mask) = word_mask(bit); + let data = &mut self.data[word]; + let value = *data; + let new_value = value & !mask; + *data = new_value; + new_value != value + } + + #[inline] + pub fn merge(&mut self, all: &BitArray) -> bool { assert!(self.data.len() == all.data.len()); let mut changed = false; for (i, j) in self.data.iter_mut().zip(&all.data) { @@ -65,41 +157,35 @@ impl BitVector { changed } - #[inline] - pub fn grow(&mut self, num_bits: usize) { - let num_words = u64s(num_bits); - if self.data.len() < num_words { - self.data.resize(num_words, 0) - } - } - /// Iterates over indexes of set bits in a sorted order #[inline] - pub fn iter<'a>(&'a self) -> BitVectorIter<'a> { - BitVectorIter { + pub fn iter<'a>(&'a self) -> BitIter<'a, C> { + BitIter { iter: self.data.iter(), current: 0, idx: 0, + marker: PhantomData, } } } -pub struct BitVectorIter<'a> { - iter: ::std::slice::Iter<'a, u64>, - current: u64, +pub struct BitIter<'a, C: Idx> { + iter: ::std::slice::Iter<'a, Word>, + current: Word, idx: usize, + marker: PhantomData } -impl<'a> Iterator for BitVectorIter<'a> { - type Item = usize; - fn next(&mut self) -> Option { +impl<'a, C: Idx> Iterator for BitIter<'a, C> { + type Item = C; + fn next(&mut self) -> Option { while self.current == 0 { self.current = if let Some(&i) = self.iter.next() { if i == 0 { - self.idx += 64; + self.idx += WORD_BITS; continue; } else { - self.idx = u64s(self.idx) * 64; + self.idx = words(self.idx) * WORD_BITS; i } } else { @@ -110,28 +196,13 @@ impl<'a> Iterator for BitVectorIter<'a> { self.current >>= offset; self.current >>= 1; // shift otherwise overflows for 0b1000_0000_…_0000 self.idx += offset + 1; - return Some(self.idx - 1); + + Some(C::new(self.idx - 1)) } -} 
-impl FromIterator for BitVector { - fn from_iter(iter: I) -> BitVector where I: IntoIterator { - let iter = iter.into_iter(); - let (len, _) = iter.size_hint(); - // Make the minimum length for the bitvector 64 bits since that's - // the smallest non-zero size anyway. - let len = if len < 64 { 64 } else { len }; - let mut bv = BitVector::new(len); - for (idx, val) in iter.enumerate() { - if idx > len { - bv.grow(idx); - } - if val { - bv.insert(idx); - } - } - - bv + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) } } @@ -139,35 +210,38 @@ impl FromIterator for BitVector { /// one gigantic bitvector. In other words, it is as if you have /// `rows` bitvectors, each of length `columns`. #[derive(Clone, Debug)] -pub struct BitMatrix { +pub struct BitMatrix { columns: usize, - vector: Vec, + vector: Vec, + phantom: PhantomData<(R, C)>, } -impl BitMatrix { +impl BitMatrix { /// Create a new `rows x columns` matrix, initially empty. - pub fn new(rows: usize, columns: usize) -> BitMatrix { + pub fn new(rows: usize, columns: usize) -> BitMatrix { // For every element, we need one bit for every other - // element. Round up to an even number of u64s. - let u64s_per_row = u64s(columns); + // element. Round up to an even number of words. + let words_per_row = words(columns); BitMatrix { columns, - vector: vec![0; rows * u64s_per_row], + vector: vec![0; rows * words_per_row], + phantom: PhantomData, } } /// The range of bits for a given row. - fn range(&self, row: usize) -> (usize, usize) { - let u64s_per_row = u64s(self.columns); - let start = row * u64s_per_row; - (start, start + u64s_per_row) + fn range(&self, row: R) -> (usize, usize) { + let row = row.index(); + let words_per_row = words(self.columns); + let start = row * words_per_row; + (start, start + words_per_row) } /// Sets the cell at `(row, column)` to true. Put another way, add /// `column` to the bitset for `row`. 
/// - /// Returns true if this changed the matrix, and false otherwies. - pub fn add(&mut self, row: usize, column: usize) -> bool { + /// Returns true if this changed the matrix, and false otherwise. + pub fn add(&mut self, row: R, column: R) -> bool { let (start, _) = self.range(row); let (word, mask) = word_mask(column); let vector = &mut self.vector[..]; @@ -181,7 +255,7 @@ impl BitMatrix { /// the matrix cell at `(row, column)` true? Put yet another way, /// if the matrix represents (transitive) reachability, can /// `row` reach `column`? - pub fn contains(&self, row: usize, column: usize) -> bool { + pub fn contains(&self, row: R, column: R) -> bool { let (start, _) = self.range(row); let (word, mask) = word_mask(column); (self.vector[start + word] & mask) != 0 @@ -191,18 +265,18 @@ impl BitMatrix { /// is an O(n) operation where `n` is the number of elements /// (somewhat independent from the actual size of the /// intersection, in particular). - pub fn intersection(&self, a: usize, b: usize) -> Vec { + pub fn intersection(&self, a: R, b: R) -> Vec { let (a_start, a_end) = self.range(a); let (b_start, b_end) = self.range(b); let mut result = Vec::with_capacity(self.columns); for (base, (i, j)) in (a_start..a_end).zip(b_start..b_end).enumerate() { let mut v = self.vector[i] & self.vector[j]; - for bit in 0..64 { + for bit in 0..WORD_BITS { if v == 0 { break; } if v & 0x1 != 0 { - result.push(base * 64 + bit); + result.push(C::new(base * WORD_BITS + bit)); } v >>= 1; } @@ -217,7 +291,7 @@ impl BitMatrix { /// you have an edge `write -> read`, because in that case /// `write` can reach everything that `read` can (and /// potentially more). 
- pub fn merge(&mut self, read: usize, write: usize) -> bool { + pub fn merge(&mut self, read: R, write: R) -> bool { let (read_start, read_end) = self.range(read); let (write_start, write_end) = self.range(write); let vector = &mut self.vector[..]; @@ -226,38 +300,138 @@ impl BitMatrix { let v1 = vector[write_index]; let v2 = v1 | vector[read_index]; vector[write_index] = v2; - changed = changed | (v1 != v2); + changed |= v1 != v2; } changed } /// Iterates through all the columns set to true in a given row of /// the matrix. - pub fn iter<'a>(&'a self, row: usize) -> BitVectorIter<'a> { + pub fn iter<'a>(&'a self, row: R) -> BitIter<'a, C> { let (start, end) = self.range(row); - BitVectorIter { + BitIter { iter: self.vector[start..end].iter(), current: 0, idx: 0, + marker: PhantomData, } } } -#[inline] -fn u64s(elements: usize) -> usize { - (elements + 63) / 64 +/// A moderately sparse bit matrix: rows are appended lazily, but columns +/// within appended rows are instantiated fully upon creation. +#[derive(Clone, Debug)] +pub struct SparseBitMatrix +where + R: Idx, + C: Idx, +{ + columns: usize, + vector: IndexVec>, +} + +impl SparseBitMatrix { + /// Create a new empty sparse bit matrix with no rows or columns. + pub fn new(columns: usize) -> Self { + Self { + columns, + vector: IndexVec::new(), + } + } + + fn ensure_row(&mut self, row: R) { + let columns = self.columns; + self.vector + .ensure_contains_elem(row, || BitArray::new(columns)); + } + + /// Sets the cell at `(row, column)` to true. Put another way, insert + /// `column` to the bitset for `row`. + /// + /// Returns true if this changed the matrix, and false otherwise. + pub fn add(&mut self, row: R, column: C) -> bool { + self.ensure_row(row); + self.vector[row].insert(column) + } + + /// Do the bits from `row` contain `column`? Put another way, is + /// the matrix cell at `(row, column)` true? 
Put yet another way, + /// if the matrix represents (transitive) reachability, can + /// `row` reach `column`? + pub fn contains(&self, row: R, column: C) -> bool { + self.vector.get(row).map_or(false, |r| r.contains(column)) + } + + /// Add the bits from row `read` to the bits from row `write`, + /// return true if anything changed. + /// + /// This is used when computing transitive reachability because if + /// you have an edge `write -> read`, because in that case + /// `write` can reach everything that `read` can (and + /// potentially more). + pub fn merge(&mut self, read: R, write: R) -> bool { + if read == write || self.vector.get(read).is_none() { + return false; + } + + self.ensure_row(write); + let (bitvec_read, bitvec_write) = self.vector.pick2_mut(read, write); + bitvec_write.merge(bitvec_read) + } + + /// Merge a row, `from`, into the `into` row. + pub fn merge_into(&mut self, into: R, from: &BitArray) -> bool { + self.ensure_row(into); + self.vector[into].merge(from) + } + + /// Add all bits to the given row. + pub fn add_all(&mut self, row: R) { + self.ensure_row(row); + self.vector[row].insert_all(); + } + + /// Number of elements in the matrix. + pub fn len(&self) -> usize { + self.vector.len() + } + + pub fn rows(&self) -> impl Iterator { + self.vector.indices() + } + + /// Iterates through all the columns set to true in a given row of + /// the matrix. + pub fn iter<'a>(&'a self, row: R) -> impl Iterator + 'a { + self.vector.get(row).into_iter().flat_map(|r| r.iter()) + } + + /// Iterates through each row and the accompanying bit set. 
+ pub fn iter_enumerated<'a>(&'a self) -> impl Iterator)> + 'a { + self.vector.iter_enumerated() + } + + pub fn row(&self, row: R) -> Option<&BitArray> { + self.vector.get(row) + } } #[inline] -fn word_mask(index: usize) -> (usize, u64) { - let word = index / 64; - let mask = 1 << (index % 64); +fn words(elements: C) -> usize { + (elements.index() + WORD_BITS - 1) / WORD_BITS +} + +#[inline] +fn word_mask(index: C) -> (usize, Word) { + let index = index.index(); + let word = index / WORD_BITS; + let mask = 1 << (index % WORD_BITS); (word, mask) } #[test] fn bitvec_iter_works() { - let mut bitvec = BitVector::new(100); + let mut bitvec: BitArray = BitArray::new(100); bitvec.insert(1); bitvec.insert(10); bitvec.insert(19); @@ -267,14 +441,15 @@ fn bitvec_iter_works() { bitvec.insert(65); bitvec.insert(66); bitvec.insert(99); - assert_eq!(bitvec.iter().collect::>(), - [1, 10, 19, 62, 63, 64, 65, 66, 99]); + assert_eq!( + bitvec.iter().collect::>(), + [1, 10, 19, 62, 63, 64, 65, 66, 99] + ); } - #[test] fn bitvec_iter_works_2() { - let mut bitvec = BitVector::new(319); + let mut bitvec: BitArray = BitArray::new(319); bitvec.insert(0); bitvec.insert(127); bitvec.insert(191); @@ -285,14 +460,14 @@ fn bitvec_iter_works_2() { #[test] fn union_two_vecs() { - let mut vec1 = BitVector::new(65); - let mut vec2 = BitVector::new(65); + let mut vec1: BitArray = BitArray::new(65); + let mut vec2: BitArray = BitArray::new(65); assert!(vec1.insert(3)); assert!(!vec1.insert(3)); assert!(vec2.insert(5)); assert!(vec2.insert(64)); - assert!(vec1.insert_all(&vec2)); - assert!(!vec1.insert_all(&vec2)); + assert!(vec1.merge(&vec2)); + assert!(!vec1.merge(&vec2)); assert!(vec1.contains(3)); assert!(!vec1.contains(4)); assert!(vec1.contains(5)); @@ -302,25 +477,25 @@ fn union_two_vecs() { #[test] fn grow() { - let mut vec1 = BitVector::new(65); - for index in 0 .. 
65 { + let mut vec1: BitVector = BitVector::with_capacity(65); + for index in 0..65 { assert!(vec1.insert(index)); assert!(!vec1.insert(index)); } vec1.grow(128); // Check if the bits set before growing are still set - for index in 0 .. 65 { + for index in 0..65 { assert!(vec1.contains(index)); } // Check if the new bits are all un-set - for index in 65 .. 128 { + for index in 65..128 { assert!(!vec1.contains(index)); } // Check that we can set all new bits without running out of bounds - for index in 65 .. 128 { + for index in 65..128 { assert!(vec1.insert(index)); assert!(!vec1.insert(index)); } @@ -328,7 +503,7 @@ fn grow() { #[test] fn matrix_intersection() { - let mut vec1 = BitMatrix::new(200, 200); + let mut vec1: BitMatrix = BitMatrix::new(200, 200); // (*) Elements reachable from both 2 and 65. @@ -359,7 +534,49 @@ fn matrix_intersection() { #[test] fn matrix_iter() { - let mut matrix = BitMatrix::new(64, 100); + let mut matrix: BitMatrix = BitMatrix::new(64, 100); + matrix.add(3, 22); + matrix.add(3, 75); + matrix.add(2, 99); + matrix.add(4, 0); + matrix.merge(3, 5); + + let expected = [99]; + let mut iter = expected.iter(); + for i in matrix.iter(2) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); + + let expected = [22, 75]; + let mut iter = expected.iter(); + for i in matrix.iter(3) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); + + let expected = [0]; + let mut iter = expected.iter(); + for i in matrix.iter(4) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); + + let expected = [22, 75]; + let mut iter = expected.iter(); + for i in matrix.iter(5) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); +} + +#[test] +fn sparse_matrix_iter() { + let mut matrix: SparseBitMatrix = SparseBitMatrix::new(100); matrix.add(3, 22); matrix.add(3, 75); matrix.add(2, 99); diff --git 
a/src/librustc_data_structures/blake2b.rs b/src/librustc_data_structures/blake2b.rs deleted file mode 100644 index 6b8bf8df0d33..000000000000 --- a/src/librustc_data_structures/blake2b.rs +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - -// An implementation of the Blake2b cryptographic hash function. -// The implementation closely follows: https://tools.ietf.org/html/rfc7693 -// -// "BLAKE2 is a cryptographic hash function faster than MD5, SHA-1, SHA-2, and -// SHA-3, yet is at least as secure as the latest standard SHA-3." -// according to their own website :) -// -// Indeed this implementation is two to three times as fast as our SHA-256 -// implementation. If you have the luxury of being able to use crates from -// crates.io, you can go there and find still faster implementations. 
- -use std::mem; -use std::slice; - -#[repr(C)] -struct Blake2bCtx { - b: [u8; 128], - h: [u64; 8], - t: [u64; 2], - c: usize, - outlen: u16, - finalized: bool, - - #[cfg(debug_assertions)] - fnv_hash: u64, -} - -#[cfg(debug_assertions)] -impl ::std::fmt::Debug for Blake2bCtx { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(fmt, "{:x}", self.fnv_hash) - } -} - -#[cfg(not(debug_assertions))] -impl ::std::fmt::Debug for Blake2bCtx { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { - write!(fmt, "Enable debug_assertions() for more info.") - } -} - -#[inline(always)] -fn b2b_g(v: &mut [u64; 16], - a: usize, - b: usize, - c: usize, - d: usize, - x: u64, - y: u64) -{ - v[a] = v[a].wrapping_add(v[b]).wrapping_add(x); - v[d] = (v[d] ^ v[a]).rotate_right(32); - v[c] = v[c].wrapping_add(v[d]); - v[b] = (v[b] ^ v[c]).rotate_right(24); - v[a] = v[a].wrapping_add(v[b]).wrapping_add(y); - v[d] = (v[d] ^ v[a]).rotate_right(16); - v[c] = v[c].wrapping_add(v[d]); - v[b] = (v[b] ^ v[c]).rotate_right(63); -} - -// Initialization vector -const BLAKE2B_IV: [u64; 8] = [ - 0x6A09E667F3BCC908, 0xBB67AE8584CAA73B, - 0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1, - 0x510E527FADE682D1, 0x9B05688C2B3E6C1F, - 0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179 -]; - -fn blake2b_compress(ctx: &mut Blake2bCtx, last: bool) { - - const SIGMA: [[usize; 16]; 12] = [ - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], - [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], - [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 ], - [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 ], - [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 ], - [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 ], - [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 ], - [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 ], - [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 ], - [10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 ], - [0, 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], - [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ] - ]; - - let mut v: [u64; 16] = [ - ctx.h[0], - ctx.h[1], - ctx.h[2], - ctx.h[3], - ctx.h[4], - ctx.h[5], - ctx.h[6], - ctx.h[7], - - BLAKE2B_IV[0], - BLAKE2B_IV[1], - BLAKE2B_IV[2], - BLAKE2B_IV[3], - BLAKE2B_IV[4], - BLAKE2B_IV[5], - BLAKE2B_IV[6], - BLAKE2B_IV[7], - ]; - - v[12] ^= ctx.t[0]; // low 64 bits of offset - v[13] ^= ctx.t[1]; // high 64 bits - if last { - v[14] = !v[14]; - } - - { - // Re-interpret the input buffer in the state as an array - // of little-endian u64s, converting them to machine - // endianness. It's OK to modify the buffer in place - // since this is the last time this data will be accessed - // before it's overwritten. - - let m: &mut [u64; 16] = unsafe { - let b: &mut [u8; 128] = &mut ctx.b; - ::std::mem::transmute(b) - }; - - if cfg!(target_endian = "big") { - for word in &mut m[..] { - *word = u64::from_le(*word); - } - } - - for i in 0 .. 12 { - b2b_g(&mut v, 0, 4, 8, 12, m[SIGMA[i][ 0]], m[SIGMA[i][ 1]]); - b2b_g(&mut v, 1, 5, 9, 13, m[SIGMA[i][ 2]], m[SIGMA[i][ 3]]); - b2b_g(&mut v, 2, 6, 10, 14, m[SIGMA[i][ 4]], m[SIGMA[i][ 5]]); - b2b_g(&mut v, 3, 7, 11, 15, m[SIGMA[i][ 6]], m[SIGMA[i][ 7]]); - b2b_g(&mut v, 0, 5, 10, 15, m[SIGMA[i][ 8]], m[SIGMA[i][ 9]]); - b2b_g(&mut v, 1, 6, 11, 12, m[SIGMA[i][10]], m[SIGMA[i][11]]); - b2b_g(&mut v, 2, 7, 8, 13, m[SIGMA[i][12]], m[SIGMA[i][13]]); - b2b_g(&mut v, 3, 4, 9, 14, m[SIGMA[i][14]], m[SIGMA[i][15]]); - } - } - - for i in 0 .. 
8 { - ctx.h[i] ^= v[i] ^ v[i + 8]; - } -} - -fn blake2b_new(outlen: usize, key: &[u8]) -> Blake2bCtx { - assert!(outlen > 0 && outlen <= 64 && key.len() <= 64); - - let mut ctx = Blake2bCtx { - b: [0; 128], - h: BLAKE2B_IV, - t: [0; 2], - c: 0, - outlen: outlen as u16, - finalized: false, - - #[cfg(debug_assertions)] - fnv_hash: 0xcbf29ce484222325, - }; - - ctx.h[0] ^= 0x01010000 ^ ((key.len() << 8) as u64) ^ (outlen as u64); - - if key.len() > 0 { - blake2b_update(&mut ctx, key); - ctx.c = ctx.b.len(); - } - - ctx -} - -fn blake2b_update(ctx: &mut Blake2bCtx, mut data: &[u8]) { - assert!(!ctx.finalized, "Blake2bCtx already finalized"); - - let mut bytes_to_copy = data.len(); - let mut space_in_buffer = ctx.b.len() - ctx.c; - - while bytes_to_copy > space_in_buffer { - checked_mem_copy(data, &mut ctx.b[ctx.c .. ], space_in_buffer); - - ctx.t[0] = ctx.t[0].wrapping_add(ctx.b.len() as u64); - if ctx.t[0] < (ctx.b.len() as u64) { - ctx.t[1] += 1; - } - blake2b_compress(ctx, false); - ctx.c = 0; - - data = &data[space_in_buffer .. ]; - bytes_to_copy -= space_in_buffer; - space_in_buffer = ctx.b.len(); - } - - if bytes_to_copy > 0 { - checked_mem_copy(data, &mut ctx.b[ctx.c .. ], bytes_to_copy); - ctx.c += bytes_to_copy; - } - - #[cfg(debug_assertions)] - { - // compute additional FNV hash for simpler to read debug output - const MAGIC_PRIME: u64 = 0x00000100000001b3; - - for &byte in data { - ctx.fnv_hash = (ctx.fnv_hash ^ byte as u64).wrapping_mul(MAGIC_PRIME); - } - } -} - -fn blake2b_final(ctx: &mut Blake2bCtx) -{ - assert!(!ctx.finalized, "Blake2bCtx already finalized"); - - ctx.t[0] = ctx.t[0].wrapping_add(ctx.c as u64); - if ctx.t[0] < ctx.c as u64 { - ctx.t[1] += 1; - } - - while ctx.c < 128 { - ctx.b[ctx.c] = 0; - ctx.c += 1; - } - - blake2b_compress(ctx, true); - - // Modify our buffer to little-endian format as it will be read - // as a byte array. It's OK to modify the buffer in place since - // this is the last time this data will be accessed. 
- if cfg!(target_endian = "big") { - for word in &mut ctx.h { - *word = word.to_le(); - } - } - - ctx.finalized = true; -} - -#[inline(always)] -fn checked_mem_copy(from: &[T1], to: &mut [T2], byte_count: usize) { - let from_size = from.len() * mem::size_of::(); - let to_size = to.len() * mem::size_of::(); - assert!(from_size >= byte_count); - assert!(to_size >= byte_count); - let from_byte_ptr = from.as_ptr() as * const u8; - let to_byte_ptr = to.as_mut_ptr() as * mut u8; - unsafe { - ::std::ptr::copy_nonoverlapping(from_byte_ptr, to_byte_ptr, byte_count); - } -} - -pub fn blake2b(out: &mut [u8], key: &[u8], data: &[u8]) -{ - let mut ctx = blake2b_new(out.len(), key); - blake2b_update(&mut ctx, data); - blake2b_final(&mut ctx); - checked_mem_copy(&ctx.h, out, ctx.outlen as usize); -} - -pub struct Blake2bHasher(Blake2bCtx); - -impl ::std::hash::Hasher for Blake2bHasher { - fn write(&mut self, bytes: &[u8]) { - blake2b_update(&mut self.0, bytes); - } - - fn finish(&self) -> u64 { - assert!(self.0.outlen == 8, - "Hasher initialized with incompatible output length"); - u64::from_le(self.0.h[0]) - } -} - -impl Blake2bHasher { - pub fn new(outlen: usize, key: &[u8]) -> Blake2bHasher { - Blake2bHasher(blake2b_new(outlen, key)) - } - - pub fn finalize(&mut self) -> &[u8] { - if !self.0.finalized { - blake2b_final(&mut self.0); - } - debug_assert!(mem::size_of_val(&self.0.h) >= self.0.outlen as usize); - let raw_ptr = (&self.0.h[..]).as_ptr() as * const u8; - unsafe { - slice::from_raw_parts(raw_ptr, self.0.outlen as usize) - } - } -} - -impl ::std::fmt::Debug for Blake2bHasher { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - write!(fmt, "{:?}", self.0) - } -} - -#[cfg(test)] -fn selftest_seq(out: &mut [u8], seed: u32) -{ - let mut a: u32 = 0xDEAD4BADu32.wrapping_mul(seed); - let mut b: u32 = 1; - - for i in 0 .. 
out.len() { - let t: u32 = a.wrapping_add(b); - a = b; - b = t; - out[i] = ((t >> 24) & 0xFF) as u8; - } -} - -#[test] -fn blake2b_selftest() -{ - use std::hash::Hasher; - - // grand hash of hash results - const BLAKE2B_RES: [u8; 32] = [ - 0xC2, 0x3A, 0x78, 0x00, 0xD9, 0x81, 0x23, 0xBD, - 0x10, 0xF5, 0x06, 0xC6, 0x1E, 0x29, 0xDA, 0x56, - 0x03, 0xD7, 0x63, 0xB8, 0xBB, 0xAD, 0x2E, 0x73, - 0x7F, 0x5E, 0x76, 0x5A, 0x7B, 0xCC, 0xD4, 0x75 - ]; - - // parameter sets - const B2B_MD_LEN: [usize; 4] = [20, 32, 48, 64]; - const B2B_IN_LEN: [usize; 6] = [0, 3, 128, 129, 255, 1024]; - - let mut data = [0u8; 1024]; - let mut md = [0u8; 64]; - let mut key = [0u8; 64]; - - let mut hasher = Blake2bHasher::new(32, &[]); - - for i in 0 .. 4 { - let outlen = B2B_MD_LEN[i]; - for j in 0 .. 6 { - let inlen = B2B_IN_LEN[j]; - - selftest_seq(&mut data[.. inlen], inlen as u32); // unkeyed hash - blake2b(&mut md[.. outlen], &[], &data[.. inlen]); - hasher.write(&md[.. outlen]); // hash the hash - - selftest_seq(&mut key[0 .. outlen], outlen as u32); // keyed hash - blake2b(&mut md[.. outlen], &key[.. outlen], &data[.. inlen]); - hasher.write(&md[.. outlen]); // hash the hash - } - } - - // compute and compare the hash of hashes - let md = hasher.finalize(); - for i in 0 .. 32 { - assert_eq!(md[i], BLAKE2B_RES[i]); - } -} diff --git a/src/librustc_data_structures/const_cstr.rs b/src/librustc_data_structures/const_cstr.rs new file mode 100644 index 000000000000..4589d973b6a4 --- /dev/null +++ b/src/librustc_data_structures/const_cstr.rs @@ -0,0 +1,42 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +/// This macro creates a zero-overhead &CStr by adding a NUL terminator to +/// the string literal passed into it at compile-time. Use it like: +/// +/// ``` +/// let some_const_cstr = const_cstr!("abc"); +/// ``` +/// +/// The above is roughly equivalent to: +/// +/// ``` +/// let some_const_cstr = CStr::from_bytes_with_nul(b"abc\0").unwrap() +/// ``` +/// +/// Note that macro only checks the string literal for internal NULs if +/// debug-assertions are enabled in order to avoid runtime overhead in release +/// builds. +#[macro_export] +macro_rules! const_cstr { + ($s:expr) => ({ + use std::ffi::CStr; + + let str_plus_nul = concat!($s, "\0"); + + if cfg!(debug_assertions) { + CStr::from_bytes_with_nul(str_plus_nul.as_bytes()).unwrap() + } else { + unsafe { + CStr::from_bytes_with_nul_unchecked(str_plus_nul.as_bytes()) + } + } + }) +} diff --git a/src/librustc_data_structures/control_flow_graph/dominators/mod.rs b/src/librustc_data_structures/control_flow_graph/dominators/mod.rs deleted file mode 100644 index dc487f1162ca..000000000000 --- a/src/librustc_data_structures/control_flow_graph/dominators/mod.rs +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Algorithm citation: -//! A Simple, Fast Dominance Algorithm. -//! Keith D. Cooper, Timothy J. Harvey, and Ken Kennedy -//! Rice Computer Science TS-06-33870 -//! 
- -use super::ControlFlowGraph; -use super::iterate::reverse_post_order; -use super::super::indexed_vec::{IndexVec, Idx}; - -use std::fmt; - -#[cfg(test)] -mod test; - -pub fn dominators(graph: &G) -> Dominators { - let start_node = graph.start_node(); - let rpo = reverse_post_order(graph, start_node); - dominators_given_rpo(graph, &rpo) -} - -pub fn dominators_given_rpo(graph: &G, - rpo: &[G::Node]) - -> Dominators { - let start_node = graph.start_node(); - assert_eq!(rpo[0], start_node); - - // compute the post order index (rank) for each node - let mut post_order_rank: IndexVec = IndexVec::from_elem_n(usize::default(), - graph.num_nodes()); - for (index, node) in rpo.iter().rev().cloned().enumerate() { - post_order_rank[node] = index; - } - - let mut immediate_dominators: IndexVec> = - IndexVec::from_elem_n(Option::default(), graph.num_nodes()); - immediate_dominators[start_node] = Some(start_node); - - let mut changed = true; - while changed { - changed = false; - - for &node in &rpo[1..] 
{ - let mut new_idom = None; - for pred in graph.predecessors(node) { - if immediate_dominators[pred].is_some() { - // (*) - // (*) dominators for `pred` have been calculated - new_idom = intersect_opt(&post_order_rank, - &immediate_dominators, - new_idom, - Some(pred)); - } - } - - if new_idom != immediate_dominators[node] { - immediate_dominators[node] = new_idom; - changed = true; - } - } - } - - Dominators { - post_order_rank, - immediate_dominators, - } -} - -fn intersect_opt(post_order_rank: &IndexVec, - immediate_dominators: &IndexVec>, - node1: Option, - node2: Option) - -> Option { - match (node1, node2) { - (None, None) => None, - (Some(n), None) | (None, Some(n)) => Some(n), - (Some(n1), Some(n2)) => Some(intersect(post_order_rank, immediate_dominators, n1, n2)), - } -} - -fn intersect(post_order_rank: &IndexVec, - immediate_dominators: &IndexVec>, - mut node1: Node, - mut node2: Node) - -> Node { - while node1 != node2 { - while post_order_rank[node1] < post_order_rank[node2] { - node1 = immediate_dominators[node1].unwrap(); - } - - while post_order_rank[node2] < post_order_rank[node1] { - node2 = immediate_dominators[node2].unwrap(); - } - } - return node1; -} - -#[derive(Clone, Debug)] -pub struct Dominators { - post_order_rank: IndexVec, - immediate_dominators: IndexVec>, -} - -impl Dominators { - pub fn is_reachable(&self, node: Node) -> bool { - self.immediate_dominators[node].is_some() - } - - pub fn immediate_dominator(&self, node: Node) -> Node { - assert!(self.is_reachable(node), "node {:?} is not reachable", node); - self.immediate_dominators[node].unwrap() - } - - pub fn dominators(&self, node: Node) -> Iter { - assert!(self.is_reachable(node), "node {:?} is not reachable", node); - Iter { - dominators: self, - node: Some(node), - } - } - - pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool { - // FIXME -- could be optimized by using post-order-rank - self.dominators(node).any(|n| n == dom) - } - - #[cfg(test)] - fn 
all_immediate_dominators(&self) -> &IndexVec> { - &self.immediate_dominators - } -} - -pub struct Iter<'dom, Node: Idx + 'dom> { - dominators: &'dom Dominators, - node: Option, -} - -impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> { - type Item = Node; - - fn next(&mut self) -> Option { - if let Some(node) = self.node { - let dom = self.dominators.immediate_dominator(node); - if dom == node { - self.node = None; // reached the root - } else { - self.node = Some(dom); - } - return Some(node); - } else { - return None; - } - } -} - -pub struct DominatorTree { - root: N, - children: IndexVec>, -} - -impl DominatorTree { - pub fn children(&self, node: Node) -> &[Node] { - &self.children[node] - } -} - -impl fmt::Debug for DominatorTree { - fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { - fmt::Debug::fmt(&DominatorTreeNode { - tree: self, - node: self.root, - }, - fmt) - } -} - -struct DominatorTreeNode<'tree, Node: Idx> { - tree: &'tree DominatorTree, - node: Node, -} - -impl<'tree, Node: Idx> fmt::Debug for DominatorTreeNode<'tree, Node> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { - let subtrees: Vec<_> = self.tree - .children(self.node) - .iter() - .map(|&child| { - DominatorTreeNode { - tree: self.tree, - node: child, - } - }) - .collect(); - fmt.debug_tuple("") - .field(&self.node) - .field(&subtrees) - .finish() - } -} diff --git a/src/librustc_data_structures/control_flow_graph/iterate/mod.rs b/src/librustc_data_structures/control_flow_graph/iterate/mod.rs deleted file mode 100644 index 2d70b4063426..000000000000 --- a/src/librustc_data_structures/control_flow_graph/iterate/mod.rs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use super::ControlFlowGraph; -use super::super::indexed_vec::IndexVec; - -#[cfg(test)] -mod test; - -pub fn post_order_from(graph: &G, start_node: G::Node) -> Vec { - post_order_from_to(graph, start_node, None) -} - -pub fn post_order_from_to(graph: &G, - start_node: G::Node, - end_node: Option) - -> Vec { - let mut visited: IndexVec = IndexVec::from_elem_n(false, graph.num_nodes()); - let mut result: Vec = Vec::with_capacity(graph.num_nodes()); - if let Some(end_node) = end_node { - visited[end_node] = true; - } - post_order_walk(graph, start_node, &mut result, &mut visited); - result -} - -fn post_order_walk(graph: &G, - node: G::Node, - result: &mut Vec, - visited: &mut IndexVec) { - if visited[node] { - return; - } - visited[node] = true; - - for successor in graph.successors(node) { - post_order_walk(graph, successor, result, visited); - } - - result.push(node); -} - -pub fn reverse_post_order(graph: &G, start_node: G::Node) -> Vec { - let mut vec = post_order_from(graph, start_node); - vec.reverse(); - vec -} diff --git a/src/librustc_data_structures/control_flow_graph/mod.rs b/src/librustc_data_structures/control_flow_graph/mod.rs deleted file mode 100644 index 7bf776675c6a..000000000000 --- a/src/librustc_data_structures/control_flow_graph/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::indexed_vec::Idx; - -pub mod dominators; -pub mod iterate; -mod reference; - -#[cfg(test)] -mod test; - -pub trait ControlFlowGraph - where Self: for<'graph> GraphPredecessors<'graph, Item=::Node>, - Self: for<'graph> GraphSuccessors<'graph, Item=::Node> -{ - type Node: Idx; - - fn num_nodes(&self) -> usize; - fn start_node(&self) -> Self::Node; - fn predecessors<'graph>(&'graph self, node: Self::Node) - -> >::Iter; - fn successors<'graph>(&'graph self, node: Self::Node) - -> >::Iter; -} - -pub trait GraphPredecessors<'graph> { - type Item; - type Iter: Iterator; -} - -pub trait GraphSuccessors<'graph> { - type Item; - type Iter: Iterator; -} diff --git a/src/librustc_data_structures/control_flow_graph/reference.rs b/src/librustc_data_structures/control_flow_graph/reference.rs deleted file mode 100644 index 3b8b01f2ff43..000000000000 --- a/src/librustc_data_structures/control_flow_graph/reference.rs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::*; - -impl<'graph, G: ControlFlowGraph> ControlFlowGraph for &'graph G { - type Node = G::Node; - - fn num_nodes(&self) -> usize { - (**self).num_nodes() - } - - fn start_node(&self) -> Self::Node { - (**self).start_node() - } - - fn predecessors<'iter>(&'iter self, - node: Self::Node) - -> >::Iter { - (**self).predecessors(node) - } - - fn successors<'iter>(&'iter self, node: Self::Node) -> >::Iter { - (**self).successors(node) - } -} - -impl<'iter, 'graph, G: ControlFlowGraph> GraphPredecessors<'iter> for &'graph G { - type Item = G::Node; - type Iter = >::Iter; -} - -impl<'iter, 'graph, G: ControlFlowGraph> GraphSuccessors<'iter> for &'graph G { - type Item = G::Node; - type Iter = >::Iter; -} diff --git a/src/librustc_data_structures/control_flow_graph/test.rs b/src/librustc_data_structures/control_flow_graph/test.rs deleted file mode 100644 index f04b536bc185..000000000000 --- a/src/librustc_data_structures/control_flow_graph/test.rs +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::collections::HashMap; -use std::cmp::max; -use std::slice; -use std::iter; - -use super::{ControlFlowGraph, GraphPredecessors, GraphSuccessors}; - -pub struct TestGraph { - num_nodes: usize, - start_node: usize, - successors: HashMap>, - predecessors: HashMap>, -} - -impl TestGraph { - pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self { - let mut graph = TestGraph { - num_nodes: start_node + 1, - start_node, - successors: HashMap::new(), - predecessors: HashMap::new(), - }; - for &(source, target) in edges { - graph.num_nodes = max(graph.num_nodes, source + 1); - graph.num_nodes = max(graph.num_nodes, target + 1); - graph.successors.entry(source).or_insert(vec![]).push(target); - graph.predecessors.entry(target).or_insert(vec![]).push(source); - } - for node in 0..graph.num_nodes { - graph.successors.entry(node).or_insert(vec![]); - graph.predecessors.entry(node).or_insert(vec![]); - } - graph - } -} - -impl ControlFlowGraph for TestGraph { - type Node = usize; - - fn start_node(&self) -> usize { - self.start_node - } - - fn num_nodes(&self) -> usize { - self.num_nodes - } - - fn predecessors<'graph>(&'graph self, - node: usize) - -> >::Iter { - self.predecessors[&node].iter().cloned() - } - - fn successors<'graph>(&'graph self, node: usize) -> >::Iter { - self.successors[&node].iter().cloned() - } -} - -impl<'graph> GraphPredecessors<'graph> for TestGraph { - type Item = usize; - type Iter = iter::Cloned>; -} - -impl<'graph> GraphSuccessors<'graph> for TestGraph { - type Item = usize; - type Iter = iter::Cloned>; -} diff --git a/src/librustc/ich/fingerprint.rs b/src/librustc_data_structures/fingerprint.rs similarity index 80% rename from src/librustc/ich/fingerprint.rs rename to src/librustc_data_structures/fingerprint.rs index a7adf28c481b..aa9ddda2b936 100644 --- a/src/librustc/ich/fingerprint.rs +++ b/src/librustc_data_structures/fingerprint.rs @@ -9,7 +9,7 @@ // except according to those terms. 
use std::mem; -use rustc_data_structures::stable_hasher; +use stable_hasher; use serialize; use serialize::opaque::{EncodeResult, Encoder, Decoder}; @@ -45,6 +45,18 @@ impl Fingerprint { ) } + // Combines two hashes in an order independent way. Make sure this is what + // you want. + #[inline] + pub fn combine_commutative(self, other: Fingerprint) -> Fingerprint { + let a = (self.1 as u128) << 64 | self.0 as u128; + let b = (other.1 as u128) << 64 | other.0 as u128; + + let c = a.wrapping_add(b); + + Fingerprint((c >> 64) as u64, c as u64) + } + pub fn to_hex(&self) -> String { format!("{:x}{:x}", self.0, self.1) } @@ -52,7 +64,8 @@ impl Fingerprint { pub fn encode_opaque(&self, encoder: &mut Encoder) -> EncodeResult { let bytes: [u8; 16] = unsafe { mem::transmute([self.0.to_le(), self.1.to_le()]) }; - encoder.emit_raw_bytes(&bytes) + encoder.emit_raw_bytes(&bytes); + Ok(()) } pub fn decode_opaque<'a>(decoder: &mut Decoder<'a>) -> Result { @@ -67,7 +80,7 @@ impl Fingerprint { } impl ::std::fmt::Display for Fingerprint { - fn fmt(&self, formatter: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + fn fmt(&self, formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(formatter, "{:x}-{:x}", self.0, self.1) } } @@ -79,20 +92,13 @@ impl stable_hasher::StableHasherResult for Fingerprint { } } -impl stable_hasher::HashStable for Fingerprint { - #[inline] - fn hash_stable(&self, - _: &mut CTX, - hasher: &mut stable_hasher::StableHasher) { - ::std::hash::Hash::hash(self, hasher); - } -} +impl_stable_hash_via_hash!(Fingerprint); impl serialize::UseSpecializedEncodable for Fingerprint { } impl serialize::UseSpecializedDecodable for Fingerprint { } -impl<'a> serialize::SpecializedEncoder for serialize::opaque::Encoder<'a> { +impl serialize::SpecializedEncoder for serialize::opaque::Encoder { fn specialized_encode(&mut self, f: &Fingerprint) -> Result<(), Self::Error> { f.encode_opaque(self) } diff --git a/src/librustc_data_structures/flock.rs 
b/src/librustc_data_structures/flock.rs index ff1ebb11b722..3f248dadb66c 100644 --- a/src/librustc_data_structures/flock.rs +++ b/src/librustc_data_structures/flock.rs @@ -254,8 +254,8 @@ mod imp { type ULONG_PTR = usize; type LPOVERLAPPED = *mut OVERLAPPED; - const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x00000002; - const LOCKFILE_FAIL_IMMEDIATELY: DWORD = 0x00000001; + const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x0000_0002; + const LOCKFILE_FAIL_IMMEDIATELY: DWORD = 0x0000_0001; const FILE_SHARE_DELETE: DWORD = 0x4; const FILE_SHARE_READ: DWORD = 0x1; diff --git a/src/librustc_data_structures/fx.rs b/src/librustc_data_structures/fx.rs index 5bf25437763c..3bf3170d1df1 100644 --- a/src/librustc_data_structures/fx.rs +++ b/src/librustc_data_structures/fx.rs @@ -10,11 +10,11 @@ use std::collections::{HashMap, HashSet}; use std::default::Default; -use std::hash::{Hasher, Hash, BuildHasherDefault}; -use std::ops::BitXor; +use std::hash::Hash; -pub type FxHashMap = HashMap>; -pub type FxHashSet = HashSet>; +pub use rustc_hash::FxHashMap; +pub use rustc_hash::FxHashSet; +pub use rustc_hash::FxHasher; #[allow(non_snake_case)] pub fn FxHashMap() -> FxHashMap { @@ -26,84 +26,3 @@ pub fn FxHashSet() -> FxHashSet { HashSet::default() } -/// A speedy hash algorithm for use within rustc. The hashmap in liballoc -/// by default uses SipHash which isn't quite as speedy as we want. In the -/// compiler we're not really worried about DOS attempts, so we use a fast -/// non-cryptographic hash. -/// -/// This is the same as the algorithm used by Firefox -- which is a homespun -/// one not based on any widely-known algorithm -- though modified to produce -/// 64-bit hash values instead of 32-bit hash values. It consistently -/// out-performs an FNV-based hash within rustc itself -- the collision rate is -/// similar or slightly worse than FNV, but the speed of the hash function -/// itself is much higher because it works on up to 8 bytes at a time. 
-pub struct FxHasher { - hash: usize -} - -#[cfg(target_pointer_width = "32")] -const K: usize = 0x9e3779b9; -#[cfg(target_pointer_width = "64")] -const K: usize = 0x517cc1b727220a95; - -impl Default for FxHasher { - #[inline] - fn default() -> FxHasher { - FxHasher { hash: 0 } - } -} - -impl FxHasher { - #[inline] - fn add_to_hash(&mut self, i: usize) { - self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K); - } -} - -impl Hasher for FxHasher { - #[inline] - fn write(&mut self, bytes: &[u8]) { - for byte in bytes { - let i = *byte; - self.add_to_hash(i as usize); - } - } - - #[inline] - fn write_u8(&mut self, i: u8) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u16(&mut self, i: u16) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_u32(&mut self, i: u32) { - self.add_to_hash(i as usize); - } - - #[cfg(target_pointer_width = "32")] - #[inline] - fn write_u64(&mut self, i: u64) { - self.add_to_hash(i as usize); - self.add_to_hash((i >> 32) as usize); - } - - #[cfg(target_pointer_width = "64")] - #[inline] - fn write_u64(&mut self, i: u64) { - self.add_to_hash(i as usize); - } - - #[inline] - fn write_usize(&mut self, i: usize) { - self.add_to_hash(i); - } - - #[inline] - fn finish(&self) -> u64 { - self.hash as u64 - } -} diff --git a/src/librustc_data_structures/graph/dominators/mod.rs b/src/librustc_data_structures/graph/dominators/mod.rs new file mode 100644 index 000000000000..e54147cbe7c8 --- /dev/null +++ b/src/librustc_data_structures/graph/dominators/mod.rs @@ -0,0 +1,215 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Algorithm citation: +//! A Simple, Fast Dominance Algorithm. +//! Keith D. 
Cooper, Timothy J. Harvey, and Ken Kennedy +//! Rice Computer Science TS-06-33870 +//! + +use super::super::indexed_vec::{Idx, IndexVec}; +use super::iterate::reverse_post_order; +use super::ControlFlowGraph; + +use std::fmt; + +#[cfg(test)] +mod test; + +pub fn dominators(graph: &G) -> Dominators { + let start_node = graph.start_node(); + let rpo = reverse_post_order(graph, start_node); + dominators_given_rpo(graph, &rpo) +} + +pub fn dominators_given_rpo( + graph: &G, + rpo: &[G::Node], +) -> Dominators { + let start_node = graph.start_node(); + assert_eq!(rpo[0], start_node); + + // compute the post order index (rank) for each node + let mut post_order_rank: IndexVec = + IndexVec::from_elem_n(usize::default(), graph.num_nodes()); + for (index, node) in rpo.iter().rev().cloned().enumerate() { + post_order_rank[node] = index; + } + + let mut immediate_dominators: IndexVec> = + IndexVec::from_elem_n(Option::default(), graph.num_nodes()); + immediate_dominators[start_node] = Some(start_node); + + let mut changed = true; + while changed { + changed = false; + + for &node in &rpo[1..] 
{ + let mut new_idom = None; + for pred in graph.predecessors(node) { + if immediate_dominators[pred].is_some() { + // (*) + // (*) dominators for `pred` have been calculated + new_idom = intersect_opt( + &post_order_rank, + &immediate_dominators, + new_idom, + Some(pred), + ); + } + } + + if new_idom != immediate_dominators[node] { + immediate_dominators[node] = new_idom; + changed = true; + } + } + } + + Dominators { + post_order_rank, + immediate_dominators, + } +} + +fn intersect_opt( + post_order_rank: &IndexVec, + immediate_dominators: &IndexVec>, + node1: Option, + node2: Option, +) -> Option { + match (node1, node2) { + (None, None) => None, + (Some(n), None) | (None, Some(n)) => Some(n), + (Some(n1), Some(n2)) => Some(intersect(post_order_rank, immediate_dominators, n1, n2)), + } +} + +fn intersect( + post_order_rank: &IndexVec, + immediate_dominators: &IndexVec>, + mut node1: Node, + mut node2: Node, +) -> Node { + while node1 != node2 { + while post_order_rank[node1] < post_order_rank[node2] { + node1 = immediate_dominators[node1].unwrap(); + } + + while post_order_rank[node2] < post_order_rank[node1] { + node2 = immediate_dominators[node2].unwrap(); + } + } + + node1 +} + +#[derive(Clone, Debug)] +pub struct Dominators { + post_order_rank: IndexVec, + immediate_dominators: IndexVec>, +} + +impl Dominators { + pub fn is_reachable(&self, node: Node) -> bool { + self.immediate_dominators[node].is_some() + } + + pub fn immediate_dominator(&self, node: Node) -> Node { + assert!(self.is_reachable(node), "node {:?} is not reachable", node); + self.immediate_dominators[node].unwrap() + } + + pub fn dominators(&self, node: Node) -> Iter { + assert!(self.is_reachable(node), "node {:?} is not reachable", node); + Iter { + dominators: self, + node: Some(node), + } + } + + pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool { + // FIXME -- could be optimized by using post-order-rank + self.dominators(node).any(|n| n == dom) + } + + #[cfg(test)] + fn 
all_immediate_dominators(&self) -> &IndexVec> { + &self.immediate_dominators + } +} + +pub struct Iter<'dom, Node: Idx + 'dom> { + dominators: &'dom Dominators, + node: Option, +} + +impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> { + type Item = Node; + + fn next(&mut self) -> Option { + if let Some(node) = self.node { + let dom = self.dominators.immediate_dominator(node); + if dom == node { + self.node = None; // reached the root + } else { + self.node = Some(dom); + } + return Some(node); + } else { + return None; + } + } +} + +pub struct DominatorTree { + root: N, + children: IndexVec>, +} + +impl DominatorTree { + pub fn children(&self, node: Node) -> &[Node] { + &self.children[node] + } +} + +impl fmt::Debug for DominatorTree { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt( + &DominatorTreeNode { + tree: self, + node: self.root, + }, + fmt, + ) + } +} + +struct DominatorTreeNode<'tree, Node: Idx> { + tree: &'tree DominatorTree, + node: Node, +} + +impl<'tree, Node: Idx> fmt::Debug for DominatorTreeNode<'tree, Node> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let subtrees: Vec<_> = self.tree + .children(self.node) + .iter() + .map(|&child| DominatorTreeNode { + tree: self.tree, + node: child, + }) + .collect(); + fmt.debug_tuple("") + .field(&self.node) + .field(&subtrees) + .finish() + } +} diff --git a/src/librustc_data_structures/control_flow_graph/dominators/test.rs b/src/librustc_data_structures/graph/dominators/test.rs similarity index 100% rename from src/librustc_data_structures/control_flow_graph/dominators/test.rs rename to src/librustc_data_structures/graph/dominators/test.rs diff --git a/src/librustc_data_structures/graph/implementation/mod.rs b/src/librustc_data_structures/graph/implementation/mod.rs new file mode 100644 index 000000000000..baac75658686 --- /dev/null +++ b/src/librustc_data_structures/graph/implementation/mod.rs @@ -0,0 +1,417 @@ +// Copyright 2012-2014 The Rust Project 
Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A graph module for use in dataflow, region resolution, and elsewhere. +//! +//! # Interface details +//! +//! You customize the graph by specifying a "node data" type `N` and an +//! "edge data" type `E`. You can then later gain access (mutable or +//! immutable) to these "user-data" bits. Currently, you can only add +//! nodes or edges to the graph. You cannot remove or modify them once +//! added. This could be changed if we have a need. +//! +//! # Implementation details +//! +//! The main tricky thing about this code is the way that edges are +//! stored. The edges are stored in a central array, but they are also +//! threaded onto two linked lists for each node, one for incoming edges +//! and one for outgoing edges. Note that every edge is a member of some +//! incoming list and some outgoing list. Basically you can load the +//! first index of the linked list from the node data structures (the +//! field `first_edge`) and then, for each edge, load the next index from +//! the field `next_edge`). Each of those fields is an array that should +//! be indexed by the direction (see the type `Direction`). 
+ +use bitvec::BitArray; +use std::fmt::Debug; +use std::usize; +use snapshot_vec::{SnapshotVec, SnapshotVecDelegate}; + +#[cfg(test)] +mod tests; + +pub struct Graph { + nodes: SnapshotVec>, + edges: SnapshotVec>, +} + +pub struct Node { + first_edge: [EdgeIndex; 2], // see module comment + pub data: N, +} + +#[derive(Debug)] +pub struct Edge { + next_edge: [EdgeIndex; 2], // see module comment + source: NodeIndex, + target: NodeIndex, + pub data: E, +} + +impl SnapshotVecDelegate for Node { + type Value = Node; + type Undo = (); + + fn reverse(_: &mut Vec>, _: ()) {} +} + +impl SnapshotVecDelegate for Edge { + type Value = Edge; + type Undo = (); + + fn reverse(_: &mut Vec>, _: ()) {} +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] +pub struct NodeIndex(pub usize); + +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] +pub struct EdgeIndex(pub usize); + +pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX); + +// Use a private field here to guarantee no more instances are created: +#[derive(Copy, Clone, Debug, PartialEq)] +pub struct Direction { + repr: usize, +} + +pub const OUTGOING: Direction = Direction { repr: 0 }; + +pub const INCOMING: Direction = Direction { repr: 1 }; + +impl NodeIndex { + /// Returns unique id (unique with respect to the graph holding associated node). 
+ pub fn node_id(self) -> usize { + self.0 + } +} + +impl Graph { + pub fn new() -> Graph { + Graph { + nodes: SnapshotVec::new(), + edges: SnapshotVec::new(), + } + } + + pub fn with_capacity(nodes: usize, edges: usize) -> Graph { + Graph { + nodes: SnapshotVec::with_capacity(nodes), + edges: SnapshotVec::with_capacity(edges), + } + } + + // # Simple accessors + + #[inline] + pub fn all_nodes(&self) -> &[Node] { + &self.nodes + } + + #[inline] + pub fn len_nodes(&self) -> usize { + self.nodes.len() + } + + #[inline] + pub fn all_edges(&self) -> &[Edge] { + &self.edges + } + + #[inline] + pub fn len_edges(&self) -> usize { + self.edges.len() + } + + // # Node construction + + pub fn next_node_index(&self) -> NodeIndex { + NodeIndex(self.nodes.len()) + } + + pub fn add_node(&mut self, data: N) -> NodeIndex { + let idx = self.next_node_index(); + self.nodes.push(Node { + first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX], + data, + }); + idx + } + + pub fn mut_node_data(&mut self, idx: NodeIndex) -> &mut N { + &mut self.nodes[idx.0].data + } + + pub fn node_data(&self, idx: NodeIndex) -> &N { + &self.nodes[idx.0].data + } + + pub fn node(&self, idx: NodeIndex) -> &Node { + &self.nodes[idx.0] + } + + // # Edge construction and queries + + pub fn next_edge_index(&self) -> EdgeIndex { + EdgeIndex(self.edges.len()) + } + + pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex { + debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data); + + let idx = self.next_edge_index(); + + // read current first of the list of edges from each node + let source_first = self.nodes[source.0].first_edge[OUTGOING.repr]; + let target_first = self.nodes[target.0].first_edge[INCOMING.repr]; + + // create the new edge, with the previous firsts from each node + // as the next pointers + self.edges.push(Edge { + next_edge: [source_first, target_first], + source, + target, + data, + }); + + // adjust the firsts for each node target be the next object. 
+ self.nodes[source.0].first_edge[OUTGOING.repr] = idx; + self.nodes[target.0].first_edge[INCOMING.repr] = idx; + + idx + } + + pub fn edge(&self, idx: EdgeIndex) -> &Edge { + &self.edges[idx.0] + } + + // # Iterating over nodes, edges + + pub fn enumerated_nodes(&self) -> impl Iterator)> { + self.nodes + .iter() + .enumerate() + .map(|(idx, n)| (NodeIndex(idx), n)) + } + + pub fn enumerated_edges(&self) -> impl Iterator)> { + self.edges + .iter() + .enumerate() + .map(|(idx, e)| (EdgeIndex(idx), e)) + } + + pub fn each_node<'a>(&'a self, mut f: impl FnMut(NodeIndex, &'a Node) -> bool) -> bool { + //! Iterates over all edges defined in the graph. + self.enumerated_nodes() + .all(|(node_idx, node)| f(node_idx, node)) + } + + pub fn each_edge<'a>(&'a self, mut f: impl FnMut(EdgeIndex, &'a Edge) -> bool) -> bool { + //! Iterates over all edges defined in the graph + self.enumerated_edges() + .all(|(edge_idx, edge)| f(edge_idx, edge)) + } + + pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges { + self.adjacent_edges(source, OUTGOING) + } + + pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges { + self.adjacent_edges(source, INCOMING) + } + + pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges { + let first_edge = self.node(source).first_edge[direction.repr]; + AdjacentEdges { + graph: self, + direction, + next: first_edge, + } + } + + pub fn successor_nodes<'a>( + &'a self, + source: NodeIndex, + ) -> impl Iterator + 'a { + self.outgoing_edges(source).targets() + } + + pub fn predecessor_nodes<'a>( + &'a self, + target: NodeIndex, + ) -> impl Iterator + 'a { + self.incoming_edges(target).sources() + } + + pub fn depth_traverse<'a>( + &'a self, + start: NodeIndex, + direction: Direction, + ) -> DepthFirstTraversal<'a, N, E> { + DepthFirstTraversal::with_start_node(self, start, direction) + } + + pub fn nodes_in_postorder( + &self, + direction: Direction, + entry_node: NodeIndex, + ) -> Vec { + let mut visited 
= BitArray::new(self.len_nodes()); + let mut stack = vec![]; + let mut result = Vec::with_capacity(self.len_nodes()); + let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| { + if visited.insert(node.0) { + stack.push((node, self.adjacent_edges(node, direction))); + } + }; + + for node in Some(entry_node) + .into_iter() + .chain(self.enumerated_nodes().map(|(node, _)| node)) + { + push_node(&mut stack, node); + while let Some((node, mut iter)) = stack.pop() { + if let Some((_, child)) = iter.next() { + let target = child.source_or_target(direction); + // the current node needs more processing, so + // add it back to the stack + stack.push((node, iter)); + // and then push the new node + push_node(&mut stack, target); + } else { + result.push(node); + } + } + } + + assert_eq!(result.len(), self.len_nodes()); + result + } +} + +// # Iterators + +pub struct AdjacentEdges<'g, N, E> +where + N: 'g, + E: 'g, +{ + graph: &'g Graph, + direction: Direction, + next: EdgeIndex, +} + +impl<'g, N: Debug, E: Debug> AdjacentEdges<'g, N, E> { + fn targets(self) -> impl Iterator + 'g { + self.into_iter().map(|(_, edge)| edge.target) + } + + fn sources(self) -> impl Iterator + 'g { + self.into_iter().map(|(_, edge)| edge.source) + } +} + +impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> { + type Item = (EdgeIndex, &'g Edge); + + fn next(&mut self) -> Option<(EdgeIndex, &'g Edge)> { + let edge_index = self.next; + if edge_index == INVALID_EDGE_INDEX { + return None; + } + + let edge = self.graph.edge(edge_index); + self.next = edge.next_edge[self.direction.repr]; + Some((edge_index, edge)) + } + + fn size_hint(&self) -> (usize, Option) { + // At most, all the edges in the graph. 
+ (0, Some(self.graph.len_edges())) + } +} + +pub struct DepthFirstTraversal<'g, N, E> +where + N: 'g, + E: 'g, +{ + graph: &'g Graph, + stack: Vec, + visited: BitArray, + direction: Direction, +} + +impl<'g, N: Debug, E: Debug> DepthFirstTraversal<'g, N, E> { + pub fn with_start_node( + graph: &'g Graph, + start_node: NodeIndex, + direction: Direction, + ) -> Self { + let mut visited = BitArray::new(graph.len_nodes()); + visited.insert(start_node.node_id()); + DepthFirstTraversal { + graph, + stack: vec![start_node], + visited, + direction, + } + } + + fn visit(&mut self, node: NodeIndex) { + if self.visited.insert(node.node_id()) { + self.stack.push(node); + } + } +} + +impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> { + type Item = NodeIndex; + + fn next(&mut self) -> Option { + let next = self.stack.pop(); + if let Some(idx) = next { + for (_, edge) in self.graph.adjacent_edges(idx, self.direction) { + let target = edge.source_or_target(self.direction); + self.visit(target); + } + } + next + } + + fn size_hint(&self) -> (usize, Option) { + // We will visit every node in the graph exactly once. + let remaining = self.graph.len_nodes() - self.visited.count(); + (remaining, Some(remaining)) + } +} + +impl<'g, N: Debug, E: Debug> ExactSizeIterator for DepthFirstTraversal<'g, N, E> {} + +impl Edge { + pub fn source(&self) -> NodeIndex { + self.source + } + + pub fn target(&self) -> NodeIndex { + self.target + } + + pub fn source_or_target(&self, direction: Direction) -> NodeIndex { + if direction == OUTGOING { + self.target + } else { + self.source + } + } +} diff --git a/src/librustc_data_structures/graph/implementation/tests.rs b/src/librustc_data_structures/graph/implementation/tests.rs new file mode 100644 index 000000000000..3814827b5df6 --- /dev/null +++ b/src/librustc_data_structures/graph/implementation/tests.rs @@ -0,0 +1,139 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use graph::implementation::*; +use std::fmt::Debug; + +type TestGraph = Graph<&'static str, &'static str>; + +fn create_graph() -> TestGraph { + let mut graph = Graph::new(); + + // Create a simple graph + // + // F + // | + // V + // A --> B --> C + // | ^ + // v | + // D --> E + + let a = graph.add_node("A"); + let b = graph.add_node("B"); + let c = graph.add_node("C"); + let d = graph.add_node("D"); + let e = graph.add_node("E"); + let f = graph.add_node("F"); + + graph.add_edge(a, b, "AB"); + graph.add_edge(b, c, "BC"); + graph.add_edge(b, d, "BD"); + graph.add_edge(d, e, "DE"); + graph.add_edge(e, c, "EC"); + graph.add_edge(f, b, "FB"); + + return graph; +} + +#[test] +fn each_node() { + let graph = create_graph(); + let expected = ["A", "B", "C", "D", "E", "F"]; + graph.each_node(|idx, node| { + assert_eq!(&expected[idx.0], graph.node_data(idx)); + assert_eq!(expected[idx.0], node.data); + true + }); +} + +#[test] +fn each_edge() { + let graph = create_graph(); + let expected = ["AB", "BC", "BD", "DE", "EC", "FB"]; + graph.each_edge(|idx, edge| { + assert_eq!(expected[idx.0], edge.data); + true + }); +} + +fn test_adjacent_edges(graph: &Graph, + start_index: NodeIndex, + start_data: N, + expected_incoming: &[(E, N)], + expected_outgoing: &[(E, N)]) { + assert!(graph.node_data(start_index) == &start_data); + + let mut counter = 0; + for (edge_index, edge) in graph.incoming_edges(start_index) { + assert!(counter < expected_incoming.len()); + debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}", + counter, + expected_incoming[counter], + edge_index, + edge); + match expected_incoming[counter] { + (ref e, ref n) => { + assert!(e == &edge.data); + 
assert!(n == graph.node_data(edge.source())); + assert!(start_index == edge.target); + } + } + counter += 1; + } + assert_eq!(counter, expected_incoming.len()); + + let mut counter = 0; + for (edge_index, edge) in graph.outgoing_edges(start_index) { + assert!(counter < expected_outgoing.len()); + debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}", + counter, + expected_outgoing[counter], + edge_index, + edge); + match expected_outgoing[counter] { + (ref e, ref n) => { + assert!(e == &edge.data); + assert!(start_index == edge.source); + assert!(n == graph.node_data(edge.target)); + } + } + counter += 1; + } + assert_eq!(counter, expected_outgoing.len()); +} + +#[test] +fn each_adjacent_from_a() { + let graph = create_graph(); + test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]); +} + +#[test] +fn each_adjacent_from_b() { + let graph = create_graph(); + test_adjacent_edges(&graph, + NodeIndex(1), + "B", + &[("FB", "F"), ("AB", "A")], + &[("BD", "D"), ("BC", "C")]); +} + +#[test] +fn each_adjacent_from_c() { + let graph = create_graph(); + test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]); +} + +#[test] +fn each_adjacent_from_d() { + let graph = create_graph(); + test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]); +} diff --git a/src/librustc_data_structures/graph/iterate/mod.rs b/src/librustc_data_structures/graph/iterate/mod.rs new file mode 100644 index 000000000000..3afdc88d6027 --- /dev/null +++ b/src/librustc_data_structures/graph/iterate/mod.rs @@ -0,0 +1,63 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::super::indexed_vec::IndexVec; +use super::{DirectedGraph, WithSuccessors, WithNumNodes}; + +#[cfg(test)] +mod test; + +pub fn post_order_from( + graph: &G, + start_node: G::Node, +) -> Vec { + post_order_from_to(graph, start_node, None) +} + +pub fn post_order_from_to( + graph: &G, + start_node: G::Node, + end_node: Option, +) -> Vec { + let mut visited: IndexVec = IndexVec::from_elem_n(false, graph.num_nodes()); + let mut result: Vec = Vec::with_capacity(graph.num_nodes()); + if let Some(end_node) = end_node { + visited[end_node] = true; + } + post_order_walk(graph, start_node, &mut result, &mut visited); + result +} + +fn post_order_walk( + graph: &G, + node: G::Node, + result: &mut Vec, + visited: &mut IndexVec, +) { + if visited[node] { + return; + } + visited[node] = true; + + for successor in graph.successors(node) { + post_order_walk(graph, successor, result, visited); + } + + result.push(node); +} + +pub fn reverse_post_order( + graph: &G, + start_node: G::Node, +) -> Vec { + let mut vec = post_order_from(graph, start_node); + vec.reverse(); + vec +} diff --git a/src/librustc_data_structures/control_flow_graph/iterate/test.rs b/src/librustc_data_structures/graph/iterate/test.rs similarity index 100% rename from src/librustc_data_structures/control_flow_graph/iterate/test.rs rename to src/librustc_data_structures/graph/iterate/test.rs diff --git a/src/librustc_data_structures/graph/mod.rs b/src/librustc_data_structures/graph/mod.rs index 56d5f5ffa3f6..7265e4e8c7c6 100644 --- a/src/librustc_data_structures/graph/mod.rs +++ b/src/librustc_data_structures/graph/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,444 +8,72 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! 
A graph module for use in dataflow, region resolution, and elsewhere. -//! -//! # Interface details -//! -//! You customize the graph by specifying a "node data" type `N` and an -//! "edge data" type `E`. You can then later gain access (mutable or -//! immutable) to these "user-data" bits. Currently, you can only add -//! nodes or edges to the graph. You cannot remove or modify them once -//! added. This could be changed if we have a need. -//! -//! # Implementation details -//! -//! The main tricky thing about this code is the way that edges are -//! stored. The edges are stored in a central array, but they are also -//! threaded onto two linked lists for each node, one for incoming edges -//! and one for outgoing edges. Note that every edge is a member of some -//! incoming list and some outgoing list. Basically you can load the -//! first index of the linked list from the node data structures (the -//! field `first_edge`) and then, for each edge, load the next index from -//! the field `next_edge`). Each of those fields is an array that should -//! be indexed by the direction (see the type `Direction`). 
+use super::indexed_vec::Idx; -use bitvec::BitVector; -use std::fmt::Debug; -use std::usize; -use snapshot_vec::{SnapshotVec, SnapshotVecDelegate}; +pub mod dominators; +pub mod implementation; +pub mod iterate; +mod reference; +pub mod scc; #[cfg(test)] -mod tests; +mod test; -pub struct Graph { - nodes: SnapshotVec>, - edges: SnapshotVec>, +pub trait DirectedGraph { + type Node: Idx; } -pub struct Node { - first_edge: [EdgeIndex; 2], // see module comment - pub data: N, +pub trait WithNumNodes: DirectedGraph { + fn num_nodes(&self) -> usize; } -#[derive(Debug)] -pub struct Edge { - next_edge: [EdgeIndex; 2], // see module comment - source: NodeIndex, - target: NodeIndex, - pub data: E, -} - -impl SnapshotVecDelegate for Node { - type Value = Node; - type Undo = (); - - fn reverse(_: &mut Vec>, _: ()) {} -} - -impl SnapshotVecDelegate for Edge { - type Value = Edge; - type Undo = (); - - fn reverse(_: &mut Vec>, _: ()) {} -} - -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub struct NodeIndex(pub usize); - -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] -pub struct EdgeIndex(pub usize); - -pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX); - -// Use a private field here to guarantee no more instances are created: -#[derive(Copy, Clone, Debug, PartialEq)] -pub struct Direction { - repr: usize, -} - -pub const OUTGOING: Direction = Direction { repr: 0 }; - -pub const INCOMING: Direction = Direction { repr: 1 }; - -impl NodeIndex { - /// Returns unique id (unique with respect to the graph holding associated node). 
- pub fn node_id(&self) -> usize { - self.0 - } -} - -impl Graph { - pub fn new() -> Graph { - Graph { - nodes: SnapshotVec::new(), - edges: SnapshotVec::new(), - } - } - - pub fn with_capacity(nodes: usize, edges: usize) -> Graph { - Graph { - nodes: SnapshotVec::with_capacity(nodes), - edges: SnapshotVec::with_capacity(edges), - } - } - - // # Simple accessors - - #[inline] - pub fn all_nodes(&self) -> &[Node] { - &self.nodes - } - - #[inline] - pub fn len_nodes(&self) -> usize { - self.nodes.len() - } - - #[inline] - pub fn all_edges(&self) -> &[Edge] { - &self.edges - } - - #[inline] - pub fn len_edges(&self) -> usize { - self.edges.len() - } - - // # Node construction - - pub fn next_node_index(&self) -> NodeIndex { - NodeIndex(self.nodes.len()) - } - - pub fn add_node(&mut self, data: N) -> NodeIndex { - let idx = self.next_node_index(); - self.nodes.push(Node { - first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX], - data, - }); - idx - } - - pub fn mut_node_data(&mut self, idx: NodeIndex) -> &mut N { - &mut self.nodes[idx.0].data - } - - pub fn node_data(&self, idx: NodeIndex) -> &N { - &self.nodes[idx.0].data - } - - pub fn node(&self, idx: NodeIndex) -> &Node { - &self.nodes[idx.0] - } - - // # Edge construction and queries - - pub fn next_edge_index(&self) -> EdgeIndex { - EdgeIndex(self.edges.len()) - } - - pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex { - debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data); - - let idx = self.next_edge_index(); - - // read current first of the list of edges from each node - let source_first = self.nodes[source.0].first_edge[OUTGOING.repr]; - let target_first = self.nodes[target.0].first_edge[INCOMING.repr]; - - // create the new edge, with the previous firsts from each node - // as the next pointers - self.edges.push(Edge { - next_edge: [source_first, target_first], - source, - target, - data, - }); - - // adjust the firsts for each node target be the next object. 
- self.nodes[source.0].first_edge[OUTGOING.repr] = idx; - self.nodes[target.0].first_edge[INCOMING.repr] = idx; - - return idx; - } - - pub fn edge(&self, idx: EdgeIndex) -> &Edge { - &self.edges[idx.0] - } - - // # Iterating over nodes, edges - - pub fn enumerated_nodes(&self) -> EnumeratedNodes { - EnumeratedNodes { - iter: self.nodes.iter().enumerate() - } - } - - pub fn enumerated_edges(&self) -> EnumeratedEdges { - EnumeratedEdges { - iter: self.edges.iter().enumerate() - } - } - - pub fn each_node<'a, F>(&'a self, mut f: F) -> bool - where F: FnMut(NodeIndex, &'a Node) -> bool - { - //! Iterates over all edges defined in the graph. - self.enumerated_nodes().all(|(node_idx, node)| f(node_idx, node)) - } - - pub fn each_edge<'a, F>(&'a self, mut f: F) -> bool - where F: FnMut(EdgeIndex, &'a Edge) -> bool - { - //! Iterates over all edges defined in the graph - self.enumerated_edges().all(|(edge_idx, edge)| f(edge_idx, edge)) - } - - pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges { - self.adjacent_edges(source, OUTGOING) - } - - pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges { - self.adjacent_edges(source, INCOMING) - } - - pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges { - let first_edge = self.node(source).first_edge[direction.repr]; - AdjacentEdges { - graph: self, - direction, - next: first_edge, - } - } - - pub fn successor_nodes(&self, source: NodeIndex) -> AdjacentTargets { - self.outgoing_edges(source).targets() - } - - pub fn predecessor_nodes(&self, target: NodeIndex) -> AdjacentSources { - self.incoming_edges(target).sources() - } - - pub fn depth_traverse<'a>(&'a self, - start: NodeIndex, - direction: Direction) - -> DepthFirstTraversal<'a, N, E> { - DepthFirstTraversal::with_start_node(self, start, direction) - } - - pub fn nodes_in_postorder<'a>(&'a self, - direction: Direction, - entry_node: NodeIndex) - -> Vec - { - let mut visited = BitVector::new(self.len_nodes()); - 
let mut stack = vec![]; - let mut result = Vec::with_capacity(self.len_nodes()); - let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| { - if visited.insert(node.0) { - stack.push((node, self.adjacent_edges(node, direction))); - } - }; - - for node in Some(entry_node).into_iter() - .chain(self.enumerated_nodes().map(|(node, _)| node)) - { - push_node(&mut stack, node); - while let Some((node, mut iter)) = stack.pop() { - if let Some((_, child)) = iter.next() { - let target = child.source_or_target(direction); - // the current node needs more processing, so - // add it back to the stack - stack.push((node, iter)); - // and then push the new node - push_node(&mut stack, target); - } else { - result.push(node); - } - } - } - - assert_eq!(result.len(), self.len_nodes()); - result - } -} - -// # Iterators - -pub struct EnumeratedNodes<'g, N> - where N: 'g, +pub trait WithSuccessors: DirectedGraph +where + Self: for<'graph> GraphSuccessors<'graph, Item = ::Node>, { - iter: ::std::iter::Enumerate<::std::slice::Iter<'g, Node>> + fn successors<'graph>( + &'graph self, + node: Self::Node, + ) -> >::Iter; } -impl<'g, N: Debug> Iterator for EnumeratedNodes<'g, N> { - type Item = (NodeIndex, &'g Node); - - fn next(&mut self) -> Option<(NodeIndex, &'g Node)> { - self.iter.next().map(|(idx, n)| (NodeIndex(idx), n)) - } +pub trait GraphSuccessors<'graph> { + type Item; + type Iter: Iterator; } -pub struct EnumeratedEdges<'g, E> - where E: 'g, +pub trait WithPredecessors: DirectedGraph +where + Self: for<'graph> GraphPredecessors<'graph, Item = ::Node>, { - iter: ::std::iter::Enumerate<::std::slice::Iter<'g, Edge>> + fn predecessors<'graph>( + &'graph self, + node: Self::Node, + ) -> >::Iter; } -impl<'g, E: Debug> Iterator for EnumeratedEdges<'g, E> { - type Item = (EdgeIndex, &'g Edge); - - fn next(&mut self) -> Option<(EdgeIndex, &'g Edge)> { - self.iter.next().map(|(idx, e)| (EdgeIndex(idx), e)) - } +pub trait GraphPredecessors<'graph> { + type Item; + type Iter: Iterator; 
} -pub struct AdjacentEdges<'g, N, E> - where N: 'g, - E: 'g +pub trait WithStartNode: DirectedGraph { + fn start_node(&self) -> Self::Node; +} + +pub trait ControlFlowGraph: + DirectedGraph + WithStartNode + WithPredecessors + WithStartNode + WithSuccessors + WithNumNodes { - graph: &'g Graph, - direction: Direction, - next: EdgeIndex, + // convenient trait } -impl<'g, N, E> AdjacentEdges<'g, N, E> { - fn targets(self) -> AdjacentTargets<'g, N, E> { - AdjacentTargets { edges: self } - } - - fn sources(self) -> AdjacentSources<'g, N, E> { - AdjacentSources { edges: self } - } -} - -impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> { - type Item = (EdgeIndex, &'g Edge); - - fn next(&mut self) -> Option<(EdgeIndex, &'g Edge)> { - let edge_index = self.next; - if edge_index == INVALID_EDGE_INDEX { - return None; - } - - let edge = self.graph.edge(edge_index); - self.next = edge.next_edge[self.direction.repr]; - Some((edge_index, edge)) - } -} - -pub struct AdjacentTargets<'g, N, E> - where N: 'g, - E: 'g +impl ControlFlowGraph for T +where + T: DirectedGraph + + WithStartNode + + WithPredecessors + + WithStartNode + + WithSuccessors + + WithNumNodes, { - edges: AdjacentEdges<'g, N, E>, -} - -impl<'g, N: Debug, E: Debug> Iterator for AdjacentTargets<'g, N, E> { - type Item = NodeIndex; - - fn next(&mut self) -> Option { - self.edges.next().map(|(_, edge)| edge.target) - } -} - -pub struct AdjacentSources<'g, N, E> - where N: 'g, - E: 'g -{ - edges: AdjacentEdges<'g, N, E>, -} - -impl<'g, N: Debug, E: Debug> Iterator for AdjacentSources<'g, N, E> { - type Item = NodeIndex; - - fn next(&mut self) -> Option { - self.edges.next().map(|(_, edge)| edge.source) - } -} - -pub struct DepthFirstTraversal<'g, N, E> - where N: 'g, - E: 'g -{ - graph: &'g Graph, - stack: Vec, - visited: BitVector, - direction: Direction, -} - -impl<'g, N: Debug, E: Debug> DepthFirstTraversal<'g, N, E> { - pub fn with_start_node(graph: &'g Graph, - start_node: NodeIndex, - direction: 
Direction) - -> Self { - let mut visited = BitVector::new(graph.len_nodes()); - visited.insert(start_node.node_id()); - DepthFirstTraversal { - graph, - stack: vec![start_node], - visited, - direction, - } - } - - fn visit(&mut self, node: NodeIndex) { - if self.visited.insert(node.node_id()) { - self.stack.push(node); - } - } -} - -impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> { - type Item = NodeIndex; - - fn next(&mut self) -> Option { - let next = self.stack.pop(); - if let Some(idx) = next { - for (_, edge) in self.graph.adjacent_edges(idx, self.direction) { - let target = edge.source_or_target(self.direction); - self.visit(target); - } - } - next - } -} - -impl Edge { - pub fn source(&self) -> NodeIndex { - self.source - } - - pub fn target(&self) -> NodeIndex { - self.target - } - - pub fn source_or_target(&self, direction: Direction) -> NodeIndex { - if direction == OUTGOING { - self.target - } else { - self.source - } - } } diff --git a/src/librustc_data_structures/graph/reference.rs b/src/librustc_data_structures/graph/reference.rs new file mode 100644 index 000000000000..a7b763db8da2 --- /dev/null +++ b/src/librustc_data_structures/graph/reference.rs @@ -0,0 +1,51 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::*; + +impl<'graph, G: DirectedGraph> DirectedGraph for &'graph G { + type Node = G::Node; +} + +impl<'graph, G: WithNumNodes> WithNumNodes for &'graph G { + fn num_nodes(&self) -> usize { + (**self).num_nodes() + } +} + +impl<'graph, G: WithStartNode> WithStartNode for &'graph G { + fn start_node(&self) -> Self::Node { + (**self).start_node() + } +} + +impl<'graph, G: WithSuccessors> WithSuccessors for &'graph G { + fn successors<'iter>(&'iter self, node: Self::Node) -> >::Iter { + (**self).successors(node) + } +} + +impl<'graph, G: WithPredecessors> WithPredecessors for &'graph G { + fn predecessors<'iter>(&'iter self, + node: Self::Node) + -> >::Iter { + (**self).predecessors(node) + } +} + +impl<'iter, 'graph, G: WithPredecessors> GraphPredecessors<'iter> for &'graph G { + type Item = G::Node; + type Iter = >::Iter; +} + +impl<'iter, 'graph, G: WithSuccessors> GraphSuccessors<'iter> for &'graph G { + type Item = G::Node; + type Iter = >::Iter; +} diff --git a/src/librustc_data_structures/graph/scc/mod.rs b/src/librustc_data_structures/graph/scc/mod.rs new file mode 100644 index 000000000000..a989a5401022 --- /dev/null +++ b/src/librustc_data_structures/graph/scc/mod.rs @@ -0,0 +1,361 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Routine to compute the strongly connected components (SCCs) of a +//! graph, as well as the resulting DAG if each SCC is replaced with a +//! node in the graph. This uses Tarjan's algorithm that completes in +//! O(n) time. 
+ +use fx::FxHashSet; +use graph::{DirectedGraph, WithNumNodes, WithSuccessors}; +use indexed_vec::{Idx, IndexVec}; +use std::ops::Range; + +mod test; + +/// Strongly connected components (SCC) of a graph. The type `N` is +/// the index type for the graph nodes and `S` is the index type for +/// the SCCs. We can map from each node to the SCC that it +/// participates in, and we also have the successors of each SCC. +pub struct Sccs { + /// For each node, what is the SCC index of the SCC to which it + /// belongs. + scc_indices: IndexVec, + + /// Data about each SCC. + scc_data: SccData, +} + +struct SccData { + /// For each SCC, the range of `all_successors` where its + /// successors can be found. + ranges: IndexVec>, + + /// Contains the succcessors for all the Sccs, concatenated. The + /// range of indices corresponding to a given SCC is found in its + /// SccData. + all_successors: Vec, +} + +impl Sccs { + pub fn new(graph: &(impl DirectedGraph + WithNumNodes + WithSuccessors)) -> Self { + SccsConstruction::construct(graph) + } + + /// Returns the number of SCCs in the graph. + pub fn num_sccs(&self) -> usize { + self.scc_data.len() + } + + /// Returns an iterator over the SCCs in the graph. + pub fn all_sccs(&self) -> impl Iterator { + (0 .. self.scc_data.len()).map(S::new) + } + + /// Returns the SCC to which a node `r` belongs. + pub fn scc(&self, r: N) -> S { + self.scc_indices[r] + } + + /// Returns the successors of the given SCC. + pub fn successors(&self, scc: S) -> &[S] { + self.scc_data.successors(scc) + } +} + +impl SccData { + /// Number of SCCs, + fn len(&self) -> usize { + self.ranges.len() + } + + /// Returns the successors of the given SCC. 
+ fn successors(&self, scc: S) -> &[S] { + // Annoyingly, `range` does not implement `Copy`, so we have + // to do `range.start..range.end`: + let range = &self.ranges[scc]; + &self.all_successors[range.start..range.end] + } + + /// Creates a new SCC with `successors` as its successors and + /// returns the resulting index. + fn create_scc(&mut self, successors: impl IntoIterator) -> S { + // Store the successors on `scc_successors_vec`, remembering + // the range of indices. + let all_successors_start = self.all_successors.len(); + self.all_successors.extend(successors); + let all_successors_end = self.all_successors.len(); + + debug!( + "create_scc({:?}) successors={:?}", + self.ranges.len(), + &self.all_successors[all_successors_start..all_successors_end], + ); + + self.ranges.push(all_successors_start..all_successors_end) + } +} + +struct SccsConstruction<'c, G: DirectedGraph + WithNumNodes + WithSuccessors + 'c, S: Idx> { + graph: &'c G, + + /// The state of each node; used during walk to record the stack + /// and after walk to record what cycle each node ended up being + /// in. + node_states: IndexVec>, + + /// The stack of nodes that we are visiting as part of the DFS. + node_stack: Vec, + + /// The stack of successors: as we visit a node, we mark our + /// position in this stack, and when we encounter a successor SCC, + /// we push it on the stack. When we complete an SCC, we can pop + /// everything off the stack that was found along the way. + successors_stack: Vec, + + /// A set used to strip duplicates. As we accumulate successors + /// into the successors_stack, we sometimes get duplicate entries. + /// We use this set to remove those -- we also keep its storage + /// around between successors to amortize memory allocation costs. + duplicate_set: FxHashSet, + + scc_data: SccData, +} + +#[derive(Copy, Clone, Debug)] +enum NodeState { + /// This node has not yet been visited as part of the DFS. 
+ /// + /// After SCC construction is complete, this state ought to be + /// impossible. + NotVisited, + + /// This node is currently being walk as part of our DFS. It is on + /// the stack at the depth `depth`. + /// + /// After SCC construction is complete, this state ought to be + /// impossible. + BeingVisited { depth: usize }, + + /// Indicates that this node is a member of the given cycle. + InCycle { scc_index: S }, + + /// Indicates that this node is a member of whatever cycle + /// `parent` is a member of. This state is transient: whenever we + /// see it, we try to overwrite it with the current state of + /// `parent` (this is the "path compression" step of a union-find + /// algorithm). + InCycleWith { parent: N }, +} + +#[derive(Copy, Clone, Debug)] +enum WalkReturn { + Cycle { min_depth: usize }, + Complete { scc_index: S }, +} + +impl<'c, G, S> SccsConstruction<'c, G, S> +where + G: DirectedGraph + WithNumNodes + WithSuccessors, + S: Idx, +{ + /// Identifies SCCs in the graph `G` and computes the resulting + /// DAG. This uses a variant of [Tarjan's + /// algorithm][wikipedia]. The high-level summary of the algorithm + /// is that we do a depth-first search. Along the way, we keep a + /// stack of each node whose successors are being visited. We + /// track the depth of each node on this stack (there is no depth + /// if the node is not on the stack). When we find that some node + /// N with depth D can reach some other node N' with lower depth + /// D' (i.e., D' < D), we know that N, N', and all nodes in + /// between them on the stack are part of an SCC. 
+ /// + /// [wikipedia]: https://bit.ly/2EZIx84 + fn construct(graph: &'c G) -> Sccs { + let num_nodes = graph.num_nodes(); + + let mut this = Self { + graph, + node_states: IndexVec::from_elem_n(NodeState::NotVisited, num_nodes), + node_stack: Vec::with_capacity(num_nodes), + successors_stack: Vec::new(), + scc_data: SccData { + ranges: IndexVec::new(), + all_successors: Vec::new(), + }, + duplicate_set: FxHashSet::default(), + }; + + let scc_indices = (0..num_nodes) + .map(G::Node::new) + .map(|node| match this.walk_node(0, node) { + WalkReturn::Complete { scc_index } => scc_index, + WalkReturn::Cycle { min_depth } => panic!( + "`walk_node(0, {:?})` returned cycle with depth {:?}", + node, min_depth + ), + }) + .collect(); + + Sccs { + scc_indices, + scc_data: this.scc_data, + } + } + + /// Visit a node during the DFS. We first examine its current + /// state -- if it is not yet visited (`NotVisited`), we can push + /// it onto the stack and start walking its successors. + /// + /// If it is already on the DFS stack it will be in the state + /// `BeingVisited`. In that case, we have found a cycle and we + /// return the depth from the stack. + /// + /// Otherwise, we are looking at a node that has already been + /// completely visited. We therefore return `WalkReturn::Complete` + /// with its associated SCC index. + fn walk_node(&mut self, depth: usize, node: G::Node) -> WalkReturn { + debug!("walk_node(depth = {:?}, node = {:?})", depth, node); + match self.find_state(node) { + NodeState::InCycle { scc_index } => WalkReturn::Complete { scc_index }, + + NodeState::BeingVisited { depth: min_depth } => WalkReturn::Cycle { min_depth }, + + NodeState::NotVisited => self.walk_unvisited_node(depth, node), + + NodeState::InCycleWith { parent } => panic!( + "`find_state` returned `InCycleWith({:?})`, which ought to be impossible", + parent + ), + } + } + + /// Fetches the state of the node `r`. 
If `r` is recorded as being + /// in a cycle with some other node `r2`, then fetches the state + /// of `r2` (and updates `r` to reflect current result). This is + /// basically the "find" part of a standard union-find algorithm + /// (with path compression). + fn find_state(&mut self, r: G::Node) -> NodeState { + debug!("find_state(r = {:?} in state {:?})", r, self.node_states[r]); + match self.node_states[r] { + NodeState::InCycle { scc_index } => NodeState::InCycle { scc_index }, + NodeState::BeingVisited { depth } => NodeState::BeingVisited { depth }, + NodeState::NotVisited => NodeState::NotVisited, + NodeState::InCycleWith { parent } => { + let parent_state = self.find_state(parent); + debug!("find_state: parent_state = {:?}", parent_state); + match parent_state { + NodeState::InCycle { .. } => { + self.node_states[r] = parent_state; + parent_state + } + + NodeState::BeingVisited { depth } => { + self.node_states[r] = NodeState::InCycleWith { + parent: self.node_stack[depth], + }; + parent_state + } + + NodeState::NotVisited | NodeState::InCycleWith { .. } => { + panic!("invalid parent state: {:?}", parent_state) + } + } + } + } + } + + /// Walks a node that has never been visited before. + fn walk_unvisited_node(&mut self, depth: usize, node: G::Node) -> WalkReturn { + debug!( + "walk_unvisited_node(depth = {:?}, node = {:?})", + depth, node + ); + + debug_assert!(match self.node_states[node] { + NodeState::NotVisited => true, + _ => false, + }); + + // Push `node` onto the stack. + self.node_states[node] = NodeState::BeingVisited { depth }; + self.node_stack.push(node); + + // Walk each successor of the node, looking to see if any of + // them can reach a node that is presently on the stack. If + // so, that means they can also reach us. 
+ let mut min_depth = depth; + let mut min_cycle_root = node; + let successors_len = self.successors_stack.len(); + for successor_node in self.graph.successors(node) { + debug!( + "walk_unvisited_node: node = {:?} successor_node = {:?}", + node, successor_node + ); + match self.walk_node(depth + 1, successor_node) { + WalkReturn::Cycle { + min_depth: successor_min_depth, + } => { + // Track the minimum depth we can reach. + assert!(successor_min_depth <= depth); + if successor_min_depth < min_depth { + debug!( + "walk_unvisited_node: node = {:?} successor_min_depth = {:?}", + node, successor_min_depth + ); + min_depth = successor_min_depth; + min_cycle_root = successor_node; + } + } + + WalkReturn::Complete { + scc_index: successor_scc_index, + } => { + // Push the completed SCC indices onto + // the `successors_stack` for later. + debug!( + "walk_unvisited_node: node = {:?} successor_scc_index = {:?}", + node, successor_scc_index + ); + self.successors_stack.push(successor_scc_index); + } + } + } + + // Completed walk, remove `node` from the stack. + let r = self.node_stack.pop(); + debug_assert_eq!(r, Some(node)); + + // If `min_depth == depth`, then we are the root of the + // cycle: we can't reach anyone further down the stack. + if min_depth == depth { + // Note that successor stack may have duplicates, so we + // want to remove those: + let deduplicated_successors = { + let duplicate_set = &mut self.duplicate_set; + duplicate_set.clear(); + self.successors_stack + .drain(successors_len..) + .filter(move |&i| duplicate_set.insert(i)) + }; + let scc_index = self.scc_data.create_scc(deduplicated_successors); + self.node_states[node] = NodeState::InCycle { scc_index }; + WalkReturn::Complete { scc_index } + } else { + // We are not the head of the cycle. Return back to our + // caller. They will take ownership of the + // `self.successors` data that we pushed. 
+ self.node_states[node] = NodeState::InCycleWith { + parent: min_cycle_root, + }; + WalkReturn::Cycle { min_depth } + } + } +} diff --git a/src/librustc_data_structures/graph/scc/test.rs b/src/librustc_data_structures/graph/scc/test.rs new file mode 100644 index 000000000000..405e1b3a6174 --- /dev/null +++ b/src/librustc_data_structures/graph/scc/test.rs @@ -0,0 +1,180 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![cfg(test)] + +use graph::test::TestGraph; +use super::*; + +#[test] +fn diamond() { + let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]); + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), 4); + assert_eq!(sccs.num_sccs(), 4); +} + +#[test] +fn test_big_scc() { + // The order in which things will be visited is important to this + // test. + // + // We will visit: + // + // 0 -> 1 -> 2 -> 0 + // + // and at this point detect a cycle. 2 will return back to 1 which + // will visit 3. 3 will visit 2 before the cycle is complete, and + // hence it too will return a cycle. 
+ + /* ++-> 0 +| | +| v +| 1 -> 3 +| | | +| v | ++-- 2 <--+ + */ + let graph = TestGraph::new(0, &[ + (0, 1), + (1, 2), + (1, 3), + (2, 0), + (3, 2), + ]); + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), 1); +} + +#[test] +fn test_three_sccs() { + /* + 0 + | + v ++-> 1 3 +| | | +| v | ++-- 2 <--+ + */ + let graph = TestGraph::new(0, &[ + (0, 1), + (1, 2), + (2, 1), + (3, 2), + ]); + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), 3); + assert_eq!(sccs.scc(0), 1); + assert_eq!(sccs.scc(1), 0); + assert_eq!(sccs.scc(2), 0); + assert_eq!(sccs.scc(3), 2); + assert_eq!(sccs.successors(0), &[]); + assert_eq!(sccs.successors(1), &[0]); + assert_eq!(sccs.successors(2), &[0]); +} + +#[test] +fn test_find_state_2() { + // The order in which things will be visited is important to this + // test. It tests part of the `find_state` behavior. Here is the + // graph: + // + // + // /----+ + // 0 <--+ | + // | | | + // v | | + // +-> 1 -> 3 4 + // | | | + // | v | + // +-- 2 <----+ + + let graph = TestGraph::new(0, &[ + (0, 1), + (0, 4), + (1, 2), + (1, 3), + (2, 1), + (3, 0), + (4, 2), + ]); + + // For this graph, we will start in our DFS by visiting: + // + // 0 -> 1 -> 2 -> 1 + // + // and at this point detect a cycle. The state of 2 will thus be + // `InCycleWith { 1 }`. We will then visit the 1 -> 3 edge, which + // will attempt to visit 0 as well, thus going to the state + // `InCycleWith { 0 }`. Finally, node 1 will complete; the lowest + // depth of any successor was 3 which had depth 0, and thus it + // will be in the state `InCycleWith { 3 }`. + // + // When we finally traverse the `0 -> 4` edge and then visit node 2, + // the states of the nodes are: + // + // 0 BeingVisited { 0 } + // 1 InCycleWith { 3 } + // 2 InCycleWith { 1 } + // 3 InCycleWith { 0 } + // + // and hence 4 will traverse the links, finding an ultimate depth of 0. 
+ // It will also collapse the states to the following: + // + // 0 BeingVisited { 0 } + // 1 InCycleWith { 3 } + // 2 InCycleWith { 1 } + // 3 InCycleWith { 0 } + + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), 1); + assert_eq!(sccs.scc(0), 0); + assert_eq!(sccs.scc(1), 0); + assert_eq!(sccs.scc(2), 0); + assert_eq!(sccs.scc(3), 0); + assert_eq!(sccs.scc(4), 0); + assert_eq!(sccs.successors(0), &[]); +} + +#[test] +fn test_find_state_3() { + /* + /----+ + 0 <--+ | + | | | + v | | ++-> 1 -> 3 4 5 +| | | | +| v | | ++-- 2 <----+-+ + */ + let graph = TestGraph::new(0, &[ + (0, 1), + (0, 4), + (1, 2), + (1, 3), + (2, 1), + (3, 0), + (4, 2), + (5, 2), + ]); + let sccs: Sccs<_, usize> = Sccs::new(&graph); + assert_eq!(sccs.num_sccs(), 2); + assert_eq!(sccs.scc(0), 0); + assert_eq!(sccs.scc(1), 0); + assert_eq!(sccs.scc(2), 0); + assert_eq!(sccs.scc(3), 0); + assert_eq!(sccs.scc(4), 0); + assert_eq!(sccs.scc(5), 1); + assert_eq!(sccs.successors(0), &[]); + assert_eq!(sccs.successors(1), &[0]); +} diff --git a/src/librustc_data_structures/graph/test.rs b/src/librustc_data_structures/graph/test.rs new file mode 100644 index 000000000000..b72d011c99ba --- /dev/null +++ b/src/librustc_data_structures/graph/test.rs @@ -0,0 +1,85 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::collections::HashMap; +use std::cmp::max; +use std::slice; +use std::iter; + +use super::*; + +pub struct TestGraph { + num_nodes: usize, + start_node: usize, + successors: HashMap>, + predecessors: HashMap>, +} + +impl TestGraph { + pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self { + let mut graph = TestGraph { + num_nodes: start_node + 1, + start_node, + successors: HashMap::new(), + predecessors: HashMap::new(), + }; + for &(source, target) in edges { + graph.num_nodes = max(graph.num_nodes, source + 1); + graph.num_nodes = max(graph.num_nodes, target + 1); + graph.successors.entry(source).or_default().push(target); + graph.predecessors.entry(target).or_default().push(source); + } + for node in 0..graph.num_nodes { + graph.successors.entry(node).or_default(); + graph.predecessors.entry(node).or_default(); + } + graph + } +} + +impl DirectedGraph for TestGraph { + type Node = usize; +} + +impl WithStartNode for TestGraph { + fn start_node(&self) -> usize { + self.start_node + } +} + +impl WithNumNodes for TestGraph { + fn num_nodes(&self) -> usize { + self.num_nodes + } +} + +impl WithPredecessors for TestGraph { + fn predecessors<'graph>(&'graph self, + node: usize) + -> >::Iter { + self.predecessors[&node].iter().cloned() + } +} + +impl WithSuccessors for TestGraph { + fn successors<'graph>(&'graph self, node: usize) -> >::Iter { + self.successors[&node].iter().cloned() + } +} + +impl<'graph> GraphPredecessors<'graph> for TestGraph { + type Item = usize; + type Iter = iter::Cloned>; +} + +impl<'graph> GraphSuccessors<'graph> for TestGraph { + type Item = usize; + type Iter = iter::Cloned>; +} diff --git a/src/librustc_data_structures/graph/tests.rs b/src/librustc_data_structures/graph/tests.rs deleted file mode 100644 index 007704357af4..000000000000 --- a/src/librustc_data_structures/graph/tests.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use graph::*; -use std::fmt::Debug; - -type TestGraph = Graph<&'static str, &'static str>; - -fn create_graph() -> TestGraph { - let mut graph = Graph::new(); - - // Create a simple graph - // - // F - // | - // V - // A --> B --> C - // | ^ - // v | - // D --> E - - let a = graph.add_node("A"); - let b = graph.add_node("B"); - let c = graph.add_node("C"); - let d = graph.add_node("D"); - let e = graph.add_node("E"); - let f = graph.add_node("F"); - - graph.add_edge(a, b, "AB"); - graph.add_edge(b, c, "BC"); - graph.add_edge(b, d, "BD"); - graph.add_edge(d, e, "DE"); - graph.add_edge(e, c, "EC"); - graph.add_edge(f, b, "FB"); - - return graph; -} - -#[test] -fn each_node() { - let graph = create_graph(); - let expected = ["A", "B", "C", "D", "E", "F"]; - graph.each_node(|idx, node| { - assert_eq!(&expected[idx.0], graph.node_data(idx)); - assert_eq!(expected[idx.0], node.data); - true - }); -} - -#[test] -fn each_edge() { - let graph = create_graph(); - let expected = ["AB", "BC", "BD", "DE", "EC", "FB"]; - graph.each_edge(|idx, edge| { - assert_eq!(expected[idx.0], edge.data); - true - }); -} - -fn test_adjacent_edges(graph: &Graph, - start_index: NodeIndex, - start_data: N, - expected_incoming: &[(E, N)], - expected_outgoing: &[(E, N)]) { - assert!(graph.node_data(start_index) == &start_data); - - let mut counter = 0; - for (edge_index, edge) in graph.incoming_edges(start_index) { - assert!(counter < expected_incoming.len()); - debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}", - counter, - expected_incoming[counter], - edge_index, - edge); - match expected_incoming[counter] { - (ref e, ref n) => { - assert!(e == &edge.data); - assert!(n == 
graph.node_data(edge.source())); - assert!(start_index == edge.target); - } - } - counter += 1; - } - assert_eq!(counter, expected_incoming.len()); - - let mut counter = 0; - for (edge_index, edge) in graph.outgoing_edges(start_index) { - assert!(counter < expected_outgoing.len()); - debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}", - counter, - expected_outgoing[counter], - edge_index, - edge); - match expected_outgoing[counter] { - (ref e, ref n) => { - assert!(e == &edge.data); - assert!(start_index == edge.source); - assert!(n == graph.node_data(edge.target)); - } - } - counter += 1; - } - assert_eq!(counter, expected_outgoing.len()); -} - -#[test] -fn each_adjacent_from_a() { - let graph = create_graph(); - test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]); -} - -#[test] -fn each_adjacent_from_b() { - let graph = create_graph(); - test_adjacent_edges(&graph, - NodeIndex(1), - "B", - &[("FB", "F"), ("AB", "A")], - &[("BD", "D"), ("BC", "C")]); -} - -#[test] -fn each_adjacent_from_c() { - let graph = create_graph(); - test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]); -} - -#[test] -fn each_adjacent_from_d() { - let graph = create_graph(); - test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]); -} diff --git a/src/librustc_data_structures/indexed_set.rs b/src/librustc_data_structures/indexed_set.rs index 223e08de826c..a7672d1ffe89 100644 --- a/src/librustc_data_structures/indexed_set.rs +++ b/src/librustc_data_structures/indexed_set.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use array_vec::ArrayVec; use std::borrow::{Borrow, BorrowMut, ToOwned}; use std::fmt; use std::iter; @@ -25,6 +26,8 @@ use rustc_serialize; /// /// In other words, `T` is the type used to index into the bitvector /// this type uses to represent the set of object it holds. 
+/// +/// The representation is dense, using one bit per possible element. #[derive(Eq, PartialEq)] pub struct IdxSetBuf { _pd: PhantomData, @@ -59,16 +62,13 @@ impl rustc_serialize::Decodable for IdxSetBuf { // pnkfelix wants to have this be `IdxSet([Word]) and then pass // around `&mut IdxSet` or `&IdxSet`. -// -// WARNING: Mapping a `&IdxSetBuf` to `&IdxSet` (at least today) -// requires a transmute relying on representation guarantees that may -// not hold in the future. /// Represents a set (or packed family of sets), of some element type /// E, where each E is identified by some unique index type `T`. /// /// In other words, `T` is the type used to index into the bitslice /// this type uses to represent the set of object it holds. +#[repr(transparent)] pub struct IdxSet { _pd: PhantomData, bits: [Word], @@ -93,6 +93,8 @@ impl ToOwned for IdxSet { } } +const BITS_PER_WORD: usize = mem::size_of::() * 8; + impl fmt::Debug for IdxSetBuf { fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { w.debug_list() @@ -111,8 +113,7 @@ impl fmt::Debug for IdxSet { impl IdxSetBuf { fn new(init: Word, universe_size: usize) -> Self { - let bits_per_word = mem::size_of::() * 8; - let num_words = (universe_size + (bits_per_word - 1)) / bits_per_word; + let num_words = (universe_size + (BITS_PER_WORD - 1)) / BITS_PER_WORD; IdxSetBuf { _pd: Default::default(), bits: vec![init; num_words], @@ -121,7 +122,9 @@ impl IdxSetBuf { /// Creates set holding every element whose index falls in range 0..universe_size. pub fn new_filled(universe_size: usize) -> Self { - Self::new(!0, universe_size) + let mut result = Self::new(!0, universe_size); + result.trim_to(universe_size); + result } /// Creates set holding no elements. 
@@ -132,11 +135,11 @@ impl IdxSetBuf { impl IdxSet { unsafe fn from_slice(s: &[Word]) -> &Self { - mem::transmute(s) // (see above WARNING) + &*(s as *const [Word] as *const Self) } unsafe fn from_slice_mut(s: &mut [Word]) -> &mut Self { - mem::transmute(s) // (see above WARNING) + &mut *(s as *mut [Word] as *mut Self) } } @@ -161,6 +164,16 @@ impl IdxSet { } } + /// Duplicates as a hybrid set. + pub fn to_hybrid(&self) -> HybridIdxSetBuf { + // This universe_size may be slightly larger than the one specified + // upon creation, due to rounding up to a whole word. That's ok. + let universe_size = self.bits.len() * BITS_PER_WORD; + + // Note: we currently don't bother trying to make a Sparse set. + HybridIdxSetBuf::Dense(self.to_owned(), universe_size) + } + /// Removes all elements pub fn clear(&mut self) { for b in &mut self.bits { @@ -168,6 +181,34 @@ impl IdxSet { } } + /// Sets all elements up to `universe_size` + pub fn set_up_to(&mut self, universe_size: usize) { + for b in &mut self.bits { + *b = !0; + } + self.trim_to(universe_size); + } + + /// Clear all elements above `universe_size`. + fn trim_to(&mut self, universe_size: usize) { + // `trim_block` is the first block where some bits have + // to be cleared. + let trim_block = universe_size / BITS_PER_WORD; + + // all the blocks above it have to be completely cleared. + if trim_block < self.bits.len() { + for b in &mut self.bits[trim_block+1..] { + *b = 0; + } + + // at that block, the `universe_size % BITS_PER_WORD` lsbs + // should remain. + let remaining_bits = universe_size % BITS_PER_WORD; + let mask = (1< bool { self.bits.clear_bit(elem.index()) @@ -201,18 +242,60 @@ impl IdxSet { &mut self.bits } - pub fn clone_from(&mut self, other: &IdxSet) { + /// Efficiently overwrite `self` with `other`. Panics if `self` and `other` + /// don't have the same length. 
+ pub fn overwrite(&mut self, other: &IdxSet) { self.words_mut().clone_from_slice(other.words()); } + /// Set `self = self | other` and return true if `self` changed + /// (i.e., if new bits were added). pub fn union(&mut self, other: &IdxSet) -> bool { bitwise(self.words_mut(), other.words(), &Union) } + /// Like `union()`, but takes a `SparseIdxSetBuf` argument. + fn union_sparse(&mut self, other: &SparseIdxSetBuf) -> bool { + let mut changed = false; + for elem in other.iter() { + changed |= self.add(&elem); + } + changed + } + + /// Like `union()`, but takes a `HybridIdxSetBuf` argument. + pub fn union_hybrid(&mut self, other: &HybridIdxSetBuf) -> bool { + match other { + HybridIdxSetBuf::Sparse(sparse, _) => self.union_sparse(sparse), + HybridIdxSetBuf::Dense(dense, _) => self.union(dense), + } + } + + /// Set `self = self - other` and return true if `self` changed. + /// (i.e., if any bits were removed). pub fn subtract(&mut self, other: &IdxSet) -> bool { bitwise(self.words_mut(), other.words(), &Subtract) } + /// Like `subtract()`, but takes a `SparseIdxSetBuf` argument. + fn subtract_sparse(&mut self, other: &SparseIdxSetBuf) -> bool { + let mut changed = false; + for elem in other.iter() { + changed |= self.remove(&elem); + } + changed + } + + /// Like `subtract()`, but takes a `HybridIdxSetBuf` argument. + pub fn subtract_hybrid(&mut self, other: &HybridIdxSetBuf) -> bool { + match other { + HybridIdxSetBuf::Sparse(sparse, _) => self.subtract_sparse(sparse), + HybridIdxSetBuf::Dense(dense, _) => self.subtract(dense), + } + } + + /// Set `self = self & other` and return true if `self` changed. + /// (i.e., if any bits were removed). pub fn intersect(&mut self, other: &IdxSet) -> bool { bitwise(self.words_mut(), other.words(), &Intersect) } @@ -224,70 +307,6 @@ impl IdxSet { _pd: PhantomData, } } - - /// Calls `f` on each index value held in this set, up to the - /// bound `max_bits` on the size of universe of indexes. 
- pub fn each_bit(&self, max_bits: usize, f: F) where F: FnMut(T) { - each_bit(self, max_bits, f) - } - - /// Removes all elements from this set. - pub fn reset_to_empty(&mut self) { - for word in self.words_mut() { *word = 0; } - } - - pub fn elems(&self, universe_size: usize) -> Elems { - Elems { i: 0, set: self, universe_size: universe_size } - } -} - -pub struct Elems<'a, T: Idx> { i: usize, set: &'a IdxSet, universe_size: usize } - -impl<'a, T: Idx> Iterator for Elems<'a, T> { - type Item = T; - fn next(&mut self) -> Option { - if self.i >= self.universe_size { return None; } - let mut i = self.i; - loop { - if i >= self.universe_size { - self.i = i; // (mark iteration as complete.) - return None; - } - if self.set.contains(&T::new(i)) { - self.i = i + 1; // (next element to start at.) - return Some(T::new(i)); - } - i = i + 1; - } - } -} - -fn each_bit(words: &IdxSet, max_bits: usize, mut f: F) where F: FnMut(T) { - let usize_bits: usize = mem::size_of::() * 8; - - for (word_index, &word) in words.words().iter().enumerate() { - if word != 0 { - let base_index = word_index * usize_bits; - for offset in 0..usize_bits { - let bit = 1 << offset; - if (word & bit) != 0 { - // NB: we round up the total number of bits - // that we store in any given bit set so that - // it is an even multiple of usize::BITS. This - // means that there may be some stray bits at - // the end that do not correspond to any - // actual value; that's why we first check - // that we are in range of bits_per_block. 
- let bit_index = base_index + offset as usize; - if bit_index >= max_bits { - return; - } else { - f(Idx::new(bit_index)); - } - } - } - } - } } pub struct Iter<'a, T: Idx> { @@ -300,11 +319,10 @@ impl<'a, T: Idx> Iterator for Iter<'a, T> { type Item = T; fn next(&mut self) -> Option { - let word_bits = mem::size_of::() * 8; loop { if let Some((ref mut word, offset)) = self.cur { let bit_pos = word.trailing_zeros() as usize; - if bit_pos != word_bits { + if bit_pos != BITS_PER_WORD { let bit = 1 << bit_pos; *word ^= bit; return Some(T::new(bit_pos + offset)) @@ -312,7 +330,229 @@ impl<'a, T: Idx> Iterator for Iter<'a, T> { } let (i, word) = self.iter.next()?; - self.cur = Some((*word, word_bits * i)); + self.cur = Some((*word, BITS_PER_WORD * i)); } } } + +const SPARSE_MAX: usize = 8; + +/// A sparse index set with a maximum of SPARSE_MAX elements. Used by +/// HybridIdxSetBuf; do not use directly. +/// +/// The elements are stored as an unsorted vector with no duplicates. +#[derive(Clone, Debug)] +pub struct SparseIdxSetBuf(ArrayVec<[T; SPARSE_MAX]>); + +impl SparseIdxSetBuf { + fn new() -> Self { + SparseIdxSetBuf(ArrayVec::new()) + } + + fn len(&self) -> usize { + self.0.len() + } + + fn contains(&self, elem: &T) -> bool { + self.0.contains(elem) + } + + fn add(&mut self, elem: &T) -> bool { + // Ensure there are no duplicates. + if self.0.contains(elem) { + false + } else { + self.0.push(*elem); + true + } + } + + fn remove(&mut self, elem: &T) -> bool { + if let Some(i) = self.0.iter().position(|e| e == elem) { + // Swap the found element to the end, then pop it. 
+ let len = self.0.len(); + self.0.swap(i, len - 1); + self.0.pop(); + true + } else { + false + } + } + + fn to_dense(&self, universe_size: usize) -> IdxSetBuf { + let mut dense = IdxSetBuf::new_empty(universe_size); + for elem in self.0.iter() { + dense.add(elem); + } + dense + } + + fn iter(&self) -> SparseIter { + SparseIter { + iter: self.0.iter(), + } + } +} + +pub struct SparseIter<'a, T: Idx> { + iter: slice::Iter<'a, T>, +} + +impl<'a, T: Idx> Iterator for SparseIter<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + self.iter.next().map(|e| *e) + } +} + +/// Like IdxSetBuf, but with a hybrid representation: sparse when there are few +/// elements in the set, but dense when there are many. It's especially +/// efficient for sets that typically have a small number of elements, but a +/// large `universe_size`, and are cleared frequently. +#[derive(Clone, Debug)] +pub enum HybridIdxSetBuf { + Sparse(SparseIdxSetBuf, usize), + Dense(IdxSetBuf, usize), +} + +impl HybridIdxSetBuf { + pub fn new_empty(universe_size: usize) -> Self { + HybridIdxSetBuf::Sparse(SparseIdxSetBuf::new(), universe_size) + } + + fn universe_size(&mut self) -> usize { + match *self { + HybridIdxSetBuf::Sparse(_, size) => size, + HybridIdxSetBuf::Dense(_, size) => size, + } + } + + pub fn clear(&mut self) { + let universe_size = self.universe_size(); + *self = HybridIdxSetBuf::new_empty(universe_size); + } + + /// Returns true iff set `self` contains `elem`. + pub fn contains(&self, elem: &T) -> bool { + match self { + HybridIdxSetBuf::Sparse(sparse, _) => sparse.contains(elem), + HybridIdxSetBuf::Dense(dense, _) => dense.contains(elem), + } + } + + /// Adds `elem` to the set `self`. + pub fn add(&mut self, elem: &T) -> bool { + match self { + HybridIdxSetBuf::Sparse(sparse, _) if sparse.len() < SPARSE_MAX => { + // The set is sparse and has space for `elem`. 
+ sparse.add(elem) + } + HybridIdxSetBuf::Sparse(sparse, _) if sparse.contains(elem) => { + // The set is sparse and does not have space for `elem`, but + // that doesn't matter because `elem` is already present. + false + } + HybridIdxSetBuf::Sparse(_, _) => { + // The set is sparse and full. Convert to a dense set. + // + // FIXME: This code is awful, but I can't work out how else to + // appease the borrow checker. + let dummy = HybridIdxSetBuf::Sparse(SparseIdxSetBuf::new(), 0); + match mem::replace(self, dummy) { + HybridIdxSetBuf::Sparse(sparse, universe_size) => { + let mut dense = sparse.to_dense(universe_size); + let changed = dense.add(elem); + assert!(changed); + mem::replace(self, HybridIdxSetBuf::Dense(dense, universe_size)); + changed + } + _ => panic!("impossible"), + } + } + + HybridIdxSetBuf::Dense(dense, _) => dense.add(elem), + } + } + + /// Removes `elem` from the set `self`. + pub fn remove(&mut self, elem: &T) -> bool { + // Note: we currently don't bother going from Dense back to Sparse. + match self { + HybridIdxSetBuf::Sparse(sparse, _) => sparse.remove(elem), + HybridIdxSetBuf::Dense(dense, _) => dense.remove(elem), + } + } + + /// Converts to a dense set, consuming itself in the process. + pub fn to_dense(self) -> IdxSetBuf { + match self { + HybridIdxSetBuf::Sparse(sparse, universe_size) => sparse.to_dense(universe_size), + HybridIdxSetBuf::Dense(dense, _) => dense, + } + } + + /// Iteration order is unspecified. 
+ pub fn iter(&self) -> HybridIter { + match self { + HybridIdxSetBuf::Sparse(sparse, _) => HybridIter::Sparse(sparse.iter()), + HybridIdxSetBuf::Dense(dense, _) => HybridIter::Dense(dense.iter()), + } + } +} + +pub enum HybridIter<'a, T: Idx> { + Sparse(SparseIter<'a, T>), + Dense(Iter<'a, T>), +} + +impl<'a, T: Idx> Iterator for HybridIter<'a, T> { + type Item = T; + + fn next(&mut self) -> Option { + match self { + HybridIter::Sparse(sparse) => sparse.next(), + HybridIter::Dense(dense) => dense.next(), + } + } +} + +#[test] +fn test_trim_to() { + use std::cmp; + + for i in 0..256 { + let mut idx_buf: IdxSetBuf = IdxSetBuf::new_filled(128); + idx_buf.trim_to(i); + + let elems: Vec = idx_buf.iter().collect(); + let expected: Vec = (0..cmp::min(i, 128)).collect(); + assert_eq!(elems, expected); + } +} + +#[test] +fn test_set_up_to() { + for i in 0..128 { + for mut idx_buf in + vec![IdxSetBuf::new_empty(128), IdxSetBuf::new_filled(128)] + .into_iter() + { + idx_buf.set_up_to(i); + + let elems: Vec = idx_buf.iter().collect(); + let expected: Vec = (0..i).collect(); + assert_eq!(elems, expected); + } + } +} + +#[test] +fn test_new_filled() { + for i in 0..128 { + let idx_buf = IdxSetBuf::new_filled(i); + let elems: Vec = idx_buf.iter().collect(); + let expected: Vec = (0..i).collect(); + assert_eq!(elems, expected); + } +} diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs index 753f12f400bf..c358f2f852e1 100644 --- a/src/librustc_data_structures/indexed_vec.rs +++ b/src/librustc_data_structures/indexed_vec.rs @@ -8,13 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::collections::range::RangeArgument; use std::fmt::Debug; use std::iter::{self, FromIterator}; use std::slice; use std::marker::PhantomData; -use std::ops::{Index, IndexMut, Range}; +use std::ops::{Index, IndexMut, Range, RangeBounds}; use std::fmt; +use std::hash::Hash; use std::vec; use std::u32; @@ -23,18 +23,28 @@ use rustc_serialize as serialize; /// Represents some newtyped `usize` wrapper. /// /// (purpose: avoid mixing indexes for different bitvector domains.) -pub trait Idx: Copy + 'static + Eq + Debug { +pub trait Idx: Copy + 'static + Ord + Debug + Hash { fn new(idx: usize) -> Self; + fn index(self) -> usize; + + fn increment_by(&mut self, amount: usize) { + let v = self.index() + amount; + *self = Self::new(v); + } } impl Idx for usize { + #[inline] fn new(idx: usize) -> Self { idx } + #[inline] fn index(self) -> usize { self } } impl Idx for u32 { + #[inline] fn new(idx: usize) -> Self { assert!(idx <= u32::MAX as usize); idx as u32 } + #[inline] fn index(self) -> usize { self as usize } } @@ -73,16 +83,47 @@ macro_rules! newtype_index { pub struct $type($($pub)* u32); impl Idx for $type { + #[inline] fn new(value: usize) -> Self { assert!(value < ($max) as usize); $type(value as u32) } + #[inline] fn index(self) -> usize { self.0 as usize } } + impl ::std::iter::Step for $type { + fn steps_between(start: &Self, end: &Self) -> Option { + ::steps_between( + &Idx::index(*start), + &Idx::index(*end), + ) + } + + fn replace_one(&mut self) -> Self { + ::std::mem::replace(self, Self::new(1)) + } + + fn replace_zero(&mut self) -> Self { + ::std::mem::replace(self, Self::new(0)) + } + + fn add_one(&self) -> Self { + Self::new(Idx::index(*self) + 1) + } + + fn sub_one(&self) -> Self { + Self::new(Idx::index(*self) - 1) + } + + fn add_usize(&self, u: usize) -> Option { + Idx::index(*self).checked_add(u).map(Self::new) + } + } + newtype_index!( @handle_debug @derives [$($derives,)*] @@ -204,7 +245,7 @@ macro_rules! 
newtype_index { $($tokens)*); ); - // The case where no derives are added, but encodable is overriden. Don't + // The case where no derives are added, but encodable is overridden. Don't // derive serialization traits (@pub [$($pub:tt)*] @type [$type:ident] @@ -324,7 +365,7 @@ macro_rules! newtype_index { ); } -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Hash)] pub struct IndexVec { pub raw: Vec, _marker: PhantomData @@ -362,6 +403,11 @@ impl IndexVec { IndexVec { raw: Vec::new(), _marker: PhantomData } } + #[inline] + pub fn from_raw(raw: Vec) -> Self { + IndexVec { raw, _marker: PhantomData } + } + #[inline] pub fn with_capacity(capacity: usize) -> Self { IndexVec { raw: Vec::with_capacity(capacity), _marker: PhantomData } @@ -442,13 +488,13 @@ impl IndexVec { } #[inline] - pub fn drain<'a, R: RangeArgument>( + pub fn drain<'a, R: RangeBounds>( &'a mut self, range: R) -> impl Iterator + 'a { self.raw.drain(range) } #[inline] - pub fn drain_enumerated<'a, R: RangeArgument>( + pub fn drain_enumerated<'a, R: RangeBounds>( &'a mut self, range: R) -> impl Iterator + 'a { self.raw.drain(range).enumerate().map(IntoIdx { _marker: PhantomData }) } @@ -464,8 +510,8 @@ impl IndexVec { } #[inline] - pub fn swap(&mut self, a: usize, b: usize) { - self.raw.swap(a, b) + pub fn swap(&mut self, a: I, b: I) { + self.raw.swap(a.index(), b.index()) } #[inline] @@ -482,13 +528,53 @@ impl IndexVec { pub fn get_mut(&mut self, index: I) -> Option<&mut T> { self.raw.get_mut(index.index()) } + + /// Return mutable references to two distinct elements, a and b. Panics if a == b. 
+ #[inline] + pub fn pick2_mut(&mut self, a: I, b: I) -> (&mut T, &mut T) { + let (ai, bi) = (a.index(), b.index()); + assert!(ai != bi); + + if ai < bi { + let (c1, c2) = self.raw.split_at_mut(bi); + (&mut c1[ai], &mut c2[0]) + } else { + let (c2, c1) = self.pick2_mut(b, a); + (c1, c2) + } + } + + pub fn convert_index_type(self) -> IndexVec { + IndexVec { + raw: self.raw, + _marker: PhantomData, + } + } } impl IndexVec { + /// Grows the index vector so that it contains an entry for + /// `elem`; if that is already true, then has no + /// effect. Otherwise, inserts new values as needed by invoking + /// `fill_value`. + #[inline] + pub fn ensure_contains_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) { + let min_new_len = elem.index() + 1; + if self.len() < min_new_len { + self.raw.resize_with(min_new_len, fill_value); + } + } + #[inline] pub fn resize(&mut self, new_len: usize, value: T) { self.raw.resize(new_len, value) } + + #[inline] + pub fn resize_to_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) { + let min_new_len = elem.index() + 1; + self.raw.resize_with(min_new_len, fill_value); + } } impl IndexVec { diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 24048e606df4..5699512326a3 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -19,27 +19,22 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://www.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] -#![feature(shared)] -#![feature(collections_range)] -#![feature(nonzero)] #![feature(unboxed_closures)] #![feature(fn_traits)] #![feature(unsize)] -#![feature(i128_type)] -#![feature(i128)] -#![feature(conservative_impl_trait)] #![feature(specialization)] #![feature(optin_builtin_traits)] -#![feature(underscore_lifetimes)] #![feature(macro_vis_matcher)] +#![cfg_attr(not(stage0), feature(nll))] 
#![feature(allow_internal_unstable)] +#![feature(vec_resize_with)] #![cfg_attr(unix, feature(libc))] #![cfg_attr(test, feature(test))] extern crate core; +extern crate ena; #[macro_use] extern crate log; extern crate serialize as rustc_serialize; // used by deriving @@ -49,33 +44,63 @@ extern crate parking_lot; #[macro_use] extern crate cfg_if; extern crate stable_deref_trait; +extern crate rustc_rayon as rayon; +extern crate rustc_rayon_core as rayon_core; +extern crate rustc_hash; +extern crate serialize; + +// See librustc_cratesio_shim/Cargo.toml for a comment explaining this. +#[allow(unused_extern_crates)] +extern crate rustc_cratesio_shim; pub use rustc_serialize::hex::ToHex; -pub mod array_vec; +pub mod svh; pub mod accumulate_vec; -pub mod small_vec; +pub mod array_vec; pub mod base_n; pub mod bitslice; pub mod bitvec; -pub mod blake2b; +pub mod const_cstr; +pub mod flock; +pub mod fx; pub mod graph; pub mod indexed_set; pub mod indexed_vec; pub mod obligation_forest; -pub mod sip128; -pub mod snapshot_map; -pub mod snapshot_vec; -pub mod stable_hasher; -pub mod transitive_relation; -pub mod unify; -pub mod fx; -pub mod tuple_slice; -pub mod veccell; -pub mod control_flow_graph; -pub mod flock; -pub mod sync; pub mod owning_ref; +pub mod ptr_key; +pub mod sip128; +pub mod small_c_str; +pub mod small_vec; +pub mod snapshot_map; +pub use ena::snapshot_vec; +pub mod sorted_map; +#[macro_use] pub mod stable_hasher; +pub mod sync; +pub mod tiny_list; +pub mod thin_vec; +pub mod transitive_relation; +pub mod tuple_slice; +pub use ena::unify; +pub mod work_queue; +pub mod fingerprint; + +pub struct OnDrop(pub F); + +impl OnDrop { + /// Forgets the function which prevents it from running. + /// Ensure that the function owns no memory, otherwise it will be leaked. 
+ pub fn disable(self) { + std::mem::forget(self); + } +} + +impl Drop for OnDrop { + fn drop(&mut self) { + (self.0)(); + } +} // See comments in src/librustc/lib.rs #[doc(hidden)] diff --git a/src/librustc_data_structures/obligation_forest/mod.rs b/src/librustc_data_structures/obligation_forest/mod.rs index 02cae52166ac..7ef88852685d 100644 --- a/src/librustc_data_structures/obligation_forest/mod.rs +++ b/src/librustc_data_structures/obligation_forest/mod.rs @@ -41,7 +41,7 @@ pub trait ObligationProcessor { fn process_obligation(&mut self, obligation: &mut Self::Obligation) - -> Result>, Self::Error>; + -> ProcessResult; /// As we do the cycle check, we invoke this callback when we /// encounter an actual cycle. `cycle` is an iterator that starts @@ -57,6 +57,14 @@ pub trait ObligationProcessor { where I: Clone + Iterator; } +/// The result type used by `process_obligation`. +#[derive(Debug)] +pub enum ProcessResult { + Unchanged, + Changed(Vec), + Error(E), +} + pub struct ObligationForest { /// The list of obligations. In between calls to /// `process_obligations`, this list only contains nodes in the @@ -75,9 +83,6 @@ pub struct ObligationForest { done_cache: FxHashSet, /// An cache of the nodes in `nodes`, indexed by predicate. waiting_cache: FxHashMap, - /// A list of the obligations added in snapshots, to allow - /// for their removal. - cache_list: Vec, scratch: Option>, } @@ -86,13 +91,14 @@ struct Node { obligation: O, state: Cell, + /// The parent of a node - the original obligation of + /// which it is a subobligation. Except for error reporting, + /// it is just like any member of `dependents`. + parent: Option, + /// Obligations that depend on this obligation for their /// completion. They must all be in a non-pending state. dependents: Vec, - /// The parent of a node - the original obligation of - /// which it is a subobligation. Except for error reporting, - /// this is just another member of `dependents`. 
- parent: Option, } /// The state of one node in some tree within the forest. This @@ -139,8 +145,8 @@ pub struct Outcome { /// If true, then we saw no successful obligations, which means /// there is no point in further iteration. This is based on the - /// assumption that when trait matching returns `Err` or - /// `Ok(None)`, those results do not affect environmental + /// assumption that when trait matching returns `Error` or + /// `Unchanged`, those results do not affect environmental /// inference state. (Note that if we invoke `process_obligations` /// with no pending obligations, stalled will be true.) pub stalled: bool, @@ -158,7 +164,6 @@ impl ObligationForest { nodes: vec![], done_cache: FxHashSet(), waiting_cache: FxHashMap(), - cache_list: vec![], scratch: Some(vec![]), } } @@ -189,15 +194,18 @@ impl ObligationForest { Entry::Occupied(o) => { debug!("register_obligation_at({:?}, {:?}) - duplicate of {:?}!", obligation, parent, o.get()); + let node = &mut self.nodes[o.get().get()]; if let Some(parent) = parent { - if self.nodes[o.get().get()].dependents.contains(&parent) { - debug!("register_obligation_at({:?}, {:?}) - duplicate subobligation", - obligation, parent); - } else { - self.nodes[o.get().get()].dependents.push(parent); + // If the node is already in `waiting_cache`, it's already + // been marked with a parent. (It's possible that parent + // has been cleared by `apply_rewrites`, though.) So just + // dump `parent` into `node.dependents`... unless it's + // already in `node.dependents` or `node.parent`. 
+ if !node.dependents.contains(&parent) && Some(parent) != node.parent { + node.dependents.push(parent); } } - if let NodeState::Error = self.nodes[o.get().get()].state.get() { + if let NodeState::Error = node.state.get() { Err(()) } else { Ok(()) @@ -207,7 +215,6 @@ impl ObligationForest { debug!("register_obligation_at({:?}, {:?}) - ok, new index is {}", obligation, parent, self.nodes.len()); v.insert(NodeIndex::new(self.nodes.len())); - self.cache_list.push(obligation.as_predicate().clone()); self.nodes.push(Node::new(parent, obligation)); Ok(()) } @@ -234,13 +241,13 @@ impl ObligationForest { } /// Returns the set of obligations that are in a pending state. - pub fn pending_obligations(&self) -> Vec - where O: Clone + pub fn map_pending_obligations(&self, f: F) -> Vec

+ where F: Fn(&O) -> P { self.nodes .iter() .filter(|n| n.state.get() == NodeState::Pending) - .map(|n| n.obligation.clone()) + .map(|n| f(&n.obligation)) .collect() } @@ -275,11 +282,11 @@ impl ObligationForest { result); match result { - Ok(None) => { - // no change in state + ProcessResult::Unchanged => { + // No change in state. } - Ok(Some(children)) => { - // if we saw a Some(_) result, we are not (yet) stalled + ProcessResult::Changed(children) => { + // We are not (yet) stalled. stalled = false; self.nodes[index].state.set(NodeState::Success); @@ -295,7 +302,7 @@ impl ObligationForest { } } } - Err(err) => { + ProcessResult::Error(err) => { stalled = false; let backtrace = self.error_at(index); errors.push(Error { @@ -377,10 +384,7 @@ impl ObligationForest { NodeState::Success => { node.state.set(NodeState::OnDfsStack); stack.push(index); - if let Some(parent) = node.parent { - self.find_cycles_from_node(stack, processor, parent.get()); - } - for dependent in &node.dependents { + for dependent in node.parent.iter().chain(node.dependents.iter()) { self.find_cycles_from_node(stack, processor, dependent.get()); } stack.pop(); @@ -415,13 +419,7 @@ impl ObligationForest { } } - loop { - // non-standard `while let` to bypass #6393 - let i = match error_stack.pop() { - Some(i) => i, - None => break - }; - + while let Some(i) = error_stack.pop() { let node = &self.nodes[i]; match node.state.get() { @@ -430,7 +428,7 @@ impl ObligationForest { } error_stack.extend( - node.dependents.iter().cloned().chain(node.parent).map(|x| x.get()) + node.parent.iter().chain(node.dependents.iter()).map(|x| x.get()) ); } @@ -440,11 +438,7 @@ impl ObligationForest { #[inline] fn mark_neighbors_as_waiting_from(&self, node: &Node) { - if let Some(parent) = node.parent { - self.mark_as_waiting_from(&self.nodes[parent.get()]); - } - - for dependent in &node.dependents { + for dependent in node.parent.iter().chain(node.dependents.iter()) { 
self.mark_as_waiting_from(&self.nodes[dependent.get()]); } } @@ -502,9 +496,14 @@ impl ObligationForest { } } NodeState::Done => { - self.waiting_cache.remove(self.nodes[i].obligation.as_predicate()); - // FIXME(HashMap): why can't I get my key back? - self.done_cache.insert(self.nodes[i].obligation.as_predicate().clone()); + // Avoid cloning the key (predicate) in case it exists in the waiting cache + if let Some((predicate, _)) = self.waiting_cache + .remove_entry(self.nodes[i].obligation.as_predicate()) + { + self.done_cache.insert(predicate); + } else { + self.done_cache.insert(self.nodes[i].obligation.as_predicate().clone()); + } node_rewrites[i] = nodes_len; dead_nodes += 1; } @@ -574,7 +573,7 @@ impl ObligationForest { } let mut kill_list = vec![]; - for (predicate, index) in self.waiting_cache.iter_mut() { + for (predicate, index) in &mut self.waiting_cache { let new_index = node_rewrites[index.get()]; if new_index >= nodes_len { kill_list.push(predicate.clone()); @@ -591,8 +590,8 @@ impl Node { fn new(parent: Option, obligation: O) -> Node { Node { obligation, - parent, state: Cell::new(NodeState::Pending), + parent, dependents: vec![], } } diff --git a/src/librustc_data_structures/obligation_forest/node_index.rs b/src/librustc_data_structures/obligation_forest/node_index.rs index a72cc6b57ead..d89bd22ec963 100644 --- a/src/librustc_data_structures/obligation_forest/node_index.rs +++ b/src/librustc_data_structures/obligation_forest/node_index.rs @@ -8,20 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use core::nonzero::NonZero; +use std::num::NonZeroU32; use std::u32; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct NodeIndex { - index: NonZero, + index: NonZeroU32, } impl NodeIndex { + #[inline] pub fn new(value: usize) -> NodeIndex { assert!(value < (u32::MAX as usize)); - NodeIndex { index: NonZero::new((value as u32) + 1).unwrap() } + NodeIndex { index: NonZeroU32::new((value as u32) + 1).unwrap() } } + #[inline] pub fn get(self) -> usize { (self.index.get() - 1) as usize } diff --git a/src/librustc_data_structures/obligation_forest/test.rs b/src/librustc_data_structures/obligation_forest/test.rs index a95b2b84b34c..527a1ef0ec44 100644 --- a/src/librustc_data_structures/obligation_forest/test.rs +++ b/src/librustc_data_structures/obligation_forest/test.rs @@ -10,7 +10,7 @@ #![cfg(test)] -use super::{ObligationForest, ObligationProcessor, Outcome, Error}; +use super::{Error, ObligationForest, ObligationProcessor, Outcome, ProcessResult}; use std::fmt; use std::marker::PhantomData; @@ -31,7 +31,7 @@ struct ClosureObligationProcessor { #[allow(non_snake_case)] fn C(of: OF, bf: BF) -> ClosureObligationProcessor - where OF: FnMut(&mut O) -> Result>, &'static str>, + where OF: FnMut(&mut O) -> ProcessResult, BF: FnMut(&[O]) { ClosureObligationProcessor { @@ -44,7 +44,7 @@ fn C(of: OF, bf: BF) -> ClosureObligationProcessor ObligationProcessor for ClosureObligationProcessor where O: super::ForestObligation + fmt::Debug, E: fmt::Debug, - OF: FnMut(&mut O) -> Result>, E>, + OF: FnMut(&mut O) -> ProcessResult, BF: FnMut(&[O]) { type Obligation = O; @@ -52,7 +52,7 @@ impl ObligationProcessor for ClosureObligationProcessor Result>, Self::Error> + -> ProcessResult { (self.process_obligation)(obligation) } @@ -78,9 +78,9 @@ fn push_pop() { let Outcome { completed: ok, errors: err, .. 
} = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), - "B" => Err("B is for broken"), - "C" => Ok(Some(vec![])), + "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]), + "B" => ProcessResult::Error("B is for broken"), + "C" => ProcessResult::Changed(vec![]), _ => unreachable!(), } }, |_| {})); @@ -101,10 +101,10 @@ fn push_pop() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.1" => Ok(None), - "A.2" => Ok(None), - "A.3" => Ok(Some(vec!["A.3.i"])), - "D" => Ok(Some(vec!["D.1", "D.2"])), + "A.1" => ProcessResult::Unchanged, + "A.2" => ProcessResult::Unchanged, + "A.3" => ProcessResult::Changed(vec!["A.3.i"]), + "D" => ProcessResult::Changed(vec!["D.1", "D.2"]), _ => unreachable!(), } }, |_| {})); @@ -119,11 +119,11 @@ fn push_pop() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.1" => Ok(Some(vec![])), - "A.2" => Err("A is for apple"), - "A.3.i" => Ok(Some(vec![])), - "D.1" => Ok(Some(vec!["D.1.i"])), - "D.2" => Ok(Some(vec!["D.2.i"])), + "A.1" => ProcessResult::Changed(vec![]), + "A.2" => ProcessResult::Error("A is for apple"), + "A.3.i" => ProcessResult::Changed(vec![]), + "D.1" => ProcessResult::Changed(vec!["D.1.i"]), + "D.2" => ProcessResult::Changed(vec!["D.2.i"]), _ => unreachable!(), } }, |_| {})); @@ -138,8 +138,8 @@ fn push_pop() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "D.1.i" => Err("D is for dumb"), - "D.2.i" => Ok(Some(vec![])), + "D.1.i" => ProcessResult::Error("D is for dumb"), + "D.2.i" => ProcessResult::Changed(vec![]), _ => panic!("unexpected obligation {:?}", obligation), } }, |_| {})); @@ -167,7 +167,7 @@ fn success_in_grandchildren() { let Outcome { completed: ok, errors: err, .. 
} = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), + "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]), _ => unreachable!(), } }, |_| {})); @@ -177,9 +177,9 @@ fn success_in_grandchildren() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.1" => Ok(Some(vec![])), - "A.2" => Ok(Some(vec!["A.2.i", "A.2.ii"])), - "A.3" => Ok(Some(vec![])), + "A.1" => ProcessResult::Changed(vec![]), + "A.2" => ProcessResult::Changed(vec!["A.2.i", "A.2.ii"]), + "A.3" => ProcessResult::Changed(vec![]), _ => unreachable!(), } }, |_| {})); @@ -189,8 +189,8 @@ fn success_in_grandchildren() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.2.i" => Ok(Some(vec!["A.2.i.a"])), - "A.2.ii" => Ok(Some(vec![])), + "A.2.i" => ProcessResult::Changed(vec!["A.2.i.a"]), + "A.2.ii" => ProcessResult::Changed(vec![]), _ => unreachable!(), } }, |_| {})); @@ -200,7 +200,7 @@ fn success_in_grandchildren() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.2.i.a" => Ok(Some(vec![])), + "A.2.i.a" => ProcessResult::Changed(vec![]), _ => unreachable!(), } }, |_| {})); @@ -223,7 +223,7 @@ fn to_errors_no_throw() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), + "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]), _ => unreachable!(), } }, |_|{})); @@ -244,7 +244,7 @@ fn diamond() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Ok(Some(vec!["A.1", "A.2"])), + "A" => ProcessResult::Changed(vec!["A.1", "A.2"]), _ => unreachable!(), } }, |_|{})); @@ -254,8 +254,8 @@ fn diamond() { let Outcome { completed: ok, errors: err, .. 
} = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.1" => Ok(Some(vec!["D"])), - "A.2" => Ok(Some(vec!["D"])), + "A.1" => ProcessResult::Changed(vec!["D"]), + "A.2" => ProcessResult::Changed(vec!["D"]), _ => unreachable!(), } }, |_|{})); @@ -266,7 +266,7 @@ fn diamond() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "D" => { d_count += 1; Ok(Some(vec![])) }, + "D" => { d_count += 1; ProcessResult::Changed(vec![]) }, _ => unreachable!(), } }, |_|{})); @@ -281,7 +281,7 @@ fn diamond() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A'" => Ok(Some(vec!["A'.1", "A'.2"])), + "A'" => ProcessResult::Changed(vec!["A'.1", "A'.2"]), _ => unreachable!(), } }, |_|{})); @@ -291,8 +291,8 @@ fn diamond() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A'.1" => Ok(Some(vec!["D'", "A'"])), - "A'.2" => Ok(Some(vec!["D'"])), + "A'.1" => ProcessResult::Changed(vec!["D'", "A'"]), + "A'.2" => ProcessResult::Changed(vec!["D'"]), _ => unreachable!(), } }, |_|{})); @@ -303,7 +303,7 @@ fn diamond() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "D'" => { d_count += 1; Err("operation failed") }, + "D'" => { d_count += 1; ProcessResult::Error("operation failed") }, _ => unreachable!(), } }, |_|{})); @@ -329,7 +329,7 @@ fn done_dependency() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A: Sized" | "B: Sized" | "C: Sized" => Ok(Some(vec![])), + "A: Sized" | "B: Sized" | "C: Sized" => ProcessResult::Changed(vec![]), _ => unreachable!(), } }, |_|{})); @@ -340,11 +340,11 @@ fn done_dependency() { let Outcome { completed: ok, errors: err, .. 
} = forest.process_obligations(&mut C(|obligation| { match *obligation { - "(A,B,C): Sized" => Ok(Some(vec![ + "(A,B,C): Sized" => ProcessResult::Changed(vec![ "A: Sized", "B: Sized", "C: Sized" - ])), + ]), _ => unreachable!(), } }, |_|{})); @@ -367,10 +367,10 @@ fn orphan() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Ok(Some(vec!["D", "E"])), - "B" => Ok(None), - "C1" => Ok(Some(vec![])), - "C2" => Ok(Some(vec![])), + "A" => ProcessResult::Changed(vec!["D", "E"]), + "B" => ProcessResult::Unchanged, + "C1" => ProcessResult::Changed(vec![]), + "C2" => ProcessResult::Changed(vec![]), _ => unreachable!(), } }, |_|{})); @@ -380,8 +380,8 @@ fn orphan() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "D" | "E" => Ok(None), - "B" => Ok(Some(vec!["D"])), + "D" | "E" => ProcessResult::Unchanged, + "B" => ProcessResult::Changed(vec!["D"]), _ => unreachable!(), } }, |_|{})); @@ -391,8 +391,8 @@ fn orphan() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "D" => Ok(None), - "E" => Err("E is for error"), + "D" => ProcessResult::Unchanged, + "E" => ProcessResult::Error("E is for error"), _ => unreachable!(), } }, |_|{})); @@ -405,7 +405,7 @@ fn orphan() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "D" => Err("D is dead"), + "D" => ProcessResult::Error("D is dead"), _ => unreachable!(), } }, |_|{})); @@ -429,8 +429,8 @@ fn simultaneous_register_and_error() { let Outcome { completed: ok, errors: err, .. 
} = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Err("An error"), - "B" => Ok(Some(vec!["A"])), + "A" => ProcessResult::Error("An error"), + "B" => ProcessResult::Changed(vec!["A"]), _ => unreachable!(), } }, |_|{})); @@ -447,8 +447,8 @@ fn simultaneous_register_and_error() { let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Err("An error"), - "B" => Ok(Some(vec!["A"])), + "A" => ProcessResult::Error("An error"), + "B" => ProcessResult::Changed(vec!["A"]), _ => unreachable!(), } }, |_|{})); diff --git a/src/librustc_data_structures/owning_ref/mod.rs b/src/librustc_data_structures/owning_ref/mod.rs index 23e0733748b4..02640a71010e 100644 --- a/src/librustc_data_structures/owning_ref/mod.rs +++ b/src/librustc_data_structures/owning_ref/mod.rs @@ -243,6 +243,7 @@ fn main() { ``` */ +use std::mem; pub use stable_deref_trait::{StableDeref as StableAddress, CloneStableDeref as CloneStableAddress}; /// An owning reference. @@ -279,7 +280,7 @@ pub struct OwningRefMut { pub trait Erased {} impl Erased for T {} -/// Helper trait for erasing the concrete type of what an owner derferences to, +/// Helper trait for erasing the concrete type of what an owner dereferences to, /// for example `Box -> Box`. This would be unneeded with /// higher kinded types support in the language. pub unsafe trait IntoErased<'a> { @@ -289,10 +290,20 @@ pub unsafe trait IntoErased<'a> { fn into_erased(self) -> Self::Erased; } -/// Helper trait for erasing the concrete type of what an owner derferences to, +/// Helper trait for erasing the concrete type of what an owner dereferences to, +/// for example `Box -> Box`. This would be unneeded with +/// higher kinded types support in the language. +pub unsafe trait IntoErasedSend<'a> { + /// Owner with the dereference type substituted to `Erased + Send`. + type Erased: Send; + /// Perform the type erasure. 
+ fn into_erased_send(self) -> Self::Erased; +} + +/// Helper trait for erasing the concrete type of what an owner dereferences to, /// for example `Box -> Box`. This would be unneeded with /// higher kinded types support in the language. -pub unsafe trait IntoErasedSendSync<'a>: Send + Sync { +pub unsafe trait IntoErasedSendSync<'a> { /// Owner with the dereference type substituted to `Erased + Send + Sync`. type Erased: Send + Sync; /// Perform the type erasure. @@ -472,6 +483,18 @@ impl OwningRef { } } + /// Erases the concrete base type of the owner with a trait object which implements `Send`. + /// + /// This allows mixing of owned references with different owner base types. + pub fn erase_send_owner<'a>(self) -> OwningRef + where O: IntoErasedSend<'a>, + { + OwningRef { + reference: self.reference, + owner: self.owner.into_erased_send(), + } + } + /// Erases the concrete base type of the owner with a trait object which implements `Send` and `Sync`. /// /// This allows mixing of owned references with different owner base types. 
@@ -979,7 +1002,7 @@ impl Debug for OwningRef where O: Debug, T: Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OwningRef {{ owner: {:?}, reference: {:?} }}", self.owner(), @@ -991,7 +1014,7 @@ impl Debug for OwningRefMut where O: Debug, T: Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OwningRefMut {{ owner: {:?}, reference: {:?} }}", self.owner(), @@ -1023,8 +1046,8 @@ unsafe impl Send for OwningRefMut unsafe impl Sync for OwningRefMut where O: Sync, for<'a> (&'a mut T): Sync {} -impl Debug for Erased { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { +impl Debug for dyn Erased { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "",) } } @@ -1143,47 +1166,59 @@ pub type MutexGuardRefMut<'a, T, U = T> = OwningRefMut, U>; pub type RwLockWriteGuardRefMut<'a, T, U = T> = OwningRef, U>; unsafe impl<'a, T: 'a> IntoErased<'a> for Box { - type Erased = Box; + type Erased = Box; fn into_erased(self) -> Self::Erased { self } } unsafe impl<'a, T: 'a> IntoErased<'a> for Rc { - type Erased = Rc; + type Erased = Rc; fn into_erased(self) -> Self::Erased { self } } unsafe impl<'a, T: 'a> IntoErased<'a> for Arc { - type Erased = Arc; + type Erased = Arc; fn into_erased(self) -> Self::Erased { self } } -unsafe impl<'a, T: Send + Sync + 'a> IntoErasedSendSync<'a> for Box { - type Erased = Box; - fn into_erased_send_sync(self) -> Self::Erased { +unsafe impl<'a, T: Send + 'a> IntoErasedSend<'a> for Box { + type Erased = Box; + fn into_erased_send(self) -> Self::Erased { self } } +unsafe impl<'a, T: Send + 'a> IntoErasedSendSync<'a> for Box { + type Erased = Box; + fn into_erased_send_sync(self) -> Self::Erased { + let result: Box = self; + // This is safe since Erased can always implement Sync + // Only the destructor is available and it takes &mut self + 
unsafe { + mem::transmute(result) + } + } +} + unsafe impl<'a, T: Send + Sync + 'a> IntoErasedSendSync<'a> for Arc { - type Erased = Arc; + type Erased = Arc; fn into_erased_send_sync(self) -> Self::Erased { self } } /// Typedef of a owning reference that uses an erased `Box` as the owner. -pub type ErasedBoxRef = OwningRef, U>; +pub type ErasedBoxRef = OwningRef, U>; /// Typedef of a owning reference that uses an erased `Rc` as the owner. -pub type ErasedRcRef = OwningRef, U>; +pub type ErasedRcRef = OwningRef, U>; /// Typedef of a owning reference that uses an erased `Arc` as the owner. -pub type ErasedArcRef = OwningRef, U>; +pub type ErasedArcRef = OwningRef, U>; /// Typedef of a mutable owning reference that uses an erased `Box` as the owner. -pub type ErasedBoxRefMut = OwningRefMut, U>; +pub type ErasedBoxRefMut = OwningRefMut, U>; #[cfg(test)] mod tests { @@ -1408,8 +1443,8 @@ mod tests { let c: OwningRef>, [u8]> = unsafe {a.map_owner(Rc::new)}; let d: OwningRef>, [u8]> = unsafe {b.map_owner(Rc::new)}; - let e: OwningRef, [u8]> = c.erase_owner(); - let f: OwningRef, [u8]> = d.erase_owner(); + let e: OwningRef, [u8]> = c.erase_owner(); + let f: OwningRef, [u8]> = d.erase_owner(); let _g = e.clone(); let _h = f.clone(); @@ -1425,8 +1460,8 @@ mod tests { let c: OwningRef>, [u8]> = a.map_owner_box(); let d: OwningRef>, [u8]> = b.map_owner_box(); - let _e: OwningRef, [u8]> = c.erase_owner(); - let _f: OwningRef, [u8]> = d.erase_owner(); + let _e: OwningRef, [u8]> = c.erase_owner(); + let _f: OwningRef, [u8]> = d.erase_owner(); } #[test] @@ -1434,7 +1469,7 @@ mod tests { use std::any::Any; let x = Box::new(123_i32); - let y: Box = x; + let y: Box = x; OwningRef::new(y).try_map(|x| x.downcast_ref::().ok_or(())).is_ok(); } @@ -1444,7 +1479,7 @@ mod tests { use std::any::Any; let x = Box::new(123_i32); - let y: Box = x; + let y: Box = x; OwningRef::new(y).try_map(|x| x.downcast_ref::().ok_or(())).is_err(); } @@ -1808,8 +1843,8 @@ mod tests { let c: OwningRefMut>, 
[u8]> = unsafe {a.map_owner(Box::new)}; let d: OwningRefMut>, [u8]> = unsafe {b.map_owner(Box::new)}; - let _e: OwningRefMut, [u8]> = c.erase_owner(); - let _f: OwningRefMut, [u8]> = d.erase_owner(); + let _e: OwningRefMut, [u8]> = c.erase_owner(); + let _f: OwningRefMut, [u8]> = d.erase_owner(); } #[test] @@ -1822,8 +1857,8 @@ mod tests { let c: OwningRefMut>, [u8]> = a.map_owner_box(); let d: OwningRefMut>, [u8]> = b.map_owner_box(); - let _e: OwningRefMut, [u8]> = c.erase_owner(); - let _f: OwningRefMut, [u8]> = d.erase_owner(); + let _e: OwningRefMut, [u8]> = c.erase_owner(); + let _f: OwningRefMut, [u8]> = d.erase_owner(); } #[test] @@ -1831,7 +1866,7 @@ mod tests { use std::any::Any; let x = Box::new(123_i32); - let y: Box = x; + let y: Box = x; OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::().ok_or(())).is_ok(); } @@ -1841,7 +1876,7 @@ mod tests { use std::any::Any; let x = Box::new(123_i32); - let y: Box = x; + let y: Box = x; OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::().ok_or(())).is_err(); } @@ -1851,7 +1886,7 @@ mod tests { use std::any::Any; let x = Box::new(123_i32); - let y: Box = x; + let y: Box = x; OwningRefMut::new(y).try_map(|x| x.downcast_ref::().ok_or(())).is_ok(); } @@ -1861,7 +1896,7 @@ mod tests { use std::any::Any; let x = Box::new(123_i32); - let y: Box = x; + let y: Box = x; OwningRefMut::new(y).try_map(|x| x.downcast_ref::().ok_or(())).is_err(); } diff --git a/src/librustc_data_structures/ptr_key.rs b/src/librustc_data_structures/ptr_key.rs new file mode 100644 index 000000000000..6835dab38df0 --- /dev/null +++ b/src/librustc_data_structures/ptr_key.rs @@ -0,0 +1,45 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::{hash, ptr}; +use std::ops::Deref; + +/// A wrapper around reference that compares and hashes like a pointer. +/// Can be used as a key in sets/maps indexed by pointers to avoid `unsafe`. +#[derive(Debug)] +pub struct PtrKey<'a, T: 'a>(pub &'a T); + +impl<'a, T> Clone for PtrKey<'a, T> { + fn clone(&self) -> Self { *self } +} + +impl<'a, T> Copy for PtrKey<'a, T> {} + +impl<'a, T> PartialEq for PtrKey<'a, T> { + fn eq(&self, rhs: &Self) -> bool { + ptr::eq(self.0, rhs.0) + } +} + +impl<'a, T> Eq for PtrKey<'a, T> {} + +impl<'a, T> hash::Hash for PtrKey<'a, T> { + fn hash(&self, hasher: &mut H) { + (self.0 as *const T).hash(hasher) + } +} + +impl<'a, T> Deref for PtrKey<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + self.0 + } +} diff --git a/src/librustc_data_structures/small_c_str.rs b/src/librustc_data_structures/small_c_str.rs new file mode 100644 index 000000000000..b0ad83e49793 --- /dev/null +++ b/src/librustc_data_structures/small_c_str.rs @@ -0,0 +1,131 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::ffi; +use std::ops::Deref; + +const SIZE: usize = 38; + +/// Like SmallVec but for C strings. +#[derive(Clone)] +pub enum SmallCStr { + OnStack { + data: [u8; SIZE], + len_with_nul: u8, + }, + OnHeap { + data: ffi::CString, + } +} + +impl SmallCStr { + #[inline] + pub fn new(s: &str) -> SmallCStr { + if s.len() < SIZE { + let mut data = [0; SIZE]; + data[.. s.len()].copy_from_slice(s.as_bytes()); + let len_with_nul = s.len() + 1; + + // Make sure once that this is a valid CStr + if let Err(e) = ffi::CStr::from_bytes_with_nul(&data[.. 
len_with_nul]) { + panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e); + } + + SmallCStr::OnStack { + data, + len_with_nul: len_with_nul as u8, + } + } else { + SmallCStr::OnHeap { + data: ffi::CString::new(s).unwrap() + } + } + } + + #[inline] + pub fn as_c_str(&self) -> &ffi::CStr { + match *self { + SmallCStr::OnStack { ref data, len_with_nul } => { + unsafe { + let slice = &data[.. len_with_nul as usize]; + ffi::CStr::from_bytes_with_nul_unchecked(slice) + } + } + SmallCStr::OnHeap { ref data } => { + data.as_c_str() + } + } + } + + #[inline] + pub fn len_with_nul(&self) -> usize { + match *self { + SmallCStr::OnStack { len_with_nul, .. } => { + len_with_nul as usize + } + SmallCStr::OnHeap { ref data } => { + data.as_bytes_with_nul().len() + } + } + } +} + +impl Deref for SmallCStr { + type Target = ffi::CStr; + + fn deref(&self) -> &ffi::CStr { + self.as_c_str() + } +} + + +#[test] +fn short() { + const TEXT: &str = "abcd"; + let reference = ffi::CString::new(TEXT.to_string()).unwrap(); + + let scs = SmallCStr::new(TEXT); + + assert_eq!(scs.len_with_nul(), TEXT.len() + 1); + assert_eq!(scs.as_c_str(), reference.as_c_str()); + assert!(if let SmallCStr::OnStack { .. } = scs { true } else { false }); +} + +#[test] +fn empty() { + const TEXT: &str = ""; + let reference = ffi::CString::new(TEXT.to_string()).unwrap(); + + let scs = SmallCStr::new(TEXT); + + assert_eq!(scs.len_with_nul(), TEXT.len() + 1); + assert_eq!(scs.as_c_str(), reference.as_c_str()); + assert!(if let SmallCStr::OnStack { .. 
} = scs { true } else { false }); +} + +#[test] +fn long() { + const TEXT: &str = "01234567890123456789012345678901234567890123456789\ + 01234567890123456789012345678901234567890123456789\ + 01234567890123456789012345678901234567890123456789"; + let reference = ffi::CString::new(TEXT.to_string()).unwrap(); + + let scs = SmallCStr::new(TEXT); + + assert_eq!(scs.len_with_nul(), TEXT.len() + 1); + assert_eq!(scs.as_c_str(), reference.as_c_str()); + assert!(if let SmallCStr::OnHeap { .. } = scs { true } else { false }); +} + +#[test] +#[should_panic] +fn internal_nul() { + let _ = SmallCStr::new("abcd\0def"); +} diff --git a/src/librustc_data_structures/small_vec.rs b/src/librustc_data_structures/small_vec.rs index 74738e61b446..6f101b20d880 100644 --- a/src/librustc_data_structures/small_vec.rs +++ b/src/librustc_data_structures/small_vec.rs @@ -29,6 +29,8 @@ use array_vec::Array; pub struct SmallVec(AccumulateVec); +pub type OneVector = SmallVec<[T; 1]>; + impl Clone for SmallVec where A: Array, A::Element: Clone { @@ -50,6 +52,10 @@ impl SmallVec { SmallVec(AccumulateVec::new()) } + pub fn is_array(&self) -> bool { + self.0.is_array() + } + pub fn with_capacity(cap: usize) -> Self { let mut vec = SmallVec::new(); vec.reserve(cap); @@ -167,8 +173,9 @@ impl Extend for SmallVec { fn extend>(&mut self, iter: I) { let iter = iter.into_iter(); self.reserve(iter.size_hint().0); - for el in iter { - self.push(el); + match self.0 { + AccumulateVec::Heap(ref mut vec) => vec.extend(iter), + _ => iter.for_each(|el| self.push(el)) } } } @@ -193,7 +200,7 @@ impl Encodable for SmallVec fn encode(&self, s: &mut S) -> Result<(), S::Error> { s.emit_seq(self.len(), |s| { for (i, e) in self.iter().enumerate() { - try!(s.emit_seq_elt(i, |s| e.encode(s))); + s.emit_seq_elt(i, |s| e.encode(s))?; } Ok(()) }) @@ -206,10 +213,190 @@ impl Decodable for SmallVec fn decode(d: &mut D) -> Result, D::Error> { d.read_seq(|d, len| { let mut vec = SmallVec::with_capacity(len); + // FIXME(#48994) - 
could just be collected into a Result for i in 0..len { - vec.push(try!(d.read_seq_elt(i, |d| Decodable::decode(d)))); + vec.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); } Ok(vec) }) } } + +#[cfg(test)] +mod tests { + extern crate test; + use self::test::Bencher; + + use super::*; + + #[test] + fn test_len() { + let v: OneVector = OneVector::new(); + assert_eq!(0, v.len()); + + assert_eq!(1, OneVector::one(1).len()); + assert_eq!(5, OneVector::many(vec![1, 2, 3, 4, 5]).len()); + } + + #[test] + fn test_push_get() { + let mut v = OneVector::new(); + v.push(1); + assert_eq!(1, v.len()); + assert_eq!(1, v[0]); + v.push(2); + assert_eq!(2, v.len()); + assert_eq!(2, v[1]); + v.push(3); + assert_eq!(3, v.len()); + assert_eq!(3, v[2]); + } + + #[test] + fn test_from_iter() { + let v: OneVector = (vec![1, 2, 3]).into_iter().collect(); + assert_eq!(3, v.len()); + assert_eq!(1, v[0]); + assert_eq!(2, v[1]); + assert_eq!(3, v[2]); + } + + #[test] + fn test_move_iter() { + let v = OneVector::new(); + let v: Vec = v.into_iter().collect(); + assert_eq!(v, Vec::new()); + + let v = OneVector::one(1); + assert_eq!(v.into_iter().collect::>(), [1]); + + let v = OneVector::many(vec![1, 2, 3]); + assert_eq!(v.into_iter().collect::>(), [1, 2, 3]); + } + + #[test] + #[should_panic] + fn test_expect_one_zero() { + let _: isize = OneVector::new().expect_one(""); + } + + #[test] + #[should_panic] + fn test_expect_one_many() { + OneVector::many(vec![1, 2]).expect_one(""); + } + + #[test] + fn test_expect_one_one() { + assert_eq!(1, OneVector::one(1).expect_one("")); + assert_eq!(1, OneVector::many(vec![1]).expect_one("")); + } + + #[bench] + fn fill_small_vec_1_10_with_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 1]> = SmallVec::with_capacity(10); + + sv.extend(0..10); + }) + } + + #[bench] + fn fill_small_vec_1_10_wo_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 1]> = SmallVec::new(); + + sv.extend(0..10); + }) + } + + #[bench] + fn 
fill_small_vec_8_10_with_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 8]> = SmallVec::with_capacity(10); + + sv.extend(0..10); + }) + } + + #[bench] + fn fill_small_vec_8_10_wo_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 8]> = SmallVec::new(); + + sv.extend(0..10); + }) + } + + #[bench] + fn fill_small_vec_32_10_with_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 32]> = SmallVec::with_capacity(10); + + sv.extend(0..10); + }) + } + + #[bench] + fn fill_small_vec_32_10_wo_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 32]> = SmallVec::new(); + + sv.extend(0..10); + }) + } + + #[bench] + fn fill_small_vec_1_50_with_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 1]> = SmallVec::with_capacity(50); + + sv.extend(0..50); + }) + } + + #[bench] + fn fill_small_vec_1_50_wo_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 1]> = SmallVec::new(); + + sv.extend(0..50); + }) + } + + #[bench] + fn fill_small_vec_8_50_with_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 8]> = SmallVec::with_capacity(50); + + sv.extend(0..50); + }) + } + + #[bench] + fn fill_small_vec_8_50_wo_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 8]> = SmallVec::new(); + + sv.extend(0..50); + }) + } + + #[bench] + fn fill_small_vec_32_50_with_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 32]> = SmallVec::with_capacity(50); + + sv.extend(0..50); + }) + } + + #[bench] + fn fill_small_vec_32_50_wo_cap(b: &mut Bencher) { + b.iter(|| { + let mut sv: SmallVec<[usize; 32]> = SmallVec::new(); + + sv.extend(0..50); + }) + } +} diff --git a/src/librustc_data_structures/snapshot_map/mod.rs b/src/librustc_data_structures/snapshot_map/mod.rs index cd7143ad3ce8..5030bf98dffd 100644 --- a/src/librustc_data_structures/snapshot_map/mod.rs +++ b/src/librustc_data_structures/snapshot_map/mod.rs @@ -45,6 +45,11 @@ impl SnapshotMap } } + 
pub fn clear(&mut self) { + self.map.clear(); + self.undo_log.clear(); + } + pub fn insert(&mut self, key: K, value: V) -> bool { match self.map.insert(key.clone(), value) { None => { @@ -62,6 +67,12 @@ impl SnapshotMap } } + pub fn insert_noop(&mut self) { + if !self.undo_log.is_empty() { + self.undo_log.push(UndoLog::Noop); + } + } + pub fn remove(&mut self, key: K) -> bool { match self.map.remove(&key) { Some(old_value) => { @@ -81,7 +92,7 @@ impl SnapshotMap pub fn snapshot(&mut self) -> Snapshot { self.undo_log.push(UndoLog::OpenSnapshot); let len = self.undo_log.len() - 1; - Snapshot { len: len } + Snapshot { len } } fn assert_open_snapshot(&self, snapshot: &Snapshot) { @@ -92,8 +103,8 @@ impl SnapshotMap }); } - pub fn commit(&mut self, snapshot: Snapshot) { - self.assert_open_snapshot(&snapshot); + pub fn commit(&mut self, snapshot: &Snapshot) { + self.assert_open_snapshot(snapshot); if snapshot.len == 0 { // The root snapshot. self.undo_log.truncate(0); @@ -124,8 +135,8 @@ impl SnapshotMap } } - pub fn rollback_to(&mut self, snapshot: Snapshot) { - self.assert_open_snapshot(&snapshot); + pub fn rollback_to(&mut self, snapshot: &Snapshot) { + self.assert_open_snapshot(snapshot); while self.undo_log.len() > snapshot.len + 1 { let entry = self.undo_log.pop().unwrap(); self.reverse(entry); diff --git a/src/librustc_data_structures/snapshot_map/test.rs b/src/librustc_data_structures/snapshot_map/test.rs index 4114082839b0..b163e0fe420e 100644 --- a/src/librustc_data_structures/snapshot_map/test.rs +++ b/src/librustc_data_structures/snapshot_map/test.rs @@ -20,7 +20,7 @@ fn basic() { map.insert(44, "fourty-four"); assert_eq!(map[&44], "fourty-four"); assert_eq!(map.get(&33), None); - map.rollback_to(snapshot); + map.rollback_to(&snapshot); assert_eq!(map[&22], "twenty-two"); assert_eq!(map.get(&33), None); assert_eq!(map.get(&44), None); @@ -33,7 +33,7 @@ fn out_of_order() { map.insert(22, "twenty-two"); let snapshot1 = map.snapshot(); let _snapshot2 = 
map.snapshot(); - map.rollback_to(snapshot1); + map.rollback_to(&snapshot1); } #[test] @@ -43,8 +43,8 @@ fn nested_commit_then_rollback() { let snapshot1 = map.snapshot(); let snapshot2 = map.snapshot(); map.insert(22, "thirty-three"); - map.commit(snapshot2); + map.commit(&snapshot2); assert_eq!(map[&22], "thirty-three"); - map.rollback_to(snapshot1); + map.rollback_to(&snapshot1); assert_eq!(map[&22], "twenty-two"); } diff --git a/src/librustc_data_structures/snapshot_vec.rs b/src/librustc_data_structures/snapshot_vec.rs deleted file mode 100644 index 2da91918288b..000000000000 --- a/src/librustc_data_structures/snapshot_vec.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A utility class for implementing "snapshottable" things; a snapshottable data structure permits -//! you to take a snapshot (via `start_snapshot`) and then, after making some changes, elect either -//! to rollback to the start of the snapshot or commit those changes. -//! -//! This vector is intended to be used as part of an abstraction, not serve as a complete -//! abstraction on its own. As such, while it will roll back most changes on its own, it also -//! supports a `get_mut` operation that gives you an arbitrary mutable pointer into the vector. To -//! ensure that any changes you make this with this pointer are rolled back, you must invoke -//! `record` to record any changes you make and also supplying a delegate capable of reversing -//! those changes. -use self::UndoLog::*; - -use std::mem; -use std::ops; - -pub enum UndoLog { - /// Indicates where a snapshot started. - OpenSnapshot, - - /// Indicates a snapshot that has been committed. 
- CommittedSnapshot, - - /// New variable with given index was created. - NewElem(usize), - - /// Variable with given index was changed *from* the given value. - SetElem(usize, D::Value), - - /// Extensible set of actions - Other(D::Undo), -} - -pub struct SnapshotVec { - values: Vec, - undo_log: Vec>, -} - -// Snapshots are tokens that should be created/consumed linearly. -pub struct Snapshot { - // Length of the undo log at the time the snapshot was taken. - length: usize, -} - -pub trait SnapshotVecDelegate { - type Value; - type Undo; - - fn reverse(values: &mut Vec, action: Self::Undo); -} - -impl SnapshotVec { - pub fn new() -> SnapshotVec { - SnapshotVec { - values: Vec::new(), - undo_log: Vec::new(), - } - } - - pub fn with_capacity(n: usize) -> SnapshotVec { - SnapshotVec { - values: Vec::with_capacity(n), - undo_log: Vec::new(), - } - } - - fn in_snapshot(&self) -> bool { - !self.undo_log.is_empty() - } - - pub fn record(&mut self, action: D::Undo) { - if self.in_snapshot() { - self.undo_log.push(Other(action)); - } - } - - pub fn len(&self) -> usize { - self.values.len() - } - - pub fn push(&mut self, elem: D::Value) -> usize { - let len = self.values.len(); - self.values.push(elem); - - if self.in_snapshot() { - self.undo_log.push(NewElem(len)); - } - - len - } - - pub fn get(&self, index: usize) -> &D::Value { - &self.values[index] - } - - /// Returns a mutable pointer into the vec; whatever changes you make here cannot be undone - /// automatically, so you should be sure call `record()` with some sort of suitable undo - /// action. - pub fn get_mut(&mut self, index: usize) -> &mut D::Value { - &mut self.values[index] - } - - /// Updates the element at the given index. The old value will saved (and perhaps restored) if - /// a snapshot is active. 
- pub fn set(&mut self, index: usize, new_elem: D::Value) { - let old_elem = mem::replace(&mut self.values[index], new_elem); - if self.in_snapshot() { - self.undo_log.push(SetElem(index, old_elem)); - } - } - - pub fn start_snapshot(&mut self) -> Snapshot { - let length = self.undo_log.len(); - self.undo_log.push(OpenSnapshot); - Snapshot { length: length } - } - - pub fn actions_since_snapshot(&self, snapshot: &Snapshot) -> &[UndoLog] { - &self.undo_log[snapshot.length..] - } - - fn assert_open_snapshot(&self, snapshot: &Snapshot) { - // Or else there was a failure to follow a stack discipline: - assert!(self.undo_log.len() > snapshot.length); - - // Invariant established by start_snapshot(): - assert!(match self.undo_log[snapshot.length] { - OpenSnapshot => true, - _ => false, - }); - } - - pub fn rollback_to(&mut self, snapshot: Snapshot) { - debug!("rollback_to({})", snapshot.length); - - self.assert_open_snapshot(&snapshot); - - while self.undo_log.len() > snapshot.length + 1 { - match self.undo_log.pop().unwrap() { - OpenSnapshot => { - // This indicates a failure to obey the stack discipline. - panic!("Cannot rollback an uncommitted snapshot"); - } - - CommittedSnapshot => { - // This occurs when there are nested snapshots and - // the inner is committed but outer is rolled back. - } - - NewElem(i) => { - self.values.pop(); - assert!(self.values.len() == i); - } - - SetElem(i, v) => { - self.values[i] = v; - } - - Other(u) => { - D::reverse(&mut self.values, u); - } - } - } - - let v = self.undo_log.pop().unwrap(); - assert!(match v { - OpenSnapshot => true, - _ => false, - }); - assert!(self.undo_log.len() == snapshot.length); - } - - /// Commits all changes since the last snapshot. Of course, they - /// can still be undone if there is a snapshot further out. - pub fn commit(&mut self, snapshot: Snapshot) { - debug!("commit({})", snapshot.length); - - self.assert_open_snapshot(&snapshot); - - if snapshot.length == 0 { - // The root snapshot. 
- self.undo_log.truncate(0); - } else { - self.undo_log[snapshot.length] = CommittedSnapshot; - } - } -} - -impl ops::Deref for SnapshotVec { - type Target = [D::Value]; - fn deref(&self) -> &[D::Value] { - &*self.values - } -} - -impl ops::DerefMut for SnapshotVec { - fn deref_mut(&mut self) -> &mut [D::Value] { - &mut *self.values - } -} - -impl ops::Index for SnapshotVec { - type Output = D::Value; - fn index(&self, index: usize) -> &D::Value { - self.get(index) - } -} - -impl ops::IndexMut for SnapshotVec { - fn index_mut(&mut self, index: usize) -> &mut D::Value { - self.get_mut(index) - } -} - -impl Extend for SnapshotVec { - fn extend(&mut self, iterable: T) where T: IntoIterator { - for item in iterable { - self.push(item); - } - } -} diff --git a/src/librustc_data_structures/sorted_map.rs b/src/librustc_data_structures/sorted_map.rs new file mode 100644 index 000000000000..730b13a0584f --- /dev/null +++ b/src/librustc_data_structures/sorted_map.rs @@ -0,0 +1,489 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::borrow::Borrow; +use std::cmp::Ordering; +use std::convert::From; +use std::mem; +use std::ops::{RangeBounds, Bound, Index, IndexMut}; + +/// `SortedMap` is a data structure with similar characteristics as BTreeMap but +/// slightly different trade-offs: lookup, inseration, and removal are O(log(N)) +/// and elements can be iterated in order cheaply. +/// +/// `SortedMap` can be faster than a `BTreeMap` for small sizes (<50) since it +/// stores data in a more compact way. It also supports accessing contiguous +/// ranges of elements as a slice, and slices of already sorted elements can be +/// inserted efficiently. 
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Debug, RustcEncodable, + RustcDecodable)] +pub struct SortedMap { + data: Vec<(K,V)> +} + +impl SortedMap { + + #[inline] + pub fn new() -> SortedMap { + SortedMap { + data: vec![] + } + } + + /// Construct a `SortedMap` from a presorted set of elements. This is faster + /// than creating an empty map and then inserting the elements individually. + /// + /// It is up to the caller to make sure that the elements are sorted by key + /// and that there are no duplicates. + #[inline] + pub fn from_presorted_elements(elements: Vec<(K, V)>) -> SortedMap + { + debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0)); + + SortedMap { + data: elements + } + } + + #[inline] + pub fn insert(&mut self, key: K, mut value: V) -> Option { + match self.lookup_index_for(&key) { + Ok(index) => { + let slot = unsafe { + self.data.get_unchecked_mut(index) + }; + mem::swap(&mut slot.1, &mut value); + Some(value) + } + Err(index) => { + self.data.insert(index, (key, value)); + None + } + } + } + + #[inline] + pub fn remove(&mut self, key: &K) -> Option { + match self.lookup_index_for(key) { + Ok(index) => { + Some(self.data.remove(index).1) + } + Err(_) => { + None + } + } + } + + #[inline] + pub fn get(&self, key: &K) -> Option<&V> { + match self.lookup_index_for(key) { + Ok(index) => { + unsafe { + Some(&self.data.get_unchecked(index).1) + } + } + Err(_) => { + None + } + } + } + + #[inline] + pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + match self.lookup_index_for(key) { + Ok(index) => { + unsafe { + Some(&mut self.data.get_unchecked_mut(index).1) + } + } + Err(_) => { + None + } + } + } + + #[inline] + pub fn clear(&mut self) { + self.data.clear(); + } + + /// Iterate over elements, sorted by key + #[inline] + pub fn iter(&self) -> ::std::slice::Iter<(K, V)> { + self.data.iter() + } + + /// Iterate over the keys, sorted + #[inline] + pub fn keys(&self) -> impl Iterator + ExactSizeIterator { + 
self.data.iter().map(|&(ref k, _)| k) + } + + /// Iterate over values, sorted by key + #[inline] + pub fn values(&self) -> impl Iterator + ExactSizeIterator { + self.data.iter().map(|&(_, ref v)| v) + } + + #[inline] + pub fn len(&self) -> usize { + self.data.len() + } + + #[inline] + pub fn range(&self, range: R) -> &[(K, V)] + where R: RangeBounds + { + let (start, end) = self.range_slice_indices(range); + (&self.data[start .. end]) + } + + #[inline] + pub fn remove_range(&mut self, range: R) + where R: RangeBounds + { + let (start, end) = self.range_slice_indices(range); + self.data.splice(start .. end, ::std::iter::empty()); + } + + /// Mutate all keys with the given function `f`. This mutation must not + /// change the sort-order of keys. + #[inline] + pub fn offset_keys(&mut self, f: F) + where F: Fn(&mut K) + { + self.data.iter_mut().map(|&mut (ref mut k, _)| k).for_each(f); + } + + /// Inserts a presorted range of elements into the map. If the range can be + /// inserted as a whole in between to existing elements of the map, this + /// will be faster than inserting the elements individually. + /// + /// It is up to the caller to make sure that the elements are sorted by key + /// and that there are no duplicates. + #[inline] + pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) { + if elements.is_empty() { + return + } + + debug_assert!(elements.windows(2).all(|w| w[0].0 < w[1].0)); + + let start_index = self.lookup_index_for(&elements[0].0); + + let drain = match start_index { + Ok(index) => { + let mut drain = elements.drain(..); + self.data[index] = drain.next().unwrap(); + drain + } + Err(index) => { + if index == self.data.len() || + elements.last().unwrap().0 < self.data[index].0 { + // We can copy the whole range without having to mix with + // existing elements. + self.data.splice(index .. 
index, elements.drain(..)); + return + } + + let mut drain = elements.drain(..); + self.data.insert(index, drain.next().unwrap()); + drain + } + }; + + // Insert the rest + for (k, v) in drain { + self.insert(k, v); + } + } + + /// Looks up the key in `self.data` via `slice::binary_search()`. + #[inline(always)] + fn lookup_index_for(&self, key: &K) -> Result { + self.data.binary_search_by(|&(ref x, _)| x.cmp(key)) + } + + #[inline] + fn range_slice_indices(&self, range: R) -> (usize, usize) + where R: RangeBounds + { + let start = match range.start_bound() { + Bound::Included(ref k) => { + match self.lookup_index_for(k) { + Ok(index) | Err(index) => index + } + } + Bound::Excluded(ref k) => { + match self.lookup_index_for(k) { + Ok(index) => index + 1, + Err(index) => index, + } + } + Bound::Unbounded => 0, + }; + + let end = match range.end_bound() { + Bound::Included(ref k) => { + match self.lookup_index_for(k) { + Ok(index) => index + 1, + Err(index) => index, + } + } + Bound::Excluded(ref k) => { + match self.lookup_index_for(k) { + Ok(index) | Err(index) => index, + } + } + Bound::Unbounded => self.data.len(), + }; + + (start, end) + } +} + +impl IntoIterator for SortedMap { + type Item = (K, V); + type IntoIter = ::std::vec::IntoIter<(K, V)>; + fn into_iter(self) -> Self::IntoIter { + self.data.into_iter() + } +} + +impl> Index for SortedMap { + type Output = V; + fn index(&self, index: Q) -> &Self::Output { + let k: &K = index.borrow(); + self.get(k).unwrap() + } +} + +impl> IndexMut for SortedMap { + fn index_mut(&mut self, index: Q) -> &mut Self::Output { + let k: &K = index.borrow(); + self.get_mut(k).unwrap() + } +} + +impl> From for SortedMap { + fn from(data: I) -> Self { + let mut data: Vec<(K, V)> = data.collect(); + data.sort_unstable_by(|&(ref k1, _), &(ref k2, _)| k1.cmp(k2)); + data.dedup_by(|&mut (ref k1, _), &mut (ref k2, _)| { + k1.cmp(k2) == Ordering::Equal + }); + SortedMap { + data + } + } +} + +#[cfg(test)] +mod tests { + use 
super::SortedMap; + + #[test] + fn test_insert_and_iter() { + let mut map = SortedMap::new(); + let mut expected = Vec::new(); + + for x in 0 .. 100 { + assert_eq!(map.iter().cloned().collect::>(), expected); + + let x = 1000 - x * 2; + map.insert(x, x); + expected.insert(0, (x, x)); + } + } + + #[test] + fn test_get_and_index() { + let mut map = SortedMap::new(); + let mut expected = Vec::new(); + + for x in 0 .. 100 { + let x = 1000 - x; + if x & 1 == 0 { + map.insert(x, x); + } + expected.push(x); + } + + for mut x in expected { + if x & 1 == 0 { + assert_eq!(map.get(&x), Some(&x)); + assert_eq!(map.get_mut(&x), Some(&mut x)); + assert_eq!(map[&x], x); + assert_eq!(&mut map[&x], &mut x); + } else { + assert_eq!(map.get(&x), None); + assert_eq!(map.get_mut(&x), None); + } + } + } + + #[test] + fn test_range() { + let mut map = SortedMap::new(); + map.insert(1, 1); + map.insert(3, 3); + map.insert(6, 6); + map.insert(9, 9); + + let keys = |s: &[(_, _)]| { + s.into_iter().map(|e| e.0).collect::>() + }; + + for start in 0 .. 11 { + for end in 0 .. 11 { + if end < start { + continue + } + + let mut expected = vec![1, 3, 6, 9]; + expected.retain(|&x| x >= start && x < end); + + assert_eq!(keys(map.range(start..end)), expected, "range = {}..{}", start, end); + } + } + } + + + #[test] + fn test_offset_keys() { + let mut map = SortedMap::new(); + map.insert(1, 1); + map.insert(3, 3); + map.insert(6, 6); + + map.offset_keys(|k| *k += 1); + + let mut expected = SortedMap::new(); + expected.insert(2, 1); + expected.insert(4, 3); + expected.insert(7, 6); + + assert_eq!(map, expected); + } + + fn keys(s: SortedMap) -> Vec { + s.into_iter().map(|(k, _)| k).collect::>() + } + + fn elements(s: SortedMap) -> Vec<(u32, u32)> { + s.into_iter().collect::>() + } + + #[test] + fn test_remove_range() { + let mut map = SortedMap::new(); + map.insert(1, 1); + map.insert(3, 3); + map.insert(6, 6); + map.insert(9, 9); + + for start in 0 .. 11 { + for end in 0 .. 
11 { + if end < start { + continue + } + + let mut expected = vec![1, 3, 6, 9]; + expected.retain(|&x| x < start || x >= end); + + let mut map = map.clone(); + map.remove_range(start .. end); + + assert_eq!(keys(map), expected, "range = {}..{}", start, end); + } + } + } + + #[test] + fn test_remove() { + let mut map = SortedMap::new(); + let mut expected = Vec::new(); + + for x in 0..10 { + map.insert(x, x); + expected.push((x, x)); + } + + for x in 0 .. 10 { + let mut map = map.clone(); + let mut expected = expected.clone(); + + assert_eq!(map.remove(&x), Some(x)); + expected.remove(x as usize); + + assert_eq!(map.iter().cloned().collect::>(), expected); + } + } + + #[test] + fn test_insert_presorted_non_overlapping() { + let mut map = SortedMap::new(); + map.insert(2, 0); + map.insert(8, 0); + + map.insert_presorted(vec![(3, 0), (7, 0)]); + + let expected = vec![2, 3, 7, 8]; + assert_eq!(keys(map), expected); + } + + #[test] + fn test_insert_presorted_first_elem_equal() { + let mut map = SortedMap::new(); + map.insert(2, 2); + map.insert(8, 8); + + map.insert_presorted(vec![(2, 0), (7, 7)]); + + let expected = vec![(2, 0), (7, 7), (8, 8)]; + assert_eq!(elements(map), expected); + } + + #[test] + fn test_insert_presorted_last_elem_equal() { + let mut map = SortedMap::new(); + map.insert(2, 2); + map.insert(8, 8); + + map.insert_presorted(vec![(3, 3), (8, 0)]); + + let expected = vec![(2, 2), (3, 3), (8, 0)]; + assert_eq!(elements(map), expected); + } + + #[test] + fn test_insert_presorted_shuffle() { + let mut map = SortedMap::new(); + map.insert(2, 2); + map.insert(7, 7); + + map.insert_presorted(vec![(1, 1), (3, 3), (8, 8)]); + + let expected = vec![(1, 1), (2, 2), (3, 3), (7, 7), (8, 8)]; + assert_eq!(elements(map), expected); + } + + #[test] + fn test_insert_presorted_at_end() { + let mut map = SortedMap::new(); + map.insert(1, 1); + map.insert(2, 2); + + map.insert_presorted(vec![(3, 3), (8, 8)]); + + let expected = vec![(1, 1), (2, 2), (3, 3), (8, 8)]; + 
assert_eq!(elements(map), expected); + } +} diff --git a/src/librustc_data_structures/stable_hasher.rs b/src/librustc_data_structures/stable_hasher.rs index d82b712b5b14..9f1c7dac1194 100644 --- a/src/librustc_data_structures/stable_hasher.rs +++ b/src/librustc_data_structures/stable_hasher.rs @@ -165,29 +165,6 @@ impl Hasher for StableHasher { } } - -/// Something that can provide a stable hashing context. -pub trait StableHashingContextProvider { - type ContextType; - fn create_stable_hashing_context(&self) -> Self::ContextType; -} - -impl<'a, T: StableHashingContextProvider> StableHashingContextProvider for &'a T { - type ContextType = T::ContextType; - - fn create_stable_hashing_context(&self) -> Self::ContextType { - (**self).create_stable_hashing_context() - } -} - -impl<'a, T: StableHashingContextProvider> StableHashingContextProvider for &'a mut T { - type ContextType = T::ContextType; - - fn create_stable_hashing_context(&self) -> Self::ContextType { - (**self).create_stable_hashing_context() - } -} - /// Something that implements `HashStable` can be hashed in a way that is /// stable across multiple compilation sessions. pub trait HashStable { @@ -206,13 +183,16 @@ pub trait ToStableHashKey { // Implement HashStable by just calling `Hash::hash()`. This works fine for // self-contained values that don't depend on the hashing context `CTX`. +#[macro_export] macro_rules! 
impl_stable_hash_via_hash { ($t:ty) => ( - impl HashStable for $t { + impl $crate::stable_hasher::HashStable for $t { #[inline] - fn hash_stable(&self, - _: &mut CTX, - hasher: &mut StableHasher) { + fn hash_stable( + &self, + _: &mut CTX, + hasher: &mut $crate::stable_hasher::StableHasher + ) { ::std::hash::Hash::hash(self, hasher); } } @@ -259,6 +239,14 @@ impl HashStable for f64 { } } +impl HashStable for ::std::cmp::Ordering { + fn hash_stable(&self, + ctx: &mut CTX, + hasher: &mut StableHasher) { + (*self as i8).hash_stable(ctx, hasher); + } +} + impl, CTX> HashStable for (T1,) { fn hash_stable(&self, ctx: &mut CTX, diff --git a/src/librustc/hir/svh.rs b/src/librustc_data_structures/svh.rs similarity index 85% rename from src/librustc/hir/svh.rs rename to src/librustc_data_structures/svh.rs index a6cfcb710eda..94f132562b5e 100644 --- a/src/librustc/hir/svh.rs +++ b/src/librustc_data_structures/svh.rs @@ -19,6 +19,8 @@ use std::fmt; use std::hash::{Hash, Hasher}; use serialize::{Encodable, Decodable, Encoder, Decoder}; +use stable_hasher; + #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Svh { hash: u64, @@ -67,6 +69,16 @@ impl Decodable for Svh { } } -impl_stable_hash_for!(struct Svh { - hash -}); +impl stable_hasher::HashStable for Svh { + #[inline] + fn hash_stable( + &self, + ctx: &mut T, + hasher: &mut stable_hasher::StableHasher + ) { + let Svh { + hash + } = *self; + hash.hash_stable(ctx, hasher); + } +} diff --git a/src/librustc_data_structures/sync.rs b/src/librustc_data_structures/sync.rs index b1ab4eaa0692..d4c6b1c2ced8 100644 --- a/src/librustc_data_structures/sync.rs +++ b/src/librustc_data_structures/sync.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! This mdoule defines types which are thread safe if cfg!(parallel_queries) is true. +//! This module defines types which are thread safe if cfg!(parallel_queries) is true. //! //! 
`Lrc` is an alias of either Rc or Arc. //! @@ -26,20 +26,44 @@ //! //! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false. //! -//! `rustc_global!` gives us a way to declare variables which are intended to be -//! global for the current rustc session. This currently maps to thread-locals, -//! since rustdoc uses the rustc libraries in multiple threads. -//! These globals should eventually be moved into the `Session` structure. +//! `MTRef` is a immutable refernce if cfg!(parallel_queries), and an mutable reference otherwise. //! //! `rustc_erase_owner!` erases a OwningRef owner into Erased or Erased + Send + Sync //! depending on the value of cfg!(parallel_queries). +use std::collections::HashMap; +use std::hash::{Hash, BuildHasher}; use std::cmp::Ordering; +use std::marker::PhantomData; use std::fmt::Debug; use std::fmt::Formatter; use std::fmt; +use std::ops::{Deref, DerefMut}; use owning_ref::{Erased, OwningRef}; +pub fn serial_join(oper_a: A, oper_b: B) -> (RA, RB) + where A: FnOnce() -> RA, + B: FnOnce() -> RB +{ + (oper_a(), oper_b()) +} + +pub struct SerialScope; + +impl SerialScope { + pub fn spawn(&self, f: F) + where F: FnOnce(&SerialScope) + { + f(self) + } +} + +pub fn serial_scope(f: F) -> R + where F: FnOnce(&SerialScope) -> R +{ + f(&SerialScope) +} + cfg_if! { if #[cfg(not(parallel_queries))] { pub auto trait Send {} @@ -55,18 +79,57 @@ cfg_if! 
{ } } - pub type MetadataRef = OwningRef, [u8]>; + pub use self::serial_join as join; + pub use self::serial_scope as scope; + + pub use std::iter::Iterator as ParallelIterator; + + pub fn par_iter(t: T) -> T::IntoIter { + t.into_iter() + } + + pub type MetadataRef = OwningRef, [u8]>; pub use std::rc::Rc as Lrc; + pub use std::rc::Weak as Weak; pub use std::cell::Ref as ReadGuard; pub use std::cell::RefMut as WriteGuard; pub use std::cell::RefMut as LockGuard; - pub use std::cell::RefCell as RwLock; + use std::cell::RefCell as InnerRwLock; use std::cell::RefCell as InnerLock; use std::cell::Cell; + #[derive(Debug)] + pub struct WorkerLocal(OneThread); + + impl WorkerLocal { + /// Creates a new worker local where the `initial` closure computes the + /// value this worker local should take for each thread in the thread pool. + #[inline] + pub fn new T>(mut f: F) -> WorkerLocal { + WorkerLocal(OneThread::new(f(0))) + } + + /// Returns the worker-local value for each thread + #[inline] + pub fn into_inner(self) -> Vec { + vec![OneThread::into_inner(self.0)] + } + } + + impl Deref for WorkerLocal { + type Target = T; + + #[inline(always)] + fn deref(&self) -> &T { + &*self.0 + } + } + + pub type MTRef<'a, T> = &'a mut T; + #[derive(Debug)] pub struct MTLock(T); @@ -92,13 +155,8 @@ cfg_if! { } #[inline(always)] - pub fn borrow(&self) -> &T { - &self.0 - } - - #[inline(always)] - pub fn borrow_mut(&self) -> &T { - &self.0 + pub fn lock_mut(&mut self) -> &mut T { + &mut self.0 } } @@ -159,15 +217,58 @@ cfg_if! 
{ pub use parking_lot::MutexGuard as LockGuard; - use parking_lot; - pub use std::sync::Arc as Lrc; + pub use std::sync::Weak as Weak; - pub use self::Lock as MTLock; + pub type MTRef<'a, T> = &'a T; + + #[derive(Debug)] + pub struct MTLock(Lock); + + impl MTLock { + #[inline(always)] + pub fn new(inner: T) -> Self { + MTLock(Lock::new(inner)) + } + + #[inline(always)] + pub fn into_inner(self) -> T { + self.0.into_inner() + } + + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + self.0.get_mut() + } + + #[inline(always)] + pub fn lock(&self) -> LockGuard { + self.0.lock() + } + + #[inline(always)] + pub fn lock_mut(&self) -> LockGuard { + self.lock() + } + } use parking_lot::Mutex as InnerLock; + use parking_lot::RwLock as InnerRwLock; - pub type MetadataRef = OwningRef, [u8]>; + use std; + use std::thread; + pub use rayon::{join, scope}; + + pub use rayon_core::WorkerLocal; + + pub use rayon::iter::ParallelIterator; + use rayon::iter::IntoParallelIterator; + + pub fn par_iter(t: T) -> T::Iter { + t.into_par_iter() + } + + pub type MetadataRef = OwningRef, [u8]>; /// This makes locks panic if they are already held. /// It is only useful when you are running in a single thread @@ -177,7 +278,7 @@ cfg_if! { macro_rules! rustc_erase_owner { ($v:expr) => {{ let v = $v; - ::rustc_data_structures::sync::assert_send_sync_val(&v); + ::rustc_data_structures::sync::assert_send_val(&v); v.erase_send_sync_owner() }} } @@ -222,70 +323,150 @@ cfg_if! 
{ self.0.lock().take() } } - - #[derive(Debug)] - pub struct RwLock(parking_lot::RwLock); - - impl RwLock { - #[inline(always)] - pub fn new(inner: T) -> Self { - RwLock(parking_lot::RwLock::new(inner)) - } - - #[inline(always)] - pub fn borrow(&self) -> ReadGuard { - if ERROR_CHECKING { - self.0.try_read().expect("lock was already held") - } else { - self.0.read() - } - } - - #[inline(always)] - pub fn borrow_mut(&self) -> WriteGuard { - if ERROR_CHECKING { - self.0.try_write().expect("lock was already held") - } else { - self.0.write() - } - } - } - - // FIXME: Probably a bad idea - impl Clone for RwLock { - #[inline] - fn clone(&self) -> Self { - RwLock::new(self.borrow().clone()) - } - } } } pub fn assert_sync() {} +pub fn assert_send_val(_t: &T) {} pub fn assert_send_sync_val(_t: &T) {} -#[macro_export] -#[allow_internal_unstable] -macro_rules! rustc_global { - // empty (base case for the recursion) - () => {}; - - // process multiple declarations - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr; $($rest:tt)*) => ( - thread_local!($(#[$attr])* $vis static $name: $t = $init); - rustc_global!($($rest)*); - ); - - // handle a single declaration - ($(#[$attr:meta])* $vis:vis static $name:ident: $t:ty = $init:expr) => ( - thread_local!($(#[$attr])* $vis static $name: $t = $init); - ); +pub trait HashMapExt { + /// Same as HashMap::insert, but it may panic if there's already an + /// entry for `key` with a value not equal to `value` + fn insert_same(&mut self, key: K, value: V); } -#[macro_export] -macro_rules! rustc_access_global { - ($name:path, $callback:expr) => { - $name.with($callback) +impl HashMapExt for HashMap { + fn insert_same(&mut self, key: K, value: V) { + self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value); + } +} + +/// A type whose inner value can be written once and then will stay read-only +// This contains a PhantomData since this type conceptually owns a T outside the Mutex once +// initialized. 
This ensures that Once is Sync only if T is. If we did not have PhantomData +// we could send a &Once> to multiple threads and call `get` on it to get access +// to &Cell on those threads. +pub struct Once(Lock>, PhantomData); + +impl Once { + /// Creates an Once value which is uninitialized + #[inline(always)] + pub fn new() -> Self { + Once(Lock::new(None), PhantomData) + } + + /// Consumes the value and returns Some(T) if it was initialized + #[inline(always)] + pub fn into_inner(self) -> Option { + self.0.into_inner() + } + + /// Tries to initialize the inner value to `value`. + /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it + /// otherwise if the inner value was already set it returns `value` back to the caller + #[inline] + pub fn try_set(&self, value: T) -> Option { + let mut lock = self.0.lock(); + if lock.is_some() { + return Some(value); + } + *lock = Some(value); + None + } + + /// Tries to initialize the inner value to `value`. + /// Returns `None` if the inner value was uninitialized and `value` was consumed setting it + /// otherwise if the inner value was already set it asserts that `value` is equal to the inner + /// value and then returns `value` back to the caller + #[inline] + pub fn try_set_same(&self, value: T) -> Option where T: Eq { + let mut lock = self.0.lock(); + if let Some(ref inner) = *lock { + assert!(*inner == value); + return Some(value); + } + *lock = Some(value); + None + } + + /// Tries to initialize the inner value to `value` and panics if it was already initialized + #[inline] + pub fn set(&self, value: T) { + assert!(self.try_set(value).is_none()); + } + + /// Tries to initialize the inner value by calling the closure while ensuring that no-one else + /// can access the value in the mean time by holding a lock for the duration of the closure. 
+ /// If the value was already initialized the closure is not called and `false` is returned, + /// otherwise if the value from the closure initializes the inner value, `true` is returned + #[inline] + pub fn init_locking T>(&self, f: F) -> bool { + let mut lock = self.0.lock(); + if lock.is_some() { + return false; + } + *lock = Some(f()); + true + } + + /// Tries to initialize the inner value by calling the closure without ensuring that no-one + /// else can access it. This mean when this is called from multiple threads, multiple + /// closures may concurrently be computing a value which the inner value should take. + /// Only one of these closures are used to actually initialize the value. + /// If some other closure already set the value, + /// we return the value our closure computed wrapped in a `Option`. + /// If our closure set the value, `None` is returned. + /// If the value is already initialized, the closure is not called and `None` is returned. + #[inline] + pub fn init_nonlocking T>(&self, f: F) -> Option { + if self.0.lock().is_some() { + None + } else { + self.try_set(f()) + } + } + + /// Tries to initialize the inner value by calling the closure without ensuring that no-one + /// else can access it. This mean when this is called from multiple threads, multiple + /// closures may concurrently be computing a value which the inner value should take. + /// Only one of these closures are used to actually initialize the value. + /// If some other closure already set the value, we assert that it our closure computed + /// a value equal to the value aready set and then + /// we return the value our closure computed wrapped in a `Option`. + /// If our closure set the value, `None` is returned. + /// If the value is already initialized, the closure is not called and `None` is returned. 
+ #[inline] + pub fn init_nonlocking_same T>(&self, f: F) -> Option where T: Eq { + if self.0.lock().is_some() { + None + } else { + self.try_set_same(f()) + } + } + + /// Tries to get a reference to the inner value, returns `None` if it is not yet initialized + #[inline(always)] + pub fn try_get(&self) -> Option<&T> { + let lock = &*self.0.lock(); + if let Some(ref inner) = *lock { + // This is safe since we won't mutate the inner value + unsafe { Some(&*(inner as *const T)) } + } else { + None + } + } + + /// Gets reference to the inner value, panics if it is not yet initialized + #[inline(always)] + pub fn get(&self) -> &T { + self.try_get().expect("value was not set") + } + + /// Gets reference to the inner value, panics if it is not yet initialized + #[inline(always)] + pub fn borrow(&self) -> &T { + self.get() } } @@ -367,6 +548,18 @@ impl Lock { self.0.get_mut() } + #[cfg(parallel_queries)] + #[inline(always)] + pub fn try_lock(&self) -> Option> { + self.0.try_lock() + } + + #[cfg(not(parallel_queries))] + #[inline(always)] + pub fn try_lock(&self) -> Option> { + self.0.try_borrow_mut().ok() + } + #[cfg(parallel_queries)] #[inline(always)] pub fn lock(&self) -> LockGuard { @@ -383,6 +576,11 @@ impl Lock { self.0.borrow_mut() } + #[inline(always)] + pub fn with_lock R, R>(&self, f: F) -> R { + f(&mut *self.lock()) + } + #[inline(always)] pub fn borrow(&self) -> LockGuard { self.lock() @@ -394,6 +592,13 @@ impl Lock { } } +impl Default for Lock { + #[inline] + fn default() -> Self { + Lock::new(T::default()) + } +} + // FIXME: Probably a bad idea impl Clone for Lock { #[inline] @@ -401,3 +606,148 @@ impl Clone for Lock { Lock::new(self.borrow().clone()) } } + +#[derive(Debug)] +pub struct RwLock(InnerRwLock); + +impl RwLock { + #[inline(always)] + pub fn new(inner: T) -> Self { + RwLock(InnerRwLock::new(inner)) + } + + #[inline(always)] + pub fn into_inner(self) -> T { + self.0.into_inner() + } + + #[inline(always)] + pub fn get_mut(&mut self) -> &mut T { + 
self.0.get_mut() + } + + #[cfg(not(parallel_queries))] + #[inline(always)] + pub fn read(&self) -> ReadGuard { + self.0.borrow() + } + + #[cfg(parallel_queries)] + #[inline(always)] + pub fn read(&self) -> ReadGuard { + if ERROR_CHECKING { + self.0.try_read().expect("lock was already held") + } else { + self.0.read() + } + } + + #[inline(always)] + pub fn with_read_lock R, R>(&self, f: F) -> R { + f(&*self.read()) + } + + #[cfg(not(parallel_queries))] + #[inline(always)] + pub fn try_write(&self) -> Result, ()> { + self.0.try_borrow_mut().map_err(|_| ()) + } + + #[cfg(parallel_queries)] + #[inline(always)] + pub fn try_write(&self) -> Result, ()> { + self.0.try_write().ok_or(()) + } + + #[cfg(not(parallel_queries))] + #[inline(always)] + pub fn write(&self) -> WriteGuard { + self.0.borrow_mut() + } + + #[cfg(parallel_queries)] + #[inline(always)] + pub fn write(&self) -> WriteGuard { + if ERROR_CHECKING { + self.0.try_write().expect("lock was already held") + } else { + self.0.write() + } + } + + #[inline(always)] + pub fn with_write_lock R, R>(&self, f: F) -> R { + f(&mut *self.write()) + } + + #[inline(always)] + pub fn borrow(&self) -> ReadGuard { + self.read() + } + + #[inline(always)] + pub fn borrow_mut(&self) -> WriteGuard { + self.write() + } +} + +// FIXME: Probably a bad idea +impl Clone for RwLock { + #[inline] + fn clone(&self) -> Self { + RwLock::new(self.borrow().clone()) + } +} + +/// A type which only allows its inner value to be used in one thread. +/// It will panic if it is used on multiple threads. 
+#[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)] +pub struct OneThread { + #[cfg(parallel_queries)] + thread: thread::ThreadId, + inner: T, +} + +#[cfg(parallel_queries)] +unsafe impl std::marker::Sync for OneThread {} +#[cfg(parallel_queries)] +unsafe impl std::marker::Send for OneThread {} + +impl OneThread { + #[inline(always)] + fn check(&self) { + #[cfg(parallel_queries)] + assert_eq!(thread::current().id(), self.thread); + } + + #[inline(always)] + pub fn new(inner: T) -> Self { + OneThread { + #[cfg(parallel_queries)] + thread: thread::current().id(), + inner, + } + } + + #[inline(always)] + pub fn into_inner(value: Self) -> T { + value.check(); + value.inner + } +} + +impl Deref for OneThread { + type Target = T; + + fn deref(&self) -> &T { + self.check(); + &self.inner + } +} + +impl DerefMut for OneThread { + fn deref_mut(&mut self) -> &mut T { + self.check(); + &mut self.inner + } +} diff --git a/src/libsyntax/util/thin_vec.rs b/src/librustc_data_structures/thin_vec.rs similarity index 100% rename from src/libsyntax/util/thin_vec.rs rename to src/librustc_data_structures/thin_vec.rs diff --git a/src/librustc_data_structures/tiny_list.rs b/src/librustc_data_structures/tiny_list.rs new file mode 100644 index 000000000000..e1bfdf35b274 --- /dev/null +++ b/src/librustc_data_structures/tiny_list.rs @@ -0,0 +1,269 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +//! A singly-linked list. +//! +//! Using this data structure only makes sense under very specific +//! circumstances: +//! +//! - If you have a list that rarely stores more than one element, then this +//! data-structure can store the element without allocating and only uses as +//! 
much space as a `Option<(T, usize)>`. If T can double as the `Option` +//! discriminant, it will even only be as large as `T, usize`. +//! +//! If you expect to store more than 1 element in the common case, steer clear +//! and use a `Vec`, `Box<[T]>`, or a `SmallVec`. + +use std::mem; + +#[derive(Clone, Hash, Debug, PartialEq)] +pub struct TinyList { + head: Option> +} + +impl TinyList { + + #[inline] + pub fn new() -> TinyList { + TinyList { + head: None + } + } + + #[inline] + pub fn new_single(data: T) -> TinyList { + TinyList { + head: Some(Element { + data, + next: None, + }) + } + } + + #[inline] + pub fn insert(&mut self, data: T) { + self.head = Some(Element { + data, + next: mem::replace(&mut self.head, None).map(Box::new), + }); + } + + #[inline] + pub fn remove(&mut self, data: &T) -> bool { + self.head = match self.head { + Some(ref mut head) if head.data == *data => { + mem::replace(&mut head.next, None).map(|x| *x) + } + Some(ref mut head) => return head.remove_next(data), + None => return false, + }; + true + } + + #[inline] + pub fn contains(&self, data: &T) -> bool { + if let Some(ref head) = self.head { + head.contains(data) + } else { + false + } + } + + #[inline] + pub fn len(&self) -> usize { + if let Some(ref head) = self.head { + head.len() + } else { + 0 + } + } +} + +#[derive(Clone, Hash, Debug, PartialEq)] +struct Element { + data: T, + next: Option>>, +} + +impl Element { + + fn remove_next(&mut self, data: &T) -> bool { + let new_next = if let Some(ref mut next) = self.next { + if next.data != *data { + return next.remove_next(data) + } else { + mem::replace(&mut next.next, None) + } + } else { + return false + }; + + self.next = new_next; + + true + } + + fn len(&self) -> usize { + if let Some(ref next) = self.next { + 1 + next.len() + } else { + 1 + } + } + + fn contains(&self, data: &T) -> bool { + if self.data == *data { + return true + } + + if let Some(ref next) = self.next { + next.contains(data) + } else { + false + } + } +} + 
+#[cfg(test)] +mod test { + use super::*; + extern crate test; + use self::test::Bencher; + + #[test] + fn test_contains_and_insert() { + fn do_insert(i : u32) -> bool { + i % 2 == 0 + } + + let mut list = TinyList::new(); + + for i in 0 .. 10 { + for j in 0 .. i { + if do_insert(j) { + assert!(list.contains(&j)); + } else { + assert!(!list.contains(&j)); + } + } + + assert!(!list.contains(&i)); + + if do_insert(i) { + list.insert(i); + assert!(list.contains(&i)); + } + } + } + + #[test] + fn test_remove_first() { + let mut list = TinyList::new(); + list.insert(1); + list.insert(2); + list.insert(3); + list.insert(4); + assert_eq!(list.len(), 4); + + assert!(list.remove(&4)); + assert!(!list.contains(&4)); + + assert_eq!(list.len(), 3); + assert!(list.contains(&1)); + assert!(list.contains(&2)); + assert!(list.contains(&3)); + } + + #[test] + fn test_remove_last() { + let mut list = TinyList::new(); + list.insert(1); + list.insert(2); + list.insert(3); + list.insert(4); + assert_eq!(list.len(), 4); + + assert!(list.remove(&1)); + assert!(!list.contains(&1)); + + assert_eq!(list.len(), 3); + assert!(list.contains(&2)); + assert!(list.contains(&3)); + assert!(list.contains(&4)); + } + + #[test] + fn test_remove_middle() { + let mut list = TinyList::new(); + list.insert(1); + list.insert(2); + list.insert(3); + list.insert(4); + assert_eq!(list.len(), 4); + + assert!(list.remove(&2)); + assert!(!list.contains(&2)); + + assert_eq!(list.len(), 3); + assert!(list.contains(&1)); + assert!(list.contains(&3)); + assert!(list.contains(&4)); + } + + #[test] + fn test_remove_single() { + let mut list = TinyList::new(); + list.insert(1); + assert_eq!(list.len(), 1); + + assert!(list.remove(&1)); + assert!(!list.contains(&1)); + + assert_eq!(list.len(), 0); + } + + #[bench] + fn bench_insert_empty(b: &mut Bencher) { + b.iter(|| { + let mut list = TinyList::new(); + list.insert(1); + }) + } + + #[bench] + fn bench_insert_one(b: &mut Bencher) { + b.iter(|| { + let mut list = 
TinyList::new_single(0); + list.insert(1); + }) + } + + #[bench] + fn bench_remove_empty(b: &mut Bencher) { + b.iter(|| { + TinyList::new().remove(&1) + }); + } + + #[bench] + fn bench_remove_unknown(b: &mut Bencher) { + b.iter(|| { + TinyList::new_single(0).remove(&1) + }); + } + + #[bench] + fn bench_remove_one(b: &mut Bencher) { + b.iter(|| { + TinyList::new_single(1).remove(&1) + }); + } +} diff --git a/src/librustc_data_structures/transitive_relation.rs b/src/librustc_data_structures/transitive_relation.rs index ba7ab0c07c66..2acc29acb0ca 100644 --- a/src/librustc_data_structures/transitive_relation.rs +++ b/src/librustc_data_structures/transitive_relation.rs @@ -10,16 +10,16 @@ use bitvec::BitMatrix; use fx::FxHashMap; +use sync::Lock; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; use stable_hasher::{HashStable, StableHasher, StableHasherResult}; -use std::cell::RefCell; use std::fmt::Debug; use std::hash::Hash; use std::mem; #[derive(Clone, Debug)] -pub struct TransitiveRelation { +pub struct TransitiveRelation { // List of elements. This is used to map from a T to a usize. elements: Vec, @@ -32,14 +32,14 @@ pub struct TransitiveRelation { // This is a cached transitive closure derived from the edges. // Currently, we build it lazilly and just throw out any existing - // copy whenever a new edge is added. (The RefCell is to permit + // copy whenever a new edge is added. (The Lock is to permit // the lazy computation.) This is kind of silly, except for the // fact its size is tied to `self.elements.len()`, so I wanted to // wait before building it up to avoid reallocating as new edges // are added with new elements. Perhaps better would be to ask the // user for a batch of edges to minimize this effect, but I // already wrote the code this way. 
:P -nmatsakis - closure: RefCell>, + closure: Lock>>, } #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable, Debug)] @@ -51,13 +51,13 @@ struct Edge { target: Index, } -impl TransitiveRelation { +impl TransitiveRelation { pub fn new() -> TransitiveRelation { TransitiveRelation { elements: vec![], map: FxHashMap(), edges: vec![], - closure: RefCell::new(None), + closure: Lock::new(None), } } @@ -72,21 +72,20 @@ impl TransitiveRelation { fn add_index(&mut self, a: T) -> Index { let &mut TransitiveRelation { ref mut elements, - ref closure, + ref mut closure, ref mut map, .. } = self; - map.entry(a.clone()) + *map.entry(a.clone()) .or_insert_with(|| { elements.push(a); // if we changed the dimensions, clear the cache - *closure.borrow_mut() = None; + *closure.get_mut() = None; Index(elements.len() - 1) }) - .clone() } /// Applies the (partial) function to each edge and returns a new @@ -98,14 +97,7 @@ impl TransitiveRelation { { let mut result = TransitiveRelation::new(); for edge in &self.edges { - let r = f(&self.elements[edge.source.0]).and_then(|source| { - f(&self.elements[edge.target.0]).and_then(|target| { - Some(result.add(source, target)) - }) - }); - if r.is_none() { - return None; - } + result.add(f(&self.elements[edge.source.0])?, f(&self.elements[edge.target.0])?); } Some(result) } @@ -122,7 +114,7 @@ impl TransitiveRelation { self.edges.push(edge); // added an edge, clear the cache - *self.closure.borrow_mut() = None; + *self.closure.get_mut() = None; } } @@ -354,7 +346,7 @@ impl TransitiveRelation { } fn with_closure(&self, op: OP) -> R - where OP: FnOnce(&BitMatrix) -> R + where OP: FnOnce(&BitMatrix) -> R { let mut closure_cell = self.closure.borrow_mut(); let mut closure = closure_cell.take(); @@ -366,13 +358,13 @@ impl TransitiveRelation { result } - fn compute_closure(&self) -> BitMatrix { + fn compute_closure(&self) -> BitMatrix { let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len()); let 
mut changed = true; while changed { changed = false; - for edge in self.edges.iter() { + for edge in &self.edges { // add an edge from S -> T changed |= matrix.add(edge.source.0, edge.target.0); @@ -396,7 +388,7 @@ impl TransitiveRelation { /// - Input: `[a, b, x]`. Output: `[a, x]`. /// - Input: `[b, a, x]`. Output: `[b, a, x]`. /// - Input: `[a, x, b, y]`. Output: `[a, x]`. -fn pare_down(candidates: &mut Vec, closure: &BitMatrix) { +fn pare_down(candidates: &mut Vec, closure: &BitMatrix) { let mut i = 0; while i < candidates.len() { let candidate_i = candidates[i]; @@ -443,7 +435,7 @@ impl Decodable for TransitiveRelation .enumerate() .map(|(index, elem)| (elem.clone(), Index(index))) .collect(); - Ok(TransitiveRelation { elements, edges, map, closure: RefCell::new(None) }) + Ok(TransitiveRelation { elements, edges, map, closure: Lock::new(None) }) }) } } diff --git a/src/librustc_data_structures/unify/mod.rs b/src/librustc_data_structures/unify/mod.rs deleted file mode 100644 index 5411ae0257a4..000000000000 --- a/src/librustc_data_structures/unify/mod.rs +++ /dev/null @@ -1,363 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::marker; -use std::fmt::Debug; -use std::marker::PhantomData; -use snapshot_vec as sv; - -#[cfg(test)] -mod tests; - -/// This trait is implemented by any type that can serve as a type -/// variable. We call such variables *unification keys*. For example, -/// this trait is implemented by `IntVid`, which represents integral -/// variables. -/// -/// Each key type has an associated value type `V`. For example, for -/// `IntVid`, this is `Option`, representing some -/// (possibly not yet known) sort of integer. 
-/// -/// Clients are expected to provide implementations of this trait; you -/// can see some examples in the `test` module. -pub trait UnifyKey: Copy + Clone + Debug + PartialEq { - type Value: Clone + PartialEq + Debug; - - fn index(&self) -> u32; - - fn from_index(u: u32) -> Self; - - fn tag(k: Option) -> &'static str; -} - -/// This trait is implemented for unify values that can be -/// combined. This relation should be a monoid. -pub trait Combine { - fn combine(&self, other: &Self) -> Self; -} - -impl Combine for () { - fn combine(&self, _other: &()) {} -} - -/// Value of a unification key. We implement Tarjan's union-find -/// algorithm: when two keys are unified, one of them is converted -/// into a "redirect" pointing at the other. These redirects form a -/// DAG: the roots of the DAG (nodes that are not redirected) are each -/// associated with a value of type `V` and a rank. The rank is used -/// to keep the DAG relatively balanced, which helps keep the running -/// time of the algorithm under control. For more information, see -/// . -#[derive(PartialEq,Clone,Debug)] -pub struct VarValue { - parent: K, // if equal to self, this is a root - value: K::Value, // value assigned (only relevant to root) - rank: u32, // max depth (only relevant to root) -} - -/// Table of unification keys and their values. -pub struct UnificationTable { - /// Indicates the current value of each key. - values: sv::SnapshotVec>, -} - -/// At any time, users may snapshot a unification table. The changes -/// made during the snapshot may either be *committed* or *rolled back*. -pub struct Snapshot { - // Link snapshot to the key type `K` of the table. 
- marker: marker::PhantomData, - snapshot: sv::Snapshot, -} - -#[derive(Copy, Clone)] -struct Delegate(PhantomData); - -impl VarValue { - fn new_var(key: K, value: K::Value) -> VarValue { - VarValue::new(key, value, 0) - } - - fn new(parent: K, value: K::Value, rank: u32) -> VarValue { - VarValue { - parent: parent, // this is a root - value, - rank, - } - } - - fn redirect(self, to: K) -> VarValue { - VarValue { parent: to, ..self } - } - - fn root(self, rank: u32, value: K::Value) -> VarValue { - VarValue { - rank, - value, - ..self - } - } - - /// Returns the key of this node. Only valid if this is a root - /// node, which you yourself must ensure. - fn key(&self) -> K { - self.parent - } - - fn parent(&self, self_key: K) -> Option { - self.if_not_self(self.parent, self_key) - } - - fn if_not_self(&self, key: K, self_key: K) -> Option { - if key == self_key { None } else { Some(key) } - } -} - -/// We can't use V:LatticeValue, much as I would like to, -/// because frequently the pattern is that V=Option for some -/// other type parameter U, and we have no way to say -/// Option:LatticeValue. - -impl UnificationTable { - pub fn new() -> UnificationTable { - UnificationTable { values: sv::SnapshotVec::new() } - } - - /// Starts a new snapshot. Each snapshot must be either - /// rolled back or committed in a "LIFO" (stack) order. - pub fn snapshot(&mut self) -> Snapshot { - Snapshot { - marker: marker::PhantomData::, - snapshot: self.values.start_snapshot(), - } - } - - /// Reverses all changes since the last snapshot. Also - /// removes any keys that have been created since then. - pub fn rollback_to(&mut self, snapshot: Snapshot) { - debug!("{}: rollback_to()", UnifyKey::tag(None::)); - self.values.rollback_to(snapshot.snapshot); - } - - /// Commits all changes since the last snapshot. Of course, they - /// can still be undone if there is a snapshot further out. 
- pub fn commit(&mut self, snapshot: Snapshot) { - debug!("{}: commit()", UnifyKey::tag(None::)); - self.values.commit(snapshot.snapshot); - } - - pub fn new_key(&mut self, value: K::Value) -> K { - let len = self.values.len(); - let key: K = UnifyKey::from_index(len as u32); - self.values.push(VarValue::new_var(key, value)); - debug!("{}: created new key: {:?}", UnifyKey::tag(None::), key); - key - } - - /// Find the root node for `vid`. This uses the standard - /// union-find algorithm with path compression: - /// . - /// - /// NB. This is a building-block operation and you would probably - /// prefer to call `probe` below. - fn get(&mut self, vid: K) -> VarValue { - let index = vid.index() as usize; - let mut value: VarValue = self.values.get(index).clone(); - match value.parent(vid) { - Some(redirect) => { - let root: VarValue = self.get(redirect); - if root.key() != redirect { - // Path compression - value.parent = root.key(); - self.values.set(index, value); - } - root - } - None => value, - } - } - - fn is_root(&self, key: K) -> bool { - let index = key.index() as usize; - self.values.get(index).parent(key).is_none() - } - - /// Sets the value for `vid` to `new_value`. `vid` MUST be a root - /// node! This is an internal operation used to impl other things. - fn set(&mut self, key: K, new_value: VarValue) { - assert!(self.is_root(key)); - - debug!("Updating variable {:?} to {:?}", key, new_value); - - let index = key.index() as usize; - self.values.set(index, new_value); - } - - /// Either redirects `node_a` to `node_b` or vice versa, depending - /// on the relative rank. The value associated with the new root - /// will be `new_value`. - /// - /// NB: This is the "union" operation of "union-find". It is - /// really more of a building block. If the values associated with - /// your key are non-trivial, you would probably prefer to call - /// `unify_var_var` below. 
- fn unify(&mut self, root_a: VarValue, root_b: VarValue, new_value: K::Value) -> K { - debug!("unify(root_a(id={:?}, rank={:?}), root_b(id={:?}, rank={:?}))", - root_a.key(), - root_a.rank, - root_b.key(), - root_b.rank); - - if root_a.rank > root_b.rank { - // a has greater rank, so a should become b's parent, - // i.e., b should redirect to a. - self.redirect_root(root_a.rank, root_b, root_a, new_value) - } else if root_a.rank < root_b.rank { - // b has greater rank, so a should redirect to b. - self.redirect_root(root_b.rank, root_a, root_b, new_value) - } else { - // If equal, redirect one to the other and increment the - // other's rank. - self.redirect_root(root_a.rank + 1, root_a, root_b, new_value) - } - } - - fn redirect_root(&mut self, - new_rank: u32, - old_root: VarValue, - new_root: VarValue, - new_value: K::Value) - -> K { - let old_root_key = old_root.key(); - let new_root_key = new_root.key(); - self.set(old_root_key, old_root.redirect(new_root_key)); - self.set(new_root_key, new_root.root(new_rank, new_value)); - new_root_key - } -} - -impl sv::SnapshotVecDelegate for Delegate { - type Value = VarValue; - type Undo = (); - - fn reverse(_: &mut Vec>, _: ()) {} -} - -/// # Base union-find algorithm, where we are just making sets - -impl<'tcx, K: UnifyKey> UnificationTable - where K::Value: Combine -{ - pub fn union(&mut self, a_id: K, b_id: K) -> K { - let node_a = self.get(a_id); - let node_b = self.get(b_id); - let a_id = node_a.key(); - let b_id = node_b.key(); - if a_id != b_id { - let new_value = node_a.value.combine(&node_b.value); - self.unify(node_a, node_b, new_value) - } else { - a_id - } - } - - pub fn find(&mut self, id: K) -> K { - self.get(id).key() - } - - pub fn find_value(&mut self, id: K) -> K::Value { - self.get(id).value - } - - #[cfg(test)] - fn unioned(&mut self, a_id: K, b_id: K) -> bool { - self.find(a_id) == self.find(b_id) - } -} - -/// # Non-subtyping unification -/// -/// Code to handle keys which carry a value, like 
ints, -/// floats---anything that doesn't have a subtyping relationship we -/// need to worry about. - -impl<'tcx, K, V> UnificationTable - where K: UnifyKey>, - V: Clone + PartialEq + Debug -{ - pub fn unify_var_var(&mut self, a_id: K, b_id: K) -> Result { - let node_a = self.get(a_id); - let node_b = self.get(b_id); - let a_id = node_a.key(); - let b_id = node_b.key(); - - if a_id == b_id { - return Ok(a_id); - } - - let combined = { - match (&node_a.value, &node_b.value) { - (&None, &None) => None, - (&Some(ref v), &None) | - (&None, &Some(ref v)) => Some(v.clone()), - (&Some(ref v1), &Some(ref v2)) => { - if *v1 != *v2 { - return Err((v1.clone(), v2.clone())); - } - Some(v1.clone()) - } - } - }; - - Ok(self.unify(node_a, node_b, combined)) - } - - /// Sets the value of the key `a_id` to `b`. Because simple keys do not have any subtyping - /// relationships, if `a_id` already has a value, it must be the same as `b`. - pub fn unify_var_value(&mut self, a_id: K, b: V) -> Result<(), (V, V)> { - let mut node_a = self.get(a_id); - - match node_a.value { - None => { - node_a.value = Some(b); - self.set(node_a.key(), node_a); - Ok(()) - } - - Some(ref a_t) => { - if *a_t == b { - Ok(()) - } else { - Err((a_t.clone(), b)) - } - } - } - } - - pub fn has_value(&mut self, id: K) -> bool { - self.get(id).value.is_some() - } - - pub fn probe(&mut self, a_id: K) -> Option { - self.get(a_id).value - } - - pub fn unsolved_variables(&mut self) -> Vec { - self.values - .iter() - .filter_map(|vv| { - if vv.value.is_some() { - None - } else { - Some(vv.key()) - } - }) - .collect() - } -} diff --git a/src/librustc_data_structures/unify/tests.rs b/src/librustc_data_structures/unify/tests.rs deleted file mode 100644 index f29a7132e831..000000000000 --- a/src/librustc_data_structures/unify/tests.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(non_snake_case)] - -extern crate test; -use self::test::Bencher; -use unify::{UnifyKey, UnificationTable}; - -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -struct UnitKey(u32); - -impl UnifyKey for UnitKey { - type Value = (); - fn index(&self) -> u32 { - self.0 - } - fn from_index(u: u32) -> UnitKey { - UnitKey(u) - } - fn tag(_: Option) -> &'static str { - "UnitKey" - } -} - -#[test] -fn basic() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(()); - let k2 = ut.new_key(()); - assert_eq!(ut.unioned(k1, k2), false); - ut.union(k1, k2); - assert_eq!(ut.unioned(k1, k2), true); -} - -#[test] -fn big_array() { - let mut ut: UnificationTable = UnificationTable::new(); - let mut keys = Vec::new(); - const MAX: usize = 1 << 15; - - for _ in 0..MAX { - keys.push(ut.new_key(())); - } - - for i in 1..MAX { - let l = keys[i - 1]; - let r = keys[i]; - ut.union(l, r); - } - - for i in 0..MAX { - assert!(ut.unioned(keys[0], keys[i])); - } -} - -#[bench] -fn big_array_bench(b: &mut Bencher) { - let mut ut: UnificationTable = UnificationTable::new(); - let mut keys = Vec::new(); - const MAX: usize = 1 << 15; - - for _ in 0..MAX { - keys.push(ut.new_key(())); - } - - - b.iter(|| { - for i in 1..MAX { - let l = keys[i - 1]; - let r = keys[i]; - ut.union(l, r); - } - - for i in 0..MAX { - assert!(ut.unioned(keys[0], keys[i])); - } - }) -} - -#[test] -fn even_odd() { - let mut ut: UnificationTable = UnificationTable::new(); - let mut keys = Vec::new(); - const MAX: usize = 1 << 10; - - for i in 0..MAX { - let key = ut.new_key(()); - keys.push(key); - - if i >= 2 { - ut.union(key, keys[i - 2]); - } - } - - for i in 1..MAX { - 
assert!(!ut.unioned(keys[i - 1], keys[i])); - } - - for i in 2..MAX { - assert!(ut.unioned(keys[i - 2], keys[i])); - } -} - -#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] -struct IntKey(u32); - -impl UnifyKey for IntKey { - type Value = Option; - fn index(&self) -> u32 { - self.0 - } - fn from_index(u: u32) -> IntKey { - IntKey(u) - } - fn tag(_: Option) -> &'static str { - "IntKey" - } -} - -/// Test unifying a key whose value is `Some(_)` with a key whose value is `None`. -/// Afterwards both should be `Some(_)`. -#[test] -fn unify_key_Some_key_None() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(Some(22)); - let k2 = ut.new_key(None); - assert!(ut.unify_var_var(k1, k2).is_ok()); - assert_eq!(ut.probe(k2), Some(22)); - assert_eq!(ut.probe(k1), Some(22)); -} - -/// Test unifying a key whose value is `None` with a key whose value is `Some(_)`. -/// Afterwards both should be `Some(_)`. -#[test] -fn unify_key_None_key_Some() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(Some(22)); - let k2 = ut.new_key(None); - assert!(ut.unify_var_var(k2, k1).is_ok()); - assert_eq!(ut.probe(k2), Some(22)); - assert_eq!(ut.probe(k1), Some(22)); -} - -/// Test unifying a key whose value is `Some(x)` with a key whose value is `Some(y)`. -/// This should yield an error. -#[test] -fn unify_key_Some_x_key_Some_y() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(Some(22)); - let k2 = ut.new_key(Some(23)); - assert_eq!(ut.unify_var_var(k1, k2), Err((22, 23))); - assert_eq!(ut.unify_var_var(k2, k1), Err((23, 22))); - assert_eq!(ut.probe(k1), Some(22)); - assert_eq!(ut.probe(k2), Some(23)); -} - -/// Test unifying a key whose value is `Some(x)` with a key whose value is `Some(x)`. -/// This should be ok. 
-#[test] -fn unify_key_Some_x_key_Some_x() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(Some(22)); - let k2 = ut.new_key(Some(22)); - assert!(ut.unify_var_var(k1, k2).is_ok()); - assert_eq!(ut.probe(k1), Some(22)); - assert_eq!(ut.probe(k2), Some(22)); -} - -/// Test unifying a key whose value is `None` with a value is `x`. -/// Afterwards key should be `x`. -#[test] -fn unify_key_None_val() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(None); - assert!(ut.unify_var_value(k1, 22).is_ok()); - assert_eq!(ut.probe(k1), Some(22)); -} - -/// Test unifying a key whose value is `Some(x)` with the value `y`. -/// This should yield an error. -#[test] -fn unify_key_Some_x_val_y() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(Some(22)); - assert_eq!(ut.unify_var_value(k1, 23), Err((22, 23))); - assert_eq!(ut.probe(k1), Some(22)); -} - -/// Test unifying a key whose value is `Some(x)` with the value `x`. -/// This should be ok. -#[test] -fn unify_key_Some_x_val_x() { - let mut ut: UnificationTable = UnificationTable::new(); - let k1 = ut.new_key(Some(22)); - assert!(ut.unify_var_value(k1, 22).is_ok()); - assert_eq!(ut.probe(k1), Some(22)); -} diff --git a/src/librustc_data_structures/veccell/mod.rs b/src/librustc_data_structures/veccell/mod.rs deleted file mode 100644 index 054eee8829a4..000000000000 --- a/src/librustc_data_structures/veccell/mod.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::cell::UnsafeCell; -use std::mem; - -pub struct VecCell { - data: UnsafeCell>, -} - -impl VecCell { - pub fn with_capacity(capacity: usize) -> VecCell { - VecCell { data: UnsafeCell::new(Vec::with_capacity(capacity)) } - } - - #[inline] - pub fn push(&self, data: T) -> usize { - // The logic here, and in `swap` below, is that the `push` - // method on the vector will not recursively access this - // `VecCell`. Therefore, we can temporarily obtain mutable - // access, secure in the knowledge that even if aliases exist - // -- indeed, even if aliases are reachable from within the - // vector -- they will not be used for the duration of this - // particular fn call. (Note that we also are relying on the - // fact that `VecCell` is not `Sync`.) - unsafe { - let v = self.data.get(); - (*v).push(data); - (*v).len() - } - } - - pub fn swap(&self, mut data: Vec) -> Vec { - unsafe { - let v = self.data.get(); - mem::swap(&mut *v, &mut data); - } - data - } -} diff --git a/src/librustc_data_structures/work_queue.rs b/src/librustc_data_structures/work_queue.rs new file mode 100644 index 000000000000..b8e8b249bb50 --- /dev/null +++ b/src/librustc_data_structures/work_queue.rs @@ -0,0 +1,72 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use indexed_set::IdxSetBuf; +use indexed_vec::Idx; +use std::collections::VecDeque; + +/// A work queue is a handy data structure for tracking work left to +/// do. (For example, basic blocks left to process.) It is basically a +/// de-duplicating queue; so attempting to insert X if X is already +/// enqueued has no effect. 
This implementation assumes that the +/// elements are dense indices, so it can allocate the queue to size +/// and also use a bit set to track occupancy. +pub struct WorkQueue { + deque: VecDeque, + set: IdxSetBuf, +} + +impl WorkQueue { + /// Create a new work queue with all the elements from (0..len). + #[inline] + pub fn with_all(len: usize) -> Self { + WorkQueue { + deque: (0..len).map(T::new).collect(), + set: IdxSetBuf::new_filled(len), + } + } + + /// Create a new work queue that starts empty, where elements range from (0..len). + #[inline] + pub fn with_none(len: usize) -> Self { + WorkQueue { + deque: VecDeque::with_capacity(len), + set: IdxSetBuf::new_empty(len), + } + } + + /// Attempt to enqueue `element` in the work queue. Returns false if it was already present. + #[inline] + pub fn insert(&mut self, element: T) -> bool { + if self.set.add(&element) { + self.deque.push_back(element); + true + } else { + false + } + } + + /// Attempt to enqueue `element` in the work queue. Returns false if it was already present. + #[inline] + pub fn pop(&mut self) -> Option { + if let Some(element) = self.deque.pop_front() { + self.set.remove(&element); + Some(element) + } else { + None + } + } + + /// True if nothing is enqueued. 
+ #[inline] + pub fn is_empty(&self) -> bool { + self.deque.is_empty() + } +} diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 4f7bbd792757..7a020f331e50 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -11,13 +11,14 @@ crate-type = ["dylib"] [dependencies] arena = { path = "../libarena" } graphviz = { path = "../libgraphviz" } -log = { version = "0.4", features = ["release_max_level_info"] } -env_logger = { version = "0.4", default-features = false } +log = "0.4" +env_logger = { version = "0.5", default-features = false } +rustc-rayon = "0.1.1" +scoped-tls = { version = "0.1.1", features = ["nightly"] } rustc = { path = "../librustc" } rustc_allocator = { path = "../librustc_allocator" } -rustc_back = { path = "../librustc_back" } +rustc_target = { path = "../librustc_target" } rustc_borrowck = { path = "../librustc_borrowck" } -rustc_const_eval = { path = "../librustc_const_eval" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_errors = { path = "../librustc_errors" } rustc_incremental = { path = "../librustc_incremental" } @@ -29,15 +30,10 @@ rustc_plugin = { path = "../librustc_plugin" } rustc_privacy = { path = "../librustc_privacy" } rustc_resolve = { path = "../librustc_resolve" } rustc_save_analysis = { path = "../librustc_save_analysis" } -rustc_trans = { path = "../librustc_trans", optional = true } -rustc_trans_utils = { path = "../librustc_trans_utils" } +rustc_traits = { path = "../librustc_traits" } +rustc_codegen_utils = { path = "../librustc_codegen_utils" } rustc_typeck = { path = "../librustc_typeck" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_ext = { path = "../libsyntax_ext" } syntax_pos = { path = "../libsyntax_pos" } - -ar = "0.3.0" - -[features] -llvm = ["rustc_trans"] diff --git a/src/librustc_driver/README.md b/src/librustc_driver/README.md index 839d1831f954..fef249a9e4eb 100644 --- 
a/src/librustc_driver/README.md +++ b/src/librustc_driver/README.md @@ -1,7 +1,3 @@ -NB: This crate is part of the Rust compiler. For an overview of the -compiler as a whole, see -[the README.md file found in `librustc`](../librustc/README.md). - The `driver` crate is effectively the "main" function for the rust compiler. It orchestrates the compilation process and "knits together" the code from the other crates within rustc. This crate itself does @@ -9,4 +5,6 @@ not contain any of the "main logic" of the compiler (though it does have some code related to pretty printing or other minor compiler options). +For more information about how the driver works, see the [rustc guide]. +[rustc guide]: https://rust-lang-nursery.github.io/rustc-guide/rustc-driver.html diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index f7e35ba0081e..e5042a4a0207 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -11,56 +11,53 @@ use rustc::dep_graph::DepGraph; use rustc::hir::{self, map as hir_map}; use rustc::hir::lowering::lower_crate; -use rustc::ich::Fingerprint; +use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::stable_hasher::StableHasher; use rustc_mir as mir; -use rustc::session::{Session, CompileResult, CrateDisambiguator}; +use rustc::session::{CompileResult, CrateDisambiguator, Session}; use rustc::session::CompileIncomplete; use rustc::session::config::{self, Input, OutputFilenames, OutputType}; use rustc::session::search_paths::PathKind; use rustc::lint; -use rustc::middle::{self, stability, reachable, resolve_lifetime}; -use rustc::middle::cstore::CrateStore; +use rustc::middle::{self, reachable, resolve_lifetime, stability}; use rustc::middle::privacy::AccessLevels; -use rustc::ty::{self, TyCtxt, Resolutions, AllArenas}; +use rustc::ty::{self, AllArenas, Resolutions, TyCtxt}; use rustc::traits; -use rustc::util::common::{ErrorReported, time}; +use rustc::util::common::{install_panic_hook, 
time, ErrorReported}; +use rustc::util::profiling::ProfileCategory; use rustc_allocator as allocator; use rustc_borrowck as borrowck; use rustc_incremental; -use rustc_resolve::{MakeGlobMap, Resolver}; +use rustc_resolve::{MakeGlobMap, Resolver, ResolverArenas}; use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::{self, CStore}; -use rustc_trans as trans; -use rustc_trans_utils::trans_crate::TransCrate; +use rustc_traits; +use rustc_codegen_utils::codegen_backend::CodegenBackend; use rustc_typeck as typeck; use rustc_privacy; use rustc_plugin::registry::Registry; use rustc_plugin as plugin; -use rustc_passes::{self, ast_validation, loops, consts, static_recursion, hir_stats}; -use rustc_const_eval::{self, check_match}; +use rustc_passes::{self, ast_validation, hir_stats, loops, rvalue_promotion}; use super::Compilation; -use ::DefaultTransCrate; use serialize::json; use std::any::Any; use std::env; -use std::ffi::{OsString, OsStr}; +use std::ffi::{OsStr, OsString}; use std::fs; use std::io::{self, Write}; use std::iter; use std::path::{Path, PathBuf}; -use std::rc::Rc; +use rustc_data_structures::sync::{self, Lrc, Lock}; use std::sync::mpsc; -use syntax::{ast, diagnostics, visit}; -use syntax::attr; +use syntax::{self, ast, attr, diagnostics, visit}; +use syntax::early_buffered_lints::BufferedEarlyLint; use syntax::ext::base::ExtCtxt; use syntax::fold::Folder; use syntax::parse::{self, PResult}; use syntax::util::node_count::NodeCounter; -use syntax_pos::FileName; -use syntax; +use syntax_pos::{FileName, hygiene}; use syntax_ext; use derive_registrar; @@ -68,16 +65,70 @@ use pretty::ReplaceBodyWithLoop; use profile; -pub fn compile_input(sess: &Session, - cstore: &CStore, - input_path: &Option, - input: &Input, - outdir: &Option, - output: &Option, - addl_plugins: Option>, - control: &CompileController) -> CompileResult { - use rustc::session::config::CrateType; +#[cfg(not(parallel_queries))] +pub fn spawn_thread_pool R + sync::Send, R: 
sync::Send>( + opts: config::Options, + f: F +) -> R { + ty::tls::GCX_PTR.set(&Lock::new(0), || { + f(opts) + }) +} +#[cfg(parallel_queries)] +pub fn spawn_thread_pool R + sync::Send, R: sync::Send>( + opts: config::Options, + f: F +) -> R { + use syntax; + use syntax_pos; + use rayon::{ThreadPoolBuilder, ThreadPool}; + + let gcx_ptr = &Lock::new(0); + + let config = ThreadPoolBuilder::new() + .num_threads(Session::query_threads_from_opts(&opts)) + .deadlock_handler(|| unsafe { ty::query::handle_deadlock() }) + .stack_size(16 * 1024 * 1024); + + let with_pool = move |pool: &ThreadPool| { + pool.install(move || f(opts)) + }; + + syntax::GLOBALS.with(|syntax_globals| { + syntax_pos::GLOBALS.with(|syntax_pos_globals| { + // The main handler run for each Rayon worker thread and sets up + // the thread local rustc uses. syntax_globals and syntax_pos_globals are + // captured and set on the new threads. ty::tls::with_thread_locals sets up + // thread local callbacks from libsyntax + let main_handler = move |worker: &mut dyn FnMut()| { + syntax::GLOBALS.set(syntax_globals, || { + syntax_pos::GLOBALS.set(syntax_pos_globals, || { + ty::tls::with_thread_locals(|| { + ty::tls::GCX_PTR.set(gcx_ptr, || { + worker() + }) + }) + }) + }) + }; + + ThreadPool::scoped_pool(config, main_handler, with_pool).unwrap() + }) + }) +} + +pub fn compile_input( + codegen_backend: Box, + sess: &Session, + cstore: &CStore, + input_path: &Option, + input: &Input, + outdir: &Option, + output: &Option, + addl_plugins: Option>, + control: &CompileController, +) -> CompileResult { macro_rules! 
controller_entry_point { ($point: ident, $tsess: expr, $make_state: expr, $phase_result: expr) => {{ let state = &mut $make_state; @@ -94,30 +145,14 @@ pub fn compile_input(sess: &Session, }} } - if cfg!(not(feature="llvm")) { - for cty in sess.opts.crate_types.iter() { - match *cty { - CrateType::CrateTypeRlib | CrateType::CrateTypeDylib | - CrateType::CrateTypeExecutable => {}, - _ => { - sess.parse_sess.span_diagnostic.warn( - &format!("LLVM unsupported, so output type {} is not supported", cty) - ); - }, - } - } - - sess.abort_if_errors(); - } - if sess.profile_queries() { - profile::begin(); + profile::begin(sess); } // We need nested scopes here, because the intermediate results can keep // large chunks of memory alive and we want to free them as soon as // possible to keep the peak memory usage low - let (outputs, trans, dep_graph) = { + let (outputs, ongoing_codegen, dep_graph) = { let krate = match phase_1_parse_input(control, sess, input) { Ok(krate) => krate, Err(mut parse_error) => { @@ -127,38 +162,25 @@ pub fn compile_input(sess: &Session, }; let (krate, registry) = { - let mut compile_state = CompileState::state_after_parse(input, - sess, - outdir, - output, - krate, - &cstore); - controller_entry_point!(after_parse, - sess, - compile_state, - Ok(())); + let mut compile_state = + CompileState::state_after_parse(input, sess, outdir, output, krate, &cstore); + controller_entry_point!(after_parse, sess, compile_state, Ok(())); (compile_state.krate.unwrap(), compile_state.registry) }; let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess); - - // Ensure the source file isn't accidentally overwritten during compilation. 
- match *input_path { - Some(ref input_path) => { - if outputs.contains_path(input_path) && sess.opts.will_create_output_file() { - sess.err(&format!( - "the input file \"{}\" would be overwritten by the generated executable", - input_path.display())); - return Err(CompileIncomplete::Stopped); - } - }, - None => {} - } - let crate_name = - ::rustc_trans_utils::link::find_crate_name(Some(sess), &krate.attrs, input); - let ExpansionResult { expanded_crate, defs, analysis, resolutions, mut hir_forest } = { + ::rustc_codegen_utils::link::find_crate_name(Some(sess), &krate.attrs, input); + install_panic_hook(); + + let ExpansionResult { + expanded_crate, + defs, + analysis, + resolutions, + mut hir_forest, + } = { phase_2_configure_and_expand( sess, &cstore, @@ -169,45 +191,88 @@ pub fn compile_input(sess: &Session, control.make_glob_map, |expanded_crate| { let mut state = CompileState::state_after_expand( - input, sess, outdir, output, &cstore, expanded_crate, &crate_name, + input, + sess, + outdir, + output, + &cstore, + expanded_crate, + &crate_name, ); controller_entry_point!(after_expand, sess, state, Ok(())); Ok(()) - } + }, )? }; - write_out_deps(sess, &outputs, &crate_name); - if sess.opts.output_types.contains_key(&OutputType::DepInfo) && - sess.opts.output_types.keys().count() == 1 { - return Ok(()) + let output_paths = generated_output_paths(sess, &outputs, output.is_some(), &crate_name); + + // Ensure the source file isn't accidentally overwritten during compilation. 
+ if let Some(ref input_path) = *input_path { + if sess.opts.will_create_output_file() { + if output_contains_path(&output_paths, input_path) { + sess.err(&format!( + "the input file \"{}\" would be overwritten by the generated \ + executable", + input_path.display() + )); + return Err(CompileIncomplete::Stopped); + } + if let Some(dir_path) = output_conflicts_with_dir(&output_paths) { + sess.err(&format!( + "the generated executable for the input file \"{}\" conflicts with the \ + existing directory \"{}\"", + input_path.display(), + dir_path.display() + )); + return Err(CompileIncomplete::Stopped); + } + } + } + + write_out_deps(sess, &outputs, &output_paths); + if sess.opts.output_types.contains_key(&OutputType::DepInfo) + && sess.opts.output_types.len() == 1 + { + return Ok(()); + } + + if let &Some(ref dir) = outdir { + if fs::create_dir_all(dir).is_err() { + sess.err("failed to find or create the directory specified by --out-dir"); + return Err(CompileIncomplete::Stopped); + } } let arenas = AllArenas::new(); // Construct the HIR map - let hir_map = time(sess.time_passes(), - "indexing hir", - || hir_map::map_crate(sess, cstore, &mut hir_forest, &defs)); + let hir_map = time(sess, "indexing hir", || { + hir_map::map_crate(sess, cstore, &mut hir_forest, &defs) + }); { hir_map.dep_graph.assert_ignored(); - controller_entry_point!(after_hir_lowering, - sess, - CompileState::state_after_hir_lowering(input, - sess, - outdir, - output, - &arenas, - &cstore, - &hir_map, - &analysis, - &resolutions, - &expanded_crate, - &hir_map.krate(), - &outputs, - &crate_name), - Ok(())); + controller_entry_point!( + after_hir_lowering, + sess, + CompileState::state_after_hir_lowering( + input, + sess, + outdir, + output, + &arenas, + &cstore, + &hir_map, + &analysis, + &resolutions, + &expanded_crate, + &hir_map.krate(), + &outputs, + &crate_name + ), + Ok(()) + ); } let opt_crate = if control.keep_ast { @@ -217,89 +282,84 @@ pub fn compile_input(sess: &Session, None }; - 
phase_3_run_analysis_passes(control, - sess, - cstore, - hir_map, - analysis, - resolutions, - &arenas, - &crate_name, - &outputs, - |tcx, analysis, rx, result| { - { - // Eventually, we will want to track plugins. - tcx.dep_graph.with_ignore(|| { - let mut state = CompileState::state_after_analysis(input, - sess, - outdir, - output, - opt_crate, - tcx.hir.krate(), - &analysis, - tcx, - &crate_name); - (control.after_analysis.callback)(&mut state); - }); + phase_3_run_analysis_passes( + &*codegen_backend, + control, + sess, + cstore, + hir_map, + analysis, + resolutions, + &arenas, + &crate_name, + &outputs, + |tcx, analysis, rx, result| { + { + // Eventually, we will want to track plugins. + tcx.dep_graph.with_ignore(|| { + let mut state = CompileState::state_after_analysis( + input, + sess, + outdir, + output, + opt_crate, + tcx.hir.krate(), + &analysis, + tcx, + &crate_name, + ); + (control.after_analysis.callback)(&mut state); + }); - if control.after_analysis.stop == Compilation::Stop { - return result.and_then(|_| Err(CompileIncomplete::Stopped)); + if control.after_analysis.stop == Compilation::Stop { + return result.and_then(|_| Err(CompileIncomplete::Stopped)); + } } - } - result?; + result?; - if log_enabled!(::log::Level::Info) { - println!("Pre-trans"); - tcx.print_debug_stats(); - } - - let trans = phase_4_translate_to_llvm::(tcx, rx); - - if log_enabled!(::log::Level::Info) { - println!("Post-trans"); - tcx.print_debug_stats(); - } - - if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) { - if let Err(e) = mir::transform::dump_mir::emit_mir(tcx, &outputs) { - sess.err(&format!("could not emit MIR: {}", e)); - sess.abort_if_errors(); + if log_enabled!(::log::Level::Info) { + println!("Pre-codegen"); + tcx.print_debug_stats(); } - } - Ok((outputs.clone(), trans, tcx.dep_graph.clone())) - })?? 
+ let ongoing_codegen = phase_4_codegen(&*codegen_backend, tcx, rx); + + if log_enabled!(::log::Level::Info) { + println!("Post-codegen"); + tcx.print_debug_stats(); + } + + if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) { + if let Err(e) = mir::transform::dump_mir::emit_mir(tcx, &outputs) { + sess.err(&format!("could not emit MIR: {}", e)); + sess.abort_if_errors(); + } + } + + Ok((outputs.clone(), ongoing_codegen, tcx.dep_graph.clone())) + }, + )?? }; if sess.opts.debugging_opts.print_type_sizes { sess.code_stats.borrow().print_type_sizes(); } - let (phase5_result, trans) = - phase_5_run_llvm_passes::(sess, &dep_graph, trans); - - controller_entry_point!(after_llvm, - sess, - CompileState::state_after_llvm(input, sess, outdir, output, &trans), - phase5_result); - phase5_result?; - - // Run the linker on any artifacts that resulted from the LLVM run. - // This should produce either a finished executable or library. - time(sess.time_passes(), "linking", || { - DefaultTransCrate::link_binary(sess, &trans, &outputs) - }); - - // Now that we won't touch anything in the incremental compilation directory - // any more, we can finalize it (which involves renaming it) - #[cfg(feature="llvm")] - rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash); + codegen_backend.join_codegen_and_link(ongoing_codegen, sess, &dep_graph, &outputs)?; if sess.opts.debugging_opts.perf_stats { sess.print_perf_stats(); } + if sess.opts.debugging_opts.self_profile { + sess.print_profiler_results(); + + if sess.opts.debugging_opts.profile_json { + sess.save_json_results(); + } + } + controller_entry_point!( compilation_done, sess, @@ -310,10 +370,6 @@ pub fn compile_input(sess: &Session, Ok(()) } -fn keep_hygiene_data(sess: &Session) -> bool { - sess.opts.debugging_opts.keep_hygiene_data -} - pub fn source_name(input: &Input) -> FileName { match *input { Input::File(ref ifile) => ifile.clone().into(), @@ -340,7 +396,6 @@ pub struct CompileController<'a> { pub 
after_expand: PhaseController<'a>, pub after_hir_lowering: PhaseController<'a>, pub after_analysis: PhaseController<'a>, - pub after_llvm: PhaseController<'a>, pub compilation_done: PhaseController<'a>, // FIXME we probably want to group the below options together and offer a @@ -353,10 +408,10 @@ pub struct CompileController<'a> { /// Allows overriding default rustc query providers, /// after `default_provide` has installed them. - pub provide: Box, + pub provide: Box, /// Same as `provide`, but only for non-local crates, /// applied after `default_provide_extern`. - pub provide_extern: Box, + pub provide_extern: Box, } impl<'a> CompileController<'a> { @@ -366,7 +421,6 @@ impl<'a> CompileController<'a> { after_expand: PhaseController::basic(), after_hir_lowering: PhaseController::basic(), after_analysis: PhaseController::basic(), - after_llvm: PhaseController::basic(), compilation_done: PhaseController::basic(), make_glob_map: MakeGlobMap::No, keep_ast: false, @@ -377,12 +431,81 @@ impl<'a> CompileController<'a> { } } +/// This implementation makes it easier to create a custom driver when you only want to hook +/// into callbacks from `CompileController`. 
+/// +/// # Example +/// +/// ```no_run +/// # extern crate rustc_driver; +/// # use rustc_driver::driver::CompileController; +/// let mut controller = CompileController::basic(); +/// controller.after_analysis.callback = Box::new(move |_state| {}); +/// rustc_driver::run_compiler(&[], Box::new(controller), None, None); +/// ``` +impl<'a> ::CompilerCalls<'a> for CompileController<'a> { + fn early_callback( + &mut self, + matches: &::getopts::Matches, + sopts: &config::Options, + cfg: &ast::CrateConfig, + descriptions: &::errors::registry::Registry, + output: ::ErrorOutputType, + ) -> Compilation { + ::RustcDefaultCalls.early_callback( + matches, + sopts, + cfg, + descriptions, + output, + ) + } + fn no_input( + &mut self, + matches: &::getopts::Matches, + sopts: &config::Options, + cfg: &ast::CrateConfig, + odir: &Option, + ofile: &Option, + descriptions: &::errors::registry::Registry, + ) -> Option<(Input, Option)> { + ::RustcDefaultCalls.no_input( + matches, + sopts, + cfg, + odir, + ofile, + descriptions, + ) + } + fn late_callback( + &mut self, + codegen_backend: &dyn (::CodegenBackend), + matches: &::getopts::Matches, + sess: &Session, + cstore: &CStore, + input: &Input, + odir: &Option, + ofile: &Option, + ) -> Compilation { + ::RustcDefaultCalls + .late_callback(codegen_backend, matches, sess, cstore, input, odir, ofile) + } + fn build_controller( + self: Box, + _: &Session, + _: &::getopts::Matches + ) -> CompileController<'a> { + *self + } +} + pub struct PhaseController<'a> { pub stop: Compilation, // If true then the compiler will try to run the callback even if the phase // ends with an error. Note that this is not always possible. 
pub run_callback_on_error: bool, - pub callback: Box, + pub callback: Box, } impl<'a> PhaseController<'a> { @@ -415,14 +538,10 @@ pub struct CompileState<'a, 'tcx: 'a> { pub resolutions: Option<&'a Resolutions>, pub analysis: Option<&'a ty::CrateAnalysis>, pub tcx: Option>, - pub trans: Option<&'a trans::CrateTranslation>, } impl<'a, 'tcx> CompileState<'a, 'tcx> { - fn empty(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option) - -> Self { + fn empty(input: &'a Input, session: &'tcx Session, out_dir: &'a Option) -> Self { CompileState { input, session, @@ -440,17 +559,17 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { resolutions: None, analysis: None, tcx: None, - trans: None, } } - fn state_after_parse(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - krate: ast::Crate, - cstore: &'tcx CStore) - -> Self { + fn state_after_parse( + input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option, + krate: ast::Crate, + cstore: &'tcx CStore, + ) -> Self { CompileState { // Initialize the registry before moving `krate` registry: Some(Registry::new(&session, krate.span)), @@ -461,14 +580,15 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { } } - fn state_after_expand(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - cstore: &'tcx CStore, - expanded_crate: &'a ast::Crate, - crate_name: &'a str) - -> Self { + fn state_after_expand( + input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option, + cstore: &'tcx CStore, + expanded_crate: &'a ast::Crate, + crate_name: &'a str, + ) -> Self { CompileState { crate_name: Some(crate_name), cstore: Some(cstore), @@ -478,20 +598,21 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { } } - fn state_after_hir_lowering(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - arenas: &'tcx AllArenas<'tcx>, - cstore: &'tcx CStore, - hir_map: &'a hir_map::Map<'tcx>, - 
analysis: &'a ty::CrateAnalysis, - resolutions: &'a Resolutions, - krate: &'a ast::Crate, - hir_crate: &'a hir::Crate, - output_filenames: &'a OutputFilenames, - crate_name: &'a str) - -> Self { + fn state_after_hir_lowering( + input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option, + arenas: &'tcx AllArenas<'tcx>, + cstore: &'tcx CStore, + hir_map: &'a hir_map::Map<'tcx>, + analysis: &'a ty::CrateAnalysis, + resolutions: &'a Resolutions, + krate: &'a ast::Crate, + hir_crate: &'a hir::Crate, + output_filenames: &'a OutputFilenames, + crate_name: &'a str, + ) -> Self { CompileState { crate_name: Some(crate_name), arenas: Some(arenas), @@ -507,16 +628,17 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { } } - fn state_after_analysis(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - krate: Option<&'a ast::Crate>, - hir_crate: &'a hir::Crate, - analysis: &'a ty::CrateAnalysis, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - crate_name: &'a str) - -> Self { + fn state_after_analysis( + input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option, + krate: Option<&'a ast::Crate>, + hir_crate: &'a hir::Crate, + analysis: &'a ty::CrateAnalysis, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + crate_name: &'a str, + ) -> Self { CompileState { analysis: Some(analysis), tcx: Some(tcx), @@ -528,24 +650,12 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { } } - fn state_after_llvm(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - trans: &'a trans::CrateTranslation) - -> Self { - CompileState { - trans: Some(trans), - out_file: out_file.as_ref().map(|s| &**s), - ..CompileState::empty(input, session, out_dir) - } - } - - fn state_when_compilation_done(input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option) - -> Self { + fn state_when_compilation_done( + input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option, 
+ ) -> Self { CompileState { out_file: out_file.as_ref().map(|s| &**s), ..CompileState::empty(input, session, out_dir) @@ -553,28 +663,28 @@ impl<'a, 'tcx> CompileState<'a, 'tcx> { } } -pub fn phase_1_parse_input<'a>(control: &CompileController, - sess: &'a Session, - input: &Input) - -> PResult<'a, ast::Crate> { - sess.diagnostic().set_continue_after_error(control.continue_parse_after_error); +pub fn phase_1_parse_input<'a>( + control: &CompileController, + sess: &'a Session, + input: &Input, +) -> PResult<'a, ast::Crate> { + sess.diagnostic() + .set_continue_after_error(control.continue_parse_after_error); + hygiene::set_default_edition(sess.edition()); if sess.profile_queries() { - profile::begin(); + profile::begin(sess); } - let krate = time(sess.time_passes(), "parsing", || { - match *input { - Input::File(ref file) => { - parse::parse_crate_from_file(file, &sess.parse_sess) - } - Input::Str { ref input, ref name } => { - parse::parse_crate_from_source_str(name.clone(), - input.clone(), - &sess.parse_sess) - } - } + sess.profiler(|p| p.start_activity(ProfileCategory::Parsing)); + let krate = time(sess, "parsing", || match *input { + Input::File(ref file) => parse::parse_crate_from_file(file, &sess.parse_sess), + Input::Str { + ref input, + ref name, + } => parse::parse_crate_from_source_str(name.clone(), input.clone(), &sess.parse_sess), })?; + sess.profiler(|p| p.end_activity(ProfileCategory::Parsing)); sess.diagnostic().set_continue_after_error(true); @@ -583,7 +693,10 @@ pub fn phase_1_parse_input<'a>(control: &CompileController, } if sess.opts.debugging_opts.input_stats { - println!("Lines of code: {}", sess.codemap().count_lines()); + println!( + "Lines of code: {}", + sess.codemap().count_lines() + ); println!("Pre-expansion node count: {}", count_nodes(&krate)); } @@ -615,6 +728,12 @@ pub struct ExpansionResult { pub hir_forest: hir_map::Forest, } +pub struct InnerExpansionResult<'a, 'b: 'a> { + pub expanded_crate: ast::Crate, + pub resolver: 
Resolver<'a, 'b>, + pub hir_forest: hir_map::Forest, +} + /// Run the "early phases" of the compiler: initial `cfg` processing, /// loading compiler plugins (including those from `addl_plugins`), /// syntax expansion, secondary `cfg` expansion, synthesis of a test @@ -622,68 +741,161 @@ pub struct ExpansionResult { /// standard library and prelude, and name resolution. /// /// Returns `None` if we're aborting after handling -W help. -pub fn phase_2_configure_and_expand(sess: &Session, - cstore: &CStore, - krate: ast::Crate, - registry: Option, - crate_name: &str, - addl_plugins: Option>, - make_glob_map: MakeGlobMap, - after_expand: F) - -> Result - where F: FnOnce(&ast::Crate) -> CompileResult, +pub fn phase_2_configure_and_expand( + sess: &Session, + cstore: &CStore, + krate: ast::Crate, + registry: Option, + crate_name: &str, + addl_plugins: Option>, + make_glob_map: MakeGlobMap, + after_expand: F, +) -> Result +where + F: FnOnce(&ast::Crate) -> CompileResult, { - let time_passes = sess.time_passes(); + // Currently, we ignore the name resolution data structures for the purposes of dependency + // tracking. Instead we will run name resolution and include its output in the hash of each + // item, much like we do for macro expansion. In other words, the hash reflects not just + // its contents but the results of name resolution on those contents. Hopefully we'll push + // this back at some point. 
+ let mut crate_loader = CrateLoader::new(sess, &cstore, &crate_name); + let resolver_arenas = Resolver::arenas(); + let result = phase_2_configure_and_expand_inner( + sess, + cstore, + krate, + registry, + crate_name, + addl_plugins, + make_glob_map, + &resolver_arenas, + &mut crate_loader, + after_expand, + ); + match result { + Ok(InnerExpansionResult { + expanded_crate, + resolver, + hir_forest, + }) => Ok(ExpansionResult { + expanded_crate, + defs: resolver.definitions, + hir_forest, + resolutions: Resolutions { + freevars: resolver.freevars, + export_map: resolver.export_map, + trait_map: resolver.trait_map, + maybe_unused_trait_imports: resolver.maybe_unused_trait_imports, + maybe_unused_extern_crates: resolver.maybe_unused_extern_crates, + }, - let (mut krate, features) = syntax::config::features(krate, &sess.parse_sess, sess.opts.test); + analysis: ty::CrateAnalysis { + access_levels: Lrc::new(AccessLevels::default()), + name: crate_name.to_string(), + glob_map: if resolver.make_glob_map { + Some(resolver.glob_map) + } else { + None + }, + }, + }), + Err(x) => Err(x), + } +} + +/// Same as phase_2_configure_and_expand, but doesn't let you keep the resolver +/// around +pub fn phase_2_configure_and_expand_inner<'a, 'b: 'a, F>( + sess: &'a Session, + cstore: &'a CStore, + mut krate: ast::Crate, + registry: Option, + crate_name: &str, + addl_plugins: Option>, + make_glob_map: MakeGlobMap, + resolver_arenas: &'a ResolverArenas<'a>, + crate_loader: &'a mut CrateLoader<'b>, + after_expand: F, +) -> Result, CompileIncomplete> +where + F: FnOnce(&ast::Crate) -> CompileResult, +{ + krate = time(sess, "attributes injection", || { + syntax::attr::inject(krate, &sess.parse_sess, &sess.opts.debugging_opts.crate_attr) + }); + + let (mut krate, features) = syntax::config::features( + krate, + &sess.parse_sess, + sess.opts.test, + sess.edition(), + ); // these need to be set "early" so that expansion sees `quote` if enabled. 
- *sess.features.borrow_mut() = features; + sess.init_features(features); - *sess.crate_types.borrow_mut() = collect_crate_types(sess, &krate.attrs); + let crate_types = collect_crate_types(sess, &krate.attrs); + sess.crate_types.set(crate_types); let disambiguator = compute_crate_disambiguator(sess); - *sess.crate_disambiguator.borrow_mut() = Some(disambiguator); - rustc_incremental::prepare_session_directory( - sess, - &crate_name, - disambiguator, - ); + sess.crate_disambiguator.set(disambiguator); + rustc_incremental::prepare_session_directory(sess, &crate_name, disambiguator); + + if sess.opts.incremental.is_some() { + time(sess, "garbage collect incremental cache directory", || { + if let Err(e) = rustc_incremental::garbage_collect_session_directories(sess) { + warn!( + "Error while trying to garbage collect incremental \ + compilation cache directory: {}", + e + ); + } + }); + } // If necessary, compute the dependency graph (in the background). let future_dep_graph = if sess.opts.build_dep_graph() { - Some(rustc_incremental::load_dep_graph(sess, time_passes)) + Some(rustc_incremental::load_dep_graph(sess)) } else { None }; - time(time_passes, "recursion limit", || { + time(sess, "recursion limit", || { middle::recursion_limit::update_limits(sess, &krate); }); - krate = time(time_passes, "crate injection", || { - let alt_std_name = sess.opts.alt_std_name.clone(); - syntax::std_inject::maybe_inject_crates_ref(krate, alt_std_name) + krate = time(sess, "crate injection", || { + let alt_std_name = sess.opts.alt_std_name.as_ref().map(|s| &**s); + syntax::std_inject::maybe_inject_crates_ref(krate, alt_std_name, sess.edition()) }); let mut addl_plugins = Some(addl_plugins); - let registrars = time(time_passes, "plugin loading", || { - plugin::load::load_plugins(sess, - &cstore, - &krate, - crate_name, - addl_plugins.take().unwrap()) + let registrars = time(sess, "plugin loading", || { + plugin::load::load_plugins( + sess, + &cstore, + &krate, + crate_name, + 
addl_plugins.take().unwrap(), + ) }); let mut registry = registry.unwrap_or(Registry::new(sess, krate.span)); - time(time_passes, "plugin registration", || { - if sess.features.borrow().rustc_diagnostic_macros { - registry.register_macro("__diagnostic_used", - diagnostics::plugin::expand_diagnostic_used); - registry.register_macro("__register_diagnostic", - diagnostics::plugin::expand_register_diagnostic); - registry.register_macro("__build_diagnostic_array", - diagnostics::plugin::expand_build_diagnostic_array); + time(sess, "plugin registration", || { + if sess.features_untracked().rustc_diagnostic_macros { + registry.register_macro( + "__diagnostic_used", + diagnostics::plugin::expand_diagnostic_used, + ); + registry.register_macro( + "__register_diagnostic", + diagnostics::plugin::expand_register_diagnostic, + ); + registry.register_macro( + "__build_diagnostic_array", + diagnostics::plugin::expand_build_diagnostic_array, + ); } for registrar in registrars { @@ -693,8 +905,15 @@ pub fn phase_2_configure_and_expand(sess: &Session, }); let whitelisted_legacy_custom_derives = registry.take_whitelisted_custom_derives(); - let Registry { syntax_exts, early_lint_passes, late_lint_passes, lint_groups, - llvm_passes, attributes, .. } = registry; + let Registry { + syntax_exts, + early_lint_passes, + late_lint_passes, + lint_groups, + llvm_passes, + attributes, + .. + } = registry; sess.track_errors(|| { let mut ls = sess.lint_store.borrow_mut(); @@ -715,28 +934,29 @@ pub fn phase_2_configure_and_expand(sess: &Session, // Lint plugins are registered; now we can process command line flags. if sess.opts.describe_lints { - super::describe_lints(&sess.lint_store.borrow(), true); + super::describe_lints(&sess, &sess.lint_store.borrow(), true); return Err(CompileIncomplete::Stopped); } - // Currently, we ignore the name resolution data structures for the purposes of dependency - // tracking. 
Instead we will run name resolution and include its output in the hash of each - // item, much like we do for macro expansion. In other words, the hash reflects not just - // its contents but the results of name resolution on those contents. Hopefully we'll push - // this back at some point. - let mut crate_loader = CrateLoader::new(sess, &cstore, crate_name); - let resolver_arenas = Resolver::arenas(); - let mut resolver = Resolver::new(sess, - cstore, - &krate, - crate_name, - make_glob_map, - &mut crate_loader, - &resolver_arenas); - resolver.whitelisted_legacy_custom_derives = whitelisted_legacy_custom_derives; - syntax_ext::register_builtins(&mut resolver, syntax_exts, sess.features.borrow().quote); + time(sess, "pre ast expansion lint checks", || { + lint::check_ast_crate(sess, &krate, true) + }); - krate = time(time_passes, "expansion", || { + let mut resolver = Resolver::new( + sess, + cstore, + &krate, + crate_name, + make_glob_map, + crate_loader, + &resolver_arenas, + ); + resolver.whitelisted_legacy_custom_derives = whitelisted_legacy_custom_derives; + syntax_ext::register_builtins(&mut resolver, syntax_exts, sess.features_untracked().quote); + + // Expand all macros + sess.profiler(|p| p.start_activity(ProfileCategory::Expansion)); + krate = time(sess, "expansion", || { // Windows dlls do not have rpaths, so they don't know how to find their // dependencies. It's up to us to tell the system where to find all the // dependent dlls. 
Note that this uses cfg!(windows) as opposed to @@ -753,22 +973,27 @@ pub fn phase_2_configure_and_expand(sess: &Session, let mut old_path = OsString::new(); if cfg!(windows) { old_path = env::var_os("PATH").unwrap_or(old_path); - let mut new_path = sess.host_filesearch(PathKind::All) - .get_dylib_search_paths(); + let mut new_path = sess.host_filesearch(PathKind::All).get_dylib_search_paths(); for path in env::split_paths(&old_path) { if !new_path.contains(&path) { new_path.push(path); } } - env::set_var("PATH", - &env::join_paths(new_path.iter() - .filter(|p| env::join_paths(iter::once(p)).is_ok())) - .unwrap()); + env::set_var( + "PATH", + &env::join_paths( + new_path + .iter() + .filter(|p| env::join_paths(iter::once(p)).is_ok()), + ).unwrap(), + ); } - let features = sess.features.borrow(); + + // Create the config for macro expansion + let features = sess.features_untracked(); let cfg = syntax::ext::expand::ExpansionConfig { features: Some(&features), - recursion_limit: sess.recursion_limit.get(), + recursion_limit: *sess.recursion_limit.get(), trace_mac: sess.opts.debugging_opts.trace_macros, should_test: sess.opts.test, ..syntax::ext::expand::ExpansionConfig::default(crate_name.to_string()) @@ -777,12 +1002,23 @@ pub fn phase_2_configure_and_expand(sess: &Session, let mut ecx = ExtCtxt::new(&sess.parse_sess, cfg, &mut resolver); let err_count = ecx.parse_sess.span_diagnostic.err_count(); - let krate = ecx.monotonic_expander().expand_crate(krate); + // Expand macros now! 
+ let krate = time(sess, "expand crate", || { + ecx.monotonic_expander().expand_crate(krate) + }); - ecx.check_unused_macros(); + // The rest is error reporting - let mut missing_fragment_specifiers: Vec<_> = - ecx.parse_sess.missing_fragment_specifiers.borrow().iter().cloned().collect(); + time(sess, "check unused macros", || { + ecx.check_unused_macros(); + }); + + let mut missing_fragment_specifiers: Vec<_> = ecx.parse_sess + .missing_fragment_specifiers + .borrow() + .iter() + .cloned() + .collect(); missing_fragment_specifiers.sort(); for span in missing_fragment_specifiers { let lint = lint::builtin::MISSING_FRAGMENT_SPECIFIER; @@ -797,13 +1033,17 @@ pub fn phase_2_configure_and_expand(sess: &Session, } krate }); + sess.profiler(|p| p.end_activity(ProfileCategory::Expansion)); - krate = time(time_passes, "maybe building test harness", || { - syntax::test::modify_for_testing(&sess.parse_sess, - &mut resolver, - sess.opts.test, - krate, - sess.diagnostic()) + krate = time(sess, "maybe building test harness", || { + syntax::test::modify_for_testing( + &sess.parse_sess, + &mut resolver, + sess.opts.test, + krate, + sess.diagnostic(), + &sess.features_untracked(), + ) }); // If we're actually rustdoc then there's no need to actually compile @@ -816,28 +1056,45 @@ pub fn phase_2_configure_and_expand(sess: &Session, // bunch of checks in the `modify` function below. For now just skip this // step entirely if we're rustdoc as it's not too useful anyway. 
if !sess.opts.actually_rustdoc { - krate = time(time_passes, "maybe creating a macro crate", || { + krate = time(sess, "maybe creating a macro crate", || { let crate_types = sess.crate_types.borrow(); let num_crate_types = crate_types.len(); - let is_proc_macro_crate = crate_types.contains(&config::CrateTypeProcMacro); + let is_proc_macro_crate = crate_types.contains(&config::CrateType::ProcMacro); let is_test_crate = sess.opts.test; - syntax_ext::proc_macro_registrar::modify(&sess.parse_sess, - &mut resolver, - krate, - is_proc_macro_crate, - is_test_crate, - num_crate_types, - sess.diagnostic()) + syntax_ext::proc_macro_registrar::modify( + &sess.parse_sess, + &mut resolver, + krate, + is_proc_macro_crate, + is_test_crate, + num_crate_types, + sess.diagnostic(), + ) }); } - krate = time(time_passes, "creating allocators", || { - allocator::expand::modify(&sess.parse_sess, - &mut resolver, - krate, - sess.diagnostic()) + // Expand global allocators, which are treated as an in-tree proc macro + krate = time(sess, "creating allocators", || { + allocator::expand::modify( + &sess.parse_sess, + &mut resolver, + krate, + crate_name.to_string(), + sess.diagnostic(), + ) }); + // Add all buffered lints from the `ParseSess` to the `Session`. + sess.parse_sess.buffered_lints.with_lock(|buffered_lints| { + info!("{} parse sess buffered_lints", buffered_lints.len()); + for BufferedEarlyLint{id, span, msg, lint_id} in buffered_lints.drain(..) { + let lint = lint::Lint::from_parser_lint_id(lint_id); + sess.buffer_lint(lint, id, span, &msg); + } + }); + + // Done with macro expansion! 
+ after_expand(&krate)?; if sess.opts.debugging_opts.input_stats { @@ -852,44 +1109,52 @@ pub fn phase_2_configure_and_expand(sess: &Session, println!("{}", json::as_json(&krate)); } - time(time_passes, - "AST validation", - || ast_validation::check_crate(sess, &krate)); + time(sess, "AST validation", || { + ast_validation::check_crate(sess, &krate) + }); - time(time_passes, "name resolution", || -> CompileResult { + time(sess, "name resolution", || -> CompileResult { resolver.resolve_crate(&krate); Ok(()) })?; - if resolver.found_unresolved_macro { - sess.parse_sess.span_diagnostic.abort_if_errors(); - } - // Needs to go *after* expansion to be able to check the results of macro expansion. - time(time_passes, "complete gated feature checking", || { + time(sess, "complete gated feature checking", || { sess.track_errors(|| { - syntax::feature_gate::check_crate(&krate, - &sess.parse_sess, - &sess.features.borrow(), - &attributes, - sess.opts.unstable_features); + syntax::feature_gate::check_crate( + &krate, + &sess.parse_sess, + &sess.features_untracked(), + &attributes, + sess.opts.unstable_features, + ); }) })?; + // Unresolved macros might be due to mistyped `#[macro_use]`, + // so abort after checking for unknown attributes. (#49074) + if resolver.found_unresolved_macro { + sess.diagnostic().abort_if_errors(); + } + // Lower ast -> hir. // First, we need to collect the dep_graph. 
let dep_graph = match future_dep_graph { None => DepGraph::new_disabled(), Some(future) => { - let prev_graph = time(time_passes, "blocked while dep-graph loading finishes", || { - future.open() - .expect("Could not join with background dep_graph thread") - .open(sess) - }); - DepGraph::new(prev_graph) + let (prev_graph, prev_work_products) = + time(sess, "blocked while dep-graph loading finishes", || { + future + .open() + .unwrap_or_else(|e| rustc_incremental::LoadResult::Error { + message: format!("could not decode incremental cache: {:?}", e), + }) + .open(sess) + }); + DepGraph::new(prev_graph, prev_work_products) } }; - let hir_forest = time(time_passes, "lowering ast -> hir", || { + let hir_forest = time(sess, "lowering ast -> hir", || { let hir_crate = lower_crate(sess, cstore, &dep_graph, &krate, &mut resolver); if sess.opts.debugging_opts.hir_stats { @@ -899,245 +1164,213 @@ pub fn phase_2_configure_and_expand(sess: &Session, hir_map::Forest::new(hir_crate, &dep_graph) }); - time(time_passes, - "early lint checks", - || lint::check_ast_crate(sess, &krate)); + time(sess, "early lint checks", || { + lint::check_ast_crate(sess, &krate, false) + }); // Discard hygiene data, which isn't required after lowering to HIR. 
- if !keep_hygiene_data(sess) { + if !sess.opts.debugging_opts.keep_hygiene_data { syntax::ext::hygiene::clear_markings(); } - Ok(ExpansionResult { + Ok(InnerExpansionResult { expanded_crate: krate, - defs: resolver.definitions, - analysis: ty::CrateAnalysis { - access_levels: Rc::new(AccessLevels::default()), - name: crate_name.to_string(), - glob_map: if resolver.make_glob_map { Some(resolver.glob_map) } else { None }, - }, - resolutions: Resolutions { - freevars: resolver.freevars, - export_map: resolver.export_map, - trait_map: resolver.trait_map, - maybe_unused_trait_imports: resolver.maybe_unused_trait_imports, - maybe_unused_extern_crates: resolver.maybe_unused_extern_crates, - }, + resolver, hir_forest, }) } -pub fn default_provide(providers: &mut ty::maps::Providers) { +pub fn default_provide(providers: &mut ty::query::Providers) { + hir::provide(providers); borrowck::provide(providers); mir::provide(providers); reachable::provide(providers); resolve_lifetime::provide(providers); rustc_privacy::provide(providers); - DefaultTransCrate::provide(providers); typeck::provide(providers); ty::provide(providers); traits::provide(providers); reachable::provide(providers); - rustc_const_eval::provide(providers); rustc_passes::provide(providers); + rustc_traits::provide(providers); middle::region::provide(providers); cstore::provide(providers); lint::provide(providers); } -pub fn default_provide_extern(providers: &mut ty::maps::Providers) { +pub fn default_provide_extern(providers: &mut ty::query::Providers) { cstore::provide_extern(providers); - DefaultTransCrate::provide_extern(providers); } /// Run the resolution, typechecking, region checking and other /// miscellaneous analysis passes on the crate. Return various /// structures carrying the results of the analysis. 
-pub fn phase_3_run_analysis_passes<'tcx, F, R>(control: &CompileController, - sess: &'tcx Session, - cstore: &'tcx CrateStore, - hir_map: hir_map::Map<'tcx>, - mut analysis: ty::CrateAnalysis, - resolutions: Resolutions, - arenas: &'tcx AllArenas<'tcx>, - name: &str, - output_filenames: &OutputFilenames, - f: F) - -> Result - where F: for<'a> FnOnce(TyCtxt<'a, 'tcx, 'tcx>, - ty::CrateAnalysis, - mpsc::Receiver>, - CompileResult) -> R +pub fn phase_3_run_analysis_passes<'tcx, F, R>( + codegen_backend: &dyn CodegenBackend, + control: &CompileController, + sess: &'tcx Session, + cstore: &'tcx CStore, + hir_map: hir_map::Map<'tcx>, + mut analysis: ty::CrateAnalysis, + resolutions: Resolutions, + arenas: &'tcx AllArenas<'tcx>, + name: &str, + output_filenames: &OutputFilenames, + f: F, +) -> Result +where + F: for<'a> FnOnce( + TyCtxt<'a, 'tcx, 'tcx>, + ty::CrateAnalysis, + mpsc::Receiver>, + CompileResult, + ) -> R, { - macro_rules! try_with_f { - ($e: expr, ($($t:tt)*)) => { - match $e { - Ok(x) => x, - Err(x) => { - f($($t)*, Err(x)); - return Err(x); - } - } - } - } + let query_result_on_disk_cache = time(sess, "load query result cache", || { + rustc_incremental::load_query_result_cache(sess) + }); - let time_passes = sess.time_passes(); + time(sess, "looking for entry point", || { + middle::entry::find_entry_point(sess, &hir_map, name) + }); - let query_result_on_disk_cache = time(time_passes, - "load query result cache", - || rustc_incremental::load_query_result_cache(sess)); + sess.plugin_registrar_fn + .set(time(sess, "looking for plugin registrar", || { + plugin::build::find_plugin_registrar(sess.diagnostic(), &hir_map) + })); + sess.derive_registrar_fn + .set(derive_registrar::find(&hir_map)); - time(time_passes, - "looking for entry point", - || middle::entry::find_entry_point(sess, &hir_map)); + time(sess, "loop checking", || loops::check_crate(sess, &hir_map)); - sess.plugin_registrar_fn.set(time(time_passes, "looking for plugin registrar", || { - 
plugin::build::find_plugin_registrar(sess.diagnostic(), &hir_map) - })); - sess.derive_registrar_fn.set(derive_registrar::find(&hir_map)); - - time(time_passes, - "loop checking", - || loops::check_crate(sess, &hir_map)); - - time(time_passes, - "static item recursion checking", - || static_recursion::check_crate(sess, &hir_map))?; - - let mut local_providers = ty::maps::Providers::default(); + let mut local_providers = ty::query::Providers::default(); default_provide(&mut local_providers); + codegen_backend.provide(&mut local_providers); (control.provide)(&mut local_providers); let mut extern_providers = local_providers; default_provide_extern(&mut extern_providers); + codegen_backend.provide_extern(&mut extern_providers); (control.provide_extern)(&mut extern_providers); let (tx, rx) = mpsc::channel(); - TyCtxt::create_and_enter(sess, - cstore, - local_providers, - extern_providers, - arenas, - resolutions, - hir_map, - query_result_on_disk_cache, - name, - tx, - output_filenames, - |tcx| { - // Do some initialization of the DepGraph that can only be done with the - // tcx available. - rustc_incremental::dep_graph_tcx_init(tcx); + TyCtxt::create_and_enter( + sess, + cstore, + local_providers, + extern_providers, + arenas, + resolutions, + hir_map, + query_result_on_disk_cache, + name, + tx, + output_filenames, + |tcx| { + // Do some initialization of the DepGraph that can only be done with the + // tcx available. 
+ rustc_incremental::dep_graph_tcx_init(tcx); - time(sess.time_passes(), "attribute checking", || { - hir::check_attr::check_crate(tcx) - }); + time(sess, "attribute checking", || { + hir::check_attr::check_crate(tcx) + }); - time(time_passes, - "stability checking", - || stability::check_unstable_api_usage(tcx)); + time(sess, "stability checking", || { + stability::check_unstable_api_usage(tcx) + }); - // passes are timed inside typeck - try_with_f!(typeck::check_crate(tcx), (tcx, analysis, rx)); + // passes are timed inside typeck + match typeck::check_crate(tcx) { + Ok(x) => x, + Err(x) => { + f(tcx, analysis, rx, Err(x)); + return Err(x); + } + } - time(time_passes, - "const checking", - || consts::check_crate(tcx)); + time(sess, "rvalue promotion", || { + rvalue_promotion::check_crate(tcx) + }); - analysis.access_levels = - time(time_passes, "privacy checking", || rustc_privacy::check_crate(tcx)); + analysis.access_levels = + time(sess, "privacy checking", || rustc_privacy::check_crate(tcx)); - time(time_passes, - "intrinsic checking", - || middle::intrinsicck::check_crate(tcx)); + time(sess, "intrinsic checking", || { + middle::intrinsicck::check_crate(tcx) + }); - time(time_passes, - "match checking", - || check_match::check_crate(tcx)); + time(sess, "match checking", || mir::matchck_crate(tcx)); - // this must run before MIR dump, because - // "not all control paths return a value" is reported here. - // - // maybe move the check to a MIR pass? - time(time_passes, - "liveness checking", - || middle::liveness::check_crate(tcx)); + // this must run before MIR dump, because + // "not all control paths return a value" is reported here. + // + // maybe move the check to a MIR pass? 
+ time(sess, "liveness checking", || { + middle::liveness::check_crate(tcx) + }); - time(time_passes, - "borrow checking", - || borrowck::check_crate(tcx)); + time(sess, "borrow checking", || { + if tcx.use_ast_borrowck() { + borrowck::check_crate(tcx); + } + }); - time(time_passes, - "MIR borrow checking", - || for def_id in tcx.body_owners() { tcx.mir_borrowck(def_id); }); + time(sess, + "MIR borrow checking", + || tcx.par_body_owners(|def_id| { tcx.mir_borrowck(def_id); })); - time(time_passes, - "MIR effect checking", - || for def_id in tcx.body_owners() { - mir::transform::check_unsafety::check_unsafety(tcx, def_id) - }); - // Avoid overwhelming user with errors if type checking failed. - // I'm not sure how helpful this is, to be honest, but it avoids - // a - // lot of annoying errors in the compile-fail tests (basically, - // lint warnings and so on -- kindck used to do this abort, but - // kindck is gone now). -nmatsakis - if sess.err_count() > 0 { - return Ok(f(tcx, analysis, rx, sess.compile_status())); - } + time(sess, "dumping chalk-like clauses", || { + rustc_traits::lowering::dump_program_clauses(tcx); + }); - time(time_passes, "death checking", || middle::dead::check_crate(tcx)); + time(sess, "MIR effect checking", || { + for def_id in tcx.body_owners() { + mir::transform::check_unsafety::check_unsafety(tcx, def_id) + } + }); + // Avoid overwhelming user with errors if type checking failed. + // I'm not sure how helpful this is, to be honest, but it avoids + // a + // lot of annoying errors in the compile-fail tests (basically, + // lint warnings and so on -- kindck used to do this abort, but + // kindck is gone now). 
-nmatsakis + if sess.err_count() > 0 { + return Ok(f(tcx, analysis, rx, sess.compile_status())); + } - time(time_passes, "unused lib feature checking", || { - stability::check_unused_or_stable_features(tcx) - }); + time(sess, "death checking", || middle::dead::check_crate(tcx)); - time(time_passes, "lint checking", || lint::check_crate(tcx)); + time(sess, "unused lib feature checking", || { + stability::check_unused_or_stable_features(tcx) + }); - return Ok(f(tcx, analysis, rx, tcx.sess.compile_status())); - }) + time(sess, "lint checking", || lint::check_crate(tcx)); + + return Ok(f(tcx, analysis, rx, tcx.sess.compile_status())); + }, + ) } -/// Run the translation phase to LLVM, after which the AST and analysis can +/// Run the codegen backend, after which the AST and analysis can /// be discarded. -pub fn phase_4_translate_to_llvm<'a, 'tcx, Trans: TransCrate>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - rx: mpsc::Receiver>) - -> ::OngoingCrateTranslation { - let time_passes = tcx.sess.time_passes(); +pub fn phase_4_codegen<'a, 'tcx>( + codegen_backend: &dyn CodegenBackend, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver>, +) -> Box { + time(tcx.sess, "resolving dependency formats", || { + ::rustc::middle::dependency_format::calculate(tcx) + }); - time(time_passes, - "resolving dependency formats", - || ::rustc::middle::dependency_format::calculate(tcx)); - - let translation = - time(time_passes, "translation", move || { - Trans::trans_crate(tcx, rx) - }); + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + let codegen = time(tcx.sess, "codegen", move || codegen_backend.codegen_crate(tcx, rx)); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); if tcx.sess.profile_queries() { - profile::dump("profile_queries".to_string()) + profile::dump(&tcx.sess, "profile_queries".to_string()) } - translation -} - -/// Run LLVM itself, producing a bitcode file, assembly file or object file -/// as a side effect. 
-pub fn phase_5_run_llvm_passes(sess: &Session, - dep_graph: &DepGraph, - trans: ::OngoingCrateTranslation) - -> (CompileResult, ::TranslatedCrate) { - let trans = Trans::join_trans(trans, sess, dep_graph); - - if sess.opts.debugging_opts.incremental_info { - Trans::dump_incremental_data(&trans); - } - - time(sess.time_passes(), - "serialize work products", - move || rustc_incremental::save_work_products(sess, dep_graph)); - - (sess.compile_status(), trans) + codegen } fn escape_dep_filename(filename: &FileName) -> String { @@ -1146,22 +1379,28 @@ fn escape_dep_filename(filename: &FileName) -> String { filename.to_string().replace(" ", "\\ ") } -fn write_out_deps(sess: &Session, outputs: &OutputFilenames, crate_name: &str) { +// Returns all the paths that correspond to generated files. +fn generated_output_paths( + sess: &Session, + outputs: &OutputFilenames, + exact_name: bool, + crate_name: &str, +) -> Vec { let mut out_filenames = Vec::new(); for output_type in sess.opts.output_types.keys() { let file = outputs.path(*output_type); match *output_type { - OutputType::Exe => { - for output in sess.crate_types.borrow().iter() { - let p = ::rustc_trans_utils::link::filename_for_input( - sess, - *output, - crate_name, - outputs - ); - out_filenames.push(p); - } - } + // If the filename has been overridden using `-o`, it will not be modified + // by appending `.rlib`, `.exe`, etc., so we can skip this transformation. 
+ OutputType::Exe if !exact_name => for crate_type in sess.crate_types.borrow().iter() { + let p = ::rustc_codegen_utils::link::filename_for_input( + sess, + *crate_type, + crate_name, + outputs, + ); + out_filenames.push(p); + }, OutputType::DepInfo if sess.opts.debugging_opts.dep_info_omit_d_target => { // Don't add the dep-info output when omitting it from dep-info targets } @@ -1170,98 +1409,133 @@ fn write_out_deps(sess: &Session, outputs: &OutputFilenames, crate_name: &str) { } } } + out_filenames +} +// Runs `f` on every output file path and returns the first non-None result, or None if `f` +// returns None for every file path. +fn check_output(output_paths: &[PathBuf], f: F) -> Option +where + F: Fn(&PathBuf) -> Option, +{ + for output_path in output_paths { + if let Some(result) = f(output_path) { + return Some(result); + } + } + None +} + +pub fn output_contains_path(output_paths: &[PathBuf], input_path: &PathBuf) -> bool { + let input_path = input_path.canonicalize().ok(); + if input_path.is_none() { + return false; + } + let check = |output_path: &PathBuf| { + if output_path.canonicalize().ok() == input_path { + Some(()) + } else { + None + } + }; + check_output(output_paths, check).is_some() +} + +pub fn output_conflicts_with_dir(output_paths: &[PathBuf]) -> Option { + let check = |output_path: &PathBuf| { + if output_path.is_dir() { + Some(output_path.clone()) + } else { + None + } + }; + check_output(output_paths, check) +} + +fn write_out_deps(sess: &Session, outputs: &OutputFilenames, out_filenames: &[PathBuf]) { // Write out dependency rules to the dep-info file if requested if !sess.opts.output_types.contains_key(&OutputType::DepInfo) { return; } let deps_filename = outputs.path(OutputType::DepInfo); - let result = - (|| -> io::Result<()> { - // Build a list of files used to compile the output and - // write Makefile-compatible dependency rules - let files: Vec = sess.codemap() - .files() - .iter() - .filter(|fmap| fmap.is_real_file()) - 
.filter(|fmap| !fmap.is_imported()) - .map(|fmap| escape_dep_filename(&fmap.name)) - .collect(); - let mut file = fs::File::create(&deps_filename)?; - for path in &out_filenames { - write!(file, "{}: {}\n\n", path.display(), files.join(" "))?; - } + let result = (|| -> io::Result<()> { + // Build a list of files used to compile the output and + // write Makefile-compatible dependency rules + let files: Vec = sess.codemap() + .files() + .iter() + .filter(|fmap| fmap.is_real_file()) + .filter(|fmap| !fmap.is_imported()) + .map(|fmap| escape_dep_filename(&fmap.name)) + .collect(); + let mut file = fs::File::create(&deps_filename)?; + for path in out_filenames { + write!(file, "{}: {}\n\n", path.display(), files.join(" "))?; + } - // Emit a fake target for each input file to the compilation. This - // prevents `make` from spitting out an error if a file is later - // deleted. For more info see #28735 - for path in files { - writeln!(file, "{}:", path)?; - } - Ok(()) - })(); + // Emit a fake target for each input file to the compilation. This + // prevents `make` from spitting out an error if a file is later + // deleted. 
For more info see #28735 + for path in files { + writeln!(file, "{}:", path)?; + } + Ok(()) + })(); match result { Ok(()) => {} Err(e) => { - sess.fatal(&format!("error writing dependencies to `{}`: {}", - deps_filename.display(), - e)); + sess.fatal(&format!( + "error writing dependencies to `{}`: {}", + deps_filename.display(), + e + )); } } } pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec { // Unconditionally collect crate types from attributes to make them used - let attr_types: Vec = - attrs.iter() - .filter_map(|a| { - if a.check_name("crate_type") { - match a.value_str() { - Some(ref n) if *n == "rlib" => { - Some(config::CrateTypeRlib) - } - Some(ref n) if *n == "dylib" => { - Some(config::CrateTypeDylib) - } - Some(ref n) if *n == "cdylib" => { - Some(config::CrateTypeCdylib) - } - Some(ref n) if *n == "lib" => { - Some(config::default_lib_output()) - } - Some(ref n) if *n == "staticlib" => { - Some(config::CrateTypeStaticlib) - } - Some(ref n) if *n == "proc-macro" => { - Some(config::CrateTypeProcMacro) - } - Some(ref n) if *n == "bin" => Some(config::CrateTypeExecutable), - Some(_) => { - session.buffer_lint(lint::builtin::UNKNOWN_CRATE_TYPES, - ast::CRATE_NODE_ID, - a.span, - "invalid `crate_type` value"); - None - } - _ => { - session.struct_span_err(a.span, "`crate_type` requires a value") - .note("for example: `#![crate_type=\"lib\"]`") - .emit(); - None - } - } - } else { - None - } - }) - .collect(); + let attr_types: Vec = attrs + .iter() + .filter_map(|a| { + if a.check_name("crate_type") { + match a.value_str() { + Some(ref n) if *n == "rlib" => Some(config::CrateType::Rlib), + Some(ref n) if *n == "dylib" => Some(config::CrateType::Dylib), + Some(ref n) if *n == "cdylib" => Some(config::CrateType::Cdylib), + Some(ref n) if *n == "lib" => Some(config::default_lib_output()), + Some(ref n) if *n == "staticlib" => Some(config::CrateType::Staticlib), + Some(ref n) if *n == "proc-macro" => 
Some(config::CrateType::ProcMacro), + Some(ref n) if *n == "bin" => Some(config::CrateType::Executable), + Some(_) => { + session.buffer_lint( + lint::builtin::UNKNOWN_CRATE_TYPES, + ast::CRATE_NODE_ID, + a.span, + "invalid `crate_type` value", + ); + None + } + _ => { + session + .struct_span_err(a.span, "`crate_type` requires a value") + .note("for example: `#![crate_type=\"lib\"]`") + .emit(); + None + } + } + } else { + None + } + }) + .collect(); // If we're generating a test executable, then ignore all other output // styles at all other locations if session.opts.test { - return vec![config::CrateTypeExecutable]; + return vec![config::CrateType::Executable]; } // Only check command line flags if present. If no types are specified by @@ -1271,7 +1545,9 @@ pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec Vec CrateDisambiguator { // Also incorporate crate type, so that we don't get symbol conflicts when // linking against a library of the same name, if this is an executable. 
- let is_exe = session.crate_types.borrow().contains(&config::CrateTypeExecutable); + let is_exe = session + .crate_types + .borrow() + .contains(&config::CrateType::Executable); hasher.write(if is_exe { b"exe" } else { b"lib" }); CrateDisambiguator::from(hasher.finish()) - } -pub fn build_output_filenames(input: &Input, - odir: &Option, - ofile: &Option, - attrs: &[ast::Attribute], - sess: &Session) - -> OutputFilenames { +pub fn build_output_filenames( + input: &Input, + odir: &Option, + ofile: &Option, + attrs: &[ast::Attribute], + sess: &Session, +) -> OutputFilenames { match *ofile { None => { // "-" as input file will cause the parser to read from stdin so we @@ -1344,10 +1624,10 @@ pub fn build_output_filenames(input: &Input, // If a crate name is present, we use it as the link name let stem = sess.opts - .crate_name - .clone() - .or_else(|| attr::find_crate_name(attrs).map(|n| n.to_string())) - .unwrap_or(input.filestem()); + .crate_name + .clone() + .or_else(|| attr::find_crate_name(attrs).map(|n| n.to_string())) + .unwrap_or(input.filestem()); OutputFilenames { out_directory: dirpath, @@ -1360,30 +1640,36 @@ pub fn build_output_filenames(input: &Input, Some(ref out_file) => { let unnamed_output_types = sess.opts - .output_types - .values() - .filter(|a| a.is_none()) - .count(); + .output_types + .values() + .filter(|a| a.is_none()) + .count(); let ofile = if unnamed_output_types > 1 { - sess.warn("due to multiple output types requested, the explicitly specified \ - output file name will be adapted for each output type"); + sess.warn( + "due to multiple output types requested, the explicitly specified \ + output file name will be adapted for each output type", + ); None } else { Some(out_file.clone()) }; if *odir != None { - sess.warn("ignoring --out-dir flag due to -o flag."); + sess.warn("ignoring --out-dir flag due to -o flag"); + } + if !sess.opts.cg.extra_filename.is_empty() { + sess.warn("ignoring -C extra-filename flag due to -o flag"); } let 
cur_dir = Path::new(""); OutputFilenames { out_directory: out_file.parent().unwrap_or(cur_dir).to_path_buf(), - out_filestem: out_file.file_stem() - .unwrap_or(OsStr::new("")) - .to_str() - .unwrap() - .to_string(), + out_filestem: out_file + .file_stem() + .unwrap_or(OsStr::new("")) + .to_str() + .unwrap() + .to_string(), single_output_file: ofile, extra: sess.opts.cg.extra_filename.clone(), outputs: sess.opts.output_types.clone(), diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 237656eb43c6..d25d57a004b5 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -17,13 +17,19 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] -#![deny(warnings)] #![feature(box_syntax)] #![cfg_attr(unix, feature(libc))] +#![cfg_attr(not(stage0), feature(nll))] +#![feature(option_replace)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] +#![feature(slice_sort_by_cached_key)] #![feature(set_stdio)] +#![feature(rustc_stack_internals)] +#![feature(no_debug)] + +#![recursion_limit="256"] extern crate arena; extern crate getopts; @@ -31,11 +37,11 @@ extern crate graphviz; extern crate env_logger; #[cfg(unix)] extern crate libc; +extern crate rustc_rayon as rayon; extern crate rustc; extern crate rustc_allocator; -extern crate rustc_back; +extern crate rustc_target; extern crate rustc_borrowck; -extern crate rustc_const_eval; extern crate rustc_data_structures; extern crate rustc_errors as errors; extern crate rustc_passes; @@ -47,10 +53,10 @@ extern crate rustc_metadata; extern crate rustc_mir; extern crate rustc_resolve; extern crate rustc_save_analysis; -#[cfg(feature="llvm")] -extern crate rustc_trans; -extern crate rustc_trans_utils; +extern crate rustc_traits; +extern crate rustc_codegen_utils; extern crate rustc_typeck; +extern crate scoped_tls; extern crate serialize; 
#[macro_use] extern crate log; @@ -64,34 +70,40 @@ use pretty::{PpMode, UserIdentifiedItem}; use rustc_resolve as resolve; use rustc_save_analysis as save; use rustc_save_analysis::DumpHandler; +use rustc_data_structures::sync::{self, Lrc}; +use rustc_data_structures::OnDrop; use rustc::session::{self, config, Session, build_session, CompileResult}; use rustc::session::CompileIncomplete; -use rustc::session::config::{Input, PrintRequest, OutputType, ErrorOutputType}; +use rustc::session::config::{Input, PrintRequest, ErrorOutputType}; use rustc::session::config::nightly_options; +use rustc::session::filesearch; use rustc::session::{early_error, early_warn}; use rustc::lint::Lint; use rustc::lint; -use rustc::middle::cstore::CrateStore; use rustc_metadata::locator; use rustc_metadata::cstore::CStore; +use rustc_metadata::dynamic_lib::DynamicLibrary; use rustc::util::common::{time, ErrorReported}; -use rustc_trans_utils::trans_crate::TransCrate; +use rustc_codegen_utils::codegen_backend::CodegenBackend; use serialize::json::ToJson; use std::any::Any; use std::cmp::max; -use std::cmp::Ordering::Equal; use std::default::Default; +use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; use std::env; +use std::error::Error; use std::ffi::OsString; +use std::fmt::{self, Display}; use std::io::{self, Read, Write}; -use std::iter::repeat; -use std::path::PathBuf; +use std::mem; +use std::panic; +use std::path::{PathBuf, Path}; use std::process::{self, Command, Stdio}; -use std::rc::Rc; use std::str; -use std::sync::{Arc, Mutex}; +use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering}; +use std::sync::{Once, ONCE_INIT}; use std::thread; use syntax::ast; @@ -106,12 +118,56 @@ mod test; pub mod profile; pub mod driver; pub mod pretty; -pub mod target_features; mod derive_registrar; +pub mod target_features { + use syntax::ast; + use syntax::symbol::Symbol; + use rustc::session::Session; + use rustc_codegen_utils::codegen_backend::CodegenBackend; + + /// Add `target_feature = 
"..."` cfgs for a variety of platform + /// specific features (SSE, NEON etc.). + /// + /// This is performed by checking whether a whitelisted set of + /// features is available on the target machine, by querying LLVM. + pub fn add_configuration(cfg: &mut ast::CrateConfig, + sess: &Session, + codegen_backend: &dyn CodegenBackend) { + let tf = Symbol::intern("target_feature"); + + for feat in codegen_backend.target_features(sess) { + cfg.insert((tf, Some(feat))); + } + + if sess.crt_static_feature() { + cfg.insert((tf, Some(Symbol::intern("crt-static")))); + } + } +} + +/// Exit status code used for successful compilation and help output. +pub const EXIT_SUCCESS: isize = 0; + +/// Exit status code used for compilation failures and invalid flags. +pub const EXIT_FAILURE: isize = 1; + const BUG_REPORT_URL: &'static str = "https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.\ md#bug-reports"; +const ICE_REPORT_COMPILER_FLAGS: &'static [&'static str] = &[ + "Z", + "C", + "crate-type", +]; +const ICE_REPORT_COMPILER_FLAGS_EXCLUDE: &'static [&'static str] = &[ + "metadata", + "extra-filename", +]; +const ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE: &'static [&'static str] = &[ + "incremental", +]; + pub fn abort_on_err(result: Result, sess: &Session) -> T { match result { Err(CompileIncomplete::Errored(ErrorReported)) => { @@ -128,7 +184,7 @@ pub fn abort_on_err(result: Result, sess: &Session) -> pub fn run(run_compiler: F) -> isize where F: FnOnce() -> (CompileResult, Option) + Send + 'static { - monitor(move || { + let result = monitor(move || { let (result, session) = run_compiler(); if let Err(CompileIncomplete::Errored(_)) = result { match session { @@ -140,43 +196,268 @@ pub fn run(run_compiler: F) -> isize let emitter = errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto, None, - true); + true, + false); let handler = errors::Handler::with_emitter(true, false, Box::new(emitter)); handler.emit(&MultiSpan::new(), "aborting due to previous error(s)", 
errors::Level::Fatal); - exit_on_err(); + panic::resume_unwind(Box::new(errors::FatalErrorMarker)); } } } }); - 0 + + match result { + Ok(()) => EXIT_SUCCESS, + Err(_) => EXIT_FAILURE, + } } -#[cfg(not(feature="llvm"))] -pub use rustc_trans_utils::trans_crate::MetadataOnlyTransCrate as DefaultTransCrate; -#[cfg(feature="llvm")] -pub use rustc_trans::LlvmTransCrate as DefaultTransCrate; +fn load_backend_from_dylib(path: &Path) -> fn() -> Box { + // Note that we're specifically using `open_global_now` here rather than + // `open`, namely we want the behavior on Unix of RTLD_GLOBAL and RTLD_NOW, + // where NOW means "bind everything right now" because we don't want + // surprises later on and RTLD_GLOBAL allows the symbols to be made + // available for future dynamic libraries opened. This is currently used by + // loading LLVM and then making its symbols available for other dynamic + // libraries. + let lib = match DynamicLibrary::open_global_now(path) { + Ok(lib) => lib, + Err(err) => { + let err = format!("couldn't load codegen backend {:?}: {:?}", + path, + err); + early_error(ErrorOutputType::default(), &err); + } + }; + unsafe { + match lib.symbol("__rustc_codegen_backend") { + Ok(f) => { + mem::forget(lib); + mem::transmute::<*mut u8, _>(f) + } + Err(e) => { + let err = format!("couldn't load codegen backend as it \ + doesn't export the `__rustc_codegen_backend` \ + symbol: {:?}", e); + early_error(ErrorOutputType::default(), &err); + } + } + } +} -#[cfg(not(feature="llvm"))] -mod rustc_trans { - use syntax_pos::symbol::Symbol; - use rustc::session::Session; - use rustc::session::config::PrintRequest; - pub use rustc_trans_utils::trans_crate::MetadataOnlyTransCrate as LlvmTransCrate; - pub use rustc_trans_utils::trans_crate::TranslatedCrate as CrateTranslation; +pub fn get_codegen_backend(sess: &Session) -> Box { + static INIT: Once = ONCE_INIT; - pub fn init(_sess: &Session) {} - pub fn print_version() {} - pub fn print_passes() {} - pub fn print(_req: 
PrintRequest, _sess: &Session) {} - pub fn target_features(_sess: &Session) -> Vec { vec![] } + #[allow(deprecated)] + #[no_debug] + static mut LOAD: fn() -> Box = || unreachable!(); - pub mod back { - pub mod write { - pub const RELOC_MODEL_ARGS: [(&'static str, ()); 0] = []; - pub const CODE_GEN_MODEL_ARGS: [(&'static str, ()); 0] = []; - pub const TLS_MODEL_ARGS: [(&'static str, ()); 0] = []; + INIT.call_once(|| { + let codegen_name = sess.opts.debugging_opts.codegen_backend.as_ref() + .unwrap_or(&sess.target.target.options.codegen_backend); + let backend = match &codegen_name[..] { + "metadata_only" => { + rustc_codegen_utils::codegen_backend::MetadataOnlyCodegenBackend::new + } + filename if filename.contains(".") => { + load_backend_from_dylib(filename.as_ref()) + } + codegen_name => get_codegen_sysroot(codegen_name), + }; + + unsafe { + LOAD = backend; + } + }); + let backend = unsafe { LOAD() }; + backend.init(sess); + backend +} + +fn get_codegen_sysroot(backend_name: &str) -> fn() -> Box { + // For now we only allow this function to be called once as it'll dlopen a + // few things, which seems to work best if we only do that once. In + // general this assertion never trips due to the once guard in `get_codegen_backend`, + // but there's a few manual calls to this function in this file we protect + // against. + static LOADED: AtomicBool = ATOMIC_BOOL_INIT; + assert!(!LOADED.fetch_or(true, Ordering::SeqCst), + "cannot load the default codegen backend twice"); + + // When we're compiling this library with `--test` it'll run as a binary but + // not actually exercise much functionality. As a result most of the logic + // here is defunkt (it assumes we're a dynamic library in a sysroot) so + // let's just return a dummy creation function which won't be used in + // general anyway. 
+ if cfg!(test) { + return rustc_codegen_utils::codegen_backend::MetadataOnlyCodegenBackend::new + } + + let target = session::config::host_triple(); + let mut sysroot_candidates = vec![filesearch::get_or_default_sysroot()]; + let path = current_dll_path() + .and_then(|s| s.canonicalize().ok()); + if let Some(dll) = path { + // use `parent` twice to chop off the file name and then also the + // directory containing the dll which should be either `lib` or `bin`. + if let Some(path) = dll.parent().and_then(|p| p.parent()) { + // The original `path` pointed at the `rustc_driver` crate's dll. + // Now that dll should only be in one of two locations. The first is + // in the compiler's libdir, for example `$sysroot/lib/*.dll`. The + // other is the target's libdir, for example + // `$sysroot/lib/rustlib/$target/lib/*.dll`. + // + // We don't know which, so let's assume that if our `path` above + // ends in `$target` we *could* be in the target libdir, and always + // assume that we may be in the main libdir. 
+ sysroot_candidates.push(path.to_owned()); + + if path.ends_with(target) { + sysroot_candidates.extend(path.parent() // chop off `$target` + .and_then(|p| p.parent()) // chop off `rustlib` + .and_then(|p| p.parent()) // chop off `lib` + .map(|s| s.to_owned())); + } + } + } + + let sysroot = sysroot_candidates.iter() + .map(|sysroot| { + let libdir = filesearch::relative_target_lib_path(&sysroot, &target); + sysroot.join(libdir) + .with_file_name(option_env!("CFG_CODEGEN_BACKENDS_DIR") + .unwrap_or("codegen-backends")) + }) + .filter(|f| { + info!("codegen backend candidate: {}", f.display()); + f.exists() + }) + .next(); + let sysroot = match sysroot { + Some(path) => path, + None => { + let candidates = sysroot_candidates.iter() + .map(|p| p.display().to_string()) + .collect::>() + .join("\n* "); + let err = format!("failed to find a `codegen-backends` folder \ + in the sysroot candidates:\n* {}", candidates); + early_error(ErrorOutputType::default(), &err); + } + }; + info!("probing {} for a codegen backend", sysroot.display()); + + let d = match sysroot.read_dir() { + Ok(d) => d, + Err(e) => { + let err = format!("failed to load default codegen backend, couldn't \ + read `{}`: {}", sysroot.display(), e); + early_error(ErrorOutputType::default(), &err); + } + }; + + let mut file: Option = None; + + let expected_name = format!("rustc_codegen_llvm-{}", backend_name); + for entry in d.filter_map(|e| e.ok()) { + let path = entry.path(); + let filename = match path.file_name().and_then(|s| s.to_str()) { + Some(s) => s, + None => continue, + }; + if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) { + continue + } + let name = &filename[DLL_PREFIX.len() .. 
filename.len() - DLL_SUFFIX.len()]; + if name != expected_name { + continue + } + if let Some(ref prev) = file { + let err = format!("duplicate codegen backends found\n\ + first: {}\n\ + second: {}\n\ + ", prev.display(), path.display()); + early_error(ErrorOutputType::default(), &err); + } + file = Some(path.clone()); + } + + match file { + Some(ref s) => return load_backend_from_dylib(s), + None => { + let err = format!("failed to load default codegen backend for `{}`, \ + no appropriate codegen dylib found in `{}`", + backend_name, sysroot.display()); + early_error(ErrorOutputType::default(), &err); + } + } + + #[cfg(unix)] + fn current_dll_path() -> Option { + use std::ffi::{OsStr, CStr}; + use std::os::unix::prelude::*; + + unsafe { + let addr = current_dll_path as usize as *mut _; + let mut info = mem::zeroed(); + if libc::dladdr(addr, &mut info) == 0 { + info!("dladdr failed"); + return None + } + if info.dli_fname.is_null() { + info!("dladdr returned null pointer"); + return None + } + let bytes = CStr::from_ptr(info.dli_fname).to_bytes(); + let os = OsStr::from_bytes(bytes); + Some(PathBuf::from(os)) + } + } + + #[cfg(windows)] + fn current_dll_path() -> Option { + use std::ffi::OsString; + use std::os::windows::prelude::*; + + extern "system" { + fn GetModuleHandleExW(dwFlags: u32, + lpModuleName: usize, + phModule: *mut usize) -> i32; + fn GetModuleFileNameW(hModule: usize, + lpFilename: *mut u16, + nSize: u32) -> u32; + } + + const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: u32 = 0x00000004; + + unsafe { + let mut module = 0; + let r = GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, + current_dll_path as usize, + &mut module); + if r == 0 { + info!("GetModuleHandleExW failed: {}", io::Error::last_os_error()); + return None + } + let mut space = Vec::with_capacity(1024); + let r = GetModuleFileNameW(module, + space.as_mut_ptr(), + space.capacity() as u32); + if r == 0 { + info!("GetModuleFileNameW failed: {}", io::Error::last_os_error()); + 
return None + } + let r = r as usize; + if r >= space.capacity() { + info!("our buffer was too small? {}", + io::Error::last_os_error()); + return None + } + space.set_len(r); + let os = OsString::from_wide(&space); + Some(PathBuf::from(os)) } } } @@ -185,11 +466,33 @@ mod rustc_trans { // See comments on CompilerCalls below for details about the callbacks argument. // The FileLoader provides a way to load files from sources other than the file system. pub fn run_compiler<'a>(args: &[String], - callbacks: &mut CompilerCalls<'a>, - file_loader: Option>, - emitter_dest: Option>) + callbacks: Box + sync::Send + 'a>, + file_loader: Option>, + emitter_dest: Option>) -> (CompileResult, Option) { + syntax::with_globals(|| { + let matches = match handle_options(args) { + Some(matches) => matches, + None => return (Ok(()), None), + }; + + let (sopts, cfg) = config::build_session_options_and_crate_config(&matches); + + driver::spawn_thread_pool(sopts, |sopts| { + run_compiler_with_pool(matches, sopts, cfg, callbacks, file_loader, emitter_dest) + }) + }) +} + +fn run_compiler_with_pool<'a>( + matches: getopts::Matches, + sopts: config::Options, + cfg: ast::CrateConfig, + mut callbacks: Box + sync::Send + 'a>, + file_loader: Option>, + emitter_dest: Option> +) -> (CompileResult, Option) { macro_rules! 
do_or_return {($expr: expr, $sess: expr) => { match $expr { Compilation::Stop => return (Ok(()), $sess), @@ -197,13 +500,6 @@ pub fn run_compiler<'a>(args: &[String], } }} - let matches = match handle_options(args) { - Some(matches) => matches, - None => return (Ok(()), None), - }; - - let (sopts, cfg) = config::build_session_options_and_crate_config(&matches); - let descriptions = diagnostics_registry(); do_or_return!(callbacks.early_callback(&matches, @@ -214,48 +510,81 @@ pub fn run_compiler<'a>(args: &[String], None); let (odir, ofile) = make_output(&matches); - let (input, input_file_path) = match make_input(&matches.free) { - Some((input, input_file_path)) => callbacks.some_input(input, input_file_path), + let (input, input_file_path, input_err) = match make_input(&matches.free) { + Some((input, input_file_path, input_err)) => { + let (input, input_file_path) = callbacks.some_input(input, input_file_path); + (input, input_file_path, input_err) + }, None => match callbacks.no_input(&matches, &sopts, &cfg, &odir, &ofile, &descriptions) { - Some((input, input_file_path)) => (input, input_file_path), + Some((input, input_file_path)) => (input, input_file_path, None), None => return (Ok(()), None), }, }; - let cstore = CStore::new(DefaultTransCrate::metadata_loader()); - let loader = file_loader.unwrap_or(box RealFileLoader); - let codemap = Rc::new(CodeMap::with_file_loader(loader, sopts.file_path_mapping())); + let codemap = Lrc::new(CodeMap::with_file_loader(loader, sopts.file_path_mapping())); let mut sess = session::build_session_with_codemap( sopts, input_file_path.clone(), descriptions, codemap, emitter_dest, ); - rustc_trans::init(&sess); + + if let Some(err) = input_err { + // Immediately stop compilation if there was an issue reading + // the input (for example if the input stream is not UTF-8). 
+ sess.err(&err.to_string()); + return (Err(CompileIncomplete::Stopped), Some(sess)); + } + + let codegen_backend = get_codegen_backend(&sess); + rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); let mut cfg = config::build_configuration(&sess, cfg); - target_features::add_configuration(&mut cfg, &sess); + target_features::add_configuration(&mut cfg, &sess, &*codegen_backend); sess.parse_sess.config = cfg; - do_or_return!(callbacks.late_callback(&matches, - &sess, - &cstore, - &input, - &odir, - &ofile), Some(sess)); + let result = { + let plugins = sess.opts.debugging_opts.extra_plugins.clone(); - let plugins = sess.opts.debugging_opts.extra_plugins.clone(); - let control = callbacks.build_controller(&sess, &matches); - (driver::compile_input(&sess, - &cstore, - &input_file_path, - &input, - &odir, - &ofile, - Some(plugins), - &control), - Some(sess)) + let cstore = CStore::new(codegen_backend.metadata_loader()); + + do_or_return!(callbacks.late_callback(&*codegen_backend, + &matches, + &sess, + &cstore, + &input, + &odir, + &ofile), Some(sess)); + + let _sess_abort_error = OnDrop(|| sess.diagnostic().print_error_count()); + + let control = callbacks.build_controller(&sess, &matches); + + driver::compile_input(codegen_backend, + &sess, + &cstore, + &input_file_path, + &input, + &odir, + &ofile, + Some(plugins), + &control) + }; + + (result, Some(sess)) } +#[cfg(unix)] +pub fn set_sigpipe_handler() { + unsafe { + // Set the SIGPIPE signal handler, so that an EPIPE + // will cause rustc to terminate, as expected. + assert!(libc::signal(libc::SIGPIPE, libc::SIG_DFL) != libc::SIG_ERR); + } +} + +#[cfg(windows)] +pub fn set_sigpipe_handler() {} + // Extract output directory and file from matches. 
fn make_output(matches: &getopts::Matches) -> (Option, Option) { let odir = matches.opt_str("out-dir").map(|o| PathBuf::from(&o)); @@ -264,17 +593,22 @@ fn make_output(matches: &getopts::Matches) -> (Option, Option) } // Extract input (string or file and optional path) from matches. -fn make_input(free_matches: &[String]) -> Option<(Input, Option)> { +fn make_input(free_matches: &[String]) -> Option<(Input, Option, Option)> { if free_matches.len() == 1 { let ifile = &free_matches[0]; if ifile == "-" { let mut src = String::new(); - io::stdin().read_to_string(&mut src).unwrap(); + let err = if io::stdin().read_to_string(&mut src).is_err() { + Some(io::Error::new(io::ErrorKind::InvalidData, + "couldn't read from stdin, as it did not contain valid UTF-8")) + } else { + None + }; Some((Input::Str { name: FileName::Anon, input: src }, - None)) + None, err)) } else { Some((Input::File(PathBuf::from(ifile)), - Some(PathBuf::from(ifile)))) + Some(PathBuf::from(ifile)), None)) } } else { None @@ -292,8 +626,9 @@ fn parse_pretty(sess: &Session, } else { None }; - if pretty.is_none() && sess.unstable_options() { - matches.opt_str("unpretty").map(|a| { + + if pretty.is_none() { + sess.opts.debugging_opts.unpretty.as_ref().map(|a| { // extended with unstable pretty-print variants pretty::parse_pretty(sess, &a, true) }) @@ -318,12 +653,12 @@ impl Compilation { } } -// A trait for customising the compilation process. Offers a number of hooks for -// executing custom code or customising input. +/// A trait for customising the compilation process. Offers a number of hooks for +/// executing custom code or customising input. pub trait CompilerCalls<'a> { - // Hook for a callback early in the process of handling arguments. This will - // be called straight after options have been parsed but before anything - // else (e.g., selecting input and output). + /// Hook for a callback early in the process of handling arguments. 
This will + /// be called straight after options have been parsed but before anything + /// else (e.g., selecting input and output). fn early_callback(&mut self, _: &getopts::Matches, _: &config::Options, @@ -334,13 +669,14 @@ pub trait CompilerCalls<'a> { Compilation::Continue } - // Hook for a callback late in the process of handling arguments. This will - // be called just before actual compilation starts (and before build_controller - // is called), after all arguments etc. have been completely handled. + /// Hook for a callback late in the process of handling arguments. This will + /// be called just before actual compilation starts (and before build_controller + /// is called), after all arguments etc. have been completely handled. fn late_callback(&mut self, + _: &dyn CodegenBackend, _: &getopts::Matches, _: &Session, - _: &CrateStore, + _: &CStore, _: &Input, _: &Option, _: &Option) @@ -348,9 +684,9 @@ pub trait CompilerCalls<'a> { Compilation::Continue } - // Called after we extract the input from the arguments. Gives the implementer - // an opportunity to change the inputs or to add some custom input handling. - // The default behaviour is to simply pass through the inputs. + /// Called after we extract the input from the arguments. Gives the implementer + /// an opportunity to change the inputs or to add some custom input handling. + /// The default behaviour is to simply pass through the inputs. fn some_input(&mut self, input: Input, input_path: Option) @@ -358,11 +694,11 @@ pub trait CompilerCalls<'a> { (input, input_path) } - // Called after we extract the input from the arguments if there is no valid - // input. Gives the implementer an opportunity to supply alternate input (by - // returning a Some value) or to add custom behaviour for this error such as - // emitting error messages. Returning None will cause compilation to stop - // at this point. + /// Called after we extract the input from the arguments if there is no valid + /// input. 
Gives the implementer an opportunity to supply alternate input (by + /// returning a Some value) or to add custom behaviour for this error such as + /// emitting error messages. Returning None will cause compilation to stop + /// at this point. fn no_input(&mut self, _: &getopts::Matches, _: &config::Options, @@ -376,10 +712,14 @@ pub trait CompilerCalls<'a> { // Create a CompilController struct for controlling the behaviour of // compilation. - fn build_controller(&mut self, _: &Session, _: &getopts::Matches) -> CompileController<'a>; + fn build_controller( + self: Box, + _: &Session, + _: &getopts::Matches + ) -> CompileController<'a>; } -// CompilerCalls instance for a regular rustc build. +/// CompilerCalls instance for a regular rustc build. #[derive(Copy, Clone)] pub struct RustcDefaultCalls; @@ -508,22 +848,27 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { -> Option<(Input, Option)> { match matches.free.len() { 0 => { - if sopts.describe_lints { - let mut ls = lint::LintStore::new(); - rustc_lint::register_builtins(&mut ls, None); - describe_lints(&ls, false); - return None; - } let mut sess = build_session(sopts.clone(), None, descriptions.clone()); - rustc_trans::init(&sess); + if sopts.describe_lints { + let mut ls = lint::LintStore::new(); + rustc_lint::register_builtins(&mut ls, Some(&sess)); + describe_lints(&sess, &ls, false); + return None; + } rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); let mut cfg = config::build_configuration(&sess, cfg.clone()); - target_features::add_configuration(&mut cfg, &sess); + let codegen_backend = get_codegen_backend(&sess); + target_features::add_configuration(&mut cfg, &sess, &*codegen_backend); sess.parse_sess.config = cfg; - let should_stop = - RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile); + let should_stop = RustcDefaultCalls::print_crate_info( + &*codegen_backend, + &sess, + None, + odir, + ofile + ); if should_stop == Compilation::Stop { return None; @@ 
-536,18 +881,19 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { } fn late_callback(&mut self, + codegen_backend: &dyn CodegenBackend, matches: &getopts::Matches, sess: &Session, - cstore: &CrateStore, + cstore: &CStore, input: &Input, odir: &Option, ofile: &Option) -> Compilation { - RustcDefaultCalls::print_crate_info(sess, Some(input), odir, ofile) + RustcDefaultCalls::print_crate_info(codegen_backend, sess, Some(input), odir, ofile) .and_then(|| RustcDefaultCalls::list_metadata(sess, cstore, matches, input)) } - fn build_controller(&mut self, + fn build_controller(self: Box, sess: &Session, matches: &getopts::Matches) -> CompileController<'a> { @@ -607,12 +953,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { control.after_hir_lowering.stop = Compilation::Stop; } - if !sess.opts.output_types.keys().any(|&i| i == OutputType::Exe || - i == OutputType::Metadata) { - control.after_llvm.stop = Compilation::Stop; - } - - if save_analysis(sess) { + if sess.opts.debugging_opts.save_analysis { enable_save_analysis(&mut control); } @@ -633,7 +974,7 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { pub fn enable_save_analysis(control: &mut CompileController) { control.keep_ast = true; control.after_analysis.callback = box |state| { - time(state.session.time_passes(), "save analysis", || { + time(state.session, "save analysis", || { save::process_crate(state.tcx.unwrap(), state.expanded_crate.unwrap(), state.analysis.unwrap(), @@ -647,13 +988,9 @@ pub fn enable_save_analysis(control: &mut CompileController) { control.make_glob_map = resolve::MakeGlobMap::Yes; } -fn save_analysis(sess: &Session) -> bool { - sess.opts.debugging_opts.save_analysis -} - impl RustcDefaultCalls { pub fn list_metadata(sess: &Session, - cstore: &CrateStore, + cstore: &CStore, matches: &getopts::Matches, input: &Input) -> Compilation { @@ -665,7 +1002,7 @@ impl RustcDefaultCalls { let mut v = Vec::new(); locator::list_file_metadata(&sess.target.target, path, - 
cstore.metadata_loader(), + &*cstore.metadata_loader, &mut v) .unwrap(); println!("{}", String::from_utf8(v).unwrap()); @@ -681,11 +1018,13 @@ impl RustcDefaultCalls { } - fn print_crate_info(sess: &Session, + fn print_crate_info(codegen_backend: &dyn CodegenBackend, + sess: &Session, input: Option<&Input>, odir: &Option, ofile: &Option) -> Compilation { + use rustc::session::config::PrintRequest::*; // PrintRequest::NativeStaticLibs is special - printed during linking // (empty iterator returns true) if sess.opts.prints.iter().all(|&p| p==PrintRequest::NativeStaticLibs) { @@ -707,29 +1046,28 @@ impl RustcDefaultCalls { }; for req in &sess.opts.prints { match *req { - PrintRequest::TargetList => { - let mut targets = rustc_back::target::get_targets().collect::>(); + TargetList => { + let mut targets = rustc_target::spec::get_targets().collect::>(); targets.sort(); println!("{}", targets.join("\n")); }, - PrintRequest::Sysroot => println!("{}", sess.sysroot().display()), - PrintRequest::TargetSpec => println!("{}", sess.target.target.to_json().pretty()), - PrintRequest::FileNames | - PrintRequest::CrateName => { + Sysroot => println!("{}", sess.sysroot().display()), + TargetSpec => println!("{}", sess.target.target.to_json().pretty()), + FileNames | CrateName => { let input = match input { Some(input) => input, None => early_error(ErrorOutputType::default(), "no input file provided"), }; let attrs = attrs.as_ref().unwrap(); let t_outputs = driver::build_output_filenames(input, odir, ofile, attrs, sess); - let id = rustc_trans_utils::link::find_crate_name(Some(sess), attrs, input); + let id = rustc_codegen_utils::link::find_crate_name(Some(sess), attrs, input); if *req == PrintRequest::CrateName { println!("{}", id); continue; } let crate_types = driver::collect_crate_types(sess, attrs); for &style in &crate_types { - let fname = rustc_trans_utils::link::filename_for_input( + let fname = rustc_codegen_utils::link::filename_for_input( sess, style, &id, @@ -741,14 
+1079,14 @@ impl RustcDefaultCalls { .to_string_lossy()); } } - PrintRequest::Cfg => { + Cfg => { let allow_unstable_cfg = UnstableFeatures::from_environment() .is_nightly_build(); let mut cfgs = Vec::new(); for &(name, ref value) in sess.parse_sess.config.iter() { let gated_cfg = GatedCfg::gate(&ast::MetaItem { - name, + ident: ast::Path::from_ident(ast::Ident::with_empty_ctxt(name)), node: ast::MetaItemKind::Word, span: DUMMY_SP, }); @@ -772,7 +1110,7 @@ impl RustcDefaultCalls { cfgs.push(if let Some(value) = value { format!("{}=\"{}\"", name, value) } else { - format!("{}", name) + name.to_string() }); } @@ -781,29 +1119,8 @@ impl RustcDefaultCalls { println!("{}", cfg); } } - PrintRequest::RelocationModels => { - println!("Available relocation models:"); - for &(name, _) in rustc_trans::back::write::RELOC_MODEL_ARGS.iter() { - println!(" {}", name); - } - println!(""); - } - PrintRequest::CodeModels => { - println!("Available code models:"); - for &(name, _) in rustc_trans::back::write::CODE_GEN_MODEL_ARGS.iter(){ - println!(" {}", name); - } - println!(""); - } - PrintRequest::TlsModels => { - println!("Available TLS models:"); - for &(name, _) in rustc_trans::back::write::TLS_MODEL_ARGS.iter(){ - println!(" {}", name); - } - println!(""); - } - PrintRequest::TargetCPUs | PrintRequest::TargetFeatures => { - rustc_trans::print(*req, sess); + RelocationModels | CodeModels | TlsModels | TargetCPUs | TargetFeatures => { + codegen_backend.print(*req, sess); } // Any output here interferes with Cargo's parsing of other printed output PrintRequest::NativeStaticLibs => {} @@ -844,7 +1161,7 @@ pub fn version(binary: &str, matches: &getopts::Matches) { println!("commit-date: {}", unw(commit_date_str())); println!("host: {}", config::host_triple()); println!("release: {}", unw(release_str())); - rustc_trans::print_version(); + get_codegen_sysroot("llvm")().print_version(); } } @@ -858,7 +1175,7 @@ fn usage(verbose: bool, include_unstable_options: bool) { for option in 
groups.iter().filter(|x| include_unstable_options || x.is_stable()) { (option.apply)(&mut options); } - let message = format!("Usage: rustc [OPTIONS] INPUT"); + let message = "Usage: rustc [OPTIONS] INPUT".to_string(); let nightly_help = if nightly_options::is_nightly_build() { "\n -Z help Print internal options for debugging rustc" } else { @@ -878,7 +1195,16 @@ fn usage(verbose: bool, include_unstable_options: bool) { verbose_help); } -fn describe_lints(lint_store: &lint::LintStore, loaded_plugins: bool) { +fn print_wall_help() { + println!(" +The flag `-Wall` does not exist in `rustc`. Most useful lints are enabled by +default. Use `rustc -W help` to see all available lints. It's more common to put +warning settings in the crate root using `#![warn(LINT_NAME)]` instead of using +the command line flag directly. +"); +} + +fn describe_lints(sess: &Session, lint_store: &lint::LintStore, loaded_plugins: bool) { println!(" Available lint options: -W Warn about @@ -890,25 +1216,17 @@ Available lint options: "); - fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> { + fn sort_lints(sess: &Session, lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> { let mut lints: Vec<_> = lints.into_iter().map(|(x, _)| x).collect(); - lints.sort_by(|x: &&Lint, y: &&Lint| { - match x.default_level.cmp(&y.default_level) { - // The sort doesn't case-fold but it's doubtful we care. - Equal => x.name.cmp(y.name), - r => r, - } - }); + // The sort doesn't case-fold but it's doubtful we care. 
+ lints.sort_by_cached_key(|x: &&Lint| (x.default_level(sess), x.name)); lints } fn sort_lint_groups(lints: Vec<(&'static str, Vec, bool)>) -> Vec<(&'static str, Vec)> { let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect(); - lints.sort_by(|&(x, _): &(&'static str, Vec), - &(y, _): &(&'static str, Vec)| { - x.cmp(y) - }); + lints.sort_by_key(|l| l.0); lints } @@ -916,8 +1234,8 @@ Available lint options: .iter() .cloned() .partition(|&(_, p)| p); - let plugin = sort_lints(plugin); - let builtin = sort_lints(builtin); + let plugin = sort_lints(sess, plugin); + let builtin = sort_lints(sess, builtin); let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups() .iter() @@ -932,9 +1250,7 @@ Available lint options: .max() .unwrap_or(0); let padded = |x: &str| { - let mut s = repeat(" ") - .take(max_name_len - x.chars().count()) - .collect::(); + let mut s = " ".repeat(max_name_len - x.chars().count()); s.push_str(x); s }; @@ -966,9 +1282,7 @@ Available lint options: .unwrap_or(0)); let padded = |x: &str| { - let mut s = repeat(" ") - .take(max_name_len - x.chars().count()) - .collect::(); + let mut s = " ".repeat(max_name_len - x.chars().count()); s.push_str(x); s }; @@ -1122,6 +1436,13 @@ pub fn handle_options(args: &[String]) -> Option { return None; } + // Handle the special case of -Wall. + let wall = matches.opt_strs("W"); + if wall.iter().any(|x| *x == "all") { + print_wall_help(); + return None; + } + // Don't handle -W help here, because we might first load plugins. 
let r = matches.opt_strs("Z"); if r.iter().any(|x| *x == "help") { @@ -1141,7 +1462,7 @@ pub fn handle_options(args: &[String]) -> Option { } if cg_flags.contains(&"passes=list".to_string()) { - rustc_trans::print_passes(); + get_codegen_sysroot("llvm")().print_passes(); return None; } @@ -1166,26 +1487,166 @@ fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, Vec(f: F) -> Result> +/// Runs `f` in a suitable thread for running `rustc`; returns a `Result` with either the return +/// value of `f` or -- if a panic occurs -- the panic value. +/// +/// This version applies the given name to the thread. This is used by rustdoc to ensure consistent +/// doctest output across platforms and executions. +pub fn in_named_rustc_thread(name: String, f: F) -> Result> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { // Temporarily have stack size set to 16MB to deal with nom-using crates failing const STACK_SIZE: usize = 16 * 1024 * 1024; // 16MB - let mut cfg = thread::Builder::new().name("rustc".to_string()); + #[cfg(all(unix,not(target_os = "haiku")))] + let spawn_thread = unsafe { + // Fetch the current resource limits + let mut rlim = libc::rlimit { + rlim_cur: 0, + rlim_max: 0, + }; + if libc::getrlimit(libc::RLIMIT_STACK, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + error!("in_rustc_thread: error calling getrlimit: {}", err); + true + } else if rlim.rlim_max < STACK_SIZE as libc::rlim_t { + true + } else if rlim.rlim_cur < STACK_SIZE as libc::rlim_t { + std::rt::deinit_stack_guard(); + rlim.rlim_cur = STACK_SIZE as libc::rlim_t; + if libc::setrlimit(libc::RLIMIT_STACK, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + error!("in_rustc_thread: error calling setrlimit: {}", err); + std::rt::update_stack_guard(); + true + } else { + std::rt::update_stack_guard(); + false + } + } else { + false + } + }; - // FIXME: Hacks on hacks. If the env is trying to override the stack size - // then *don't* set it explicitly. 
- if env::var_os("RUST_MIN_STACK").is_none() { - cfg = cfg.stack_size(STACK_SIZE); + // We set the stack size at link time. See src/rustc/rustc.rs. + #[cfg(windows)] + let spawn_thread = false; + + #[cfg(target_os = "haiku")] + let spawn_thread = unsafe { + // Haiku does not have setrlimit implemented for the stack size. + // By default it does have the 16 MB stack limit, but we check this in + // case the minimum STACK_SIZE changes or Haiku's defaults change. + let mut rlim = libc::rlimit { + rlim_cur: 0, + rlim_max: 0, + }; + if libc::getrlimit(libc::RLIMIT_STACK, &mut rlim) != 0 { + let err = io::Error::last_os_error(); + error!("in_rustc_thread: error calling getrlimit: {}", err); + true + } else if rlim.rlim_cur >= STACK_SIZE { + false + } else { + true + } + }; + + #[cfg(not(any(windows,unix)))] + let spawn_thread = true; + + // The or condition is added from backward compatibility. + if spawn_thread || env::var_os("RUST_MIN_STACK").is_some() { + let mut cfg = thread::Builder::new().name(name); + + // FIXME: Hacks on hacks. If the env is trying to override the stack size + // then *don't* set it explicitly. + if env::var_os("RUST_MIN_STACK").is_none() { + cfg = cfg.stack_size(STACK_SIZE); + } + + let thread = cfg.spawn(f); + thread.unwrap().join() + } else { + let f = panic::AssertUnwindSafe(f); + panic::catch_unwind(f) + } +} + +/// Runs `f` in a suitable thread for running `rustc`; returns a +/// `Result` with either the return value of `f` or -- if a panic +/// occurs -- the panic value. +pub fn in_rustc_thread(f: F) -> Result> + where F: FnOnce() -> R + Send + 'static, + R: Send + 'static, +{ + in_named_rustc_thread("rustc".to_string(), f) +} + +/// Get a list of extra command-line flags provided by the user, as strings. +/// +/// This function is used during ICEs to show more information useful for +/// debugging, since some ICEs only happens with non-default compiler flags +/// (and the users don't always report them). 
+fn extra_compiler_flags() -> Option<(Vec, bool)> { + let args = env::args_os().map(|arg| arg.to_string_lossy().to_string()).collect::>(); + + // Avoid printing help because of empty args. This can suggest the compiler + // itself is not the program root (consider RLS). + if args.len() < 2 { + return None; } - let thread = cfg.spawn(f); - thread.unwrap().join() + let matches = if let Some(matches) = handle_options(&args) { + matches + } else { + return None; + }; + + let mut result = Vec::new(); + let mut excluded_cargo_defaults = false; + for flag in ICE_REPORT_COMPILER_FLAGS { + let prefix = if flag.len() == 1 { "-" } else { "--" }; + + for content in &matches.opt_strs(flag) { + // Split always returns the first element + let name = if let Some(first) = content.split('=').next() { + first + } else { + &content + }; + + let content = if ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.contains(&name) { + name + } else { + content + }; + + if !ICE_REPORT_COMPILER_FLAGS_EXCLUDE.contains(&name) { + result.push(format!("{}{} {}", prefix, flag, content)); + } else { + excluded_cargo_defaults = true; + } + } + } + + if result.len() > 0 { + Some((result, excluded_cargo_defaults)) + } else { + None + } +} + +#[derive(Debug)] +pub struct CompilationFailure; + +impl Error for CompilationFailure {} + +impl Display for CompilationFailure { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "compilation had errors") + } } /// Run a procedure which will detect panics in the compiler and print nicer @@ -1193,31 +1654,20 @@ pub fn in_rustc_thread(f: F) -> Result> /// /// The diagnostic emitter yielded to the procedure should be used for reporting /// errors of the compiler. 
-pub fn monitor(f: F) { - struct Sink(Arc>>); - impl Write for Sink { - fn write(&mut self, data: &[u8]) -> io::Result { - Write::write(&mut *self.0.lock().unwrap(), data) - } - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - } - - let data = Arc::new(Mutex::new(Vec::new())); - let err = Sink(data.clone()); - - let result = in_rustc_thread(move || { - io::set_panic(Some(box err)); +pub fn monitor(f: F) -> Result<(), CompilationFailure> { + in_rustc_thread(move || { f() - }); + }).map_err(|value| { + if value.is::() { + CompilationFailure + } else { + // Thread panicked without emitting a fatal diagnostic + eprintln!(""); - if let Err(value) = result { - // Thread panicked without emitting a fatal diagnostic - if !value.is::() { let emitter = Box::new(errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto, None, + false, false)); let handler = errors::Handler::with_emitter(true, false, emitter); @@ -1229,48 +1679,33 @@ pub fn monitor(f: F) { errors::Level::Bug); } - let xs = ["the compiler unexpectedly panicked. this is a bug.".to_string(), - format!("we would appreciate a bug report: {}", BUG_REPORT_URL), - format!("rustc {} running on {}", - option_env!("CFG_VERSION").unwrap_or("unknown_version"), - config::host_triple())]; + let mut xs = vec![ + "the compiler unexpectedly panicked. 
this is a bug.".to_string(), + format!("we would appreciate a bug report: {}", BUG_REPORT_URL), + format!("rustc {} running on {}", + option_env!("CFG_VERSION").unwrap_or("unknown_version"), + config::host_triple()), + ]; + + if let Some((flags, excluded_cargo_defaults)) = extra_compiler_flags() { + xs.push(format!("compiler flags: {}", flags.join(" "))); + + if excluded_cargo_defaults { + xs.push("some of the compiler flags provided by cargo are hidden".to_string()); + } + } + for note in &xs { handler.emit(&MultiSpan::new(), ¬e, errors::Level::Note); } - if match env::var_os("RUST_BACKTRACE") { - Some(val) => &val != "0", - None => false, - } { - handler.emit(&MultiSpan::new(), - "run with `RUST_BACKTRACE=1` for a backtrace", - errors::Level::Note); - } - eprintln!("{}", str::from_utf8(&data.lock().unwrap()).unwrap()); + panic::resume_unwind(Box::new(errors::FatalErrorMarker)); } - - exit_on_err(); - } + }) } -fn exit_on_err() -> ! { - // Panic so the process returns a failure code, but don't pollute the - // output with some unnecessary panic messages, we've already - // printed everything that we needed to. 
- io::set_panic(Some(box io::sink())); - panic!(); -} - -#[cfg(stage0)] -pub fn diagnostics_registry() -> errors::registry::Registry { - use errors::registry::Registry; - - Registry::new(&[]) -} - -#[cfg(not(stage0))] pub fn diagnostics_registry() -> errors::registry::Registry { use errors::registry::Registry; @@ -1279,9 +1714,9 @@ pub fn diagnostics_registry() -> errors::registry::Registry { all_errors.extend_from_slice(&rustc_typeck::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); - #[cfg(feature="llvm")] - all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS); + // FIXME: need to figure out a way to get these back in here + // all_errors.extend_from_slice(get_codegen_backend(sess).diagnostics()); + all_errors.extend_from_slice(&rustc_codegen_utils::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS); @@ -1291,20 +1726,25 @@ pub fn diagnostics_registry() -> errors::registry::Registry { Registry::new(&all_errors) } -pub fn get_args() -> Vec { - env::args_os().enumerate() - .map(|(i, arg)| arg.into_string().unwrap_or_else(|arg| { - early_error(ErrorOutputType::default(), - &format!("Argument {} is not valid Unicode: {:?}", i, arg)) - })) - .collect() +/// This allows tools to enable rust logging without having to magically match rustc's +/// log crate version +pub fn init_rustc_env_logger() { + env_logger::init(); } pub fn main() { - env_logger::init().unwrap(); - let result = run(|| run_compiler(&get_args(), - &mut RustcDefaultCalls, - None, - None)); + init_rustc_env_logger(); + let result = run(|| { + let args = env::args_os().enumerate() + .map(|(i, arg)| arg.into_string().unwrap_or_else(|arg| { + early_error(ErrorOutputType::default(), + &format!("Argument {} is not valid 
Unicode: {:?}", i, arg)) + })) + .collect::>(); + run_compiler(&args, + Box::new(RustcDefaultCalls), + None, + None) + }); process::exit(result as i32); } diff --git a/src/librustc_driver/pretty.rs b/src/librustc_driver/pretty.rs index 6ce6929af5ca..a66392833f69 100644 --- a/src/librustc_driver/pretty.rs +++ b/src/librustc_driver/pretty.rs @@ -20,11 +20,13 @@ use {abort_on_err, driver}; use rustc::ty::{self, TyCtxt, Resolutions, AllArenas}; use rustc::cfg; use rustc::cfg::graphviz::LabelledCFG; -use rustc::middle::cstore::CrateStore; use rustc::session::Session; use rustc::session::config::{Input, OutputFilenames}; use rustc_borrowck as borrowck; use rustc_borrowck::graphviz as borrowck_dot; +use rustc_data_structures::small_vec::OneVector; +use rustc_data_structures::thin_vec::ThinVec; +use rustc_metadata::cstore::CStore; use rustc_mir::util::{write_mir_pretty, write_mir_graphviz}; @@ -33,7 +35,6 @@ use syntax::fold::{self, Folder}; use syntax::print::{pprust}; use syntax::print::pprust::PrintState; use syntax::ptr::P; -use syntax::util::small_vector::SmallVector; use syntax_pos::{self, FileName}; use graphviz as dot; @@ -66,7 +67,7 @@ pub enum PpSourceMode { pub enum PpFlowGraphMode { Default, /// Drops the labels from the edges in the flowgraph output. This - /// is mostly for use in the --unpretty flowgraph run-make tests, + /// is mostly for use in the -Z unpretty flowgraph run-make tests, /// since the labels are largely uninteresting in those cases and /// have become a pain to maintain. 
UnlabelledEdges, @@ -170,7 +171,7 @@ impl PpSourceMode { hir_map: Option<&hir_map::Map<'tcx>>, f: F) -> A - where F: FnOnce(&PrinterSupport) -> A + where F: FnOnce(&dyn PrinterSupport) -> A { match *self { PpmNormal | PpmEveryBodyLoops | PpmExpanded => { @@ -199,7 +200,7 @@ impl PpSourceMode { } fn call_with_pp_support_hir<'tcx, A, F>(&self, sess: &'tcx Session, - cstore: &'tcx CrateStore, + cstore: &'tcx CStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, @@ -208,7 +209,7 @@ impl PpSourceMode { id: &str, f: F) -> A - where F: FnOnce(&HirPrinterSupport, &hir::Crate) -> A + where F: FnOnce(&dyn HirPrinterSupport, &hir::Crate) -> A { match *self { PpmNormal => { @@ -228,7 +229,9 @@ impl PpSourceMode { } PpmTyped => { let control = &driver::CompileController::basic(); - abort_on_err(driver::phase_3_run_analysis_passes(control, + let codegen_backend = ::get_codegen_backend(sess); + abort_on_err(driver::phase_3_run_analysis_passes(&*codegen_backend, + control, sess, cstore, hir_map.clone(), @@ -263,7 +266,7 @@ trait PrinterSupport: pprust::PpAnn { /// /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) - fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn; + fn pp_ann<'a>(&'a self) -> &'a dyn pprust::PpAnn; } trait HirPrinterSupport<'hir>: pprust_hir::PpAnn { @@ -279,7 +282,7 @@ trait HirPrinterSupport<'hir>: pprust_hir::PpAnn { /// /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) - fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn; + fn pp_ann<'a>(&'a self) -> &'a dyn pprust_hir::PpAnn; /// Computes an user-readable representation of a path, if possible. 
fn node_path(&self, id: ast::NodeId) -> Option { @@ -303,7 +306,7 @@ impl<'hir> PrinterSupport for NoAnn<'hir> { self.sess } - fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { + fn pp_ann<'a>(&'a self) -> &'a dyn pprust::PpAnn { self } } @@ -317,7 +320,7 @@ impl<'hir> HirPrinterSupport<'hir> for NoAnn<'hir> { self.hir_map.as_ref() } - fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { + fn pp_ann<'a>(&'a self) -> &'a dyn pprust_hir::PpAnn { self } } @@ -344,7 +347,7 @@ impl<'hir> PrinterSupport for IdentifiedAnnotation<'hir> { self.sess } - fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { + fn pp_ann<'a>(&'a self) -> &'a dyn pprust::PpAnn { self } } @@ -395,7 +398,7 @@ impl<'hir> HirPrinterSupport<'hir> for IdentifiedAnnotation<'hir> { self.hir_map.as_ref() } - fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { + fn pp_ann<'a>(&'a self) -> &'a dyn pprust_hir::PpAnn { self } } @@ -456,7 +459,7 @@ impl<'a> PrinterSupport for HygieneAnnotation<'a> { self.sess } - fn pp_ann(&self) -> &pprust::PpAnn { + fn pp_ann(&self) -> &dyn pprust::PpAnn { self } } @@ -464,11 +467,11 @@ impl<'a> PrinterSupport for HygieneAnnotation<'a> { impl<'a> pprust::PpAnn for HygieneAnnotation<'a> { fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { - pprust::NodeIdent(&ast::Ident { name, ctxt }) => { + pprust::NodeIdent(&ast::Ident { name, span }) => { s.s.space()?; // FIXME #16420: this doesn't display the connections // between syntax contexts - s.synth_comment(format!("{}{:?}", name.as_u32(), ctxt)) + s.synth_comment(format!("{}{:?}", name.as_u32(), span.ctxt())) } pprust::NodeName(&name) => { s.s.space()?; @@ -494,7 +497,7 @@ impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> { Some(&self.tcx.hir) } - fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { + fn pp_ann<'a>(&'a self) -> &'a dyn pprust_hir::PpAnn { self } @@ -582,6 +585,13 @@ impl<'a, 'hir> Iterator for NodesMatchingUII<'a, 'hir> { &mut NodesMatchingSuffix(ref mut iter) => 
iter.next(), } } + + fn size_hint(&self) -> (usize, Option) { + match self { + &NodesMatchingDirect(ref iter) => iter.size_hint(), + &NodesMatchingSuffix(ref iter) => iter.size_hint(), + } + } } impl UserIdentifiedItem { @@ -641,18 +651,25 @@ impl UserIdentifiedItem { // [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401 pub struct ReplaceBodyWithLoop<'a> { within_static_or_const: bool, + nested_blocks: Option>, sess: &'a Session, } impl<'a> ReplaceBodyWithLoop<'a> { pub fn new(sess: &'a Session) -> ReplaceBodyWithLoop<'a> { - ReplaceBodyWithLoop { within_static_or_const: false, sess } + ReplaceBodyWithLoop { + within_static_or_const: false, + nested_blocks: None, + sess + } } fn run R>(&mut self, is_const: bool, action: F) -> R { let old_const = mem::replace(&mut self.within_static_or_const, is_const); + let old_blocks = self.nested_blocks.take(); let ret = action(self); self.within_static_or_const = old_const; + self.nested_blocks = old_blocks; ret } @@ -660,7 +677,7 @@ impl<'a> ReplaceBodyWithLoop<'a> { if let ast::FunctionRetTy::Ty(ref ty) = ret_ty.output { fn involves_impl_trait(ty: &ast::Ty) -> bool { match ty.node { - ast::TyKind::ImplTrait(_) => true, + ast::TyKind::ImplTrait(..) => true, ast::TyKind::Slice(ref subty) | ast::TyKind::Array(ref subty, _) | ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. 
}) | @@ -668,14 +685,20 @@ impl<'a> ReplaceBodyWithLoop<'a> { ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), ast::TyKind::Path(_, ref path) => path.segments.iter().any(|seg| { - match seg.parameters.as_ref().map(|p| &**p) { + match seg.args.as_ref().map(|generic_arg| &**generic_arg) { None => false, - Some(&ast::PathParameters::AngleBracketed(ref data)) => - any_involves_impl_trait(data.types.iter()) || - any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)), - Some(&ast::PathParameters::Parenthesized(ref data)) => + Some(&ast::GenericArgs::AngleBracketed(ref data)) => { + let types = data.args.iter().filter_map(|arg| match arg { + ast::GenericArg::Type(ty) => Some(ty), + _ => None, + }); + any_involves_impl_trait(types.into_iter()) || + any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)) + }, + Some(&ast::GenericArgs::Parenthesized(ref data)) => { any_involves_impl_trait(data.inputs.iter()) || - any_involves_impl_trait(data.output.iter()), + any_involves_impl_trait(data.output.iter()) + } } }), _ => false, @@ -697,69 +720,115 @@ impl<'a> fold::Folder for ReplaceBodyWithLoop<'a> { fn fold_item_kind(&mut self, i: ast::ItemKind) -> ast::ItemKind { let is_const = match i { ast::ItemKind::Static(..) | ast::ItemKind::Const(..) => true, - ast::ItemKind::Fn(ref decl, _, ref constness, _, _, _) => - constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), + ast::ItemKind::Fn(ref decl, ref header, _, _) => + header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_item_kind(i, s)) } - fn fold_trait_item(&mut self, i: ast::TraitItem) -> SmallVector { + fn fold_trait_item(&mut self, i: ast::TraitItem) -> OneVector { let is_const = match i.node { ast::TraitItemKind::Const(..) => true, - ast::TraitItemKind::Method(ast::MethodSig { ref decl, ref constness, .. 
}, _) => - constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), + ast::TraitItemKind::Method(ast::MethodSig { ref decl, ref header, .. }, _) => + header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_trait_item(i, s)) } - fn fold_impl_item(&mut self, i: ast::ImplItem) -> SmallVector { + fn fold_impl_item(&mut self, i: ast::ImplItem) -> OneVector { let is_const = match i.node { ast::ImplItemKind::Const(..) => true, - ast::ImplItemKind::Method(ast::MethodSig { ref decl, ref constness, .. }, _) => - constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), + ast::ImplItemKind::Method(ast::MethodSig { ref decl, ref header, .. }, _) => + header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), _ => false, }; self.run(is_const, |s| fold::noop_fold_impl_item(i, s)) } + fn fold_anon_const(&mut self, c: ast::AnonConst) -> ast::AnonConst { + self.run(true, |s| fold::noop_fold_anon_const(c, s)) + } + fn fold_block(&mut self, b: P) -> P { - fn expr_to_block(rules: ast::BlockCheckMode, + fn stmt_to_block(rules: ast::BlockCheckMode, recovered: bool, - e: Option>, - sess: &Session) -> P { - P(ast::Block { - stmts: e.map(|e| { - ast::Stmt { - id: sess.next_node_id(), - span: e.span, - node: ast::StmtKind::Expr(e), - } - }) - .into_iter() - .collect(), + s: Option, + sess: &Session) -> ast::Block { + ast::Block { + stmts: s.into_iter().collect(), rules, id: sess.next_node_id(), span: syntax_pos::DUMMY_SP, recovered, - }) + } } - if !self.within_static_or_const { - - let empty_block = expr_to_block(BlockCheckMode::Default, false, None, self.sess); - let loop_expr = P(ast::Expr { - node: ast::ExprKind::Loop(empty_block, None), - id: self.sess.next_node_id(), + fn block_to_stmt(b: ast::Block, sess: &Session) -> ast::Stmt { + let expr = P(ast::Expr { + id: sess.next_node_id(), + node: ast::ExprKind::Block(P(b), None), span: syntax_pos::DUMMY_SP, - 
attrs: ast::ThinVec::new(), + attrs: ThinVec::new(), }); - expr_to_block(b.rules, b.recovered, Some(loop_expr), self.sess) + ast::Stmt { + id: sess.next_node_id(), + node: ast::StmtKind::Expr(expr), + span: syntax_pos::DUMMY_SP, + } + } - } else { + let empty_block = stmt_to_block(BlockCheckMode::Default, false, None, self.sess); + let loop_expr = P(ast::Expr { + node: ast::ExprKind::Loop(P(empty_block), None), + id: self.sess.next_node_id(), + span: syntax_pos::DUMMY_SP, + attrs: ThinVec::new(), + }); + + let loop_stmt = ast::Stmt { + id: self.sess.next_node_id(), + span: syntax_pos::DUMMY_SP, + node: ast::StmtKind::Expr(loop_expr), + }; + + if self.within_static_or_const { fold::noop_fold_block(b, self) + } else { + b.map(|b| { + let mut stmts = vec![]; + for s in b.stmts { + let old_blocks = self.nested_blocks.replace(vec![]); + + stmts.extend(self.fold_stmt(s).into_iter().filter(|s| s.is_item())); + + // we put a Some in there earlier with that replace(), so this is valid + let new_blocks = self.nested_blocks.take().unwrap(); + self.nested_blocks = old_blocks; + stmts.extend(new_blocks.into_iter().map(|b| block_to_stmt(b, &self.sess))); + } + + let mut new_block = ast::Block { + stmts, + ..b + }; + + if let Some(old_blocks) = self.nested_blocks.as_mut() { + //push our fresh block onto the cache and yield an empty block with `loop {}` + if !new_block.stmts.is_empty() { + old_blocks.push(new_block); + } + + stmt_to_block(b.rules, b.recovered, Some(loop_stmt), self.sess) + } else { + //push `loop {}` onto the end of our fresh block and yield that + new_block.stmts.push(loop_stmt); + + new_block + } + }) } } @@ -881,7 +950,7 @@ pub fn print_after_parsing(sess: &Session, if let PpmSource(s) = ppm { // Silently ignores an identified node. 
- let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; s.call_with_pp_support(sess, None, move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); @@ -903,7 +972,7 @@ pub fn print_after_parsing(sess: &Session, } pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, - cstore: &'tcx CrateStore, + cstore: &'tcx CStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, @@ -938,7 +1007,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, match (ppm, opt_uii) { (PpmSource(s), _) => { // Silently ignores an identified node. - let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; s.call_with_pp_support(sess, Some(hir_map), move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); @@ -954,7 +1023,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, } (PpmHir(s), None) => { - let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, @@ -978,7 +1047,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, } (PpmHirTree(s), None) => { - let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, @@ -994,7 +1063,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, } (PpmHir(s), Some(uii)) => { - let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, @@ -1006,7 +1075,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, move |annotation, _| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); - let hir_map = annotation.hir_map().expect("--unpretty missing HIR map"); + let hir_map = annotation.hir_map().expect("-Z unpretty missing HIR map"); let mut pp_state = pprust_hir::State::new_from_input(sess.codemap(), 
&sess.parse_sess, src_name, @@ -1019,7 +1088,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, pp_state.print_node(node)?; pp_state.s.space()?; let path = annotation.node_path(node_id) - .expect("--unpretty missing node paths"); + .expect("-Z unpretty missing node paths"); pp_state.synth_comment(path)?; pp_state.s.hardbreak()?; } @@ -1028,7 +1097,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, } (PpmHirTree(s), Some(uii)) => { - let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; s.call_with_pp_support_hir(sess, cstore, hir_map, @@ -1059,7 +1128,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, // with a different callback than the standard driver, so that isn't easy. // Instead, we call that function ourselves. fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, - cstore: &'a CrateStore, + cstore: &'a CStore, hir_map: &hir_map::Map<'tcx>, analysis: &ty::CrateAnalysis, resolutions: &Resolutions, @@ -1071,7 +1140,7 @@ fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, ofile: Option<&Path>) { let nodeid = if let Some(uii) = uii { debug!("pretty printing for {:?}", uii); - Some(uii.to_one_node_id("--unpretty", sess, &hir_map)) + Some(uii.to_one_node_id("-Z unpretty", sess, &hir_map)) } else { debug!("pretty printing for whole crate"); None @@ -1080,7 +1149,9 @@ fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, let mut out = Vec::new(); let control = &driver::CompileController::basic(); - abort_on_err(driver::phase_3_run_analysis_passes(control, + let codegen_backend = ::get_codegen_backend(sess); + abort_on_err(driver::phase_3_run_analysis_passes(&*codegen_backend, + control, sess, cstore, hir_map.clone(), @@ -1120,7 +1191,7 @@ fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, Some(code) => { let variants = gather_flowgraph_variants(tcx.sess); - let out: &mut Write = &mut out; + let out: &mut dyn Write = &mut out; print_flowgraph(variants, tcx, code, 
mode, out) } diff --git a/src/librustc_driver/profile/mod.rs b/src/librustc_driver/profile/mod.rs index 061077d05a43..2ec85e1c27f1 100644 --- a/src/librustc_driver/profile/mod.rs +++ b/src/librustc_driver/profile/mod.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use rustc::session::Session; use rustc::util::common::{ProfQDumpParams, ProfileQueriesMsg, profq_msg, profq_set_chan}; use std::sync::mpsc::{Receiver}; use std::io::{Write}; @@ -17,11 +18,11 @@ use std::time::{Duration, Instant}; pub mod trace; /// begin a profile thread, if not already running -pub fn begin() { +pub fn begin(sess: &Session) { use std::thread; use std::sync::mpsc::{channel}; let (tx, rx) = channel(); - if profq_set_chan(tx) { + if profq_set_chan(sess, tx) { thread::spawn(move||profile_queries_thread(rx)); } } @@ -30,7 +31,7 @@ pub fn begin() { /// wait for this dump to complete. /// /// wraps the RPC (send/recv channel logic) of requesting a dump. 
-pub fn dump(path:String) { +pub fn dump(sess: &Session, path: String) { use std::sync::mpsc::{channel}; let (tx, rx) = channel(); let params = ProfQDumpParams{ @@ -39,7 +40,7 @@ pub fn dump(path:String) { // is written; false for now dump_profq_msg_log:true, }; - profq_msg(ProfileQueriesMsg::Dump(params)); + profq_msg(sess, ProfileQueriesMsg::Dump(params)); let _ = rx.recv().unwrap(); } @@ -61,7 +62,7 @@ struct StackFrame { pub traces: Vec, } -fn total_duration(traces: &Vec) -> Duration { +fn total_duration(traces: &[trace::Rec]) -> Duration { let mut sum : Duration = Duration::new(0,0); for t in traces.iter() { sum += t.dur_total; } return sum diff --git a/src/librustc_driver/profile/trace.rs b/src/librustc_driver/profile/trace.rs index 280f3c8c7967..f31111e37ba0 100644 --- a/src/librustc_driver/profile/trace.rs +++ b/src/librustc_driver/profile/trace.rs @@ -10,7 +10,7 @@ use super::*; use syntax_pos::SpanData; -use rustc::ty::maps::QueryMsg; +use rustc::util::common::QueryMsg; use std::fs::File; use std::time::{Duration, Instant}; use std::collections::hash_map::HashMap; @@ -62,7 +62,7 @@ pub fn html_of_effect(eff: &Effect) -> (String, String) { match *eff { Effect::TimeBegin(ref msg) => { (msg.clone(), - format!("time-begin")) + "time-begin".to_string()) }, Effect::TaskBegin(ref key) => { let cons = cons_of_key(key); @@ -91,23 +91,23 @@ fn html_of_duration(_start: &Instant, dur: &Duration) -> (String, String) { fn html_of_fraction(frac: f64) -> (String, String) { let css = { - if frac > 0.50 { format!("frac-50") } - else if frac > 0.40 { format!("frac-40") } - else if frac > 0.30 { format!("frac-30") } - else if frac > 0.20 { format!("frac-20") } - else if frac > 0.10 { format!("frac-10") } - else if frac > 0.05 { format!("frac-05") } - else if frac > 0.02 { format!("frac-02") } - else if frac > 0.01 { format!("frac-01") } - else if frac > 0.001 { format!("frac-001") } - else { format!("frac-0") } + if frac > 0.50 { "frac-50".to_string() } + else if frac > 0.40 
{ "frac-40".to_string() } + else if frac > 0.30 { "frac-30".to_string() } + else if frac > 0.20 { "frac-20".to_string() } + else if frac > 0.10 { "frac-10".to_string() } + else if frac > 0.05 { "frac-05".to_string() } + else if frac > 0.02 { "frac-02".to_string() } + else if frac > 0.01 { "frac-01".to_string() } + else if frac > 0.001 { "frac-001".to_string() } + else { "frac-0".to_string() } }; let percent = frac * 100.0; if percent > 0.1 { (format!("{:.1}%", percent), css) } - else { (format!("< 0.1%", ), css) } + else { ("< 0.1%".to_string(), css) } } -fn total_duration(traces: &Vec) -> Duration { +fn total_duration(traces: &[Rec]) -> Duration { let mut sum : Duration = Duration::new(0,0); for t in traces.iter() { sum += t.dur_total; @@ -123,7 +123,7 @@ fn duration_div(nom: Duration, den: Duration) -> f64 { to_nanos(nom) as f64 / to_nanos(den) as f64 } -fn write_traces_rec(file: &mut File, traces: &Vec, total: Duration, depth: usize) { +fn write_traces_rec(file: &mut File, traces: &[Rec], total: Duration, depth: usize) { for t in traces { let (eff_text, eff_css_classes) = html_of_effect(&t.effect); let (dur_text, dur_css_classes) = html_of_duration(&t.start, &t.dur_total); @@ -149,7 +149,7 @@ fn write_traces_rec(file: &mut File, traces: &Vec, total: Duration, depth: } } -fn compute_counts_rec(counts: &mut HashMap, traces: &Vec) { +fn compute_counts_rec(counts: &mut HashMap, traces: &[Rec]) { for t in traces.iter() { match t.effect { Effect::TimeBegin(ref msg) => { @@ -202,14 +202,12 @@ fn compute_counts_rec(counts: &mut HashMap, traces: &Vec) { use rustc::util::common::duration_to_secs_str; - use std::cmp::Ordering; + use std::cmp::Reverse; - let mut data = vec![]; - for (ref cons, ref qm) in counts.iter() { - data.push((cons.clone(), qm.count.clone(), qm.dur_total.clone(), qm.dur_self.clone())); - }; - data.sort_by(|&(_,_,_,self1),&(_,_,_,self2)| - if self1 > self2 { Ordering::Less } else { Ordering::Greater } ); + let mut data = counts.iter().map(|(ref cons, 
ref qm)| + (cons.clone(), qm.count.clone(), qm.dur_total.clone(), qm.dur_self.clone()) + ).collect::>(); + data.sort_by_key(|k| Reverse(k.3)); for (cons, count, dur_total, dur_self) in data { write!(count_file, "{}, {}, {}, {}\n", cons, count, @@ -219,8 +217,9 @@ pub fn write_counts(count_file: &mut File, counts: &mut HashMap) { - let mut counts : HashMap = HashMap::new(); +pub fn write_traces(html_file: &mut File, counts_file: &mut File, traces: &[Rec]) { + let capacity = traces.iter().fold(0, |acc, t| acc + 1 + t.extent.len()); + let mut counts : HashMap = HashMap::with_capacity(capacity); compute_counts_rec(&mut counts, traces); write_counts(counts_file, &mut counts); diff --git a/src/librustc_driver/target_features.rs b/src/librustc_driver/target_features.rs deleted file mode 100644 index 96264472b5f8..000000000000 --- a/src/librustc_driver/target_features.rs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use syntax::ast; -use rustc::session::Session; -use syntax::symbol::Symbol; -use rustc_trans; - -/// Add `target_feature = "..."` cfgs for a variety of platform -/// specific features (SSE, NEON etc.). -/// -/// This is performed by checking whether a whitelisted set of -/// features is available on the target machine, by querying LLVM. 
-pub fn add_configuration(cfg: &mut ast::CrateConfig, sess: &Session) { - let tf = Symbol::intern("target_feature"); - - for feat in rustc_trans::target_features(sess) { - cfg.insert((tf, Some(feat))); - } - - if sess.crt_static_feature() { - cfg.insert((tf, Some(Symbol::intern("crt-static")))); - } -} diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs index 371f0e79a3ad..03651322bc98 100644 --- a/src/librustc_driver/test.rs +++ b/src/librustc_driver/test.rs @@ -16,12 +16,11 @@ use std::sync::mpsc; use driver; use rustc_lint; use rustc_resolve::MakeGlobMap; -use rustc_trans; use rustc::middle::region; -use rustc::ty::subst::{Kind, Subst}; -use rustc::traits::{ObligationCause, Reveal}; +use rustc::ty::subst::Subst; +use rustc::traits::ObligationCause; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::maps::OnDiskCache; +use rustc::ty::query::OnDiskCache; use rustc::infer::{self, InferOk, InferResult}; use rustc::infer::outlives::env::OutlivesEnvironment; use rustc::infer::type_variable::TypeVariableOrigin; @@ -29,10 +28,10 @@ use rustc_metadata::cstore::CStore; use rustc::hir::map as hir_map; use rustc::session::{self, config}; use rustc::session::config::{OutputFilenames, OutputTypes}; -use rustc_trans_utils::trans_crate::TransCrate; -use std::rc::Rc; +use rustc_data_structures::sync::{self, Lrc}; +use syntax; use syntax::ast; -use syntax::abi::Abi; +use rustc_target::spec::abi::Abi; use syntax::codemap::{CodeMap, FilePathMapping, FileName}; use errors; use errors::emitter::Emitter; @@ -89,27 +88,41 @@ impl Emitter for ExpectErrorEmitter { } } -fn errors(msgs: &[&str]) -> (Box, usize) { +fn errors(msgs: &[&str]) -> (Box, usize) { let v = msgs.iter().map(|m| m.to_string()).collect(); - (box ExpectErrorEmitter { messages: v } as Box, msgs.len()) + (box ExpectErrorEmitter { messages: v } as Box, msgs.len()) } fn test_env(source_string: &str, - (emitter, expected_err_count): (Box, usize), + args: (Box, usize), body: F) where F: 
FnOnce(Env) { - let mut options = config::basic_options(); - options.debugging_opts.verbose = true; - options.unstable_features = UnstableFeatures::Allow; - let diagnostic_handler = errors::Handler::with_emitter(true, false, emitter); + syntax::with_globals(|| { + let mut options = config::Options::default(); + options.debugging_opts.verbose = true; + options.unstable_features = UnstableFeatures::Allow; - let cstore = Rc::new(CStore::new(::DefaultTransCrate::metadata_loader())); + driver::spawn_thread_pool(options, |options| { + test_env_with_pool(options, source_string, args, body) + }) + }); +} + +fn test_env_with_pool( + options: config::Options, + source_string: &str, + (emitter, expected_err_count): (Box, usize), + body: F +) + where F: FnOnce(Env) +{ + let diagnostic_handler = errors::Handler::with_emitter(true, false, emitter); let sess = session::build_session_(options, None, diagnostic_handler, - Rc::new(CodeMap::new(FilePathMapping::empty()))); - rustc_trans::init(&sess); + Lrc::new(CodeMap::new(FilePathMapping::empty()))); + let cstore = CStore::new(::get_codegen_backend(&sess).metadata_loader()); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); let input = config::Input::Str { name: FileName::Anon, @@ -131,7 +144,7 @@ fn test_env(source_string: &str, }; let arenas = ty::AllArenas::new(); - let hir_map = hir_map::map_crate(&sess, &*cstore, &mut hir_forest, &defs); + let hir_map = hir_map::map_crate(&sess, &cstore, &mut hir_forest, &defs); // run just enough stuff to build a tcx: let (tx, _rx) = mpsc::channel(); @@ -143,9 +156,9 @@ fn test_env(source_string: &str, outputs: OutputTypes::new(&[]), }; TyCtxt::create_and_enter(&sess, - &*cstore, - ty::maps::Providers::default(), - ty::maps::Providers::default(), + &cstore, + ty::query::Providers::default(), + ty::query::Providers::default(), &arenas, resolutions, hir_map, @@ -156,7 +169,7 @@ fn test_env(source_string: &str, |tcx| { tcx.infer_ctxt().enter(|infcx| { let mut 
region_scope_tree = region::ScopeTree::default(); - let param_env = ty::ParamEnv::empty(Reveal::UserFacing); + let param_env = ty::ParamEnv::empty(); body(Env { infcx: &infcx, region_scope_tree: &mut region_scope_tree, @@ -170,16 +183,20 @@ fn test_env(source_string: &str, }); } +const D1: ty::DebruijnIndex = ty::INNERMOST; +const D2: ty::DebruijnIndex = D1.shifted_in(1); + impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.infcx.tcx } - pub fn create_region_hierarchy(&mut self, rh: &RH, parent: region::Scope) { + pub fn create_region_hierarchy(&mut self, rh: &RH, + parent: (region::Scope, region::ScopeDepth)) { let me = region::Scope::Node(rh.id); self.region_scope_tree.record_scope_parent(me, Some(parent)); for child_rh in rh.sub { - self.create_region_hierarchy(child_rh, me); + self.create_region_hierarchy(child_rh, (me, parent.1 + 1)); } } @@ -199,7 +216,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { id: hir::ItemLocalId(11), sub: &[], }], - }, dscope); + }, (dscope, 1)); } #[allow(dead_code)] // this seems like it could be useful, even if we don't use it now @@ -232,23 +249,24 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { } return match it.node { - hir::ItemUse(..) | - hir::ItemExternCrate(..) | - hir::ItemConst(..) | - hir::ItemStatic(..) | - hir::ItemFn(..) | - hir::ItemForeignMod(..) | - hir::ItemGlobalAsm(..) | - hir::ItemTy(..) => None, + hir::ItemKind::Use(..) | + hir::ItemKind::ExternCrate(..) | + hir::ItemKind::Const(..) | + hir::ItemKind::Static(..) | + hir::ItemKind::Fn(..) | + hir::ItemKind::ForeignMod(..) | + hir::ItemKind::GlobalAsm(..) | + hir::ItemKind::Existential(..) | + hir::ItemKind::Ty(..) => None, - hir::ItemEnum(..) | - hir::ItemStruct(..) | - hir::ItemUnion(..) | - hir::ItemTrait(..) | - hir::ItemTraitAlias(..) | - hir::ItemImpl(..) => None, + hir::ItemKind::Enum(..) | + hir::ItemKind::Struct(..) | + hir::ItemKind::Union(..) | + hir::ItemKind::Trait(..) | + hir::ItemKind::TraitAlias(..) 
| + hir::ItemKind::Impl(..) => None, - hir::ItemMod(ref m) => search_mod(this, m, idx, names), + hir::ItemKind::Mod(ref m) => search_mod(this, m, idx, names), }; } } @@ -276,7 +294,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { } pub fn t_fn(&self, input_tys: &[Ty<'tcx>], output_ty: Ty<'tcx>) -> Ty<'tcx> { - self.infcx.tcx.mk_fn_ptr(ty::Binder(self.infcx.tcx.mk_fn_sig( + self.infcx.tcx.mk_fn_ptr(ty::Binder::bind(self.infcx.tcx.mk_fn_sig( input_tys.iter().cloned(), output_ty, false, @@ -290,16 +308,16 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { } pub fn t_pair(&self, ty1: Ty<'tcx>, ty2: Ty<'tcx>) -> Ty<'tcx> { - self.infcx.tcx.intern_tup(&[ty1, ty2], false) + self.infcx.tcx.intern_tup(&[ty1, ty2]) } pub fn t_param(&self, index: u32) -> Ty<'tcx> { let name = format!("T{}", index); - self.infcx.tcx.mk_param(index, Symbol::intern(&name)) + self.infcx.tcx.mk_ty_param(index, Symbol::intern(&name).as_interned_str()) } pub fn re_early_bound(&self, index: u32, name: &'static str) -> ty::Region<'tcx> { - let name = Symbol::intern(name); + let name = Symbol::intern(name).as_interned_str(); self.infcx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { def_id: self.infcx.tcx.hir.local_def_id(ast::CRATE_NODE_ID), index, @@ -319,7 +337,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { } pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> { - let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1)); + let r = self.re_late_bound_with_debruijn(id, D1); self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } @@ -471,12 +489,12 @@ fn subst_ty_renumber_bound() { env.t_fn(&[t_param], env.t_nil()) }; - let substs = env.infcx.tcx.intern_substs(&[Kind::from(t_rptr_bound1)]); + let substs = env.infcx.tcx.intern_substs(&[t_rptr_bound1.into()]); let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = fn(&'a isize) let t_expected = { - let t_ptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(2)); + let t_ptr_bound2 = 
env.t_rptr_late_bound_with_debruijn(1, D2); env.t_fn(&[t_ptr_bound2], env.t_nil()) }; @@ -506,14 +524,14 @@ fn subst_ty_renumber_some_bounds() { env.t_pair(t_param, env.t_fn(&[t_param], env.t_nil())) }; - let substs = env.infcx.tcx.intern_substs(&[Kind::from(t_rptr_bound1)]); + let substs = env.infcx.tcx.intern_substs(&[t_rptr_bound1.into()]); let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = (&'a isize, fn(&'a isize)) // // but not that the Debruijn index is different in the different cases. let t_expected = { - let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(2)); + let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, D2); env.t_pair(t_rptr_bound1, env.t_fn(&[t_rptr_bound2], env.t_nil())) }; @@ -541,10 +559,10 @@ fn escaping() { let t_rptr_free1 = env.t_rptr_free(1); assert!(!t_rptr_free1.has_escaping_regions()); - let t_rptr_bound1 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(1)); + let t_rptr_bound1 = env.t_rptr_late_bound_with_debruijn(1, D1); assert!(t_rptr_bound1.has_escaping_regions()); - let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(2)); + let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, D2); assert!(t_rptr_bound2.has_escaping_regions()); // t_fn = fn(A) @@ -560,7 +578,7 @@ fn escaping() { #[test] fn subst_region_renumber_region() { test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - let re_bound1 = env.re_late_bound_with_debruijn(1, ty::DebruijnIndex::new(1)); + let re_bound1 = env.re_late_bound_with_debruijn(1, D1); // type t_source<'a> = fn(&'a isize) let t_source = { @@ -568,14 +586,14 @@ fn subst_region_renumber_region() { env.t_fn(&[env.t_rptr(re_early)], env.t_nil()) }; - let substs = env.infcx.tcx.intern_substs(&[Kind::from(re_bound1)]); + let substs = env.infcx.tcx.intern_substs(&[re_bound1.into()]); let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = fn(&'a isize) // // but not that the Debruijn 
index is different in the different cases. let t_expected = { - let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, ty::DebruijnIndex::new(2)); + let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, D2); env.t_fn(&[t_rptr_bound2], env.t_nil()) }; @@ -595,8 +613,8 @@ fn walk_ty() { let tcx = env.infcx.tcx; let int_ty = tcx.types.isize; let usize_ty = tcx.types.usize; - let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty], false); - let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty], false); + let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty]); + let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty]); let walked: Vec<_> = tup2_ty.walk().collect(); assert_eq!(walked, [tup2_ty, tup1_ty, int_ty, usize_ty, int_ty, usize_ty, tup1_ty, int_ty, @@ -610,8 +628,8 @@ fn walk_ty_skip_subtree() { let tcx = env.infcx.tcx; let int_ty = tcx.types.isize; let usize_ty = tcx.types.usize; - let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty], false); - let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty], false); + let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty]); + let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty]); // types we expect to see (in order), plus a boolean saying // whether to skip the subtree. 
diff --git a/src/librustc_errors/Cargo.toml b/src/librustc_errors/Cargo.toml index 3e15af7558da..e412d1749d1b 100644 --- a/src/librustc_errors/Cargo.toml +++ b/src/librustc_errors/Cargo.toml @@ -13,3 +13,5 @@ serialize = { path = "../libserialize" } syntax_pos = { path = "../libsyntax_pos" } rustc_data_structures = { path = "../librustc_data_structures" } unicode-width = "0.1.4" +atty = "0.2" +termcolor = "0.3" diff --git a/src/librustc_errors/diagnostic.rs b/src/librustc_errors/diagnostic.rs index 8da4321fa5b7..825e31539c8b 100644 --- a/src/librustc_errors/diagnostic.rs +++ b/src/librustc_errors/diagnostic.rs @@ -11,6 +11,7 @@ use CodeSuggestion; use SubstitutionPart; use Substitution; +use Applicability; use Level; use std::fmt; use syntax_pos::{MultiSpan, Span}; @@ -27,7 +28,7 @@ pub struct Diagnostic { pub suggestions: Vec, } -#[derive(Clone, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum DiagnosticId { Error(String), Lint(String), @@ -98,6 +99,25 @@ impl Diagnostic { } } + pub fn is_error(&self) -> bool { + match self.level { + Level::Bug | + Level::Fatal | + Level::PhaseFatal | + Level::Error | + Level::FailureNote => { + true + } + + Level::Warning | + Level::Note | + Level::Help | + Level::Cancelled => { + false + } + } + } + /// Cancel the diagnostic (a structured diagnostic must either be emitted or /// canceled or it will panic when dropped). 
pub fn cancel(&mut self) { @@ -120,7 +140,7 @@ impl Diagnostic { } pub fn note_expected_found(&mut self, - label: &fmt::Display, + label: &dyn fmt::Display, expected: DiagnosticStyledString, found: DiagnosticStyledString) -> &mut Self @@ -129,11 +149,11 @@ impl Diagnostic { } pub fn note_expected_found_extra(&mut self, - label: &fmt::Display, + label: &dyn fmt::Display, expected: DiagnosticStyledString, found: DiagnosticStyledString, - expected_extra: &fmt::Display, - found_extra: &fmt::Display) + expected_extra: &dyn fmt::Display, + found_extra: &dyn fmt::Display) -> &mut Self { let mut msg: Vec<_> = vec![(format!("expected {} `", label), Style::NoStyle)]; @@ -222,6 +242,7 @@ impl Diagnostic { }], msg: msg.to_owned(), show_code_when_inline: false, + applicability: Applicability::Unspecified, }); self } @@ -252,6 +273,26 @@ impl Diagnostic { }], msg: msg.to_owned(), show_code_when_inline: true, + applicability: Applicability::Unspecified, + }); + self + } + + pub fn multipart_suggestion( + &mut self, + msg: &str, + suggestion: Vec<(Span, String)>, + ) -> &mut Self { + self.suggestions.push(CodeSuggestion { + substitutions: vec![Substitution { + parts: suggestion + .into_iter() + .map(|(span, snippet)| SubstitutionPart { snippet, span }) + .collect(), + }], + msg: msg.to_owned(), + show_code_when_inline: true, + applicability: Applicability::Unspecified, }); self } @@ -267,6 +308,60 @@ impl Diagnostic { }).collect(), msg: msg.to_owned(), show_code_when_inline: true, + applicability: Applicability::Unspecified, + }); + self + } + + /// This is a suggestion that may contain mistakes or fillers and should + /// be read and understood by a human. 
+ pub fn span_suggestion_with_applicability(&mut self, sp: Span, msg: &str, + suggestion: String, + applicability: Applicability) -> &mut Self { + self.suggestions.push(CodeSuggestion { + substitutions: vec![Substitution { + parts: vec![SubstitutionPart { + snippet: suggestion, + span: sp, + }], + }], + msg: msg.to_owned(), + show_code_when_inline: true, + applicability, + }); + self + } + + pub fn span_suggestions_with_applicability(&mut self, sp: Span, msg: &str, + suggestions: Vec, + applicability: Applicability) -> &mut Self { + self.suggestions.push(CodeSuggestion { + substitutions: suggestions.into_iter().map(|snippet| Substitution { + parts: vec![SubstitutionPart { + snippet, + span: sp, + }], + }).collect(), + msg: msg.to_owned(), + show_code_when_inline: true, + applicability, + }); + self + } + + pub fn span_suggestion_short_with_applicability( + &mut self, sp: Span, msg: &str, suggestion: String, applicability: Applicability + ) -> &mut Self { + self.suggestions.push(CodeSuggestion { + substitutions: vec![Substitution { + parts: vec![SubstitutionPart { + snippet: suggestion, + span: sp, + }], + }], + msg: msg.to_owned(), + show_code_when_inline: false, + applicability: applicability, }); self } @@ -281,6 +376,10 @@ impl Diagnostic { self } + pub fn get_code(&self) -> Option { + self.code.clone() + } + pub fn message(&self) -> String { self.message.iter().map(|i| i.0.to_owned()).collect::() } @@ -299,7 +398,7 @@ impl Diagnostic { /// Convenience function for internal use, clients should use one of the /// public methods above. 
- pub(crate) fn sub(&mut self, + pub fn sub(&mut self, level: Level, message: &str, span: MultiSpan, diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index 61674ada6fa6..1b34898b9908 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -11,6 +11,7 @@ use Diagnostic; use DiagnosticId; use DiagnosticStyledString; +use Applicability; use Level; use Handler; @@ -25,6 +26,7 @@ use syntax_pos::{MultiSpan, Span}; pub struct DiagnosticBuilder<'a> { pub handler: &'a Handler, diagnostic: Diagnostic, + allow_suggestions: bool, } /// In general, the `DiagnosticBuilder` uses deref to allow access to @@ -87,22 +89,16 @@ impl<'a> DiagnosticBuilder<'a> { self.cancel(); } - pub fn is_error(&self) -> bool { - match self.level { - Level::Bug | - Level::Fatal | - Level::PhaseFatal | - Level::Error => { - true - } - - Level::Warning | - Level::Note | - Level::Help | - Level::Cancelled => { - false - } - } + /// Buffers the diagnostic for later emission. + pub fn buffer(self, buffered_diagnostics: &mut Vec) { + // We need to use `ptr::read` because `DiagnosticBuilder` + // implements `Drop`. + let diagnostic; + unsafe { + diagnostic = ::std::ptr::read(&self.diagnostic); + ::std::mem::forget(self); + }; + buffered_diagnostics.push(diagnostic); } /// Convenience function for internal use, clients should use one of the @@ -130,7 +126,7 @@ impl<'a> DiagnosticBuilder<'a> { /// locally in whichever way makes the most sense. 
pub fn delay_as_bug(&mut self) { self.level = Level::Bug; - *self.handler.delayed_span_bug.borrow_mut() = Some(self.diagnostic.clone()); + self.handler.delay_as_bug(self.diagnostic.clone()); self.cancel(); } @@ -146,17 +142,17 @@ impl<'a> DiagnosticBuilder<'a> { } forward!(pub fn note_expected_found(&mut self, - label: &fmt::Display, + label: &dyn fmt::Display, expected: DiagnosticStyledString, found: DiagnosticStyledString) -> &mut Self); forward!(pub fn note_expected_found_extra(&mut self, - label: &fmt::Display, + label: &dyn fmt::Display, expected: DiagnosticStyledString, found: DiagnosticStyledString, - expected_extra: &fmt::Display, - found_extra: &fmt::Display) + expected_extra: &dyn fmt::Display, + found_extra: &dyn fmt::Display) -> &mut Self); forward!(pub fn note(&mut self, msg: &str) -> &mut Self); @@ -176,6 +172,11 @@ impl<'a> DiagnosticBuilder<'a> { msg: &str, suggestion: String) -> &mut Self); + forward!(pub fn multipart_suggestion( + &mut self, + msg: &str, + suggestion: Vec<(Span, String)> + ) -> &mut Self); forward!(pub fn span_suggestion(&mut self, sp: Span, msg: &str, @@ -186,9 +187,67 @@ impl<'a> DiagnosticBuilder<'a> { msg: &str, suggestions: Vec) -> &mut Self); + pub fn span_suggestion_with_applicability(&mut self, + sp: Span, + msg: &str, + suggestion: String, + applicability: Applicability) + -> &mut Self { + if !self.allow_suggestions { + return self + } + self.diagnostic.span_suggestion_with_applicability( + sp, + msg, + suggestion, + applicability, + ); + self + } + + pub fn span_suggestions_with_applicability(&mut self, + sp: Span, + msg: &str, + suggestions: Vec, + applicability: Applicability) + -> &mut Self { + if !self.allow_suggestions { + return self + } + self.diagnostic.span_suggestions_with_applicability( + sp, + msg, + suggestions, + applicability, + ); + self + } + + pub fn span_suggestion_short_with_applicability(&mut self, + sp: Span, + msg: &str, + suggestion: String, + applicability: Applicability) + -> &mut Self { + if 
!self.allow_suggestions { + return self + } + self.diagnostic.span_suggestion_short_with_applicability( + sp, + msg, + suggestion, + applicability, + ); + self + } forward!(pub fn set_span>(&mut self, sp: S) -> &mut Self); forward!(pub fn code(&mut self, s: DiagnosticId) -> &mut Self); + pub fn allow_suggestions(&mut self, allow: bool) -> &mut Self { + self.allow_suggestions = allow; + self + } + /// Convenience function for internal use, clients should use one of the /// struct_* methods on Handler. pub fn new(handler: &'a Handler, level: Level, message: &str) -> DiagnosticBuilder<'a> { @@ -210,7 +269,11 @@ impl<'a> DiagnosticBuilder<'a> { /// diagnostic. pub fn new_diagnostic(handler: &'a Handler, diagnostic: Diagnostic) -> DiagnosticBuilder<'a> { - DiagnosticBuilder { handler, diagnostic } + DiagnosticBuilder { + handler, + diagnostic, + allow_suggestions: true, + } } } diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs index 58f851aea381..6b1298750fba 100644 --- a/src/librustc_errors/emitter.rs +++ b/src/librustc_errors/emitter.rs @@ -10,25 +10,34 @@ use self::Destination::*; -use syntax_pos::{DUMMY_SP, FileMap, Span, MultiSpan}; +use syntax_pos::{FileMap, Span, MultiSpan}; -use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, CodeMapper, DiagnosticId}; +use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, CodeMapperDyn, DiagnosticId}; use snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, StyledString, Style}; use styled_buffer::StyledBuffer; +use rustc_data_structures::sync::Lrc; +use atty; use std::borrow::Cow; use std::io::prelude::*; use std::io; -use std::rc::Rc; -use term; use std::collections::HashMap; -use std::cmp::min; +use std::cmp::{min, Reverse}; +use termcolor::{StandardStream, ColorChoice, ColorSpec, BufferWriter}; +use termcolor::{WriteColor, Color, Buffer}; use unicode_width; +const ANONYMIZED_LINE_NUM: &str = "LL"; + /// Emitter trait for emitting errors. 
pub trait Emitter { /// Emit a structured diagnostic. fn emit(&mut self, db: &DiagnosticBuilder); + + /// Check if should show explanations about "rustc --explain" + fn should_show_explain(&self) -> bool { + true + } } impl Emitter for EmitterWriter { @@ -76,6 +85,10 @@ impl Emitter for EmitterWriter { &children, &suggestions); } + + fn should_show_explain(&self) -> bool { + !self.short_message + } } /// maximum number of lines we will print for each error; arbitrary. @@ -93,62 +106,78 @@ pub enum ColorConfig { } impl ColorConfig { - fn use_color(&self) -> bool { + fn to_color_choice(&self) -> ColorChoice { match *self { - ColorConfig::Always => true, - ColorConfig::Never => false, - ColorConfig::Auto => stderr_isatty(), + ColorConfig::Always => ColorChoice::Always, + ColorConfig::Never => ColorChoice::Never, + ColorConfig::Auto if atty::is(atty::Stream::Stderr) => { + ColorChoice::Auto + } + ColorConfig::Auto => ColorChoice::Never, } } } pub struct EmitterWriter { dst: Destination, - cm: Option>, + cm: Option>, short_message: bool, + teach: bool, + ui_testing: bool, } struct FileWithAnnotatedLines { - file: Rc, + file: Lrc, lines: Vec, multiline_depth: usize, } impl EmitterWriter { pub fn stderr(color_config: ColorConfig, - code_map: Option>, - short_message: bool) + code_map: Option>, + short_message: bool, + teach: bool) -> EmitterWriter { - if color_config.use_color() { - let dst = Destination::from_stderr(); - EmitterWriter { - dst, - cm: code_map, - short_message: short_message, - } - } else { - EmitterWriter { - dst: Raw(Box::new(io::stderr())), - cm: code_map, - short_message: short_message, - } + let dst = Destination::from_stderr(color_config); + EmitterWriter { + dst, + cm: code_map, + short_message, + teach, + ui_testing: false, } } - pub fn new(dst: Box, - code_map: Option>, - short_message: bool) + pub fn new(dst: Box, + code_map: Option>, + short_message: bool, + teach: bool) -> EmitterWriter { EmitterWriter { dst: Raw(dst), cm: code_map, - 
short_message: short_message, + short_message, + teach, + ui_testing: false, + } + } + + pub fn ui_testing(mut self, ui_testing: bool) -> Self { + self.ui_testing = ui_testing; + self + } + + fn maybe_anonymized(&self, line_num: usize) -> String { + if self.ui_testing { + ANONYMIZED_LINE_NUM.to_string() + } else { + line_num.to_string() } } fn preprocess_annotations(&mut self, msp: &MultiSpan) -> Vec { fn add_annotation_to_file(file_vec: &mut Vec, - file: Rc, + file: Lrc, line_index: usize, ann: Annotation) { @@ -187,7 +216,7 @@ impl EmitterWriter { if let Some(ref cm) = self.cm { for span_label in msp.span_labels() { - if span_label.span == DUMMY_SP { + if span_label.span.is_dummy() { continue; } @@ -236,9 +265,7 @@ impl EmitterWriter { } // Find overlapping multiline annotations, put them at different depths - multiline_annotations.sort_by(|a, b| { - (a.1.line_start, a.1.line_end).cmp(&(b.1.line_start, b.1.line_end)) - }); + multiline_annotations.sort_by_key(|&(_, ref ml)| (ml.line_start, ml.line_end)); for item in multiline_annotations.clone() { let ann = item.1; for item in multiline_annotations.iter_mut() { @@ -280,10 +307,14 @@ impl EmitterWriter { fn render_source_line(&self, buffer: &mut StyledBuffer, - file: Rc, + file: Lrc, line: &Line, width_offset: usize, code_offset: usize) -> Vec<(usize, Style)> { + if line.line_index == 0 { + return Vec::new(); + } + let source_string = match file.get_line(line.line_index - 1) { Some(s) => s, None => return Vec::new(), @@ -295,7 +326,7 @@ impl EmitterWriter { buffer.puts(line_offset, code_offset, &source_string, Style::Quotation); buffer.puts(line_offset, 0, - &(line.line_index.to_string()), + &self.maybe_anonymized(line.line_index), Style::LineNumber); draw_col_separator(buffer, line_offset, width_offset - 2); @@ -370,7 +401,7 @@ impl EmitterWriter { // otherwise the lines would end up needing to go over a message. 
let mut annotations = line.annotations.clone(); - annotations.sort_by(|a,b| b.start_col.cmp(&a.start_col)); + annotations.sort_by_key(|a| Reverse(a.start_col)); // First, figure out where each label will be positioned. // @@ -497,9 +528,7 @@ impl EmitterWriter { // If there are no annotations or the only annotations on this line are // MultilineLine, then there's only code being shown, stop processing. - if line.annotations.is_empty() || line.annotations.iter() - .filter(|a| !a.is_line()).collect::>().len() == 0 - { + if line.annotations.iter().all(|a| a.is_line()) { return vec![]; } @@ -551,7 +580,14 @@ impl EmitterWriter { code_offset + annotation.start_col, style); } - _ => (), + _ if self.teach => { + buffer.set_style_range(line_offset, + code_offset + annotation.start_col, + code_offset + annotation.end_col, + style, + annotation.is_primary); + } + _ => {} } } @@ -690,7 +726,7 @@ impl EmitterWriter { let mut max = 0; if let Some(ref cm) = self.cm { for primary_span in msp.primary_spans() { - if primary_span != &DUMMY_SP { + if !primary_span.is_dummy() { let hi = cm.lookup_char_pos(primary_span.hi()); if hi.line > max { max = hi.line; @@ -699,7 +735,7 @@ impl EmitterWriter { } if !self.short_message { for span_label in msp.span_labels() { - if span_label.span != DUMMY_SP { + if !span_label.span.is_dummy() { let hi = cm.lookup_char_pos(span_label.span.hi()); if hi.line > max { max = hi.line; @@ -711,7 +747,7 @@ impl EmitterWriter { max } - fn get_max_line_num(&mut self, span: &MultiSpan, children: &Vec) -> usize { + fn get_max_line_num(&mut self, span: &MultiSpan, children: &[SubDiagnostic]) -> usize { let mut max = 0; let primary = self.get_multispan_max_line_num(span); @@ -738,7 +774,7 @@ impl EmitterWriter { // First, find all the spans in <*macros> and point instead at their use site for sp in span.primary_spans() { - if *sp == DUMMY_SP { + if sp.is_dummy() { continue; } let call_sp = cm.call_span_if_macro(*sp); @@ -750,7 +786,7 @@ impl EmitterWriter { // 
Only show macro locations that are local // and display them like a span_note if let Some(def_site) = trace.def_site_span { - if def_site == DUMMY_SP { + if def_site.is_dummy() { continue; } if always_backtrace { @@ -767,6 +803,7 @@ impl EmitterWriter { } // Check to make sure we're not in any <*macros> if !cm.span_to_filename(def_site).is_macros() && + !trace.macro_decl_name.starts_with("desugaring of ") && !trace.macro_decl_name.starts_with("#[") || always_backtrace { new_labels.push((trace.call_site, @@ -789,7 +826,7 @@ impl EmitterWriter { span.push_span_label(label_span, label_text); } for sp_label in span.span_labels() { - if sp_label.span == DUMMY_SP { + if sp_label.span.is_dummy() { continue; } if cm.span_to_filename(sp_label.span.clone()).is_macros() && @@ -862,9 +899,7 @@ impl EmitterWriter { // | | length of label // | magic `3` // `max_line_num_len` - let padding = (0..padding + label.len() + 5) - .map(|_| " ") - .collect::(); + let padding = " ".repeat(padding + label.len() + 5); /// Return whether `style`, or the override if present and the style is `NoStyle`. fn style_or_override(style: Style, override_style: Option

(self, predicate: P) -> TakeWhile where Self: Sized, P: FnMut(&Self::Item) -> bool, { - TakeWhile{iter: self, flag: false, predicate: predicate} + TakeWhile { iter: self, flag: false, predicate } } /// Creates an iterator that skips the first `n` elements. @@ -905,7 +930,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn skip(self, n: usize) -> Skip where Self: Sized { - Skip{iter: self, n: n} + Skip { iter: self, n } } /// Creates an iterator that yields its first `n` elements. @@ -937,7 +962,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn take(self, n: usize) -> Take where Self: Sized, { - Take{iter: self, n: n} + Take { iter: self, n } } /// An iterator adaptor similar to [`fold`] that holds internal state and @@ -968,13 +993,13 @@ pub trait Iterator { /// // each iteration, we'll multiply the state by the element /// *state = *state * x; /// - /// // the value passed on to the next iteration - /// Some(*state) + /// // then, we'll yield the negation of the state + /// Some(-*state) /// }); /// - /// assert_eq!(iter.next(), Some(1)); - /// assert_eq!(iter.next(), Some(2)); - /// assert_eq!(iter.next(), Some(6)); + /// assert_eq!(iter.next(), Some(-1)); + /// assert_eq!(iter.next(), Some(-2)); + /// assert_eq!(iter.next(), Some(-6)); /// assert_eq!(iter.next(), None); /// ``` #[inline] @@ -982,7 +1007,7 @@ pub trait Iterator { fn scan(self, initial_state: St, f: F) -> Scan where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option, { - Scan{iter: self, f: f, state: initial_state} + Scan { iter: self, f, state: initial_state } } /// Creates an iterator that works like map, but flattens nested structure. @@ -992,11 +1017,15 @@ pub trait Iterator { /// an extra layer of indirection. `flat_map()` will remove this extra layer /// on its own. /// + /// You can think of `flat_map(f)` as the semantic equivalent + /// of [`map`]ping, and then [`flatten`]ing as in `map(f).flatten()`. 
+ /// /// Another way of thinking about `flat_map()`: [`map`]'s closure returns /// one item for each element, and `flat_map()`'s closure returns an /// iterator for each element. /// /// [`map`]: #method.map + /// [`flatten`]: #method.flatten /// /// # Examples /// @@ -1016,7 +1045,75 @@ pub trait Iterator { fn flat_map(self, f: F) -> FlatMap where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U, { - FlatMap{iter: self, f: f, frontiter: None, backiter: None } + FlatMap { inner: flatten_compat(self.map(f)) } + } + + /// Creates an iterator that flattens nested structure. + /// + /// This is useful when you have an iterator of iterators or an iterator of + /// things that can be turned into iterators and you want to remove one + /// level of indirection. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let data = vec![vec![1, 2, 3, 4], vec![5, 6]]; + /// let flattened = data.into_iter().flatten().collect::>(); + /// assert_eq!(flattened, &[1, 2, 3, 4, 5, 6]); + /// ``` + /// + /// Mapping and then flattening: + /// + /// ``` + /// let words = ["alpha", "beta", "gamma"]; + /// + /// // chars() returns an iterator + /// let merged: String = words.iter() + /// .map(|s| s.chars()) + /// .flatten() + /// .collect(); + /// assert_eq!(merged, "alphabetagamma"); + /// ``` + /// + /// You can also rewrite this in terms of [`flat_map()`], which is preferable + /// in this case since it conveys intent more clearly: + /// + /// ``` + /// let words = ["alpha", "beta", "gamma"]; + /// + /// // chars() returns an iterator + /// let merged: String = words.iter() + /// .flat_map(|s| s.chars()) + /// .collect(); + /// assert_eq!(merged, "alphabetagamma"); + /// ``` + /// + /// Flattening once only removes one level of nesting: + /// + /// ``` + /// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]; + /// + /// let d2 = d3.iter().flatten().collect::>(); + /// assert_eq!(d2, [&[1, 2], &[3, 4], &[5, 6], &[7, 8]]); + /// + /// let d1 = 
d3.iter().flatten().flatten().collect::>(); + /// assert_eq!(d1, [&1, &2, &3, &4, &5, &6, &7, &8]); + /// ``` + /// + /// Here we see that `flatten()` does not perform a "deep" flatten. + /// Instead, only one level of nesting is removed. That is, if you + /// `flatten()` a three-dimensional array the result will be + /// two-dimensional and not one-dimensional. To get a one-dimensional + /// structure, you have to `flatten()` again. + /// + /// [`flat_map()`]: #method.flat_map + #[inline] + #[stable(feature = "iterator_flatten", since = "1.29.0")] + fn flatten(self) -> Flatten + where Self: Sized, Self::Item: IntoIterator { + Flatten { inner: flatten_compat(self) } } /// Creates an iterator which ends after the first [`None`]. @@ -1086,8 +1183,9 @@ pub trait Iterator { /// happening at various parts in the pipeline. To do that, insert /// a call to `inspect()`. /// - /// It's much more common for `inspect()` to be used as a debugging tool - /// than to exist in your final code, but never say never. + /// It's more common for `inspect()` to be used as a debugging tool than to + /// exist in your final code, but applications may find it useful in certain + /// situations when errors need to be logged before being discarded. /// /// # Examples /// @@ -1098,19 +1196,19 @@ pub trait Iterator { /// /// // this iterator sequence is complex. 
/// let sum = a.iter() - /// .cloned() - /// .filter(|&x| x % 2 == 0) - /// .fold(0, |sum, i| sum + i); + /// .cloned() + /// .filter(|x| x % 2 == 0) + /// .fold(0, |sum, i| sum + i); /// /// println!("{}", sum); /// /// // let's add some inspect() calls to investigate what's happening /// let sum = a.iter() - /// .cloned() - /// .inspect(|x| println!("about to filter: {}", x)) - /// .filter(|&x| x % 2 == 0) - /// .inspect(|x| println!("made it through filter: {}", x)) - /// .fold(0, |sum, i| sum + i); + /// .cloned() + /// .inspect(|x| println!("about to filter: {}", x)) + /// .filter(|x| x % 2 == 0) + /// .inspect(|x| println!("made it through filter: {}", x)) + /// .fold(0, |sum, i| sum + i); /// /// println!("{}", sum); /// ``` @@ -1118,6 +1216,7 @@ pub trait Iterator { /// This will print: /// /// ```text + /// 6 /// about to filter: 1 /// about to filter: 4 /// made it through filter: 4 @@ -1126,12 +1225,38 @@ pub trait Iterator { /// about to filter: 3 /// 6 /// ``` + /// + /// Logging errors before discarding them: + /// + /// ``` + /// let lines = ["1", "2", "a"]; + /// + /// let sum: i32 = lines + /// .iter() + /// .map(|line| line.parse::()) + /// .inspect(|num| { + /// if let Err(ref e) = *num { + /// println!("Parsing error: {}", e); + /// } + /// }) + /// .filter_map(Result::ok) + /// .sum(); + /// + /// println!("Sum: {}", sum); + /// ``` + /// + /// This will print: + /// + /// ```text + /// Parsing error: invalid digit found in string + /// Sum: 3 + /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn inspect(self, f: F) -> Inspect where Self: Sized, F: FnMut(&Self::Item), { - Inspect{iter: self, f: f} + Inspect { iter: self, f } } /// Borrows an iterator, rather than consuming it. 
@@ -1148,8 +1273,7 @@ pub trait Iterator { /// /// let iter = a.into_iter(); /// - /// let sum: i32 = iter.take(5) - /// .fold(0, |acc, &i| acc + i ); + /// let sum: i32 = iter.take(5).fold(0, |acc, i| acc + i ); /// /// assert_eq!(sum, 6); /// @@ -1163,9 +1287,7 @@ pub trait Iterator { /// let mut iter = a.into_iter(); /// /// // instead, we add in a .by_ref() - /// let sum: i32 = iter.by_ref() - /// .take(2) - /// .fold(0, |acc, &i| acc + i ); + /// let sum: i32 = iter.by_ref().take(2).fold(0, |acc, i| acc + i ); /// /// assert_eq!(sum, 3); /// @@ -1222,9 +1344,7 @@ pub trait Iterator { /// /// let a = [1, 2, 3]; /// - /// let doubled: VecDeque = a.iter() - /// .map(|&x| x * 2) - /// .collect(); + /// let doubled: VecDeque = a.iter().map(|&x| x * 2).collect(); /// /// assert_eq!(2, doubled[0]); /// assert_eq!(4, doubled[1]); @@ -1236,9 +1356,7 @@ pub trait Iterator { /// ``` /// let a = [1, 2, 3]; /// - /// let doubled = a.iter() - /// .map(|&x| x * 2) - /// .collect::>(); + /// let doubled = a.iter().map(|x| x * 2).collect::>(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` @@ -1249,9 +1367,7 @@ pub trait Iterator { /// ``` /// let a = [1, 2, 3]; /// - /// let doubled = a.iter() - /// .map(|&x| x * 2) - /// .collect::>(); + /// let doubled = a.iter().map(|x| x * 2).collect::>(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` @@ -1262,9 +1378,9 @@ pub trait Iterator { /// let chars = ['g', 'd', 'k', 'k', 'n']; /// /// let hello: String = chars.iter() - /// .map(|&x| x as u8) - /// .map(|x| (x + 1) as char) - /// .collect(); + /// .map(|&x| x as u8) + /// .map(|x| (x + 1) as char) + /// .collect(); /// /// assert_eq!("hello", hello); /// ``` @@ -1294,6 +1410,7 @@ pub trait Iterator { /// [`Result`]: ../../std/result/enum.Result.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"] fn collect>(self) -> B where Self: Sized { 
FromIterator::from_iter(self) } @@ -1311,8 +1428,9 @@ pub trait Iterator { /// ``` /// let a = [1, 2, 3]; /// - /// let (even, odd): (Vec, Vec) = a.into_iter() - /// .partition(|&n| n % 2 == 0); + /// let (even, odd): (Vec, Vec) = a + /// .into_iter() + /// .partition(|&n| n % 2 == 0); /// /// assert_eq!(even, vec![2]); /// assert_eq!(odd, vec![1, 3]); @@ -1361,9 +1479,9 @@ pub trait Iterator { /// /// In particular, try to have this call `try_fold()` on the internal parts /// from which this iterator is composed. If multiple calls are needed, - /// the `?` operator be convenient for chaining the accumulator value along, - /// but beware any invariants that need to be upheld before those early - /// returns. This is a `&mut self` method, so iteration needs to be + /// the `?` operator may be convenient for chaining the accumulator value + /// along, but beware any invariants that need to be upheld before those + /// early returns. This is a `&mut self` method, so iteration needs to be /// resumable after hitting an error here. 
/// /// # Examples @@ -1371,12 +1489,10 @@ pub trait Iterator { /// Basic usage: /// /// ``` - /// #![feature(iterator_try_fold)] /// let a = [1, 2, 3]; /// /// // the checked sum of all of the elements of the array - /// let sum = a.iter() - /// .try_fold(0i8, |acc, &x| acc.checked_add(x)); + /// let sum = a.iter().try_fold(0i8, |acc, &x| acc.checked_add(x)); /// /// assert_eq!(sum, Some(6)); /// ``` @@ -1384,7 +1500,6 @@ pub trait Iterator { /// Short-circuiting: /// /// ``` - /// #![feature(iterator_try_fold)] /// let a = [10, 20, 30, 100, 40, 50]; /// let mut it = a.iter(); /// @@ -1398,7 +1513,7 @@ pub trait Iterator { /// assert_eq!(it.next(), Some(&40)); /// ``` #[inline] - #[unstable(feature = "iterator_try_fold", issue = "45594")] + #[stable(feature = "iterator_try_fold", since = "1.27.0")] fn try_fold(&mut self, init: B, mut f: F) -> R where Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try { @@ -1409,6 +1524,41 @@ pub trait Iterator { Try::from_ok(accum) } + /// An iterator method that applies a fallible function to each item in the + /// iterator, stopping at the first error and returning that error. + /// + /// This can also be thought of as the fallible form of [`for_each()`] + /// or as the stateless version of [`try_fold()`]. 
+ /// + /// [`for_each()`]: #method.for_each + /// [`try_fold()`]: #method.try_fold + /// + /// # Examples + /// + /// ``` + /// use std::fs::rename; + /// use std::io::{stdout, Write}; + /// use std::path::Path; + /// + /// let data = ["no_tea.txt", "stale_bread.json", "torrential_rain.png"]; + /// + /// let res = data.iter().try_for_each(|x| writeln!(stdout(), "{}", x)); + /// assert!(res.is_ok()); + /// + /// let mut it = data.iter().cloned(); + /// let res = it.try_for_each(|x| rename(x, Path::new(x).with_extension("old"))); + /// assert!(res.is_err()); + /// // It short-circuited, so the remaining items are still in the iterator: + /// assert_eq!(it.next(), Some("stale_bread.json")); + /// ``` + #[inline] + #[stable(feature = "iterator_try_fold", since = "1.27.0")] + fn try_for_each(&mut self, mut f: F) -> R where + Self: Sized, F: FnMut(Self::Item) -> R, R: Try + { + self.try_fold((), move |(), x| f(x)) + } + /// An iterator method that applies a function, producing a single, final value. /// /// `fold()` takes two arguments: an initial value, and a closure with two @@ -1426,6 +1576,10 @@ pub trait Iterator { /// Folding is useful whenever you have a collection of something, and want /// to produce a single value from it. /// + /// Note: `fold()`, and similar methods that traverse the entire iterator, + /// may not terminate for infinite iterators, even on traits for which a + /// result is determinable in finite time. 
+ /// /// # Examples /// /// Basic usage: @@ -1434,8 +1588,7 @@ pub trait Iterator { /// let a = [1, 2, 3]; /// /// // the sum of all of the elements of the array - /// let sum = a.iter() - /// .fold(0, |acc, &x| acc + x); + /// let sum = a.iter().fold(0, |acc, x| acc + x); /// /// assert_eq!(sum, 6); /// ``` @@ -1478,7 +1631,7 @@ pub trait Iterator { fn fold(mut self, init: B, mut f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { - self.try_fold(init, move |acc, x| AlwaysOk(f(acc, x))).0 + self.try_fold(init, move |acc, x| Ok::(f(acc, x))).unwrap() } /// Tests if every element of the iterator matches a predicate. @@ -1523,7 +1676,7 @@ pub trait Iterator { fn all(&mut self, mut f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool { - self.try_fold((), move |(), x| { + self.try_for_each(move |x| { if f(x) { LoopState::Continue(()) } else { LoopState::Break(()) } }) == LoopState::Continue(()) @@ -1572,7 +1725,7 @@ pub trait Iterator { Self: Sized, F: FnMut(Self::Item) -> bool { - self.try_fold((), move |(), x| { + self.try_for_each(move |x| { if f(x) { LoopState::Break(()) } else { LoopState::Continue(()) } }) == LoopState::Break(()) @@ -1626,12 +1779,44 @@ pub trait Iterator { Self: Sized, P: FnMut(&Self::Item) -> bool, { - self.try_fold((), move |(), x| { + self.try_for_each(move |x| { if predicate(&x) { LoopState::Break(x) } else { LoopState::Continue(()) } }).break_value() } + /// Applies function to the elements of iterator and returns + /// the first non-none result. + /// + /// `iter.find_map(f)` is equivalent to `iter.filter_map(f).next()`. 
+ /// + /// + /// # Examples + /// + /// ``` + /// #![feature(iterator_find_map)] + /// let a = ["lol", "NaN", "2", "5"]; + /// + /// let mut first_number = a.iter().find_map(|s| s.parse().ok()); + /// + /// assert_eq!(first_number, Some(2)); + /// ``` + #[inline] + #[unstable(feature = "iterator_find_map", + reason = "unstable new API", + issue = "49602")] + fn find_map(&mut self, mut f: F) -> Option where + Self: Sized, + F: FnMut(Self::Item) -> Option, + { + self.try_for_each(move |x| { + match f(x) { + Some(x) => LoopState::Break(x), + None => LoopState::Continue(()), + } + }).break_value() + } + /// Searches for an element in an iterator, returning its index. /// /// `position()` takes a closure that returns `true` or `false`. It applies diff --git a/src/libcore/iter/mod.rs b/src/libcore/iter/mod.rs index 06c29b47bf92..ef3f4ced4f9b 100644 --- a/src/libcore/iter/mod.rs +++ b/src/libcore/iter/mod.rs @@ -298,15 +298,31 @@ //! //! This will print the numbers `0` through `4`, each on their own line. //! +//! Bear in mind that methods on infinite iterators, even those for which a +//! result can be determined mathematically in finite time, may not terminate. +//! Specifically, methods such as [`min`], which in the general case require +//! traversing every element in the iterator, are likely not to return +//! successfully for any infinite iterators. +//! +//! ```no_run +//! let ones = std::iter::repeat(1); +//! let least = ones.min().unwrap(); // Oh no! An infinite loop! +//! // `ones.min()` causes an infinite loop, so we won't reach this point! +//! println!("The smallest number one is {}.", least); +//! ``` +//! //! [`take`]: trait.Iterator.html#method.take +//! 
[`min`]: trait.Iterator.html#method.min #![stable(feature = "rust1", since = "1.0.0")] use cmp; use fmt; use iter_private::TrustedRandomAccess; -use ops::Try; +use ops::{self, Try}; use usize; +use intrinsics; +use mem; #[stable(feature = "rust1", since = "1.0.0")] pub use self::iterator::Iterator; @@ -318,6 +334,8 @@ pub use self::range::Step; #[stable(feature = "rust1", since = "1.0.0")] pub use self::sources::{Repeat, repeat}; +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +pub use self::sources::{RepeatWith, repeat_with}; #[stable(feature = "iter_empty", since = "1.2.0")] pub use self::sources::{Empty, empty}; #[stable(feature = "iter_once", since = "1.2.0")] @@ -327,7 +345,7 @@ pub use self::sources::{Once, once}; pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::{ExactSizeIterator, Sum, Product}; -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] pub use self::traits::FusedIterator; #[unstable(feature = "trusted_len", issue = "37572")] pub use self::traits::TrustedLen; @@ -337,21 +355,6 @@ mod range; mod sources; mod traits; -/// Transparent newtype used to implement foo methods in terms of try_foo. -/// Important until #43278 is fixed; might be better as `Result` later. 
-struct AlwaysOk(pub T); - -impl Try for AlwaysOk { - type Ok = T; - type Error = !; - #[inline] - fn into_result(self) -> Result { Ok(self.0) } - #[inline] - fn from_error(v: Self::Error) -> Self { v } - #[inline] - fn from_ok(v: Self::Ok) -> Self { AlwaysOk(v) } -} - /// Used to make try_fold closures more like normal loops #[derive(PartialEq)] enum LoopState { @@ -489,7 +492,7 @@ impl ExactSizeIterator for Rev } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Rev where I: FusedIterator + DoubleEndedIterator {} @@ -572,21 +575,21 @@ impl<'a, I, T: 'a> ExactSizeIterator for Cloned } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl<'a, I, T: 'a> FusedIterator for Cloned where I: FusedIterator, T: Clone {} #[doc(hidden)] -default unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned +unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned where I: TrustedRandomAccess, T: Clone { - unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + default unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { self.it.get_unchecked(i).clone() } #[inline] - fn may_have_side_effect() -> bool { true } + default fn may_have_side_effect() -> bool { true } } #[doc(hidden)] @@ -645,7 +648,7 @@ impl Iterator for Cycle where I: Clone + Iterator { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Cycle where I: Clone + Iterator {} /// An iterator for stepping iterators by a custom amount. 
@@ -656,9 +659,7 @@ impl FusedIterator for Cycle where I: Clone + Iterator {} /// [`step_by`]: trait.Iterator.html#method.step_by /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[unstable(feature = "iterator_step_by", - reason = "unstable replacement of Range::step_by", - issue = "27741")] +#[stable(feature = "iterator_step_by", since = "1.28.0")] #[derive(Clone, Debug)] pub struct StepBy { iter: I, @@ -666,20 +667,13 @@ pub struct StepBy { first_take: bool, } -#[unstable(feature = "iterator_step_by", - reason = "unstable replacement of Range::step_by", - issue = "27741")] +#[stable(feature = "iterator_step_by", since = "1.28.0")] impl Iterator for StepBy where I: Iterator { type Item = I::Item; #[inline] fn next(&mut self) -> Option { - if self.first_take { - self.first_take = false; - self.iter.next() - } else { - self.iter.nth(self.step) - } + ::spec_next(self) } #[inline] @@ -694,12 +688,125 @@ impl Iterator for StepBy where I: Iterator { (f(inner_hint.0), inner_hint.1.map(f)) } } + + #[inline] + fn nth(&mut self, mut n: usize) -> Option { + if self.first_take { + self.first_take = false; + let first = self.iter.next(); + if n == 0 { + return first; + } + n -= 1; + } + // n and self.step are indices, we need to add 1 to get the amount of elements + // When calling `.nth`, we need to subtract 1 again to convert back to an index + // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1` + let mut step = self.step + 1; + // n + 1 could overflow + // thus, if n is usize::MAX, instead of adding one, we call .nth(step) + if n == usize::MAX { + self.iter.nth(step - 1); + } else { + n += 1; + } + + // overflow handling + loop { + let mul = n.checked_mul(step); + if unsafe { intrinsics::likely(mul.is_some()) } { + return self.iter.nth(mul.unwrap() - 1); + } + let div_n = usize::MAX / n; + let div_step = usize::MAX / step; + let nth_n = div_n * n; + let nth_step = div_step * step; + let 
nth = if nth_n > nth_step { + step -= div_n; + nth_n + } else { + n -= div_step; + nth_step + }; + self.iter.nth(nth - 1); + } + } +} + +// hidden trait for specializing iterator methods +// could be generalized but is currently only used for StepBy +trait StepBySpecIterator { + type Item; + fn spec_next(&mut self) -> Option; +} + +impl StepBySpecIterator for StepBy +where + I: Iterator, +{ + type Item = I::Item; + + #[inline] + default fn spec_next(&mut self) -> Option { + if self.first_take { + self.first_take = false; + self.iter.next() + } else { + self.iter.nth(self.step) + } + } +} + +impl StepBySpecIterator for StepBy> +where + T: Step, +{ + #[inline] + fn spec_next(&mut self) -> Option { + self.first_take = false; + if !(self.iter.start < self.iter.end) { + return None; + } + // add 1 to self.step to get original step size back + // it was decremented for the general case on construction + if let Some(n) = self.iter.start.add_usize(self.step+1) { + let next = mem::replace(&mut self.iter.start, n); + Some(next) + } else { + let last = self.iter.start.clone(); + self.iter.start = self.iter.end.clone(); + Some(last) + } + } +} + +impl StepBySpecIterator for StepBy> +where + T: Step, +{ + #[inline] + fn spec_next(&mut self) -> Option { + self.first_take = false; + self.iter.compute_is_empty(); + if self.iter.is_empty.unwrap_or_default() { + return None; + } + // add 1 to self.step to get original step size back + // it was decremented for the general case on construction + if let Some(n) = self.iter.start.add_usize(self.step+1) { + self.iter.is_empty = Some(!(n <= self.iter.end)); + let next = mem::replace(&mut self.iter.start, n); + Some(next) + } else { + let last = self.iter.start.clone(); + self.iter.is_empty = Some(true); + Some(last) + } + } } // StepBy can only make the iterator shorter, so the len will still fit. 
-#[unstable(feature = "iterator_step_by", - reason = "unstable replacement of Range::step_by", - issue = "27741")] +#[stable(feature = "iterator_step_by", since = "1.28.0")] impl ExactSizeIterator for StepBy where I: ExactSizeIterator {} /// An iterator that strings two iterators together. @@ -942,7 +1049,7 @@ impl DoubleEndedIterator for Chain where } // Note: *both* must be fused to handle double-ended iterators. -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Chain where A: FusedIterator, B: FusedIterator, @@ -985,6 +1092,11 @@ impl Iterator for Zip where A: Iterator, B: Iterator fn size_hint(&self) -> (usize, Option) { ZipImpl::size_hint(self) } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + ZipImpl::nth(self, n) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1005,6 +1117,14 @@ trait ZipImpl { fn new(a: A, b: B) -> Self; fn next(&mut self) -> Option; fn size_hint(&self) -> (usize, Option); + fn nth(&mut self, n: usize) -> Option; + fn super_nth(&mut self, mut n: usize) -> Option { + while let Some(x) = self.next() { + if n == 0 { return Some(x) } + n -= 1; + } + None + } fn next_back(&mut self) -> Option where A: DoubleEndedIterator + ExactSizeIterator, B: DoubleEndedIterator + ExactSizeIterator; @@ -1034,6 +1154,11 @@ impl ZipImpl for Zip }) } + #[inline] + default fn nth(&mut self, n: usize) -> Option { + self.super_nth(n) + } + #[inline] default fn next_back(&mut self) -> Option<(A::Item, B::Item)> where A: DoubleEndedIterator + ExactSizeIterator, @@ -1114,6 +1239,24 @@ impl ZipImpl for Zip (len, Some(len)) } + #[inline] + fn nth(&mut self, n: usize) -> Option { + let delta = cmp::min(n, self.len - self.index); + let end = self.index + delta; + while self.index < end { + let i = self.index; + self.index += 1; + if A::may_have_side_effect() { + unsafe { self.a.get_unchecked(i); } + } + if B::may_have_side_effect() { + unsafe { self.b.get_unchecked(i); } + } + } + + 
self.super_nth(n - delta) + } + #[inline] fn next_back(&mut self) -> Option<(A::Item, B::Item)> where A: DoubleEndedIterator + ExactSizeIterator, @@ -1166,7 +1309,7 @@ unsafe impl TrustedRandomAccess for Zip } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Zip where A: FusedIterator, B: FusedIterator, {} @@ -1308,7 +1451,7 @@ impl ExactSizeIterator for Map } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Map where F: FnMut(I::Item) -> B {} @@ -1457,7 +1600,7 @@ impl DoubleEndedIterator for Filter } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Filter where P: FnMut(&I::Item) -> bool {} @@ -1567,7 +1710,7 @@ impl DoubleEndedIterator for FilterMap } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for FilterMap where F: FnMut(I::Item) -> Option {} @@ -1722,7 +1865,7 @@ unsafe impl TrustedRandomAccess for Enumerate } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Enumerate where I: FusedIterator {} #[unstable(feature = "trusted_len", issue = "37572")] @@ -1776,7 +1919,7 @@ impl Iterator for Peekable { #[inline] fn nth(&mut self, n: usize) -> Option { - // FIXME(#6393): merge these when borrow-checking gets better. + // FIXME(#43234): merge these when borrow-checking gets better. 
if n == 0 { match self.peeked.take() { Some(v) => v, @@ -1842,7 +1985,7 @@ impl Iterator for Peekable { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Peekable {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Peekable {} impl Peekable { @@ -1976,7 +2119,7 @@ impl Iterator for SkipWhile } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for SkipWhile where I: FusedIterator, P: FnMut(&I::Item) -> bool {} @@ -2055,7 +2198,7 @@ impl Iterator for TakeWhile } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for TakeWhile where I: FusedIterator, P: FnMut(&I::Item) -> bool {} @@ -2194,7 +2337,7 @@ impl DoubleEndedIterator for Skip where I: DoubleEndedIterator + ExactSize } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Skip where I: FusedIterator {} /// An iterator that only iterates over the first `n` iterations of `iter`. @@ -2275,9 +2418,12 @@ impl Iterator for Take where I: Iterator{ #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Take where I: ExactSizeIterator {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Take where I: FusedIterator {} +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Take {} + /// An iterator to maintain state while iterating another iterator. /// /// This `struct` is created by the [`scan`] method on [`Iterator`]. 
See its @@ -2347,12 +2493,15 @@ impl Iterator for Scan where /// [`Iterator`]: trait.Iterator.html #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] pub struct FlatMap { - iter: I, - f: F, - frontiter: Option, - backiter: Option, + inner: FlattenCompat, ::IntoIter> +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for FlatMap + where ::IntoIter: Clone +{ + fn clone(&self) -> Self { FlatMap { inner: self.inner.clone() } } } #[stable(feature = "core_impl_debug", since = "1.9.0")] @@ -2360,11 +2509,7 @@ impl fmt::Debug for FlatMap where U::IntoIter: fmt::Debug { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("FlatMap") - .field("iter", &self.iter) - .field("frontiter", &self.frontiter) - .field("backiter", &self.backiter) - .finish() + f.debug_struct("FlatMap").field("inner", &self.inner).finish() } } @@ -2374,17 +2519,173 @@ impl Iterator for FlatMap { type Item = U::Item; + #[inline] + fn next(&mut self) -> Option { self.inner.next() } + + #[inline] + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.fold(init, fold) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for FlatMap + where F: FnMut(I::Item) -> U, + U: IntoIterator, + U::IntoIter: DoubleEndedIterator +{ + #[inline] + fn next_back(&mut self) -> Option { self.inner.next_back() } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_rfold(init, fold) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) 
-> Acc, + { + self.inner.rfold(init, fold) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for FlatMap + where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} + +/// An iterator that flattens one level of nesting in an iterator of things +/// that can be turned into iterators. +/// +/// This `struct` is created by the [`flatten`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`flatten`]: trait.Iterator.html#method.flatten +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "iterator_flatten", since = "1.29.0")] +pub struct Flatten +where I::Item: IntoIterator { + inner: FlattenCompat::IntoIter>, +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl fmt::Debug for Flatten + where I: Iterator + fmt::Debug, U: Iterator + fmt::Debug, + I::Item: IntoIterator, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Flatten").field("inner", &self.inner).finish() + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl Clone for Flatten + where I: Iterator + Clone, U: Iterator + Clone, + I::Item: IntoIterator, +{ + fn clone(&self) -> Self { Flatten { inner: self.inner.clone() } } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl Iterator for Flatten + where I: Iterator, U: Iterator, + I::Item: IntoIterator +{ + type Item = U::Item; + + #[inline] + fn next(&mut self) -> Option { self.inner.next() } + + #[inline] + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.fold(init, fold) + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] 
+impl DoubleEndedIterator for Flatten + where I: DoubleEndedIterator, U: DoubleEndedIterator, + I::Item: IntoIterator +{ + #[inline] + fn next_back(&mut self) -> Option { self.inner.next_back() } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_rfold(init, fold) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.rfold(init, fold) + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl FusedIterator for Flatten + where I: FusedIterator, U: Iterator, + I::Item: IntoIterator {} + +/// Adapts an iterator by flattening it, for use in `flatten()` and `flat_map()`. +fn flatten_compat(iter: I) -> FlattenCompat { + FlattenCompat { iter, frontiter: None, backiter: None } +} + +/// Real logic of both `Flatten` and `FlatMap` which simply delegate to +/// this type. +#[derive(Clone, Debug)] +struct FlattenCompat { + iter: I, + frontiter: Option, + backiter: Option, +} + +impl Iterator for FlattenCompat + where I: Iterator, U: Iterator, + I::Item: IntoIterator +{ + type Item = U::Item; + #[inline] fn next(&mut self) -> Option { loop { if let Some(ref mut inner) = self.frontiter { - if let Some(x) = inner.by_ref().next() { - return Some(x) - } + if let elt@Some(_) = inner.next() { return elt } } - match self.iter.next().map(&mut self.f) { + match self.iter.next() { None => return self.backiter.as_mut().and_then(|it| it.next()), - next => self.frontiter = next.map(IntoIterator::into_iter), + Some(inner) => self.frontiter = Some(inner.into_iter()), } } } @@ -2410,10 +2711,9 @@ impl Iterator for FlatMap self.frontiter = None; { - let f = &mut self.f; let frontiter = &mut self.frontiter; init = self.iter.try_fold(init, |acc, x| { - let mut mid = f(x).into_iter(); + let mut mid = x.into_iter(); let r = mid.try_fold(acc, &mut fold); *frontiter = Some(mid); r @@ -2434,27 +2734,23 @@ impl 
Iterator for FlatMap where Fold: FnMut(Acc, Self::Item) -> Acc, { self.frontiter.into_iter() - .chain(self.iter.map(self.f).map(U::into_iter)) + .chain(self.iter.map(IntoIterator::into_iter)) .chain(self.backiter) .fold(init, |acc, iter| iter.fold(acc, &mut fold)) } } -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for FlatMap where - F: FnMut(I::Item) -> U, - U: IntoIterator, - U::IntoIter: DoubleEndedIterator +impl DoubleEndedIterator for FlattenCompat + where I: DoubleEndedIterator, U: DoubleEndedIterator, + I::Item: IntoIterator { #[inline] fn next_back(&mut self) -> Option { loop { if let Some(ref mut inner) = self.backiter { - if let Some(y) = inner.next_back() { - return Some(y) - } + if let elt@Some(_) = inner.next_back() { return elt } } - match self.iter.next_back().map(&mut self.f) { + match self.iter.next_back() { None => return self.frontiter.as_mut().and_then(|it| it.next_back()), next => self.backiter = next.map(IntoIterator::into_iter), } @@ -2471,10 +2767,9 @@ impl DoubleEndedIterator for FlatMap wher self.backiter = None; { - let f = &mut self.f; let backiter = &mut self.backiter; init = self.iter.try_rfold(init, |acc, x| { - let mut mid = f(x).into_iter(); + let mut mid = x.into_iter(); let r = mid.try_rfold(acc, &mut fold); *backiter = Some(mid); r @@ -2495,16 +2790,12 @@ impl DoubleEndedIterator for FlatMap wher where Fold: FnMut(Acc, Self::Item) -> Acc, { self.frontiter.into_iter() - .chain(self.iter.map(self.f).map(U::into_iter)) + .chain(self.iter.map(IntoIterator::into_iter)) .chain(self.backiter) .rfold(init, |acc, iter| iter.rfold(acc, &mut fold)) } } -#[unstable(feature = "fused", issue = "35602")] -impl FusedIterator for FlatMap - where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} - /// An iterator that yields `None` forever after the underlying iterator /// yields `None` once. 
/// @@ -2521,7 +2812,7 @@ pub struct Fuse { done: bool } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Fuse where I: Iterator {} #[stable(feature = "rust1", since = "1.0.0")] @@ -2652,7 +2943,7 @@ unsafe impl TrustedRandomAccess for Fuse } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl Iterator for Fuse where I: FusedIterator { #[inline] fn next(&mut self) -> Option<::Item> { @@ -2694,7 +2985,7 @@ impl Iterator for Fuse where I: FusedIterator { } } -#[unstable(feature = "fused", reason = "recently added", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator + FusedIterator { @@ -2838,6 +3129,6 @@ impl ExactSizeIterator for Inspect } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Inspect where F: FnMut(&I::Item) {} diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index 66a76a24df45..651c7a35d413 100644 --- a/src/libcore/iter/range.rs +++ b/src/libcore/iter/range.rs @@ -186,9 +186,7 @@ macro_rules! range_exact_iter_impl { macro_rules! range_incl_exact_iter_impl { ($($t:ty)*) => ($( - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[stable(feature = "inclusive_range", since = "1.26.0")] impl ExactSizeIterator for ops::RangeInclusive<$t> { } )*) } @@ -202,9 +200,7 @@ macro_rules! range_trusted_len_impl { macro_rules! 
range_incl_trusted_len_impl { ($($t:ty)*) => ($( - #[unstable(feature = "inclusive_range", - reason = "recently added, follows RFC", - issue = "28237")] + #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl TrustedLen for ops::RangeInclusive<$t> { } )*) } @@ -295,7 +291,7 @@ impl DoubleEndedIterator for ops::Range { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ops::Range {} #[stable(feature = "rust1", since = "1.0.0")] @@ -322,34 +318,35 @@ impl Iterator for ops::RangeFrom { } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ops::RangeFrom {} -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for ops::RangeFrom {} + +#[stable(feature = "inclusive_range", since = "1.26.0")] impl Iterator for ops::RangeInclusive { type Item = A; #[inline] fn next(&mut self) -> Option { - use cmp::Ordering::*; - - match self.start.partial_cmp(&self.end) { - Some(Less) => { - let n = self.start.add_one(); - Some(mem::replace(&mut self.start, n)) - }, - Some(Equal) => { - let last = self.start.replace_one(); - self.end.replace_zero(); - Some(last) - }, - _ => None, + self.compute_is_empty(); + if self.is_empty.unwrap_or_default() { + return None; } + let is_iterating = self.start < self.end; + self.is_empty = Some(!is_iterating); + Some(if is_iterating { + let n = self.start.add_one(); + mem::replace(&mut self.start, n) + } else { + self.start.clone() + }) } #[inline] fn size_hint(&self) -> (usize, Option) { - if !(self.start <= self.end) { + if self.is_empty() { return (0, Some(0)); } @@ -361,25 +358,29 @@ impl Iterator for ops::RangeInclusive { #[inline] fn nth(&mut self, n: usize) -> Option { + self.compute_is_empty(); + if self.is_empty.unwrap_or_default() { + return None; + } + if let 
Some(plus_n) = self.start.add_usize(n) { use cmp::Ordering::*; match plus_n.partial_cmp(&self.end) { Some(Less) => { + self.is_empty = Some(false); self.start = plus_n.add_one(); return Some(plus_n) } Some(Equal) => { - self.start.replace_one(); - self.end.replace_zero(); + self.is_empty = Some(true); return Some(plus_n) } _ => {} } } - self.start.replace_one(); - self.end.replace_zero(); + self.is_empty = Some(true); None } @@ -399,26 +400,24 @@ impl Iterator for ops::RangeInclusive { } } -#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +#[stable(feature = "inclusive_range", since = "1.26.0")] impl DoubleEndedIterator for ops::RangeInclusive { #[inline] fn next_back(&mut self) -> Option { - use cmp::Ordering::*; - - match self.start.partial_cmp(&self.end) { - Some(Less) => { - let n = self.end.sub_one(); - Some(mem::replace(&mut self.end, n)) - }, - Some(Equal) => { - let last = self.end.replace_zero(); - self.start.replace_one(); - Some(last) - }, - _ => None, + self.compute_is_empty(); + if self.is_empty.unwrap_or_default() { + return None; } + let is_iterating = self.start < self.end; + self.is_empty = Some(!is_iterating); + Some(if is_iterating { + let n = self.end.sub_one(); + mem::replace(&mut self.end, n) + } else { + self.end.clone() + }) } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ops::RangeInclusive {} diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs index b405f35d5e4d..d500cc99fa13 100644 --- a/src/libcore/iter/sources.rs +++ b/src/libcore/iter/sources.rs @@ -41,9 +41,12 @@ impl DoubleEndedIterator for Repeat { fn next_back(&mut self) -> Option { Some(self.element.clone()) } } -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Repeat {} +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Repeat {} + /// 
Creates a new iterator that endlessly repeats a single element. /// /// The `repeat()` function repeats a single value over and over and over and @@ -54,6 +57,12 @@ impl FusedIterator for Repeat {} /// /// [`take`]: trait.Iterator.html#method.take /// +/// If the element type of the iterator you need does not implement `Clone`, +/// or if you do not want to keep the repeated element in memory, you can +/// instead use the [`repeat_with`] function. +/// +/// [`repeat_with`]: fn.repeat_with.html +/// /// # Examples /// /// Basic usage: @@ -96,6 +105,103 @@ pub fn repeat(elt: T) -> Repeat { Repeat{element: elt} } +/// An iterator that repeats elements of type `A` endlessly by +/// applying the provided closure `F: FnMut() -> A`. +/// +/// This `struct` is created by the [`repeat_with`] function. +/// See its documentation for more. +/// +/// [`repeat_with`]: fn.repeat_with.html +#[derive(Copy, Clone, Debug)] +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +pub struct RepeatWith { + repeater: F +} + +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +impl A> Iterator for RepeatWith { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { Some((self.repeater)()) } + + #[inline] + fn size_hint(&self) -> (usize, Option) { (usize::MAX, None) } +} + +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +impl A> FusedIterator for RepeatWith {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl A> TrustedLen for RepeatWith {} + +/// Creates a new iterator that repeats elements of type `A` endlessly by +/// applying the provided closure, the repeater, `F: FnMut() -> A`. +/// +/// The `repeat_with()` function calls the repeater over and over and over and +/// over and over and 🔁. +/// +/// Infinite iterators like `repeat_with()` are often used with adapters like +/// [`take`], in order to make them finite. 
+/// +/// [`take`]: trait.Iterator.html#method.take +/// +/// If the element type of the iterator you need implements `Clone`, and +/// it is OK to keep the source element in memory, you should instead use +/// the [`repeat`] function. +/// +/// [`repeat`]: fn.repeat.html +/// +/// An iterator produced by `repeat_with()` is not a `DoubleEndedIterator`. +/// If you need `repeat_with()` to return a `DoubleEndedIterator`, +/// please open a GitHub issue explaining your use case. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // let's assume we have some value of a type that is not `Clone` +/// // or which don't want to have in memory just yet because it is expensive: +/// #[derive(PartialEq, Debug)] +/// struct Expensive; +/// +/// // a particular value forever: +/// let mut things = iter::repeat_with(|| Expensive); +/// +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// assert_eq!(Some(Expensive), things.next()); +/// ``` +/// +/// Using mutation and going finite: +/// +/// ```rust +/// use std::iter; +/// +/// // From the zeroth to the third power of two: +/// let mut curr = 1; +/// let mut pow2 = iter::repeat_with(|| { let tmp = curr; curr *= 2; tmp }) +/// .take(4); +/// +/// assert_eq!(Some(1), pow2.next()); +/// assert_eq!(Some(2), pow2.next()); +/// assert_eq!(Some(4), pow2.next()); +/// assert_eq!(Some(8), pow2.next()); +/// +/// // ... and now we're done +/// assert_eq!(None, pow2.next()); +/// ``` +#[inline] +#[stable(feature = "iterator_repeat_with", since = "1.28.0")] +pub fn repeat_with A>(repeater: F) -> RepeatWith { + RepeatWith { repeater } +} + /// An iterator that yields nothing. /// /// This `struct` is created by the [`empty`] function. See its documentation for more. 
@@ -141,7 +247,7 @@ impl ExactSizeIterator for Empty { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl TrustedLen for Empty {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Empty {} // not #[derive] because that adds a Clone bound on T, @@ -222,7 +328,7 @@ impl ExactSizeIterator for Once { #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl TrustedLen for Once {} -#[unstable(feature = "fused", issue = "35602")] +#[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Once {} /// Creates an iterator that yields an element exactly once. diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs index 11e668d228c4..4b2c1aa551e9 100644 --- a/src/libcore/iter/traits.rs +++ b/src/libcore/iter/traits.rs @@ -10,7 +10,7 @@ use ops::{Mul, Add, Try}; use num::Wrapping; -use super::{AlwaysOk, LoopState}; +use super::LoopState; /// Conversion from an `Iterator`. /// @@ -104,8 +104,11 @@ use super::{AlwaysOk, LoopState}; /// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented="a collection of type `{Self}` cannot be \ - built from an iterator over elements of type `{A}`"] +#[rustc_on_unimplemented( + message="a collection of type `{Self}` cannot be built from an iterator \ + over elements of type `{A}`", + label="a collection of type `{Self}` cannot be built from `std::iter::Iterator`", +)] pub trait FromIterator: Sized { /// Creates a value from an iterator. /// @@ -352,6 +355,13 @@ pub trait Extend { fn extend>(&mut self, iter: T); } +#[stable(feature = "extend_for_unit", since = "1.28.0")] +impl Extend<()> for () { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each(drop) + } +} + /// An iterator able to yield elements from both ends. 
/// /// Something that implements `DoubleEndedIterator` has one extra capability @@ -427,7 +437,6 @@ pub trait DoubleEndedIterator: Iterator { /// Basic usage: /// /// ``` - /// #![feature(iterator_try_fold)] /// let a = ["1", "2", "3"]; /// let sum = a.iter() /// .map(|&s| s.parse::()) @@ -438,7 +447,6 @@ pub trait DoubleEndedIterator: Iterator { /// Short-circuiting: /// /// ``` - /// #![feature(iterator_try_fold)] /// let a = ["1", "rust", "3"]; /// let mut it = a.iter(); /// let sum = it @@ -452,7 +460,7 @@ pub trait DoubleEndedIterator: Iterator { /// assert_eq!(it.next_back(), Some(&"1")); /// ``` #[inline] - #[unstable(feature = "iterator_try_fold", issue = "45594")] + #[stable(feature = "iterator_try_fold", since = "1.27.0")] fn try_rfold(&mut self, init: B, mut f: F) -> R where Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try { @@ -491,7 +499,6 @@ pub trait DoubleEndedIterator: Iterator { /// Basic usage: /// /// ``` - /// #![feature(iter_rfold)] /// let a = [1, 2, 3]; /// /// // the sum of all of the elements of a @@ -505,7 +512,6 @@ pub trait DoubleEndedIterator: Iterator { /// and continuing with each element from the back until the front: /// /// ``` - /// #![feature(iter_rfold)] /// let numbers = [1, 2, 3, 4, 5]; /// /// let zero = "0".to_string(); @@ -517,14 +523,14 @@ pub trait DoubleEndedIterator: Iterator { /// assert_eq!(result, "(1 + (2 + (3 + (4 + (5 + 0)))))"); /// ``` #[inline] - #[unstable(feature = "iter_rfold", issue = "44705")] + #[stable(feature = "iter_rfold", since = "1.27.0")] fn rfold(mut self, accum: B, mut f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { - self.try_rfold(accum, move |acc, x| AlwaysOk(f(acc, x))).0 + self.try_rfold(accum, move |acc, x| Ok::(f(acc, x))).unwrap() } - /// Searches for an element of an iterator from the right that satisfies a predicate. + /// Searches for an element of an iterator from the back that satisfies a predicate. /// /// `rfind()` takes a closure that returns `true` or `false`. 
It applies /// this closure to each element of the iterator, starting at the end, and if any @@ -547,8 +553,6 @@ pub trait DoubleEndedIterator: Iterator { /// Basic usage: /// /// ``` - /// #![feature(iter_rfind)] - /// /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().rfind(|&&x| x == 2), Some(&2)); @@ -559,8 +563,6 @@ pub trait DoubleEndedIterator: Iterator { /// Stopping at the first `true`: /// /// ``` - /// #![feature(iter_rfind)] - /// /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); @@ -571,7 +573,7 @@ pub trait DoubleEndedIterator: Iterator { /// assert_eq!(iter.next_back(), Some(&1)); /// ``` #[inline] - #[unstable(feature = "iter_rfind", issue = "39480")] + #[stable(feature = "iter_rfind", since = "1.27.0")] fn rfind